diff --git a/.deepsource.toml b/.deepsource.toml new file mode 100644 index 000000000000..c68a25b03206 --- /dev/null +++ b/.deepsource.toml @@ -0,0 +1,18 @@ +version = 1 + +test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"] + +exclude_patterns = ["classic/**"] + +[[analyzers]] +name = "javascript" + +[analyzers.meta] +plugins = ["react"] +environment = ["nodejs"] + +[[analyzers]] +name = "python" + +[analyzers.meta] +runtime_version = "3.x.x" diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000000..b378d49590ab --- /dev/null +++ b/.dockerignore @@ -0,0 +1,61 @@ +# Ignore everything by default, selectively add things to context +* + +# Platform - Libs +!autogpt_platform/autogpt_libs/autogpt_libs/ +!autogpt_platform/autogpt_libs/pyproject.toml +!autogpt_platform/autogpt_libs/poetry.lock +!autogpt_platform/autogpt_libs/README.md + +# Platform - Backend +!autogpt_platform/backend/backend/ +!autogpt_platform/backend/migrations/ +!autogpt_platform/backend/schema.prisma +!autogpt_platform/backend/pyproject.toml +!autogpt_platform/backend/poetry.lock +!autogpt_platform/backend/README.md + +# Platform - Market +!autogpt_platform/market/market/ +!autogpt_platform/market/scripts.py +!autogpt_platform/market/schema.prisma +!autogpt_platform/market/pyproject.toml +!autogpt_platform/market/poetry.lock +!autogpt_platform/market/README.md + +# Platform - Frontend +!autogpt_platform/frontend/src/ +!autogpt_platform/frontend/public/ +!autogpt_platform/frontend/package.json +!autogpt_platform/frontend/yarn.lock +!autogpt_platform/frontend/tsconfig.json +!autogpt_platform/frontend/README.md +## config +!autogpt_platform/frontend/*.config.* +!autogpt_platform/frontend/.env.* + +# Classic - AutoGPT +!classic/original_autogpt/autogpt/ +!classic/original_autogpt/pyproject.toml +!classic/original_autogpt/poetry.lock +!classic/original_autogpt/README.md +!classic/original_autogpt/tests/ + +# Classic - Benchmark +!classic/benchmark/agbenchmark/ +!classic/benchmark/pyproject.toml +!classic/benchmark/poetry.lock +!classic/benchmark/README.md + +# Classic - Forge +!classic/forge/ +!classic/forge/pyproject.toml +!classic/forge/poetry.lock +!classic/forge/README.md + +# Classic - Frontend +!classic/frontend/build/web/ + +# Explicitly re-ignore some folders +.* +**/__pycache__ diff --git a/.gitattributes b/.gitattributes index d3adc9db4bad..fe0de10e0b4e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,10 @@ -frontend/build/* linguist-generated +classic/frontend/build/** linguist-generated **/poetry.lock linguist-generated + +docs/_javascript/** linguist-vendored + +# Exclude VCR cassettes from stats +classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated + +* text=auto \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 110493c3f1a7..0251399af5f1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1,7 @@ -.github/workflows/ @Significant-Gravitas/maintainers -autogpts/autogpt/ @Pwuts -benchmark/ @Significant-Gravitas/benchmarkers -forge/ @Swiftyos -frontend/ @hunteraraujo +* @Significant-Gravitas/maintainers +.github/workflows/ @Significant-Gravitas/devops +classic/forge/ @Significant-Gravitas/forge-maintainers +classic/benchmark/ @Significant-Gravitas/benchmark-maintainers +classic/frontend/ @Significant-Gravitas/frontend-maintainers +autogpt_platform/infra @Significant-Gravitas/devops +.github/CODEOWNERS @Significant-Gravitas/admins diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml 
b/.github/ISSUE_TEMPLATE/1.bug.yml index 3b9b6d265efb..a74ef2b38f9d 100644 --- a/.github/ISSUE_TEMPLATE/1.bug.yml +++ b/.github/ISSUE_TEMPLATE/1.bug.yml @@ -16,7 +16,7 @@ body: [discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184 [existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue - [wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing + [wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing - type: checkboxes attributes: @@ -88,14 +88,16 @@ body: - type: dropdown attributes: - label: Do you use OpenAI GPT-3 or GPT-4? + label: What LLM Provider do you use? description: > - If you are using AutoGPT with `--gpt3only`, your problems may be caused by + If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5. options: - - GPT-3.5 - - GPT-4 - - GPT-4(32k) + - Azure + - Groq + - Anthropic + - Llamafile + - Other (detail in issue) validations: required: true @@ -126,6 +128,13 @@ body: label: Specify the area description: Please specify the area you think is best related to the issue. + - type: input + attributes: + label: What commit or version are you using? + description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash. + validations: + required: true + - type: textarea attributes: label: Describe your issue. diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml index 9d505cf7a069..d673c1f5ccdc 100644 --- a/.github/ISSUE_TEMPLATE/2.feature.yml +++ b/.github/ISSUE_TEMPLATE/2.feature.yml @@ -5,7 +5,7 @@ body: - type: markdown attributes: value: | - First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) + First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing) Please provide a searchable summary of the issue in the title above ⬆️. - type: checkboxes attributes: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 19eb09eee1bd..9b348b557da0 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,31 +1,38 @@ -### Background - ### Changes 🏗️ -### PR Quality Scorecard ✨ - - - -- [x] Have you used the PR description template?   `+2 pts` -- [ ] Is your pull request atomic, focusing on a single change?   `+5 pts` -- [ ] Have you linked the GitHub issue(s) that this PR addresses?   `+5 pts` -- [ ] Have you documented your changes clearly and comprehensively?   `+5 pts` -- [ ] Have you changed or added a feature?   `-4 pts` - - [ ] Have you added/updated corresponding documentation?   `+4 pts` - - [ ] Have you added/updated corresponding integration tests?   `+5 pts` -- [ ] Have you changed the behavior of AutoGPT?   `-5 pts` - - [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance?   `+10 pts` +### Checklist 📋 + +#### For code changes: +- [ ] I have clearly listed my changes in the PR description +- [ ] I have made a test plan +- [ ] I have tested my changes according to the test plan: + + - [ ] ... + +
+  <details>
+    <summary>Example test plan</summary>
+
+    - [ ] Create from scratch and execute an agent with at least 3 blocks
+    - [ ] Import an agent from file upload, and confirm it executes correctly
+    - [ ] Upload agent to marketplace
+    - [ ] Import an agent from marketplace and confirm it executes correctly
+    - [ ] Edit an agent from monitor, and confirm it executes correctly
+  </details>
+
+#### For configuration changes:
+- [ ] `.env.example` is updated or already compatible with my changes
+- [ ] `docker-compose.yml` is updated or already compatible with my changes
+- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
+
+  <details>
+    <summary>Examples of configuration changes</summary>
+
+    - Changing ports
+    - Adding new services that need to communicate with each other
+    - Secrets or environment variable changes
+    - New or infrastructure changes such as databases
+  </details>
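The configuration checklist above names `.env.example` and `docker-compose.yml`; a minimal local sanity check along these lines can catch most mismatches before ticking those boxes. This is a sketch only, assuming Docker Compose v2 and a local `.env` sitting next to `docker-compose.yml` — the `.env` path is an assumption for illustration, not something specified in this diff:

```bash
#!/usr/bin/env bash
# Validate the compose file; `config --quiet` exits non-zero on syntax or interpolation errors.
docker compose -f docker-compose.yml config --quiet && echo "docker-compose.yml OK"

# Compare variable names declared in .env.example against the local .env (names only, not values).
# Lines unique to either file indicate a variable added or removed on one side only.
comm -3 \
  <(grep -oE '^[A-Za-z_][A-Za-z0-9_]*' .env.example | sort -u) \
  <(grep -oE '^[A-Za-z_][A-Za-z0-9_]*' .env | sort -u)
```

A non-zero exit from the first command, or any variable names printed by `comm`, is a hint that the configuration items in the checklist still need attention.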
diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..853791e2c173 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,175 @@ +version: 2 +updates: + # autogpt_libs (Poetry project) + - package-ecosystem: "pip" + directory: "autogpt_platform/autogpt_libs" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + commit-message: + prefix: "chore(libs/deps)" + prefix-development: "chore(libs/deps-dev)" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + # backend (Poetry project) + - package-ecosystem: "pip" + directory: "autogpt_platform/backend" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + commit-message: + prefix: "chore(backend/deps)" + prefix-development: "chore(backend/deps-dev)" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + # frontend (Next.js project) + - package-ecosystem: "npm" + directory: "autogpt_platform/frontend" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + target-branch: "dev" + commit-message: + prefix: "chore(frontend/deps)" + prefix-development: "chore(frontend/deps-dev)" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + # infra (Terraform) + - package-ecosystem: "terraform" + directory: "autogpt_platform/infra" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + commit-message: + prefix: "chore(infra/deps)" + prefix-development: "chore(infra/deps-dev)" + + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Docker + - package-ecosystem: "docker" + directory: "autogpt_platform/" + schedule: + interval: "weekly" + open-pull-requests-limit: 5 + target-branch: "dev" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Submodules + - package-ecosystem: "gitsubmodule" + directory: "autogpt_platform/supabase" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + target-branch: "dev" + commit-message: + prefix: "chore(platform/deps)" + prefix-development: "chore(platform/deps-dev)" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" + + + # Docs + - package-ecosystem: 'pip' + directory: "docs/" + schedule: + interval: "weekly" + open-pull-requests-limit: 1 + 
target-branch: "dev" + commit-message: + prefix: "chore(docs/deps)" + groups: + production-dependencies: + dependency-type: "production" + update-types: + - "minor" + - "patch" + development-dependencies: + dependency-type: "development" + update-types: + - "minor" + - "patch" diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000000..8d2346983805 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,32 @@ +Classic AutoGPT Agent: +- changed-files: + - any-glob-to-any-file: classic/original_autogpt/** + +Classic Benchmark: +- changed-files: + - any-glob-to-any-file: classic/benchmark/** + +Classic Frontend: +- changed-files: + - any-glob-to-any-file: classic/frontend/** + +Forge: +- changed-files: + - any-glob-to-any-file: classic/forge/** + +documentation: +- changed-files: + - any-glob-to-any-file: docs/** + +platform/frontend: +- changed-files: + - any-glob-to-any-file: autogpt_platform/frontend/** + +platform/backend: +- changed-files: + - any-glob-to-any-file: autogpt_platform/backend/** + - all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**' + +platform/blocks: +- changed-files: + - any-glob-to-any-file: autogpt_platform/backend/backend/blocks/** diff --git a/.github/workflows/autogpt-ci.yml b/.github/workflows/autogpt-ci.yml deleted file mode 100644 index 375a433a8851..000000000000 --- a/.github/workflows/autogpt-ci.yml +++ /dev/null @@ -1,258 +0,0 @@ -name: AutoGPT Python CI - -on: - push: - branches: [ master, development, ci-test* ] - paths: - - 'autogpts/autogpt/**' - - '!autogpts/autogpt/tests/vcr_cassettes' - pull_request: - branches: [ master, development, release-* ] - paths: - - 'autogpts/autogpt/**' - - '!autogpts/autogpt/tests/vcr_cassettes' - pull_request_target: - branches: [ master, development, release-*, ci-test* ] - paths: - - 'autogpts/autogpt/**' - - '!autogpts/autogpt/tests/vcr_cassettes' - -concurrency: - group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} - cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} - -defaults: - run: - working-directory: autogpts/autogpt - -jobs: - lint: - # eliminate duplicate runs - if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target')) - - runs-on: ubuntu-latest - env: - min-python-version: "3.10" - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ env.min-python-version }} - - - id: get_date - name: Get date - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Set up Python dependency cache - uses: actions/cache@v3 - with: - path: ~/.cache/pypoetry - key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }} - - - name: Install Python dependencies - run: | - curl -sSL https://install.python-poetry.org | python3 - - poetry install - - - name: Lint with flake8 - run: poetry run flake8 - - - name: Check black formatting - run: poetry run black . --check - if: success() || failure() - - - name: Check isort formatting - run: poetry run isort . 
--check - if: success() || failure() - - - name: Check mypy formatting - run: poetry run mypy - if: success() || failure() - - - name: Check for unused imports and pass statements - run: | - cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests" - poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1) - - test: - # eliminate duplicate runs - if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target')) - - permissions: - # Gives the action the necessary permissions for publishing new - # comments in pull requests. - pull-requests: write - # Gives the action the necessary permissions for pushing data to the - # python-coverage-comment-action branch, and for editing existing - # comments (to avoid publishing multiple comments in the same PR) - contents: write - runs-on: ubuntu-latest - timeout-minutes: 30 - strategy: - matrix: - python-version: ["3.10"] - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - submodules: true - - - name: Configure git user Auto-GPT-Bot - run: | - git config --global user.name "Auto-GPT-Bot" - git config --global user.email "github-bot@agpt.co" - - - name: Checkout cassettes - if: ${{ startsWith(github.event_name, 'pull_request') }} - run: | - cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}" - cassette_base_branch="${{ github.event.pull_request.base.ref }}" - cd tests/vcr_cassettes - - if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then - cassette_base_branch="master" - fi - - if git ls-remote --exit-code --heads origin $cassette_branch ; then - git fetch origin $cassette_branch - git fetch origin $cassette_base_branch - - git checkout $cassette_branch - - # Pick non-conflicting cassette updates from the base branch - git merge --no-commit --strategy-option=ours origin/$cassette_base_branch - echo "Using cassettes from mirror branch '$cassette_branch'," \ - "synced to upstream branch '$cassette_base_branch'." - else - git checkout -b $cassette_branch - echo "Branch '$cassette_branch' does not exist in cassette submodule." \ - "Using cassettes from '$cassette_base_branch'." 
- fi - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - id: get_date - name: Get date - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Set up Python dependency cache - uses: actions/cache@v3 - with: - path: ~/.cache/pypoetry - key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }} - - - name: Install Python dependencies - run: | - curl -sSL https://install.python-poetry.org | python3 - - poetry install - - - name: Run pytest with coverage - run: | - poetry run pytest -vv \ - --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \ - --numprocesses=logical --durations=10 \ - tests/unit tests/integration - env: - CI: true - PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }} - AGENT_MODE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_MODE || '' }} - AGENT_TYPE: ${{ github.event_name == 'pull_request_target' && secrets.AGENT_TYPE || '' }} - OPENAI_API_KEY: ${{ github.event_name != 'pull_request_target' && secrets.OPENAI_API_KEY || '' }} - PLAIN_OUTPUT: True - - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 - - - id: setup_git_auth - name: Set up git token authentication - # Cassettes may be pushed even when tests fail - if: success() || failure() - run: | - config_key="http.${{ github.server_url }}/.extraheader" - base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0) - - git config "$config_key" \ - "Authorization: Basic $base64_pat" - - cd tests/vcr_cassettes - git config "$config_key" \ - "Authorization: Basic $base64_pat" - - echo "config_key=$config_key" >> $GITHUB_OUTPUT - - - id: push_cassettes - name: Push updated cassettes - # For pull requests, push updated cassettes even when tests fail - if: github.event_name == 'push' || success() || failure() - run: | - if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then - is_pull_request=true - cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}" - else - cassette_branch="${{ github.ref_name }}" - fi - - cd tests/vcr_cassettes - # Commit & push changes to cassettes if any - if ! git diff --quiet; then - git add . - git commit -m "Auto-update cassettes" - git push origin HEAD:$cassette_branch - if [ ! $is_pull_request ]; then - cd ../.. - git add tests/vcr_cassettes - git commit -m "Update cassette submodule" - git push origin HEAD:$cassette_branch - fi - echo "updated=true" >> $GITHUB_OUTPUT - else - echo "updated=false" >> $GITHUB_OUTPUT - echo "No cassette changes to commit" - fi - - - name: Post Set up git token auth - if: steps.setup_git_auth.outcome == 'success' - run: | - git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' - git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' - - - name: Apply "behaviour change" label and comment on PR - if: ${{ startsWith(github.event_name, 'pull_request') }} - run: | - PR_NUMBER=${{ github.event.pull_request.number }} - TOKEN=${{ secrets.PAT_REVIEW }} - REPO=${{ github.repository }} - - if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then - echo "Adding label and comment..." 
- curl -X POST \ - -H "Authorization: Bearer $TOKEN" \ - -H "Accept: application/vnd.github.v3+json" \ - https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \ - -d '{"labels":["behaviour change"]}' - - echo $TOKEN | gh auth login --with-token - gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged." - fi - - - name: Upload logs to artifact - if: always() - uses: actions/upload-artifact@v3 - with: - name: test-logs - path: autogpts/autogpt/logs/ diff --git a/.github/workflows/autogpt-docker-cache-clean.yml b/.github/workflows/autogpt-docker-cache-clean.yml deleted file mode 100644 index 780caf44f9c0..000000000000 --- a/.github/workflows/autogpt-docker-cache-clean.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Purge Auto-GPT Docker CI cache - -on: - schedule: - - cron: 20 4 * * 1,4 - -env: - BASE_BRANCH: development - IMAGE_NAME: auto-gpt - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - build-type: [release, dev] - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - id: build - name: Build image - uses: docker/build-push-action@v3 - with: - context: autogpts/autogpt - build-args: BUILD_TYPE=${{ matrix.build-type }} - load: true # save to docker images - # use GHA cache as read-only - cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max - - - name: Generate build report - env: - event_name: ${{ github.event_name }} - event_ref: ${{ github.event.schedule }} - - build_type: ${{ matrix.build-type }} - - prod_branch: master - dev_branch: development - repository: ${{ github.repository }} - base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} - - current_ref: ${{ github.ref_name }} - commit_hash: ${{ github.sha }} - source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }} - push_forced_label: - - new_commits_json: ${{ null }} - compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} - - github_context_json: ${{ toJSON(github) }} - job_env_json: ${{ toJSON(env) }} - vars_json: ${{ toJSON(vars) }} - - run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY - continue-on-error: true diff --git a/.github/workflows/autogpt-docker-ci.yml b/.github/workflows/autogpt-docker-ci.yml deleted file mode 100644 index a6f336a75ebb..000000000000 --- a/.github/workflows/autogpt-docker-ci.yml +++ /dev/null @@ -1,135 +0,0 @@ -name: AutoGPT Docker CI - -on: - push: - branches: [ master, development ] - paths: - - 'autogpts/autogpt/**' - - '!autogpts/autogpt/tests/vcr_cassettes' - pull_request: - branches: [ master, development, release-* ] - paths: - - 'autogpts/autogpt/**' - - '!autogpts/autogpt/tests/vcr_cassettes' - -concurrency: - group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -defaults: - run: - working-directory: autogpts/autogpt - -env: - IMAGE_NAME: auto-gpt - -jobs: - build: - runs-on: ubuntu-latest - strategy: - matrix: - build-type: [release, dev] - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - if: runner.debug - run: | - ls -al - du -hs * - - - id: 
build - name: Build image - uses: docker/build-push-action@v3 - with: - context: autogpts/autogpt - build-args: BUILD_TYPE=${{ matrix.build-type }} - tags: ${{ env.IMAGE_NAME }} - load: true # save to docker images - # cache layers in GitHub Actions cache to speed up builds - cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }} - cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max - - - name: Generate build report - env: - event_name: ${{ github.event_name }} - event_ref: ${{ github.event.ref }} - event_ref_type: ${{ github.event.ref}} - - build_type: ${{ matrix.build-type }} - - prod_branch: master - dev_branch: development - repository: ${{ github.repository }} - base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} - - current_ref: ${{ github.ref_name }} - commit_hash: ${{ github.event.after }} - source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} - push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }} - - new_commits_json: ${{ toJSON(github.event.commits) }} - compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} - - github_context_json: ${{ toJSON(github) }} - job_env_json: ${{ toJSON(env) }} - vars_json: ${{ toJSON(vars) }} - - run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY - working-directory: ./ - continue-on-error: true - - test: - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: true - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - id: build - name: Build image - uses: docker/build-push-action@v3 - with: - context: autogpts/autogpt - build-args: BUILD_TYPE=dev # include pytest - tags: ${{ env.IMAGE_NAME }} - load: true # save to docker images - # cache layers in GitHub Actions cache to speed up builds - cache-from: type=gha,scope=autogpt-docker-dev - cache-to: type=gha,scope=autogpt-docker-dev,mode=max - - - id: test - name: Run tests - env: - CI: true - PLAIN_OUTPUT: True - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - run: | - set +e - test_output=$( - docker run --env CI --env OPENAI_API_KEY \ - --entrypoint poetry ${{ env.IMAGE_NAME }} run \ - pytest -v --cov=autogpt --cov-branch --cov-report term-missing \ - --numprocesses=4 --durations=10 \ - tests/unit tests/integration 2>&1 - ) - test_failure=$? 
- - echo "$test_output" - - cat << $EOF >> $GITHUB_STEP_SUMMARY - # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌') - \`\`\` - $test_output - \`\`\` - $EOF - - exit $test_failure diff --git a/.github/workflows/autogpt-docker-release.yml b/.github/workflows/autogpt-docker-release.yml deleted file mode 100644 index df2f0e9f23f2..000000000000 --- a/.github/workflows/autogpt-docker-release.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: AutoGPT Docker Release - -on: - release: - types: [ published, edited ] - - workflow_dispatch: - inputs: - no_cache: - type: boolean - description: 'Build from scratch, without using cached layers' - -defaults: - run: - working-directory: autogpts/autogpt - -env: - IMAGE_NAME: auto-gpt - DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Log in to Docker hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - # slashes are not allowed in image tags, but can appear in git branch or tag names - - id: sanitize_tag - name: Sanitize image tag - run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT - env: - raw_tag: ${{ github.ref_name }} - - - id: build - name: Build image - uses: docker/build-push-action@v3 - with: - context: autogpts/autogpt - build-args: BUILD_TYPE=release - load: true # save to docker images - # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555 - tags: > - ${{ env.IMAGE_NAME }}, - ${{ env.DEPLOY_IMAGE_NAME }}:latest, - ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} - - # cache layers in GitHub Actions cache to speed up builds - cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release - cache-to: type=gha,scope=autogpt-docker-release,mode=max - - - name: Push image to Docker Hub - run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} - - - name: Generate build report - env: - event_name: ${{ github.event_name }} - event_ref: ${{ github.event.ref }} - event_ref_type: ${{ github.event.ref}} - inputs_no_cache: ${{ inputs.no_cache }} - - prod_branch: master - dev_branch: development - repository: ${{ github.repository }} - base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }} - - ref_type: ${{ github.ref_type }} - current_ref: ${{ github.ref_name }} - commit_hash: ${{ github.sha }} - source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} - - github_context_json: ${{ toJSON(github) }} - job_env_json: ${{ toJSON(env) }} - vars_json: ${{ toJSON(vars) }} - - run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY - working-directory: ./ - continue-on-error: true diff --git a/.github/workflows/autogpts-ci.yml b/.github/workflows/autogpts-ci.yml deleted file mode 100644 index 4f89fb7c98ec..000000000000 --- a/.github/workflows/autogpts-ci.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Valid AutoGPTs - -on: - workflow_dispatch: - schedule: - - cron: '0 8 * * *' - push: - branches: [ master, development, ci-test* ] - pull_request: - branches: [ master, development, release-* ] - -jobs: - run-tests: - runs-on: ubuntu-latest - strategy: - matrix: - agent-name: [ autogpt, forge ] - fail-fast: false - timeout-minutes: 20 - steps: - - name: 
Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - submodules: true - - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} - - - name: Install Poetry - working-directory: ./autogpts/${{ matrix.agent-name }}/ - run: | - curl -sSL https://install.python-poetry.org | python - - - - name: Run regression tests - run: | - ./run agent start ${{ matrix.agent-name }} - cd autogpts/${{ matrix.agent-name }} - poetry run agbenchmark --mock - poetry run agbenchmark --test=WriteFile - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - AGENT_NAME: ${{ matrix.agent-name }} - HELICONE_API_KEY: ${{ secrets.HELICONE_API_KEY }} - REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt - HELICONE_CACHE_ENABLED: false - HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }} - REPORT_LOCATION: ${{ format('../../reports/{0}', matrix.agent-name) }} diff --git a/.github/workflows/benchmark-ci.yml b/.github/workflows/benchmark-ci.yml deleted file mode 100644 index fd8020d245c0..000000000000 --- a/.github/workflows/benchmark-ci.yml +++ /dev/null @@ -1,143 +0,0 @@ -name: Benchmark CI - -on: - push: - branches: [ master, development, ci-test* ] - paths: - - 'benchmark/**' - - .github/workflows/benchmark-ci.yml - - '!benchmark/reports/**' - pull_request: - branches: [ master, development, release-* ] - paths: - - 'benchmark/**' - - '!benchmark/reports/**' - - .github/workflows/benchmark-ci.yml - -jobs: - lint: - runs-on: ubuntu-latest - env: - min-python-version: '3.10' - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} - - - id: get_date - name: Get date - working-directory: ./benchmark/ - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Install Poetry - working-directory: ./benchmark/ - run: | - curl -sSL https://install.python-poetry.org | python - - - - name: Install dependencies - working-directory: ./benchmark/ - run: | - export POETRY_VIRTUALENVS_IN_PROJECT=true - poetry install -vvv - - - name: Lint with flake8 - working-directory: ./benchmark/ - run: poetry run flake8 - - - name: Check black formatting - working-directory: ./benchmark/ - run: poetry run black . --exclude test.py --check - if: success() || failure() - - - name: Check isort formatting - working-directory: ./benchmark/ - run: poetry run isort . 
--check - if: success() || failure() - - - name: Check for unused imports and pass statements - working-directory: ./benchmark/ - run: | - cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark" - $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1) - if: success() || failure() - - tests-agbenchmark: - runs-on: ubuntu-latest - strategy: - matrix: - agent-name: [ forge ] - fail-fast: false - timeout-minutes: 20 - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - submodules: true - - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} - - - name: Install Poetry - working-directory: ./autogpts/${{ matrix.agent-name }}/ - run: | - curl -sSL https://install.python-poetry.org | python - - - - name: Run regression tests - run: | - ./run agent start ${{ matrix.agent-name }} - sleep 10 - cd autogpts/${{ matrix.agent-name }} - set +e # Ignore non-zero exit codes and continue execution - echo "Running the following command: poetry run agbenchmark --maintain --mock" - - poetry run agbenchmark --maintain --mock - EXIT_CODE=$? - set -e # Stop ignoring non-zero exit codes - # Check if the exit code was 5, and if so, exit with 0 instead - if [ $EXIT_CODE -eq 5 ]; then - echo "regression_tests.json is empty." - fi - - echo "Running the following command: poetry run agbenchmark --mock" - poetry run agbenchmark --mock - - echo "Running the following command: poetry run agbenchmark --mock --category=data" - poetry run agbenchmark --mock --category=data - - echo "Running the following command: poetry run agbenchmark --mock --category=coding" - poetry run agbenchmark --mock --category=coding - - echo "Running the following command: poetry run agbenchmark --test=WriteFile" - poetry run agbenchmark --test=WriteFile - cd ../../benchmark - poetry install - echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed" - export BUILD_SKILL_TREE=true - - poetry run agbenchmark --mock - poetry run pytest -vv -s tests - - CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs" - if [ ! -z "$CHANGED" ]; then - echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed." - echo "$CHANGED" - exit 1 - else - echo "No unstaged changes." 
- fi - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/.github/workflows/benchmark_publish_package.yml b/.github/workflows/benchmark_publish_package.yml deleted file mode 100644 index de0ca66fedde..000000000000 --- a/.github/workflows/benchmark_publish_package.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Publish to PyPI - -on: - workflow_dispatch: - -jobs: - deploy: - runs-on: ubuntu-latest - permissions: - contents: write - steps: - - name: Checkout repository - uses: actions/checkout@v2 - with: - submodules: true - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - - name: Install Poetry - working-directory: ./benchmark/ - run: | - curl -sSL https://install.python-poetry.org | python3 - - echo "$HOME/.poetry/bin" >> $GITHUB_PATH - - - name: Build project for distribution - working-directory: ./benchmark/ - run: poetry build - - - name: Install dependencies - working-directory: ./benchmark/ - run: poetry install - - - name: Check Version - working-directory: ./benchmark/ - id: check-version - run: | - echo version=$(poetry version --short) >> $GITHUB_OUTPUT - - - name: Create Release - uses: ncipollo/release-action@v1 - with: - artifacts: "benchmark/dist/*" - token: ${{ secrets.GITHUB_TOKEN }} - draft: false - generateReleaseNotes: false - tag: agbenchmark-v${{ steps.check-version.outputs.version }} - commit: master - - - name: Build and publish - working-directory: ./benchmark/ - run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml deleted file mode 100644 index af2383a63372..000000000000 --- a/.github/workflows/benchmarks.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: Benchmarks (legacy) - -on: - schedule: - - cron: '0 8 * * *' - workflow_dispatch: - -jobs: - Benchmark: - name: ${{ matrix.config.task-name }} - runs-on: ubuntu-latest - timeout-minutes: 30 - strategy: - fail-fast: false - matrix: - config: - - python-version: "3.10" - task: "tests/challenges" - task-name: "Mandatory Tasks" - - python-version: "3.10" - task: "--beat-challenges -ra tests/challenges" - task-name: "Challenging Tasks" - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - ref: master - - - name: Set up Python ${{ matrix.config.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.config.python-version }} - - - id: get_date - name: Get date - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Set up Python dependency cache - uses: actions/cache@v3 - with: - path: ~/.cache/pypoetry - key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }} - - - name: Install Python dependencies - run: | - curl -sSL https://install.python-poetry.org | python3 - - poetry install - - - name: Run pytest with coverage - run: | - rm -rf tests/vcr_cassettes - pytest -n auto --record-mode=all ${{ matrix.config.task }} - env: - CI: true - PROXY: ${{ secrets.PROXY }} - AGENT_MODE: ${{ secrets.AGENT_MODE }} - AGENT_TYPE: ${{ secrets.AGENT_TYPE }} - PLAIN_OUTPUT: True - - - name: Upload logs as artifact - if: always() - uses: actions/upload-artifact@v3 - with: - name: test-logs-${{ matrix.config.task-name }} - path: logs/ - - - name: Upload cassettes as artifact - if: always() - uses: actions/upload-artifact@v3 - with: - name: cassettes-${{ matrix.config.task-name }} - path: tests/vcr_cassettes/ diff --git a/.github/workflows/build-frontend.yml 
b/.github/workflows/build-frontend.yml deleted file mode 100644 index dedf67f3c85c..000000000000 --- a/.github/workflows/build-frontend.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Build and Commit Frontend - -on: - push: - branches: - - master - - development - - 'ci-test*' # This will match any branch that starts with "ci-test" - paths: - - 'frontend/**' - -jobs: - build: - permissions: - contents: write - runs-on: ubuntu-latest - steps: - - name: Checkout Repo - uses: actions/checkout@v2 - - name: Setup Flutter - uses: subosito/flutter-action@v1 - with: - flutter-version: '3.13.2' - - name: Build Flutter Web - run: | - cd frontend - flutter build web --base-href /app/ - - name: Set branch name - id: vars - run: echo "::set-output name=branch::frontend_build_${GITHUB_SHA}" - - name: Commit and Push - run: | - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" - git add frontend/build/web - git commit -m "Update frontend build" -a - git checkout -b ${{ steps.vars.outputs.branch }} - echo "Commit hash: ${GITHUB_SHA}" - git push origin ${{ steps.vars.outputs.branch }} - # - name: Create Pull Request - # uses: peter-evans/create-pull-request@v3 - # with: - # title: "Update frontend build" - # body: "This PR updates the frontend build." - # branch: ${{ steps.vars.outputs.branch }} - # base: "master" diff --git a/.github/workflows/classic-autogpt-ci.yml b/.github/workflows/classic-autogpt-ci.yml new file mode 100644 index 000000000000..e549da8ae05d --- /dev/null +++ b/.github/workflows/classic-autogpt-ci.yml @@ -0,0 +1,138 @@ +name: Classic - AutoGPT CI + +on: + push: + branches: [ master, dev, ci-test* ] + paths: + - '.github/workflows/classic-autogpt-ci.yml' + - 'classic/original_autogpt/**' + pull_request: + branches: [ master, dev, release-* ] + paths: + - '.github/workflows/classic-autogpt-ci.yml' + - 'classic/original_autogpt/**' + +concurrency: + group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + working-directory: classic/original_autogpt + +jobs: + test: + permissions: + contents: read + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + platform-os: [ubuntu, macos, macos-arm64, windows] + runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }} + + steps: + # Quite slow on macOS (2~4 minutes to set up Docker) + # - name: Set up Docker (macOS) + # if: runner.os == 'macOS' + # uses: crazy-max/ghaction-setup-docker@v3 + + - name: Start MinIO service (Linux) + if: runner.os == 'Linux' + working-directory: '.' + run: | + docker pull minio/minio:edge-cicd + docker run -d -p 9000:9000 minio/minio:edge-cicd + + - name: Start MinIO service (macOS) + if: runner.os == 'macOS' + working-directory: ${{ runner.temp }} + run: | + brew install minio/stable/minio + mkdir data + minio server ./data & + + # No MinIO on Windows: + # - Windows doesn't support running Linux Docker containers + # - It doesn't seem possible to start background processes on Windows. They are + # killed after the step returns. 
+ # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429 + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Configure git user Auto-GPT-Bot + run: | + git config --global user.name "Auto-GPT-Bot" + git config --global user.email "github-bot@agpt.co" + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - id: get_date + name: Get date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Set up Python dependency cache + # On Windows, unpacking cached dependencies takes longer than just installing them + if: runner.os != 'Windows' + uses: actions/cache@v4 + with: + path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }} + key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }} + + - name: Install Poetry (Unix) + if: runner.os != 'Windows' + run: | + curl -sSL https://install.python-poetry.org | python3 - + + if [ "${{ runner.os }}" = "macOS" ]; then + PATH="$HOME/.local/bin:$PATH" + echo "$HOME/.local/bin" >> $GITHUB_PATH + fi + + - name: Install Poetry (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: | + (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - + + $env:PATH += ";$env:APPDATA\Python\Scripts" + echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH + + - name: Install Python dependencies + run: poetry install + + - name: Run pytest with coverage + run: | + poetry run pytest -vv \ + --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \ + --numprocesses=logical --durations=10 \ + tests/unit tests/integration + env: + CI: true + PLAIN_OUTPUT: True + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }} + AWS_ACCESS_KEY_ID: minioadmin + AWS_SECRET_ACCESS_KEY: minioadmin + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: autogpt-agent,${{ runner.os }} + + - name: Upload logs to artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs + path: classic/original_autogpt/logs/ diff --git a/.github/workflows/classic-autogpt-docker-cache-clean.yml b/.github/workflows/classic-autogpt-docker-cache-clean.yml new file mode 100644 index 000000000000..1a91e946cf18 --- /dev/null +++ b/.github/workflows/classic-autogpt-docker-cache-clean.yml @@ -0,0 +1,60 @@ +name: Classic - Purge Auto-GPT Docker CI cache + +on: + schedule: + - cron: 20 4 * * 1,4 + +env: + BASE_BRANCH: dev + IMAGE_NAME: auto-gpt + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build image + uses: docker/build-push-action@v6 + with: + context: classic/ + file: classic/Dockerfile.autogpt + build-args: BUILD_TYPE=${{ matrix.build-type }} + load: true # save to docker images + # use GHA cache as read-only + cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.schedule }} + + build_type: ${{ matrix.build-type }} + + prod_branch: master + dev_branch: dev + repository: ${{ github.repository }} + 
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }} + push_forced_label: + + new_commits_json: ${{ null }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git a/.github/workflows/classic-autogpt-docker-ci.yml b/.github/workflows/classic-autogpt-docker-ci.yml new file mode 100644 index 000000000000..4dc955a5b935 --- /dev/null +++ b/.github/workflows/classic-autogpt-docker-ci.yml @@ -0,0 +1,166 @@ +name: Classic - AutoGPT Docker CI + +on: + push: + branches: [master, dev] + paths: + - '.github/workflows/classic-autogpt-docker-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + pull_request: + branches: [ master, dev, release-* ] + paths: + - '.github/workflows/classic-autogpt-docker-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + +concurrency: + group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +defaults: + run: + working-directory: classic/original_autogpt + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt + DEV_IMAGE_TAG: latest-dev + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + build-type: [release, dev] + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - if: runner.debug + run: | + ls -al + du -hs * + + - id: build + name: Build image + uses: docker/build-push-action@v6 + with: + context: classic/ + file: classic/Dockerfile.autogpt + build-args: BUILD_TYPE=${{ matrix.build-type }} + tags: ${{ env.IMAGE_NAME }} + labels: GIT_REVISION=${{ github.sha }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }} + cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + + build_type: ${{ matrix.build-type }} + + prod_branch: master + dev_branch: dev + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }} + + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.event.after }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }} + + new_commits_json: ${{ toJSON(github.event.commits) }} + compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true + + test: + runs-on: ubuntu-latest 
+ timeout-minutes: 10 + + services: + minio: + image: minio/minio:edge-cicd + options: > + --name=minio + --health-interval=10s --health-timeout=5s --health-retries=3 + --health-cmd="curl -f http://localhost:9000/minio/health/live" + + steps: + - name: Check out repository + uses: actions/checkout@v4 + with: + submodules: true + + - if: github.event_name == 'push' + name: Log in to Docker hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build image + uses: docker/build-push-action@v6 + with: + context: classic/ + file: classic/Dockerfile.autogpt + build-args: BUILD_TYPE=dev # include pytest + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }} + labels: GIT_REVISION=${{ github.sha }} + load: true # save to docker images + # cache layers in GitHub Actions cache to speed up builds + cache-from: type=gha,scope=autogpt-docker-dev + cache-to: type=gha,scope=autogpt-docker-dev,mode=max + + - id: test + name: Run tests + env: + CI: true + PLAIN_OUTPUT: True + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + S3_ENDPOINT_URL: http://minio:9000 + AWS_ACCESS_KEY_ID: minioadmin + AWS_SECRET_ACCESS_KEY: minioadmin + run: | + set +e + docker run --env CI --env OPENAI_API_KEY \ + --network container:minio \ + --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \ + --entrypoint poetry ${{ env.IMAGE_NAME }} run \ + pytest -v --cov=autogpt --cov-branch --cov-report term-missing \ + --numprocesses=4 --durations=10 \ + tests/unit tests/integration 2>&1 | tee test_output.txt + + test_failure=${PIPESTATUS[0]} + + cat << $EOF >> $GITHUB_STEP_SUMMARY + # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌') + \`\`\` + $(cat test_output.txt) + \`\`\` + $EOF + + exit $test_failure + + - if: github.event_name == 'push' && github.ref_name == 'master' + name: Push image to Docker Hub + run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }} diff --git a/.github/workflows/classic-autogpt-docker-release.yml b/.github/workflows/classic-autogpt-docker-release.yml new file mode 100644 index 000000000000..fd333395400c --- /dev/null +++ b/.github/workflows/classic-autogpt-docker-release.yml @@ -0,0 +1,87 @@ +name: Classic - AutoGPT Docker Release + +on: + release: + types: [published, edited] + + workflow_dispatch: + inputs: + no_cache: + type: boolean + description: 'Build from scratch, without using cached layers' + +env: + IMAGE_NAME: auto-gpt + DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt + +jobs: + build: + if: startsWith(github.ref, 'refs/tags/autogpt-') + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to Docker hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + # slashes are not allowed in image tags, but can appear in git branch or tag names + - id: sanitize_tag + name: Sanitize image tag + run: | + tag=${raw_tag//\//-} + echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT + env: + raw_tag: ${{ github.ref_name }} + + - id: build + name: Build image + uses: docker/build-push-action@v6 + with: + context: classic/ + file: Dockerfile.autogpt + build-args: BUILD_TYPE=release + load: true # save to docker images + # push: true # TODO: uncomment when this issue is fixed: 
https://github.com/moby/buildkit/issues/1555 + tags: > + ${{ env.IMAGE_NAME }}, + ${{ env.DEPLOY_IMAGE_NAME }}:latest, + ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }} + labels: GIT_REVISION=${{ github.sha }} + + # cache layers in GitHub Actions cache to speed up builds + cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release + cache-to: type=gha,scope=autogpt-docker-release,mode=max + + - name: Push image to Docker Hub + run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }} + + - name: Generate build report + env: + event_name: ${{ github.event_name }} + event_ref: ${{ github.event.ref }} + event_ref_type: ${{ github.event.ref}} + inputs_no_cache: ${{ inputs.no_cache }} + + prod_branch: master + dev_branch: dev + repository: ${{ github.repository }} + base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }} + + ref_type: ${{ github.ref_type }} + current_ref: ${{ github.ref_name }} + commit_hash: ${{ github.sha }} + source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }} + + github_context_json: ${{ toJSON(github) }} + job_env_json: ${{ toJSON(env) }} + vars_json: ${{ toJSON(vars) }} + + run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY + continue-on-error: true diff --git a/.github/workflows/classic-autogpts-ci.yml b/.github/workflows/classic-autogpts-ci.yml new file mode 100644 index 000000000000..21bc945c0077 --- /dev/null +++ b/.github/workflows/classic-autogpts-ci.yml @@ -0,0 +1,76 @@ +name: Classic - Agent smoke tests + +on: + workflow_dispatch: + schedule: + - cron: '0 8 * * *' + push: + branches: [ master, dev, ci-test* ] + paths: + - '.github/workflows/classic-autogpts-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + - 'classic/benchmark/**' + - 'classic/run' + - 'classic/cli.py' + - 'classic/setup.py' + - '!**/*.md' + pull_request: + branches: [ master, dev, release-* ] + paths: + - '.github/workflows/classic-autogpts-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + - 'classic/benchmark/**' + - 'classic/run' + - 'classic/cli.py' + - 'classic/setup.py' + - '!**/*.md' + +defaults: + run: + shell: bash + working-directory: classic + +jobs: + serve-agent-protocol: + runs-on: ubuntu-latest + strategy: + matrix: + agent-name: [ original_autogpt ] + fail-fast: false + timeout-minutes: 20 + env: + min-python-version: '3.10' + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Install Poetry + working-directory: ./classic/${{ matrix.agent-name }}/ + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Run regression tests + run: | + ./run agent start ${{ matrix.agent-name }} + cd ${{ matrix.agent-name }} + poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0 + poetry run agbenchmark --test=WriteFile + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + AGENT_NAME: ${{ matrix.agent-name }} + REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt + HELICONE_CACHE_ENABLED: false + HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }} + REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }} + TELEMETRY_ENVIRONMENT: autogpt-ci + TELEMETRY_OPT_IN: ${{ github.ref_name == 
'master' }} diff --git a/.github/workflows/classic-benchmark-ci.yml b/.github/workflows/classic-benchmark-ci.yml new file mode 100644 index 000000000000..a239e7b29477 --- /dev/null +++ b/.github/workflows/classic-benchmark-ci.yml @@ -0,0 +1,169 @@ +name: Classic - AGBenchmark CI + +on: + push: + branches: [ master, dev, ci-test* ] + paths: + - 'classic/benchmark/**' + - '!classic/benchmark/reports/**' + - .github/workflows/classic-benchmark-ci.yml + pull_request: + branches: [ master, dev, release-* ] + paths: + - 'classic/benchmark/**' + - '!classic/benchmark/reports/**' + - .github/workflows/classic-benchmark-ci.yml + +concurrency: + group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + +env: + min-python-version: '3.10' + +jobs: + test: + permissions: + contents: read + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + platform-os: [ubuntu, macos, macos-arm64, windows] + runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }} + defaults: + run: + shell: bash + working-directory: classic/benchmark + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Set up Python dependency cache + # On Windows, unpacking cached dependencies takes longer than just installing them + if: runner.os != 'Windows' + uses: actions/cache@v4 + with: + path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }} + key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }} + + - name: Install Poetry (Unix) + if: runner.os != 'Windows' + run: | + curl -sSL https://install.python-poetry.org | python3 - + + if [ "${{ runner.os }}" = "macOS" ]; then + PATH="$HOME/.local/bin:$PATH" + echo "$HOME/.local/bin" >> $GITHUB_PATH + fi + + - name: Install Poetry (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: | + (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - + + $env:PATH += ";$env:APPDATA\Python\Scripts" + echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH + + - name: Install Python dependencies + run: poetry install + + - name: Run pytest with coverage + run: | + poetry run pytest -vv \ + --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \ + --durations=10 \ + tests + env: + CI: true + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: agbenchmark,${{ runner.os }} + + self-test-with-agent: + runs-on: ubuntu-latest + strategy: + matrix: + agent-name: [forge] + fail-fast: false + timeout-minutes: 20 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python - + + - name: Run regression tests + working-directory: classic + run: | + ./run agent start ${{ matrix.agent-name }} + cd ${{ 
matrix.agent-name }} + + set +e # Ignore non-zero exit codes and continue execution + echo "Running the following command: poetry run agbenchmark --maintain --mock" + poetry run agbenchmark --maintain --mock + EXIT_CODE=$? + set -e # Stop ignoring non-zero exit codes + # Check if the exit code was 5, and if so, exit with 0 instead + if [ $EXIT_CODE -eq 5 ]; then + echo "regression_tests.json is empty." + fi + + echo "Running the following command: poetry run agbenchmark --mock" + poetry run agbenchmark --mock + + echo "Running the following command: poetry run agbenchmark --mock --category=data" + poetry run agbenchmark --mock --category=data + + echo "Running the following command: poetry run agbenchmark --mock --category=coding" + poetry run agbenchmark --mock --category=coding + + # echo "Running the following command: poetry run agbenchmark --test=WriteFile" + # poetry run agbenchmark --test=WriteFile + cd ../benchmark + poetry install + echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed" + export BUILD_SKILL_TREE=true + + # poetry run agbenchmark --mock + + # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs" + # if [ ! -z "$CHANGED" ]; then + # echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed." + # echo "$CHANGED" + # exit 1 + # else + # echo "No unstaged changes." + # fi + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci + TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }} diff --git a/.github/workflows/classic-benchmark_publish_package.yml b/.github/workflows/classic-benchmark_publish_package.yml new file mode 100644 index 000000000000..afcab0babe9c --- /dev/null +++ b/.github/workflows/classic-benchmark_publish_package.yml @@ -0,0 +1,55 @@ +name: Classic - Publish to PyPI + +on: + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: true + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 3.8 + + - name: Install Poetry + working-directory: ./classic/benchmark/ + run: | + curl -sSL https://install.python-poetry.org | python3 - + echo "$HOME/.poetry/bin" >> $GITHUB_PATH + + - name: Build project for distribution + working-directory: ./classic/benchmark/ + run: poetry build + + - name: Install dependencies + working-directory: ./classic/benchmark/ + run: poetry install + + - name: Check Version + working-directory: ./classic/benchmark/ + id: check-version + run: | + echo version=$(poetry version --short) >> $GITHUB_OUTPUT + + - name: Create Release + uses: ncipollo/release-action@v1 + with: + artifacts: "classic/benchmark/dist/*" + token: ${{ secrets.GITHUB_TOKEN }} + draft: false + generateReleaseNotes: false + tag: agbenchmark-v${{ steps.check-version.outputs.version }} + commit: master + + - name: Build and publish + working-directory: ./classic/benchmark/ + run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/classic-forge-ci.yml b/.github/workflows/classic-forge-ci.yml new file mode 100644 index 000000000000..4642f57521f7 --- /dev/null +++ b/.github/workflows/classic-forge-ci.yml @@ -0,0 +1,236 @@ +name: Classic - Forge CI + +on: + push: + branches: [ 
master, dev, ci-test* ] + paths: + - '.github/workflows/classic-forge-ci.yml' + - 'classic/forge/**' + - '!classic/forge/tests/vcr_cassettes' + pull_request: + branches: [ master, dev, release-* ] + paths: + - '.github/workflows/classic-forge-ci.yml' + - 'classic/forge/**' + - '!classic/forge/tests/vcr_cassettes' + +concurrency: + group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + working-directory: classic/forge + +jobs: + test: + permissions: + contents: read + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + platform-os: [ubuntu, macos, macos-arm64, windows] + runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }} + + steps: + # Quite slow on macOS (2~4 minutes to set up Docker) + # - name: Set up Docker (macOS) + # if: runner.os == 'macOS' + # uses: crazy-max/ghaction-setup-docker@v3 + + - name: Start MinIO service (Linux) + if: runner.os == 'Linux' + working-directory: '.' + run: | + docker pull minio/minio:edge-cicd + docker run -d -p 9000:9000 minio/minio:edge-cicd + + - name: Start MinIO service (macOS) + if: runner.os == 'macOS' + working-directory: ${{ runner.temp }} + run: | + brew install minio/stable/minio + mkdir data + minio server ./data & + + # No MinIO on Windows: + # - Windows doesn't support running Linux Docker containers + # - It doesn't seem possible to start background processes on Windows. They are + # killed after the step returns. + # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429 + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Checkout cassettes + if: ${{ startsWith(github.event_name, 'pull_request') }} + env: + PR_BASE: ${{ github.event.pull_request.base.ref }} + PR_BRANCH: ${{ github.event.pull_request.head.ref }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + run: | + cassette_branch="${PR_AUTHOR}-${PR_BRANCH}" + cassette_base_branch="${PR_BASE}" + cd tests/vcr_cassettes + + if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then + cassette_base_branch="master" + fi + + if git ls-remote --exit-code --heads origin $cassette_branch ; then + git fetch origin $cassette_branch + git fetch origin $cassette_base_branch + + git checkout $cassette_branch + + # Pick non-conflicting cassette updates from the base branch + git merge --no-commit --strategy-option=ours origin/$cassette_base_branch + echo "Using cassettes from mirror branch '$cassette_branch'," \ + "synced to upstream branch '$cassette_base_branch'." + else + git checkout -b $cassette_branch + echo "Branch '$cassette_branch' does not exist in cassette submodule." \ + "Using cassettes from '$cassette_base_branch'." 
+ fi + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Set up Python dependency cache + # On Windows, unpacking cached dependencies takes longer than just installing them + if: runner.os != 'Windows' + uses: actions/cache@v4 + with: + path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }} + key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }} + + - name: Install Poetry (Unix) + if: runner.os != 'Windows' + run: | + curl -sSL https://install.python-poetry.org | python3 - + + if [ "${{ runner.os }}" = "macOS" ]; then + PATH="$HOME/.local/bin:$PATH" + echo "$HOME/.local/bin" >> $GITHUB_PATH + fi + + - name: Install Poetry (Windows) + if: runner.os == 'Windows' + shell: pwsh + run: | + (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python - + + $env:PATH += ";$env:APPDATA\Python\Scripts" + echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH + + - name: Install Python dependencies + run: poetry install + + - name: Run pytest with coverage + run: | + poetry run pytest -vv \ + --cov=forge --cov-branch --cov-report term-missing --cov-report xml \ + --durations=10 \ + forge + env: + CI: true + PLAIN_OUTPUT: True + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }} + AWS_ACCESS_KEY_ID: minioadmin + AWS_SECRET_ACCESS_KEY: minioadmin + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: forge,${{ runner.os }} + + - id: setup_git_auth + name: Set up git token authentication + # Cassettes may be pushed even when tests fail + if: success() || failure() + run: | + config_key="http.${{ github.server_url }}/.extraheader" + if [ "${{ runner.os }}" = 'macOS' ]; then + base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64) + else + base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0) + fi + + git config "$config_key" \ + "Authorization: Basic $base64_pat" + + cd tests/vcr_cassettes + git config "$config_key" \ + "Authorization: Basic $base64_pat" + + echo "config_key=$config_key" >> $GITHUB_OUTPUT + + - id: push_cassettes + name: Push updated cassettes + # For pull requests, push updated cassettes even when tests fail + if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure())) + env: + PR_BRANCH: ${{ github.event.pull_request.head.ref }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + run: | + if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then + is_pull_request=true + cassette_branch="${PR_AUTHOR}-${PR_BRANCH}" + else + cassette_branch="${{ github.ref_name }}" + fi + + cd tests/vcr_cassettes + # Commit & push changes to cassettes if any + if ! git diff --quiet; then + git add . + git commit -m "Auto-update cassettes" + git push origin HEAD:$cassette_branch + if [ ! $is_pull_request ]; then + cd ../.. 
+ git add tests/vcr_cassettes + git commit -m "Update cassette submodule" + git push origin HEAD:$cassette_branch + fi + echo "updated=true" >> $GITHUB_OUTPUT + else + echo "updated=false" >> $GITHUB_OUTPUT + echo "No cassette changes to commit" + fi + + - name: Post Set up git token auth + if: steps.setup_git_auth.outcome == 'success' + run: | + git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' + git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}' + + - name: Apply "behaviour change" label and comment on PR + if: ${{ startsWith(github.event_name, 'pull_request') }} + run: | + PR_NUMBER="${{ github.event.pull_request.number }}" + TOKEN="${{ secrets.PAT_REVIEW }}" + REPO="${{ github.repository }}" + + if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then + echo "Adding label and comment..." + echo $TOKEN | gh auth login --with-token + gh issue edit $PR_NUMBER --add-label "behaviour change" + gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged." + fi + + - name: Upload logs to artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: test-logs + path: classic/forge/logs/ diff --git a/.github/workflows/classic-frontend-ci.yml b/.github/workflows/classic-frontend-ci.yml new file mode 100644 index 000000000000..13d518db57d4 --- /dev/null +++ b/.github/workflows/classic-frontend-ci.yml @@ -0,0 +1,60 @@ +name: Classic - Frontend CI/CD + +on: + push: + branches: + - master + - dev + - 'ci-test*' # This will match any branch that starts with "ci-test" + paths: + - 'classic/frontend/**' + - '.github/workflows/classic-frontend-ci.yml' + pull_request: + paths: + - 'classic/frontend/**' + - '.github/workflows/classic-frontend-ci.yml' + +jobs: + build: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + env: + BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }} + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - name: Setup Flutter + uses: subosito/flutter-action@v2 + with: + flutter-version: '3.13.2' + + - name: Build Flutter to Web + run: | + cd classic/frontend + flutter build web --base-href /app/ + + # - name: Commit and Push to ${{ env.BUILD_BRANCH }} + # if: github.event_name == 'push' + # run: | + # git config --local user.email "action@github.com" + # git config --local user.name "GitHub Action" + # git add classic/frontend/build/web + # git checkout -B ${{ env.BUILD_BRANCH }} + # git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a + # git push -f origin ${{ env.BUILD_BRANCH }} + + - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }} + if: github.event_name == 'push' + uses: peter-evans/create-pull-request@v7 + with: + add-paths: classic/frontend/build/web + base: ${{ github.ref_name }} + branch: ${{ env.BUILD_BRANCH }} + delete-branch: true + title: "Update frontend build in `${{ github.ref_name }}`" + body: "This PR updates the frontend build based on commit ${{ github.sha }}." 
+ commit-message: "Update frontend build based on commit ${{ github.sha }}" diff --git a/.github/workflows/classic-python-checks.yml b/.github/workflows/classic-python-checks.yml new file mode 100644 index 000000000000..59d90a7faea2 --- /dev/null +++ b/.github/workflows/classic-python-checks.yml @@ -0,0 +1,151 @@ +name: Classic - Python checks + +on: + push: + branches: [ master, dev, ci-test* ] + paths: + - '.github/workflows/classic-python-checks-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + - 'classic/benchmark/**' + - '**.py' + - '!classic/forge/tests/vcr_cassettes' + pull_request: + branches: [ master, dev, release-* ] + paths: + - '.github/workflows/classic-python-checks-ci.yml' + - 'classic/original_autogpt/**' + - 'classic/forge/**' + - 'classic/benchmark/**' + - '**.py' + - '!classic/forge/tests/vcr_cassettes' + +concurrency: + group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + +jobs: + get-changed-parts: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - id: changes-in + name: Determine affected subprojects + uses: dorny/paths-filter@v3 + with: + filters: | + original_autogpt: + - classic/original_autogpt/autogpt/** + - classic/original_autogpt/tests/** + - classic/original_autogpt/poetry.lock + forge: + - classic/forge/forge/** + - classic/forge/tests/** + - classic/forge/poetry.lock + benchmark: + - classic/benchmark/agbenchmark/** + - classic/benchmark/tests/** + - classic/benchmark/poetry.lock + outputs: + changed-parts: ${{ steps.changes-in.outputs.changes }} + + lint: + needs: get-changed-parts + runs-on: ubuntu-latest + env: + min-python-version: "3.10" + + strategy: + matrix: + sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }} + fail-fast: false + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Set up Python dependency cache + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }} + + - name: Install Poetry + run: curl -sSL https://install.python-poetry.org | python3 - + + # Install dependencies + + - name: Install Python dependencies + run: poetry -C classic/${{ matrix.sub-package }} install + + # Lint + + - name: Lint (isort) + run: poetry run isort --check . + working-directory: classic/${{ matrix.sub-package }} + + - name: Lint (Black) + if: success() || failure() + run: poetry run black --check . + working-directory: classic/${{ matrix.sub-package }} + + - name: Lint (Flake8) + if: success() || failure() + run: poetry run flake8 . 
+ working-directory: classic/${{ matrix.sub-package }} + + types: + needs: get-changed-parts + runs-on: ubuntu-latest + env: + min-python-version: "3.10" + + strategy: + matrix: + sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }} + fail-fast: false + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python ${{ env.min-python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.min-python-version }} + + - name: Set up Python dependency cache + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }} + + - name: Install Poetry + run: curl -sSL https://install.python-poetry.org | python3 - + + # Install dependencies + + - name: Install Python dependencies + run: poetry -C classic/${{ matrix.sub-package }} install + + # Typecheck + + - name: Typecheck + if: success() || failure() + run: poetry run pyright + working-directory: classic/${{ matrix.sub-package }} diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml deleted file mode 100644 index f8d66dceaf75..000000000000 --- a/.github/workflows/close-stale-issues.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: 'Close stale issues' -on: - schedule: - - cron: '30 1 * * *' - workflow_dispatch: - -permissions: - issues: write - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v8 - with: - # operations-per-run: 5000 - stale-issue-message: > - This issue has automatically been marked as _stale_ because it has not had - any activity in the last 50 days. You can _unstale_ it by commenting or - removing the label. Otherwise, this issue will be closed in 10 days. - stale-pr-message: > - This pull request has automatically been marked as _stale_ because it has - not had any activity in the last 50 days. You can _unstale_ it by commenting - or removing the label. - close-issue-message: > - This issue was closed automatically because it has been stale for 10 days - with no activity. - days-before-stale: 50 - days-before-close: 10 - # Do not touch meta issues: - exempt-issue-labels: meta,fridge,project management - # Do not affect pull requests: - days-before-pr-stale: -1 - days-before-pr-close: -1 diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000000..a6c36ed86c54 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,98 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "master", "release-*", "dev" ] + pull_request: + branches: [ "master", "release-*", "dev" ] + merge_group: + schedule: + - cron: '15 4 * * 0' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. 
To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: typescript + build-mode: none + - language: python + build-mode: none + # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. + # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + config: | + paths-ignore: + - classic/frontend/build/** + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/hackathon.yml b/.github/workflows/hackathon.yml deleted file mode 100644 index 9bd5d5f11c30..000000000000 --- a/.github/workflows/hackathon.yml +++ /dev/null @@ -1,136 +0,0 @@ -name: Hackathon - -on: - workflow_dispatch: - branches: [hackathon] - inputs: - agents: - description: "Agents to run (comma-separated)" - required: false - default: "autogpt" # Default agents if none are specified - -jobs: - matrix-setup: - runs-on: ubuntu-latest - # Service containers to run with `matrix-setup` - services: - # Label used to access the service container - postgres: - # Docker Hub image - image: postgres - # Provide the password for postgres - env: - POSTGRES_PASSWORD: postgres - # Set health checks to wait until postgres has started - options: >- - --health-cmd pg_isready - --health-interval 10s - --health-timeout 5s - --health-retries 5 - ports: - # Maps tcp port 5432 on service container to the host - - 5432:5432 - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - env-name: ${{ steps.set-matrix.outputs.env-name }} - steps: - - id: set-matrix - run: | - if [ "${{ github.event_name }}" == "schedule" ]; then - echo "::set-output name=env-name::production" - echo "::set-output name=matrix::[ 'irrelevant']" - elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then - IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}" - matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]" - echo "::set-output name=env-name::production" - echo "::set-output name=matrix::$matrix_string" - else - echo "::set-output name=env-name::testing" - echo "::set-output name=matrix::[ 'irrelevant' ]" - fi - - tests: - environment: - name: "${{ needs.matrix-setup.outputs.env-name }}" - needs: matrix-setup - env: - min-python-version: "3.10" - name: "${{ matrix.agent-name }}" - runs-on: ubuntu-latest - services: - # Label used to access the service container - postgres: - # Docker Hub image - image: postgres - # Provide the password for postgres - env: - POSTGRES_PASSWORD: postgres - # Set health checks to wait until postgres has started - options: >- - --health-cmd pg_isready - --health-interval 10s - --health-timeout 5s - --health-retries 5 - ports: - # Maps tcp port 5432 on service container to the host - - 5432:5432 - timeout-minutes: 50 - strategy: - fail-fast: false - matrix: - agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}} - steps: - - name: Print Environment Name - run: | - echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}" - - - name: Check Docker Container - id: check - run: docker ps - - - name: Checkout repository - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} - submodules: true - - - name: Set up Python ${{ env.min-python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ env.min-python-version }} - - - id: get_date - name: 
Get date - run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT - - - name: Install Poetry - run: | - curl -sSL https://install.python-poetry.org | python - - - - name: Install Node.js - uses: actions/setup-node@v1 - with: - node-version: v18.15 - - - name: Run benchmark - run: | - link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json) - branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json) - git clone "$link" -b "$branch" "$AGENT_NAME" - cd $AGENT_NAME - cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found" - ./run agent start $AGENT_NAME - cd ../benchmark - poetry install - poetry run agbenchmark --no_dep - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - SERP_API_KEY: ${{ secrets.SERP_API_KEY }} - SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }} - WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }} - WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }} - GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} - GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }} - AGENT_NAME: ${{ matrix.agent-name }} \ No newline at end of file diff --git a/.github/workflows/platform-autgpt-deploy-prod.yml b/.github/workflows/platform-autgpt-deploy-prod.yml new file mode 100644 index 000000000000..aee6e0505624 --- /dev/null +++ b/.github/workflows/platform-autgpt-deploy-prod.yml @@ -0,0 +1,49 @@ +name: AutoGPT Platform - Deploy Prod Environment + +on: + release: + types: [published] + +permissions: + contents: 'read' + id-token: 'write' + +jobs: + migrate: + environment: production + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + + trigger: + needs: migrate + runs-on: ubuntu-latest + steps: + - name: Trigger deploy workflow + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.DEPLOY_TOKEN }} + repository: Significant-Gravitas/AutoGPT_cloud_infrastructure + event-type: build_deploy_prod + client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}' \ No newline at end of file diff --git a/.github/workflows/platform-autogpt-deploy-dev.yaml b/.github/workflows/platform-autogpt-deploy-dev.yaml new file mode 100644 index 000000000000..7402bfbfcade --- /dev/null +++ b/.github/workflows/platform-autogpt-deploy-dev.yaml @@ -0,0 +1,50 @@ +name: AutoGPT Platform - Deploy Dev Environment + +on: + push: + branches: [ dev ] + paths: + - 'autogpt_platform/**' + +permissions: + contents: 'read' + id-token: 'write' + +jobs: + migrate: + environment: develop + name: Run migrations for AutoGPT Platform + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install prisma + + - name: Run Backend Migrations + working-directory: ./autogpt_platform/backend + run: | + python -m prisma migrate deploy + env: + DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }} + + trigger: + needs: migrate + runs-on: ubuntu-latest + 
steps: + - name: Trigger deploy workflow + uses: peter-evans/repository-dispatch@v3 + with: + token: ${{ secrets.DEPLOY_TOKEN }} + repository: Significant-Gravitas/AutoGPT_cloud_infrastructure + event-type: build_deploy_dev + client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}' diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml new file mode 100644 index 000000000000..cdab6ed30064 --- /dev/null +++ b/.github/workflows/platform-backend-ci.yml @@ -0,0 +1,147 @@ +name: AutoGPT Platform - Backend CI + +on: + push: + branches: [master, dev, ci-test*] + paths: + - ".github/workflows/platform-backend-ci.yml" + - "autogpt_platform/backend/**" + - "autogpt_platform/autogpt_libs/**" + pull_request: + branches: [master, dev, release-*] + paths: + - ".github/workflows/platform-backend-ci.yml" + - "autogpt_platform/backend/**" + - "autogpt_platform/autogpt_libs/**" + merge_group: + +concurrency: + group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }} + cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }} + +defaults: + run: + shell: bash + working-directory: autogpt_platform/backend + +jobs: + test: + permissions: + contents: read + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + runs-on: ubuntu-latest + + services: + redis: + image: bitnami/redis:6.2 + env: + REDIS_PASSWORD: testpassword + ports: + - 6379:6379 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Setup Supabase + uses: supabase/setup-cli@v1 + with: + version: latest + + - id: get_date + name: Get date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT + + - name: Set up Python dependency cache + uses: actions/cache@v4 + with: + path: ~/.cache/pypoetry + key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }} + + - name: Install Poetry (Unix) + run: | + curl -sSL https://install.python-poetry.org | python3 - + + if [ "${{ runner.os }}" = "macOS" ]; then + PATH="$HOME/.local/bin:$PATH" + echo "$HOME/.local/bin" >> $GITHUB_PATH + fi + + - name: Check poetry.lock + run: | + poetry lock + + if ! git diff --quiet poetry.lock; then + echo "Error: poetry.lock not up to date." + echo + git diff poetry.lock + exit 1 + fi + + - name: Install Python dependencies + run: poetry install + + - name: Generate Prisma Client + run: poetry run prisma generate + + - id: supabase + name: Start Supabase + working-directory: . 
+ run: | + supabase init + supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor + supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT + # outputs: + # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET + + - name: Run Database Migrations + run: poetry run prisma migrate dev --name updates + env: + DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }} + + - id: lint + name: Run Linter + run: poetry run lint + + - name: Run pytest with coverage + run: | + if [[ "${{ runner.debug }}" == "1" ]]; then + poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test + else + poetry run pytest -s -vv test + fi + if: success() || (failure() && steps.lint.outcome == 'failure') + env: + LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }} + DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }} + SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }} + SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }} + SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }} + REDIS_HOST: 'localhost' + REDIS_PORT: '6379' + REDIS_PASSWORD: 'testpassword' + + env: + CI: true + PLAIN_OUTPUT: True + RUN_ENV: local + PORT: 8080 + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + + # - name: Upload coverage reports to Codecov + # uses: codecov/codecov-action@v4 + # with: + # token: ${{ secrets.CODECOV_TOKEN }} + # flags: backend,${{ runner.os }} diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml new file mode 100644 index 000000000000..8fc27fde5689 --- /dev/null +++ b/.github/workflows/platform-frontend-ci.yml @@ -0,0 +1,120 @@ +name: AutoGPT Platform - Frontend CI + +on: + push: + branches: [master, dev] + paths: + - ".github/workflows/platform-frontend-ci.yml" + - "autogpt_platform/frontend/**" + pull_request: + paths: + - ".github/workflows/platform-frontend-ci.yml" + - "autogpt_platform/frontend/**" + merge_group: + +defaults: + run: + shell: bash + working-directory: autogpt_platform/frontend + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "21" + + - name: Install dependencies + run: | + yarn install --frozen-lockfile + + - name: Run lint + run: | + yarn lint + + type-check: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "21" + + - name: Install dependencies + run: | + yarn install --frozen-lockfile + + - name: Run tsc check + run: | + yarn type-check + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + browser: [chromium, webkit] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "21" + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + large-packages: false # slow + docker-images: false # limited benefit + + - name: Copy default supabase .env + run: | + cp ../supabase/docker/.env.example ../.env + + - name: Copy backend .env + run: | + cp ../backend/.env.example ../backend/.env + + - name: Run docker compose + run: | + docker compose -f ../docker-compose.yml up -d + + - name: Install dependencies + run: | + yarn install --frozen-lockfile + + - name: Setup Builder .env + run: | + cp .env.example .env + + - name: Install Browser '${{ matrix.browser 
}}' + run: yarn playwright install --with-deps ${{ matrix.browser }} + + - name: Run tests + run: | + yarn test --project=${{ matrix.browser }} + + - name: Print Docker Compose logs in debug mode + if: runner.debug + run: | + docker compose -f ../docker-compose.yml logs + + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: playwright-report-${{ matrix.browser }} + path: playwright-report/ + retention-days: 30 diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml deleted file mode 100644 index 512daf116147..000000000000 --- a/.github/workflows/pr-label.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: "Pull Request auto-label" - -on: - # So that PRs touching the same files as the push are updated - push: - branches: [ master, development, release-* ] - paths-ignore: - - 'autogpts/autogpt/tests/vcr_cassettes' - - 'benchmark/reports/**' - # So that the `dirtyLabel` is removed if conflicts are resolve - # We recommend `pull_request_target` so that github secrets are available. - # In `pull_request` we wouldn't be able to change labels of fork PRs - pull_request_target: - types: [ opened, synchronize ] - -concurrency: - group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} - cancel-in-progress: true - -jobs: - conflicts: - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: write - steps: - - name: Update PRs with conflict labels - uses: eps1lon/actions-label-merge-conflict@releases/2.x - with: - dirtyLabel: "conflicts" - #removeOnDirtyLabel: "PR: ready to ship" - repoToken: "${{ secrets.GITHUB_TOKEN }}" - commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." - commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." - - size: - if: ${{ github.event_name == 'pull_request_target' }} - permissions: - issues: write - pull-requests: write - runs-on: ubuntu-latest - steps: - - uses: codelytv/pr-size-labeler@v1 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - xs_label: 'size/xs' - xs_max_size: 2 - s_label: 'size/s' - s_max_size: 10 - m_label: 'size/m' - m_max_size: 100 - l_label: 'size/l' - l_max_size: 500 - xl_label: 'size/xl' - message_if_xl: > - This PR exceeds the recommended size of 500 lines. - Please make sure you are NOT addressing multiple issues with one PR. diff --git a/.github/workflows/repo-close-stale-issues.yml b/.github/workflows/repo-close-stale-issues.yml new file mode 100644 index 000000000000..8174c9dbe44e --- /dev/null +++ b/.github/workflows/repo-close-stale-issues.yml @@ -0,0 +1,34 @@ +name: Repo - Close stale issues +on: + schedule: + - cron: '30 1 * * *' + workflow_dispatch: + +permissions: + issues: write + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + # operations-per-run: 5000 + stale-issue-message: > + This issue has automatically been marked as _stale_ because it has not had + any activity in the last 50 days. You can _unstale_ it by commenting or + removing the label. Otherwise, this issue will be closed in 10 days. + stale-pr-message: > + This pull request has automatically been marked as _stale_ because it has + not had any activity in the last 50 days. You can _unstale_ it by commenting + or removing the label. + close-issue-message: > + This issue was closed automatically because it has been stale for 10 days + with no activity. 
+ days-before-stale: 100 + days-before-close: 10 + # Do not touch meta issues: + exempt-issue-labels: meta,fridge,project management + # Do not affect pull requests: + days-before-pr-stale: -1 + days-before-pr-close: -1 diff --git a/.github/workflows/repo-pr-enforce-base-branch.yml b/.github/workflows/repo-pr-enforce-base-branch.yml new file mode 100644 index 000000000000..3d4bd9096a52 --- /dev/null +++ b/.github/workflows/repo-pr-enforce-base-branch.yml @@ -0,0 +1,21 @@ +name: Repo - Enforce dev as base branch +on: + pull_request_target: + branches: [ master ] + types: [ opened ] + +jobs: + check_pr_target: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Check if PR is from dev or hotfix + if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }} + run: | + gh pr comment ${{ github.event.number }} --repo "$REPO" \ + --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.' + gh pr edit ${{ github.event.number }} --base dev --repo "$REPO" + env: + GITHUB_TOKEN: ${{ github.token }} + REPO: ${{ github.repository }} diff --git a/.github/workflows/repo-pr-label.yml b/.github/workflows/repo-pr-label.yml new file mode 100644 index 000000000000..eef928ef1620 --- /dev/null +++ b/.github/workflows/repo-pr-label.yml @@ -0,0 +1,66 @@ +name: Repo - Pull Request auto-label + +on: + # So that PRs touching the same files as the push are updated + push: + branches: [ master, dev, release-* ] + paths-ignore: + - 'classic/forge/tests/vcr_cassettes' + - 'classic/benchmark/reports/**' + # So that the `dirtyLabel` is removed if conflicts are resolve + # We recommend `pull_request_target` so that github secrets are available. + # In `pull_request` we wouldn't be able to change labels of fork PRs + pull_request_target: + types: [ opened, synchronize ] + +concurrency: + group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }} + cancel-in-progress: true + +jobs: + conflicts: + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Update PRs with conflict labels + uses: eps1lon/actions-label-merge-conflict@releases/2.x + with: + dirtyLabel: "conflicts" + #removeOnDirtyLabel: "PR: ready to ship" + repoToken: "${{ secrets.GITHUB_TOKEN }}" + commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request." + commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly." 
+ + size: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: codelytv/pr-size-labeler@v1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + xs_label: 'size/xs' + xs_max_size: 2 + s_label: 'size/s' + s_max_size: 10 + m_label: 'size/m' + m_max_size: 100 + l_label: 'size/l' + l_max_size: 500 + xl_label: 'size/xl' + message_if_xl: + + scope: + if: ${{ github.event_name == 'pull_request_target' }} + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + with: + sync-labels: true diff --git a/.github/workflows/repo-stats.yml b/.github/workflows/repo-stats.yml index e41885f5ddcf..15419688da3a 100644 --- a/.github/workflows/repo-stats.yml +++ b/.github/workflows/repo-stats.yml @@ -1,4 +1,4 @@ -name: github-repo-stats +name: Repo - Github Stats on: schedule: diff --git a/.github/workflows/repo-workflow-checker.yml b/.github/workflows/repo-workflow-checker.yml new file mode 100644 index 000000000000..35536ba92215 --- /dev/null +++ b/.github/workflows/repo-workflow-checker.yml @@ -0,0 +1,32 @@ +name: Repo - PR Status Checker +on: + pull_request: + types: [opened, synchronize, reopened] + merge_group: + +jobs: + status-check: + name: Check PR Status + runs-on: ubuntu-latest + steps: + # - name: Wait some time for all actions to start + # run: sleep 30 + - uses: actions/checkout@v4 + # with: + # fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + - name: Check PR Status + run: | + echo "Current directory before running Python script:" + pwd + echo "Attempting to run Python script:" + python .github/workflows/scripts/check_actions_status.py + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/scripts/check_actions_status.py b/.github/workflows/scripts/check_actions_status.py new file mode 100644 index 000000000000..37f83da45271 --- /dev/null +++ b/.github/workflows/scripts/check_actions_status.py @@ -0,0 +1,116 @@ +import json +import os +import requests +import sys +import time +from typing import Dict, List, Tuple + +CHECK_INTERVAL = 30 + + +def get_environment_variables() -> Tuple[str, str, str, str, str]: + """Retrieve and return necessary environment variables.""" + try: + with open(os.environ["GITHUB_EVENT_PATH"]) as f: + event = json.load(f) + + # Handle both PR and merge group events + if "pull_request" in event: + sha = event["pull_request"]["head"]["sha"] + else: + sha = os.environ["GITHUB_SHA"] + + return ( + os.environ["GITHUB_API_URL"], + os.environ["GITHUB_REPOSITORY"], + sha, + os.environ["GITHUB_TOKEN"], + os.environ["GITHUB_RUN_ID"], + ) + except KeyError as e: + print(f"Error: Missing required environment variable or event data: {e}") + sys.exit(1) + + +def make_api_request(url: str, headers: Dict[str, str]) -> Dict: + """Make an API request and return the JSON response.""" + try: + print("Making API request to:", url) + response = requests.get(url, headers=headers, timeout=10) + response.raise_for_status() + return response.json() + except requests.RequestException as e: + print(f"Error: API request failed. 
{e}") + sys.exit(1) + + +def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]: + """Process check runs and return their status.""" + runs_in_progress = False + all_others_passed = True + + for run in check_runs: + if str(run["name"]) != "Check PR Status": + status = run["status"] + conclusion = run["conclusion"] + + if status == "completed": + if conclusion not in ["success", "skipped", "neutral"]: + all_others_passed = False + print( + f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}" + ) + else: + runs_in_progress = True + print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.") + all_others_passed = False + else: + print( + f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run." + ) + + return runs_in_progress, all_others_passed + + +def main(): + api_url, repo, sha, github_token, current_run_id = get_environment_variables() + + endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs" + headers = { + "Accept": "application/vnd.github.v3+json", + } + if github_token: + headers["Authorization"] = f"token {github_token}" + + print(f"Current run ID: {current_run_id}") + + while True: + data = make_api_request(endpoint, headers) + + check_runs = data["check_runs"] + + print("Processing check runs...") + + print(check_runs) + + runs_in_progress, all_others_passed = process_check_runs(check_runs) + + if not runs_in_progress: + break + + print( + "Some check runs are still in progress. " + f"Waiting {CHECK_INTERVAL} seconds before checking again..." + ) + time.sleep(CHECK_INTERVAL) + + if all_others_passed: + print("All other completed check runs have passed. This check passes.") + sys.exit(0) + else: + print("Some check runs have failed or have not completed. This check fails.") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/.gitignore b/.gitignore index dcfbc4df72f7..d00ab276ce1f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,11 @@ ## Original ignores .github_access_token -autogpt/keys.py -autogpt/*.json +classic/original_autogpt/keys.py +classic/original_autogpt/*.json auto_gpt_workspace/* *.mpeg .env azure.yaml -ai_settings.yaml -last_run_ai_settings.yaml .vscode .idea/* auto-gpt.json @@ -34,7 +32,6 @@ dist/ downloads/ eggs/ .eggs/ -lib/ lib64/ parts/ sdist/ @@ -160,11 +157,11 @@ openai/ CURRENT_BULLETIN.md # AgBenchmark -agbenchmark/reports/ +classic/benchmark/agbenchmark/reports/ # Nodejs package-lock.json -package.json + # Allow for locally private items # private @@ -172,4 +169,10 @@ pri* # ignore ig* .github_access_token -arena/TestAgent.json \ No newline at end of file +LICENSE.rtf +autogpt_platform/backend/settings.py +/.auth +/autogpt_platform/frontend/.auth + +*.ign.* +.test-contents diff --git a/.gitmodules b/.gitmodules index c7d5712200ea..4db81f42c094 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ -[submodule "autogpts/autogpt/tests/vcr_cassettes"] - path = autogpts/autogpt/tests/vcr_cassettes +[submodule "classic/forge/tests/vcr_cassettes"] + path = classic/forge/tests/vcr_cassettes url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes +[submodule "autogpt_platform/supabase"] + path = autogpt_platform/supabase + url = https://github.com/supabase/supabase.git diff --git a/.pr_agent.toml b/.pr_agent.toml new file mode 100644 index 000000000000..9c059d1b5bce --- /dev/null +++ b/.pr_agent.toml @@ -0,0 +1,6 @@ +[pr_reviewer] +num_code_suggestions=0 + +[pr_code_suggestions] +commitable_code_suggestions=false +num_code_suggestions=0 diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000..293a9e2d43c2 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,258 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + args: ["--maxkb=500"] + - id: fix-byte-order-marker + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: debug-statements + + - repo: https://github.com/Yelp/detect-secrets + rev: v1.5.0 + hooks: + - id: detect-secrets + name: Detect secrets + description: Detects high entropy strings that are likely to be passwords. + files: ^autogpt_platform/ + stages: [push] + + - repo: local + # For proper type checking, all dependencies need to be up-to-date. + # It's also a good idea to check that poetry.lock is consistent with pyproject.toml. + hooks: + - id: poetry-install + name: Check & Install dependencies - AutoGPT Platform - Backend + alias: poetry-install-platform-backend + entry: poetry -C autogpt_platform/backend install + # include autogpt_libs source (since it's a path dependency) + files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$ + types: [file] + language: system + pass_filenames: false + + - id: poetry-install + name: Check & Install dependencies - AutoGPT Platform - Libs + alias: poetry-install-platform-libs + entry: poetry -C autogpt_platform/autogpt_libs install + files: ^autogpt_platform/autogpt_libs/poetry\.lock$ + types: [file] + language: system + pass_filenames: false + + - id: poetry-install + name: Check & Install dependencies - Classic - AutoGPT + alias: poetry-install-classic-autogpt + entry: poetry -C classic/original_autogpt install + # include forge source (since it's a path dependency) + files: ^classic/(original_autogpt|forge)/poetry\.lock$ + types: [file] + language: system + pass_filenames: false + + - id: poetry-install + name: Check & Install dependencies - Classic - Forge + alias: poetry-install-classic-forge + entry: poetry -C classic/forge install + files: ^classic/forge/poetry\.lock$ + types: [file] + language: system + pass_filenames: false + + - id: poetry-install + name: Check & Install dependencies - Classic - Benchmark + alias: poetry-install-classic-benchmark + entry: poetry -C classic/benchmark install + files: ^classic/benchmark/poetry\.lock$ + types: [file] + language: system + pass_filenames: false + + - repo: local + # For proper type checking, Prisma client must be up-to-date. 
+ hooks: + - id: prisma-generate + name: Prisma Generate - AutoGPT Platform - Backend + alias: prisma-generate-platform-backend + entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate' + # include everything that triggers poetry install + the prisma schema + files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$ + types: [file] + language: system + pass_filenames: false + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.7.2 + hooks: + - id: ruff + name: Lint (Ruff) - AutoGPT Platform - Backend + alias: ruff-lint-platform-backend + files: ^autogpt_platform/backend/ + args: [--fix] + + - id: ruff + name: Lint (Ruff) - AutoGPT Platform - Libs + alias: ruff-lint-platform-libs + files: ^autogpt_platform/autogpt_libs/ + args: [--fix] + + - id: ruff-format + name: Format (Ruff) - AutoGPT Platform - Libs + alias: ruff-lint-platform-libs + files: ^autogpt_platform/autogpt_libs/ + + - repo: local + # isort needs the context of which packages are installed to function, so we + # can't use a vendored isort pre-commit hook (which runs in its own isolated venv). + hooks: + - id: isort + name: Lint (isort) - AutoGPT Platform - Backend + alias: isort-platform-backend + entry: poetry -P autogpt_platform/backend run isort -p backend + files: ^autogpt_platform/backend/ + types: [file, python] + language: system + + - id: isort + name: Lint (isort) - Classic - AutoGPT + alias: isort-classic-autogpt + entry: poetry -P classic/original_autogpt run isort -p autogpt + files: ^classic/original_autogpt/ + types: [file, python] + language: system + + - id: isort + name: Lint (isort) - Classic - Forge + alias: isort-classic-forge + entry: poetry -P classic/forge run isort -p forge + files: ^classic/forge/ + types: [file, python] + language: system + + - id: isort + name: Lint (isort) - Classic - Benchmark + alias: isort-classic-benchmark + entry: poetry -P classic/benchmark run isort -p agbenchmark + files: ^classic/benchmark/ + types: [file, python] + language: system + + - repo: https://github.com/psf/black + rev: 23.12.1 + # Black has sensible defaults, doesn't need package context, and ignores + # everything in .gitignore, so it works fine without any config or arguments. + hooks: + - id: black + name: Format (Black) + + - repo: https://github.com/PyCQA/flake8 + rev: 7.0.0 + # To have flake8 load the config of the individual subprojects, we have to call + # them separately. + hooks: + - id: flake8 + name: Lint (Flake8) - Classic - AutoGPT + alias: flake8-classic-autogpt + files: ^classic/original_autogpt/(autogpt|scripts|tests)/ + args: [--config=classic/original_autogpt/.flake8] + + - id: flake8 + name: Lint (Flake8) - Classic - Forge + alias: flake8-classic-forge + files: ^classic/forge/(forge|tests)/ + args: [--config=classic/forge/.flake8] + + - id: flake8 + name: Lint (Flake8) - Classic - Benchmark + alias: flake8-classic-benchmark + files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.] + args: [--config=classic/benchmark/.flake8] + + - repo: local + # To have watertight type checking, we check *all* the files in an affected + # project. To trigger on poetry.lock we also reset the file `types` filter. 
+ hooks: + - id: pyright + name: Typecheck - AutoGPT Platform - Backend + alias: pyright-platform-backend + entry: poetry -C autogpt_platform/backend run pyright + # include forge source (since it's a path dependency) but exclude *_test.py files: + files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(? + +[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing +[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki +[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971 +[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1 + +## Contributing to the AutoGPT Platform Folder +All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license. ## In short 1. Avoid duplicate work, issues, PRs etc. -2. Also consider contributing something other than code; see the [contribution guide] - for options. -3. Create a draft PR before starting work on non-small changes. Also post your proposal - in the [dev channel]. -4. Clearly explain your changes when submitting a PR. -5. Don't submit stuff that's broken. -6. Avoid making unnecessary changes, especially if they're purely based on your personal - preferences. Doing so is the maintainers' job. +2. We encourage you to collaborate with fellow community members on some of our bigger + [todo's][roadmap]! + * We highly recommend to post your idea and discuss it in the [dev channel]. +3. Create a draft PR when starting work on bigger changes. +4. Adhere to the [Code Guidelines] +5. Clearly explain your changes when submitting a PR. +6. Don't submit broken code: test/validate your changes. +7. Avoid making unnecessary changes, especially if they're purely based on your personal + preferences. Doing so is the maintainers' job. ;-) +8. Please also consider contributing something other than code; see the + [contribution guide] for options. [dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305 - -## Why instructions like these are necessary -We would like to say "We value all contributions". After all, we are an open-source -project, so we should welcome any input that people are willing to give, right? - -Well, the reality is that some contributions are SUPER-valuable, while others create -more trouble than they are worth and actually _create_ work for the core team. So to -ensure maximum chances of a smooth ride, please stick to the guidelines. +[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines If you wish to involve with the project (beyond just contributing PRs), please read the -wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page. +wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing). In fact, why not just look through the whole wiki (it's only a few pages) and hop on our Discord. See you there! 
:-) diff --git a/LICENSE b/LICENSE index 601935b85ec8..52c6e9a8d528 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,13 @@ +All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the +Polyform Shield License. + + MIT License + Copyright (c) 2023 Toran Bruce Richards + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights @@ -9,9 +15,11 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE diff --git a/QUICKSTART.md b/QUICKSTART.md deleted file mode 100644 index 5eab892e348e..000000000000 --- a/QUICKSTART.md +++ /dev/null @@ -1,190 +0,0 @@ -# Quickstart Guide - -> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here - -Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT. - -## System Requirements - -This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/). - - -## Getting Setup -1. **Fork the Repository** - To fork the repository, follow these steps: - - Navigate to the main page of the repository. - - ![Repository](docs/content/imgs/quickstart/001_repo.png) - - In the top-right corner of the page, click Fork. - - ![Create Fork UI](docs/content/imgs/quickstart/002_fork.png) - - On the next page, select your GitHub account to create the fork under. - - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account. - -2. **Clone the Repository** - To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps: - - Open your terminal. - - Navigate to the directory where you want to clone the repository. - - Run the git clone command for the fork you just created - - ![Clone the Repository](docs/content/imgs/quickstart/003_clone.png) - - - Then open your project in your ide - - ![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png) - -4. **Setup the Project** - Next we need to setup the required dependencies. We have a tool for helping you do all the tasks you need to on the repo. - It can be accessed by running the `run` command by typing `./run` in the terminal. - - The first command you need to use is `./run setup` This will guide you through the process of setting up your system. 
- Initially you will get instructions for installing flutter, chrome and setting up your github access token like the following image: - - > Note: for advanced users. The github access token is only needed for the ./run arena enter command so the system can automatically create a PR - - - ![Setup the Project](docs/content/imgs/quickstart/005_setup.png) - -### For Windows Users - -If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them. - -#### Update WSL -Run the following command in Powershell or Command Prompt to: -1. Enable the optional WSL and Virtual Machine Platform components. -2. Download and install the latest Linux kernel. -3. Set WSL 2 as the default. -4. Download and install the Ubuntu Linux distribution (a reboot may be required). - -```shell -wsl --install -``` - -For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment). - -#### Resolve FileNotFoundError or "No such file or directory" Errors -When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed). - -To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script: - -```shell -sudo apt update -sudo apt install dos2unix -dos2unix ./run -``` - -After executing the above commands, running `./run setup` should work successfully. - -#### Store Project Files within the WSL File System -If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment. - - You can keep running the command to get feedback on where you are up to with your setup. - When setup has been completed, the command will return an output like this: - - ![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png) - -## Creating Your Agent - - Now setup has been completed its time to create your agent template. - Do so by running the `./run agent create YOUR_AGENT_NAME` replacing YOUR_AGENT_NAME with a name of your choice. Examples of valid names: swiftyosgpt or SwiftyosAgent or swiftyos_agent - - ![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png) - - Upon creating your agent its time to officially enter the Arena! - Do so by running `./run arena enter YOUR_AGENT_NAME` - - ![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png) - - > Note: for advanced users, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format. - ```json - { - "github_repo_url": "https://github.com/Swiftyos/YourAgentName", - "timestamp": "2023-09-18T10:03:38.051498", - "commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e", - "branch_to_benchmark": "master" - } - ``` - - github_repo_url: the url to your fork - - timestamp: timestamp of the last update of this file - - commit_hash_to_benchmark: the commit hash of your entry. 
You update each time you have an something ready to be officially entered into the hackathon - - branch_to_benchmark: the branch you are using to develop your agent on, default is master. - - -## Running your Agent - -Your agent can started using the `./run agent start YOUR_AGENT_NAME` - -This start the agent on `http://localhost:8000/` - -![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png) - -The frontend can be accessed from `http://localhost:8000/`, you will first need to login using either a google account or your github account. - -![Login](docs/content/imgs/quickstart/010_login.png) - -Upon logging in you will get a page that looks something like this. With your task history down the left hand side of the page and the 'chat' window to send tasks to your agent. - -![Login](docs/content/imgs/quickstart/011_home.png) - -When you have finished with your agent, or if you just need to restart it, use Ctl-C to end the session then you can re-run the start command. - -If you are having issues and want to ensure the agent has been stopped there is a `./run agent stop` command which will kill the process using port 8000, which should be the agent. - -## Benchmarking your Agent - -The benchmarking system can also be accessed using the cli too: - -```bash -agpt % ./run benchmark -Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]... - - Commands to start the benchmark and list tests and categories - -Options: - --help Show this message and exit. - -Commands: - categories Benchmark categories group command - start Starts the benchmark command - tests Benchmark tests group command -agpt % ./run benchmark categories -Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]... - - Benchmark categories group command - -Options: - --help Show this message and exit. - -Commands: - list List benchmark categories command -agpt % ./run benchmark tests -Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]... - - Benchmark tests group command - -Options: - --help Show this message and exit. - -Commands: - details Benchmark test details command - list List benchmark tests command -``` - -The benchmark has been split into different categories of skills you can test your agent on. 
You can see what categories are available with -```bash -./run benchmark categories list -# And what tests are available with -./run benchmark tests list -``` - -![Login](docs/content/imgs/quickstart/012_tests.png) - - -Finally you can run the benchmark with - -```bash -./run benchmark start YOUR_AGENT_NAME - -``` - -> diff --git a/README.md b/README.md index 62863afd11f5..9cbaefdb2515 100644 --- a/README.md +++ b/README.md @@ -1,78 +1,178 @@ -# 🌟 AutoGPT: the heart of the open-source agent ecosystem +# AutoGPT: Build, Deploy, and Run AI Agents -[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) [![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/AutoGPT?style=social)](https://github.com/Significant-Gravitas/AutoGPT/stargazers) [![Twitter Follow](https://img.shields.io/twitter/follow/auto_gpt?style=social)](https://twitter.com/Auto_GPT) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)   +[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT)   +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) -**AutoGPT** is your go-to toolkit for supercharging agents. With its modular and extensible framework, you're empowered to focus on: +**AutoGPT** is a powerful platform that allows you to create, deploy, and manage continuous AI agents that automate complex workflows. + +## Hosting Options + - Download to self-host + - [Join the Waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta + +## How to Setup for Self-Hosting +> [!NOTE] +> Setting up and hosting the AutoGPT Platform yourself is a technical process. +> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta. + +https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603 + +This tutorial assumes you have Docker, VSCode, git and npm installed. + +### 🧱 AutoGPT Frontend + +The AutoGPT frontend is where users interact with our powerful AI automation platform. It offers multiple ways to engage with and leverage our AI agents. This is the interface where you'll bring your AI automation ideas to life: + + **Agent Builder:** For those who want to customize, our intuitive, low-code interface allows you to design and configure your own AI agents. + + **Workflow Management:** Build, modify, and optimize your automation workflows with ease. You build your agent by connecting blocks, where each block performs a single action. + + **Deployment Controls:** Manage the lifecycle of your agents, from testing to production. + + **Ready-to-Use Agents:** Don't want to build? Simply select from our library of pre-configured agents and put them to work immediately. + + **Agent Interaction:** Whether you've built your own or are using pre-configured agents, easily run and interact with them through our user-friendly interface. + + **Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes. + +[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks. + +### 💽 AutoGPT Server + +The AutoGPT Server is the powerhouse of our platform This is where your agents run. 
Once deployed, agents can be triggered by external sources and can operate continuously. It contains all the essential components that make AutoGPT run smoothly. + + **Source Code:** The core logic that drives our agents and automation processes. + + **Infrastructure:** Robust systems that ensure reliable and scalable performance. + + **Marketplace:** A comprehensive marketplace where you can find and deploy a wide range of pre-built agents. + +### 🐙 Example Agents + +Here are two examples of what you can do with AutoGPT: + +1. **Generate Viral Videos from Trending Topics** + - This agent reads topics on Reddit. + - It identifies trending topics. + - It then automatically creates a short-form video based on the content. + +2. **Identify Top Quotes from Videos for Social Media** + - This agent subscribes to your YouTube channel. + - When you post a new video, it transcribes it. + - It uses AI to identify the most impactful quotes to generate a summary. + - Then, it writes a post to automatically publish to your social media. + +These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case. + +--- +### Mission and Licensing +Our mission is to provide the tools so that you can focus on what matters: - 🏗️ **Building** - Lay the foundation for something amazing. - 🧪 **Testing** - Fine-tune your agent to perfection. -- 👀 **Viewing** - See your progress come to life. +- 🤝 **Delegating** - Let AI work for you, and have your ideas come to life. -Be part of the revolution! **AutoGPT** stays at the forefront of AI innovation, featuring the codebase for the reigning champion in the Open-Source ecosystem. +Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI innovation. ---- +**📖 [Documentation](https://docs.agpt.co)** + |  +**🚀 [Contributing](CONTRIBUTING.md)** -

-[banner image: AutoGPT Arena Hacks Hackathon]
-We're hosting a Hackathon!
-Click the banner above for details and registration!

+**Licensing:** + +MIT License: The majority of the AutoGPT repository is under the MIT License. + +Polyform Shield License: This license applies to the autogpt_platform folder. + +For more information, see https://agpt.co/blog/introducing-the-autogpt-platform --- +## 🤖 AutoGPT Classic +> Below is information about the classic version of AutoGPT. -## 🥇 Current Best Agent: AutoGPT +**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)** -Among our currently benchmarked agents, AutoGPT scores the best. This will change after the hackathon - the top-performing generalist agent will earn the esteemed position as the primary AutoGPT 🎊 +### 🏗️ Forge -📈 To enter, submit your benchmark run through the UI. +**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project. -## 🌟 Quickstart +🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) – +This guide will walk you through the process of creating your own agent and using the benchmark and user interface. -- **To build your own agent** and to be eligible for the hackathon, follow the quickstart guide [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/forge/tutorials/001_getting_started.md). This will guide you through the process of creating your own agent and using the benchmark and user interface. +📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge -- **To activate the best agent** follow the guide [here](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/README.md). +### 🎯 Benchmark -Want to build your own groundbreaking agent using AutoGPT? 🛠️ There are three major components to focus on: +**Measure your agent's performance!** The `agbenchmark` can be used with any agent that supports the agent protocol, and the integration with the project's [CLI] makes it even easier to use with AutoGPT and forge-based agents. The benchmark offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action. -### 🏗️ the Forge + -**Forge your future!** The `forge` is your innovation lab. All the boilerplate code is already handled, letting you channel all your creativity into building a revolutionary agent. It's more than a starting point, it's a launchpad for your ideas. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). +📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi + |  +📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark -📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/forge) +### 💻 UI -### 🎯 the Benchmark +**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem. 
-**Test to impress!** The `benchmark` offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action. + -📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) +The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice! -### 🎮 the UI +📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend -**Take Control!** The `frontend` is your personal command center. It gives you a user-friendly interface to control and monitor your agents, making it easier to bring your ideas to life. +### ⌨️ CLI -📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) +[CLI]: #-cli ---- +To make it as easy as possible to use all of the tools offered by the repository, a CLI is included at the root of the repo: -### 🔄 Agent Protocol +```shell +$ ./run +Usage: cli.py [OPTIONS] COMMAND [ARGS]... + +Options: + --help Show this message and exit. + +Commands: + agent Commands to create, start and stop agents + benchmark Commands to start the benchmark and list tests and categories + setup Installs dependencies needed for your system. +``` -🔌 **Standardize to Maximize!** To maintain a uniform standard and ensure seamless compatibility, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) from the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark. +Just clone the repo, install dependencies with `./run setup`, and you should be good to go! -### 🤔 Questions? Problems? Suggestions? +## 🤔 Questions? Problems? Suggestions? -#### Get help - [Discord 💬](https://discord.gg/autogpt) +### Get help - [Discord 💬](https://discord.gg/autogpt) [![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt) To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic. +## 🤝 Sister projects + +### 🔄 Agent Protocol + +To maintain a uniform standard and ensure seamless compatibility with many current and future applications, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) standard by the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark. + +--- + +## Stars stats +

-[Star History Chart embed (old)]
+[Star History Chart embed (updated)]
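For reference, the classic tooling described above is all driven through the repo-root `./run` script. The sketch below simply strings together the commands quoted in this README and in the removed QUICKSTART into one typical session; it is illustrative only, and the agent name `my_agent` is a placeholder rather than anything taken from the diff.

```shell
# Illustrative session with the classic ./run CLI; commands as quoted in this
# README and the removed QUICKSTART. "my_agent" is a placeholder agent name.
git clone https://github.com/Significant-Gravitas/AutoGPT.git
cd AutoGPT
./run setup                       # install the dependencies needed for your system
./run agent create my_agent       # scaffold a new agent from the forge template
./run agent start my_agent        # serve the agent and frontend on http://localhost:8000/
./run benchmark categories list   # list the benchmark skill categories
./run benchmark tests list        # list the individual benchmark tests
./run benchmark start my_agent    # run the benchmark against the agent
./run agent stop                  # kill the process using port 8000
```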

+ + +## ⚡ Contributors + + + Contributors + diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000000..1bacc8ef83d2 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,47 @@ +# Security Policy + +## Reporting Security Issues + +We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.** + +> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code. + +Instead, please report them via: +- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new) + + +### Reporting Process +1. **Submit Report**: Use one of the above channels to submit your report +2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days. +3. **Collaboration**: We will collaborate with you to understand and validate the issue +4. **Resolution**: We will work on a fix and coordinate the release process + +### Disclosure Policy +- Please provide detailed reports with reproducible steps +- Include the version/commit hash where you discovered the vulnerability +- Allow us a 90-day security fix window before any public disclosure +- Share any potential mitigations or workarounds if known + +## Supported Versions +Only the following versions are eligible for security updates: + +| Version | Supported | +|---------|-----------| +| Latest release on master branch | ✅ | +| Development commits (pre-master) | ✅ | +| Classic folder (deprecated) | ❌ | +| All other versions | ❌ | + +## Security Best Practices +When using this project: +1. Always use the latest stable version +2. Review security advisories before updating +3. Follow our security documentation and guidelines +4. Keep your dependencies up to date +5. Do not use code from the `classic/` folder as it is deprecated and unsupported + +## Past Security Advisories +For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt). 
+ +--- +Last updated: November 2024 diff --git a/ai_settings.yaml b/ai_settings.yaml deleted file mode 100644 index 6f5fe4aa296b..000000000000 --- a/ai_settings.yaml +++ /dev/null @@ -1,7 +0,0 @@ -ai_goals: -- Give a report of recent activities of linkedin competitors -- The report should be brief, contains only important messages -- If applicable, provide numbers and links -- Give conclusion and suggestions to CEO -ai_name: CEO Office -ai_role: an AI designed to study the competitors of Linkedin company diff --git a/arena/480bot.json b/arena/480bot.json deleted file mode 100644 index 819ac9cc9847..000000000000 --- a/arena/480bot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/480/AutoGPT", - "timestamp": "2023-10-22T06:49:52.536177", - "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AGENT_GORDON.json b/arena/AGENT_GORDON.json deleted file mode 100644 index 98784273f92c..000000000000 --- a/arena/AGENT_GORDON.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/filipjakubowski/AutoGPT", - "timestamp": "2023-11-01T17:13:24.272333", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AGENT_JARVIS.json b/arena/AGENT_JARVIS.json deleted file mode 100644 index ac284f6aa1c6..000000000000 --- a/arena/AGENT_JARVIS.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/filipjakubowski/AutoGPT", - "timestamp": "2023-11-04T10:13:11.039444", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AI.json b/arena/AI.json deleted file mode 100644 index a6b27fdb1157..000000000000 --- a/arena/AI.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/QingquanBao/AutoGPT", - "timestamp": "2023-11-01T16:20:51.086235", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AKBAgent.json b/arena/AKBAgent.json deleted file mode 100644 index 71e8cea91b0b..000000000000 --- a/arena/AKBAgent.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "github_repo_url": "https://github.com/imakb/AKBAgent", - "timestamp": "2023-10-31T00:03:23.000000", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "AKBAgent" -} - diff --git a/arena/ASSISTANT.json b/arena/ASSISTANT.json deleted file mode 100644 index bd0c0f055f88..000000000000 --- a/arena/ASSISTANT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/hongzzz/AutoGPT", - "timestamp": "2023-10-13T03:22:59.347424", - "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AUTO_ENGINEER.json b/arena/AUTO_ENGINEER.json deleted file mode 100644 index 5f8e28c973cf..000000000000 --- a/arena/AUTO_ENGINEER.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT", - "timestamp": "2023-10-04T15:25:30.458687", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AUTO_GPT_JON001.json b/arena/AUTO_GPT_JON001.json deleted file mode 100644 index 
f36fad390296..000000000000 --- a/arena/AUTO_GPT_JON001.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial", - "timestamp": "2023-10-10T06:01:23.439061", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Adtractive_Agent.json b/arena/Adtractive_Agent.json deleted file mode 100644 index ebec6e6ad4a7..000000000000 --- a/arena/Adtractive_Agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT", - "timestamp": "2023-10-17T13:16:16.327237", - "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AgGPT.json b/arena/AgGPT.json deleted file mode 100644 index 07751b8ecac6..000000000000 --- a/arena/AgGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/althaf004/AutoGPT", - "timestamp": "2023-09-26T03:40:03.658369", - "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AgentJPark.json b/arena/AgentJPark.json deleted file mode 100644 index 636e4d1f79c3..000000000000 --- a/arena/AgentJPark.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/againeureka/AutoGPT", - "timestamp": "2023-10-12T02:20:01.005361", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AgentKD.json b/arena/AgentKD.json deleted file mode 100644 index 1aa340eac8e5..000000000000 --- a/arena/AgentKD.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kitdesai/AgentKD", - "timestamp": "2023-10-14T02:35:09.979434", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Ahmad.json b/arena/Ahmad.json deleted file mode 100644 index 2b5b86f12481..000000000000 --- a/arena/Ahmad.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JawadAbu/AutoGPT.git", - "timestamp": "2023-11-05T12:35:35.352028", - "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Alfred.json b/arena/Alfred.json deleted file mode 100644 index be510f1fd414..000000000000 --- a/arena/Alfred.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Shadowless422/Alfred", - "timestamp": "2023-10-03T10:42:45.473477", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AlphaCISO.json b/arena/AlphaCISO.json deleted file mode 100644 index 06791274b135..000000000000 --- a/arena/AlphaCISO.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/alphaciso/AutoGPT", - "timestamp": "2023-10-21T08:26:41.961187", - "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AndersLensway.json b/arena/AndersLensway.json deleted file mode 100644 index 6bbf68fdf90e..000000000000 --- a/arena/AndersLensway.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/4nd3rs/AutoGPT", - "timestamp": 
"2023-10-11T11:00:08.150159", - "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AntlerTestGPT.json b/arena/AntlerTestGPT.json deleted file mode 100644 index 9df76d4a8e16..000000000000 --- a/arena/AntlerTestGPT.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/AppleGPT.json b/arena/AppleGPT.json deleted file mode 100644 index 7fe3a7beeea9..000000000000 --- a/arena/AppleGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Nimit3-droid/AutoGPT", - "timestamp": "2023-10-03T11:59:15.495902", - "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AquaAgent.json b/arena/AquaAgent.json deleted file mode 100644 index 6deb549db137..000000000000 --- a/arena/AquaAgent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ArtistManagerGPT.json b/arena/ArtistManagerGPT.json deleted file mode 100644 index 881ed049b91f..000000000000 --- a/arena/ArtistManagerGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/AmahAjavon/AutoGPT", - "timestamp": "2023-10-28T20:32:15.845741", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AskOpie.json b/arena/AskOpie.json deleted file mode 100644 index a2f6bd3938a1..000000000000 --- a/arena/AskOpie.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/arunqa/AutoGPT", - "timestamp": "2023-09-26T05:13:24.466017", - "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Auto.json b/arena/Auto.json deleted file mode 100644 index 9bad9db50e9d..000000000000 --- a/arena/Auto.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Nikhil8652/AutoGPT", - "timestamp": "2023-10-16T09:12:17.452121", - "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AutoGPT-ariel.json b/arena/AutoGPT-ariel.json deleted file mode 100644 index cefa43620551..000000000000 --- a/arena/AutoGPT-ariel.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/RedTachyon/AutoGPT", - "timestamp": "2023-10-21T22:31:30.871023", - "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AutoGPT2.json b/arena/AutoGPT2.json deleted file mode 100644 index 11a71f66f04e..000000000000 --- a/arena/AutoGPT2.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/AutoGenius.json b/arena/AutoGenius.json 
deleted file mode 100644 index 3974b9dcc8eb..000000000000 --- a/arena/AutoGenius.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JasonDRZ/AutoGPT", - "timestamp": "2023-10-26T13:27:58.805270", - "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AutoTDD.json b/arena/AutoTDD.json deleted file mode 100644 index ea61ddd8261e..000000000000 --- a/arena/AutoTDD.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/vshneer/AutoTDD", - "timestamp": "2023-10-11T19:14:30.939747", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AutoTestGenerator.json b/arena/AutoTestGenerator.json deleted file mode 100644 index c28d6da87ad3..000000000000 --- a/arena/AutoTestGenerator.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/cagdasbas/AutoGPT", - "timestamp": "2023-10-15T08:43:40.193080", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/AwareAgent.json b/arena/AwareAgent.json deleted file mode 100644 index d4155dd67e9e..000000000000 --- a/arena/AwareAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent", - "timestamp": "2023-10-26T10:10:01.481205", - "commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7", - "branch_to_benchmark": "master" -} diff --git a/arena/Bagi_agent.json b/arena/Bagi_agent.json deleted file mode 100644 index 4251bb4246c4..000000000000 --- a/arena/Bagi_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git", - "timestamp": "2023-10-20T09:21:48.837635", - "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/BanglaSgAgent.json b/arena/BanglaSgAgent.json deleted file mode 100644 index 12014fe8d058..000000000000 --- a/arena/BanglaSgAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT", - "timestamp": "2023-09-27T15:32:24.056105", - "commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Baptiste.json b/arena/Baptiste.json deleted file mode 100644 index 691f62952f3d..000000000000 --- a/arena/Baptiste.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Baptistecaille/AutoGPT", - "timestamp": "2023-10-01T19:44:23.416591", - "commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Bravo06.json b/arena/Bravo06.json deleted file mode 100644 index 21ceec258b6d..000000000000 --- a/arena/Bravo06.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Brillante-AI.json b/arena/Brillante-AI.json deleted file mode 100644 index 3c81a02c0d30..000000000000 --- a/arena/Brillante-AI.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": 
"https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Bunny.json b/arena/Bunny.json deleted file mode 100644 index 33c2b0d1a82a..000000000000 --- a/arena/Bunny.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/razorhasbeen/AutoGPT", - "timestamp": "2023-10-03T11:50:56.725628", - "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CCAgent.json b/arena/CCAgent.json deleted file mode 100644 index 899172e343d1..000000000000 --- a/arena/CCAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ccsnow127/AutoGPT", - "timestamp": "2023-10-21T13:57:15.131761", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CES-GPT.json b/arena/CES-GPT.json deleted file mode 100644 index 016804e65938..000000000000 --- a/arena/CES-GPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ces-sonnguyen/CES-GPT", - "timestamp": "2023-10-30T07:45:07.337258", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CISLERK.json b/arena/CISLERK.json deleted file mode 100644 index 1370a0a2d30e..000000000000 --- a/arena/CISLERK.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/cislerk/AutoGPT", - "timestamp": "2023-10-10T18:40:50.718850", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CONNECTBOT.json b/arena/CONNECTBOT.json deleted file mode 100644 index b43e147a98b8..000000000000 --- a/arena/CONNECTBOT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/myncow/DocumentAgent.git", - "timestamp": "2023-10-31T21:21:28.951345", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CYNO_AGENT.json b/arena/CYNO_AGENT.json deleted file mode 100644 index 288802d5d7dc..000000000000 --- a/arena/CYNO_AGENT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dr1yl/AutoGPT", - "timestamp": "2023-10-09T20:01:05.041446", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ChadGPT.json b/arena/ChadGPT.json deleted file mode 100644 index 6a378b1d8f7d..000000000000 --- a/arena/ChadGPT.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/Ahmad-Alaziz/ChadGPT", "timestamp": "2023-10-26T09:39:35Z", "commit_hash_to_benchmark": "84dd029c011379791a6fec8b148b2982a2ef159e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ChrisGPT.json b/arena/ChrisGPT.json deleted file mode 100644 index 6ec46681e366..000000000000 --- a/arena/ChrisGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT", - "timestamp": "2023-10-31T17:55:41.458834", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git 
a/arena/CodeAutoGPT.json b/arena/CodeAutoGPT.json deleted file mode 100644 index 1780a4966ceb..000000000000 --- a/arena/CodeAutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/hugomastromauro/AutoGPT", - "timestamp": "2023-11-01T13:21:42.624202", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/CreaitorMarketing.json b/arena/CreaitorMarketing.json deleted file mode 100644 index 38ffca0f8304..000000000000 --- a/arena/CreaitorMarketing.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/simonfunk/Auto-GPT", "timestamp": "2023-10-08T02:10:18Z", "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/CurieAssistant.json b/arena/CurieAssistant.json deleted file mode 100644 index bdbd14c9c06d..000000000000 --- a/arena/CurieAssistant.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/HMDCrew/AutoGPT", - "timestamp": "2023-10-06T20:41:26.293944", - "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/DE.json b/arena/DE.json deleted file mode 100644 index fcea35c9d3f6..000000000000 --- a/arena/DE.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/wic0144/AutoGPT", - "timestamp": "2023-10-26T09:05:21.013962", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/DavidsAgent.json b/arena/DavidsAgent.json deleted file mode 100644 index f824fd14dc93..000000000000 --- a/arena/DavidsAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/beisdog/AutoGPT", - "timestamp": "2023-09-29T22:06:18.846082", - "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Derpmaster.json b/arena/Derpmaster.json deleted file mode 100644 index 6a4e159e5370..000000000000 --- a/arena/Derpmaster.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/schumacher-m/Derpmaster", - "timestamp": "2023-10-30T21:10:27.407732", - "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/DevOpsAgent.json b/arena/DevOpsAgent.json deleted file mode 100644 index 6f3384cd64d3..000000000000 --- a/arena/DevOpsAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/rahuldotar/AutoGPT", - "timestamp": "2023-10-02T11:34:29.870077", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Drench.json b/arena/Drench.json deleted file mode 100644 index 49417551e2af..000000000000 --- a/arena/Drench.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/MohamedBasueny/AutoGPT-Drench", - "timestamp": "2023-10-27T01:28:13.869318", - "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Eduardo.json b/arena/Eduardo.json deleted file mode 100644 index dfffd902d869..000000000000 --- a/arena/Eduardo.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - 
"github_repo_url": "https://github.com/MuriloEduardo/AutoGPT.git", - "timestamp": "2023-09-25T03:18:20.659056", - "commit_hash_to_benchmark": "ffa76c3a192c36827669335de4390262da5fd972", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/EmbeddedAg.json b/arena/EmbeddedAg.json deleted file mode 100644 index b26355e38e25..000000000000 --- a/arena/EmbeddedAg.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", "timestamp": "2023-10-26T09:15:50Z", "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/EnglishTestpaperAgent.json b/arena/EnglishTestpaperAgent.json deleted file mode 100644 index 7271eb0c9ca6..000000000000 --- a/arena/EnglishTestpaperAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kyannai/AutoGPT", - "timestamp": "2023-09-29T03:05:45.504690", - "commit_hash_to_benchmark": "1f367618edf903f38dff4dd064f96e611ffc5242", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ExampleAgent.json b/arena/ExampleAgent.json deleted file mode 100644 index 2fb8c44a3c11..000000000000 --- a/arena/ExampleAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/janekdijkstra/AutoGPT", - "timestamp": "2023-10-16T12:12:54.998033", - "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/FLASH.json b/arena/FLASH.json deleted file mode 100644 index 7cce9c10e3f7..000000000000 --- a/arena/FLASH.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/flashdumper/AutoGPT", - "timestamp": "2023-10-30T23:02:13.653861", - "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/FactoryGPT.json b/arena/FactoryGPT.json deleted file mode 100644 index e66434c3961d..000000000000 --- a/arena/FactoryGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/neilmartindev/FactoryGPT", - "timestamp": "2023-10-04T16:24:58.525870", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/FcsummerGPT.json b/arena/FcsummerGPT.json deleted file mode 100644 index 2f2eb88fa59f..000000000000 --- a/arena/FcsummerGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/fbk111/FcsummerGPT", - "timestamp": "2023-10-25T09:58:39.801277", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/FynAgent.json b/arena/FynAgent.json deleted file mode 100644 index 1f006e63ea9d..000000000000 --- a/arena/FynAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/tomkat-cr/AutoGPT.git", - "timestamp": "2023-10-18T09:41:21.282992", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/GG.json b/arena/GG.json deleted file mode 100644 index 78421b484996..000000000000 --- a/arena/GG.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/IgorCIs/AutoGPT", - "timestamp": "2023-09-27T14:01:20.964953", - "commit_hash_to_benchmark": 
"a14aadd91493886663232bfd23c0412609f2a2fc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/GPTTest.json b/arena/GPTTest.json deleted file mode 100644 index e2c1c0af37b2..000000000000 --- a/arena/GPTTest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/h3llix/GPTTest.git", - "timestamp": "2023-11-02T10:56:53.142288", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/GameSoundGPT.json b/arena/GameSoundGPT.json deleted file mode 100644 index 66fe962ab2a6..000000000000 --- a/arena/GameSoundGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mordvinov/AutoGPT", - "timestamp": "2023-10-13T14:48:02.852293", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/GeorgeGPT.json b/arena/GeorgeGPT.json deleted file mode 100644 index 83ce96df7385..000000000000 --- a/arena/GeorgeGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/norn93/GeorgeGPT", - "timestamp": "2023-10-17T14:38:41.051458", - "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Granger.json b/arena/Granger.json deleted file mode 100644 index 203e99c34433..000000000000 --- a/arena/Granger.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/balloch/AutoGPTProblemSolver", - "timestamp": "2023-09-29T15:11:44.876627", - "commit_hash_to_benchmark": "9fb6d5bbbd6928402a5718b8c249811c6f682a88", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/HACKATHON.json b/arena/HACKATHON.json deleted file mode 100644 index 7f29e7582d5d..000000000000 --- a/arena/HACKATHON.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/manuel-soria/AutoGPT", - "timestamp": "2023-10-07T16:55:38.741776", - "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/HMD2.json b/arena/HMD2.json deleted file mode 100644 index 5ef36bd18af0..000000000000 --- a/arena/HMD2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/HMDCrew/AutoGPT", - "timestamp": "2023-10-09T08:46:37.457740", - "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Heisenberg.json b/arena/Heisenberg.json deleted file mode 100644 index a77ce87d775c..000000000000 --- a/arena/Heisenberg.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/georgehaws/Heisenberg", - "timestamp": "2023-10-02T16:07:18-07:00", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} diff --git a/arena/HekolcuAutoGPT.json b/arena/HekolcuAutoGPT.json deleted file mode 100644 index e64dd9c632fe..000000000000 --- a/arena/HekolcuAutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/hekolcu/AutoGPT", - "timestamp": "2023-09-30T17:31:20.979122", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/HuitzilAiAgent.json b/arena/HuitzilAiAgent.json deleted file mode 100644 index 
6e832eafa2af..000000000000 --- a/arena/HuitzilAiAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/codetitlan/AutoGPT-CDTHB", - "timestamp": "2023-10-03T15:04:54.856291", - "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Hypeman.json b/arena/Hypeman.json deleted file mode 100644 index d32bcb9e483d..000000000000 --- a/arena/Hypeman.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kennyu/KenGPT", - "timestamp": "2023-09-27T19:50:31.443494", - "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/IncredibubbleTea.json b/arena/IncredibubbleTea.json deleted file mode 100644 index 6908e6be2c84..000000000000 --- a/arena/IncredibubbleTea.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mariepop13/AutoGPT", - "timestamp": "2023-10-25T18:38:32.012583", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/JackGPT.json b/arena/JackGPT.json deleted file mode 100644 index 007286814efa..000000000000 --- a/arena/JackGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JackDance/AutoGPT", - "timestamp": "2023-10-09T08:26:35.181112", - "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Jarvis.json b/arena/Jarvis.json deleted file mode 100644 index bb098270eca3..000000000000 --- a/arena/Jarvis.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/elynch303/AutoGPT", - "timestamp": "2023-10-12T14:15:17.014333", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/JarvisAgent.json b/arena/JarvisAgent.json deleted file mode 100644 index f8cc9810f326..000000000000 --- a/arena/JarvisAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JadeCong/AutoGPT", - "timestamp": "2023-10-17T18:49:16.489653", - "commit_hash_to_benchmark": "0bd5d4420ec168194d5a93f62d890d33ab7d9940", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Jean-Michel.json b/arena/Jean-Michel.json deleted file mode 100644 index 30791d295c41..000000000000 --- a/arena/Jean-Michel.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Yanniswein/Jean-Michel", - "timestamp": "2023-10-30T09:21:14.984080", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Job_GPT.json b/arena/Job_GPT.json deleted file mode 100644 index de73fba89887..000000000000 --- a/arena/Job_GPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/SeaField-dev/AutoGPT.git", - "timestamp": "2023-09-25T09:35:03.022273", - "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/JoshAgent1.json b/arena/JoshAgent1.json deleted file mode 100644 index 99378066ae72..000000000000 --- a/arena/JoshAgent1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/joshyorko/AutoGPT", - "timestamp": 
"2023-09-28T17:05:27.689905", - "commit_hash_to_benchmark": "959e1304d11f126c5a6914c3bb886549638d6b35", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/KnowledgeExtractor.json b/arena/KnowledgeExtractor.json deleted file mode 100644 index 4a184f2fb5ba..000000000000 --- a/arena/KnowledgeExtractor.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/arromaljj/KnowledgeExtractor", - "timestamp": "2023-10-04T13:01:50.037123", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/LAWYER_EMAD.json b/arena/LAWYER_EMAD.json deleted file mode 100644 index 5d84d0872c49..000000000000 --- a/arena/LAWYER_EMAD.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/emads7/AutoGPT.git", - "timestamp": "2023-10-19T15:06:37.481038", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/LHRobot.json b/arena/LHRobot.json deleted file mode 100644 index 98feac3b9220..000000000000 --- a/arena/LHRobot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/LH-Enterprise/AutoGPT", - "timestamp": "2023-10-07T01:05:31.627432", - "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Lab49Agent.json b/arena/Lab49Agent.json deleted file mode 100644 index cbb9922645db..000000000000 --- a/arena/Lab49Agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/FutureProofTechnology/AutoGPT", - "timestamp": "2023-10-12T10:28:34.275827", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/LbAgent.json b/arena/LbAgent.json deleted file mode 100644 index 8ff9c0cc099e..000000000000 --- a/arena/LbAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", - "timestamp": "2023-10-30T10:20:40.082545", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/LegalAgent.json b/arena/LegalAgent.json deleted file mode 100644 index c57b30f85275..000000000000 --- a/arena/LegalAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gengrui1983/LegalGPT", - "timestamp": "2023-10-25T02:46:41.860987", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Light_Agent.json b/arena/Light_Agent.json deleted file mode 100644 index 17fee68be6f1..000000000000 --- a/arena/Light_Agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mohammed-radha-LightRing/AutoGPT", - "timestamp": "2023-10-01T07:10:46.497391", - "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/LinuzGPT.json b/arena/LinuzGPT.json deleted file mode 100644 index 8cb096f0cf21..000000000000 --- a/arena/LinuzGPT.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/linusaltacc/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": 
"master"} \ No newline at end of file diff --git a/arena/Lirum.json b/arena/Lirum.json deleted file mode 100644 index da8dddd76a74..000000000000 --- a/arena/Lirum.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/rogerioth/AutoGPT", - "timestamp": "2023-10-12T23:04:51.600862", - "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MANU.json b/arena/MANU.json deleted file mode 100644 index 7e1caed1f20c..000000000000 --- a/arena/MANU.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/manuel-soria/AutoGPT", - "timestamp": "2023-10-07T16:50:11.634586", - "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MEGATRON.json b/arena/MEGATRON.json deleted file mode 100644 index 81182c372e06..000000000000 --- a/arena/MEGATRON.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/razorhasbeen/AutoGPT", - "timestamp": "2023-10-03T11:33:22.091896", - "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MOBILE.json b/arena/MOBILE.json deleted file mode 100644 index 13b9c175217b..000000000000 --- a/arena/MOBILE.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/nel349/AutoGPT", - "timestamp": "2023-10-08T03:10:40.860972", - "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Maharathi.json b/arena/Maharathi.json deleted file mode 100644 index c2a312c8205a..000000000000 --- a/arena/Maharathi.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/sampatkalyan/AutoGPTHackathon", "timestamp": "2023-10-02T08:16:27Z", "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/MangoAI.json b/arena/MangoAI.json deleted file mode 100644 index 32250c07fe7a..000000000000 --- a/arena/MangoAI.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/stargatejy/MangoAI", - "timestamp": "2023-10-24T10:11:38.967772", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MangoAgent-3.json b/arena/MangoAgent-3.json deleted file mode 100644 index 72f5a832577c..000000000000 --- a/arena/MangoAgent-3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/stargatejy/MangoAI", - "timestamp": "2023-10-25T15:41:17.652038", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MangoAgent-4.json b/arena/MangoAgent-4.json deleted file mode 100644 index b49ad87078b3..000000000000 --- a/arena/MangoAgent-4.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/stargatejy/MangoAI", - "timestamp": "2023-10-27T16:28:23.804390", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MarketResearcherEduRob.json b/arena/MarketResearcherEduRob.json deleted file mode 100644 index 6ee0afb41c66..000000000000 --- a/arena/MarketResearcherEduRob.json +++ 
/dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/fzoric8/AutoGPT", - "timestamp": "2023-11-01T09:36:16.357944", - "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Marx.json b/arena/Marx.json deleted file mode 100644 index 69421b46829d..000000000000 --- a/arena/Marx.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/maxletemple/AutoGPT", - "timestamp": "2023-10-18T17:06:20.575710", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Mary.json b/arena/Mary.json deleted file mode 100644 index a47a8da58441..000000000000 --- a/arena/Mary.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bigfatball/Auto-GPT.git", - "timestamp": "2023-10-22T23:40:22.765334", - "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Melang.json b/arena/Melang.json deleted file mode 100644 index 5345ede6374a..000000000000 --- a/arena/Melang.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Brian-Mwangi-developer/AutoGPT.git", - "timestamp": "2023-10-06T08:50:14.080962", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Miao.json b/arena/Miao.json deleted file mode 100644 index f3a169e49841..000000000000 --- a/arena/Miao.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/StefanWan-Durham/AutoGPT.git", - "timestamp": "2023-10-02T15:05:19.789945", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MindwareGPT.json b/arena/MindwareGPT.json deleted file mode 100644 index 1be44df5dd12..000000000000 --- a/arena/MindwareGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/0xtotaylor/MindwareGPT.git", - "timestamp": "2023-10-03T14:56:05.228408", - "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Mira.json b/arena/Mira.json deleted file mode 100644 index 28585c526759..000000000000 --- a/arena/Mira.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/enricofranke/EnricoAssistant", - "timestamp": "2023-10-25T23:21:35.799138", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MoTS.json b/arena/MoTS.json deleted file mode 100644 index efad4ea97f67..000000000000 --- a/arena/MoTS.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ghd9201/AutoGPT.git", - "timestamp": "2023-10-25T09:04:02.534683", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MojoBurrito.json b/arena/MojoBurrito.json deleted file mode 100644 index b9c0ad78081e..000000000000 --- a/arena/MojoBurrito.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dawnkelly09/MojoBurrito", - "timestamp": "2023-10-01T20:24:10.596062", - "commit_hash_to_benchmark": 
"de3e9e702a988c6028cc8b873aeffc9d5d82c572", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyAgent.json b/arena/MyAgent.json deleted file mode 100644 index d6f92e188298..000000000000 --- a/arena/MyAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/AgentService/AutoGPT", - "timestamp": "2023-10-25T20:11:31.811596", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyExample.json b/arena/MyExample.json deleted file mode 100644 index 508515aed709..000000000000 --- a/arena/MyExample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gabenitez/AutoGPT", - "timestamp": "2023-10-19T22:00:47.453159", - "commit_hash_to_benchmark": "b4588f6425912316e1512391e4392ca30d61e144", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyExampleAgent.json b/arena/MyExampleAgent.json deleted file mode 100644 index cc3a9f86b7ef..000000000000 --- a/arena/MyExampleAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/moizsajid/AutoGPT", - "timestamp": "2023-10-25T20:20:04.910747", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyFirstAgent.json b/arena/MyFirstAgent.json deleted file mode 100644 index 783c90f5477d..000000000000 --- a/arena/MyFirstAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/smaxaddington/AutoGPT", - "timestamp": "2023-10-14T15:27:15.090035", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyFistAgent.json b/arena/MyFistAgent.json deleted file mode 100644 index baafc39a876c..000000000000 --- a/arena/MyFistAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/eslam-fakhry/AutoGPT", - "timestamp": "2023-11-02T10:19:58.187866", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/MyTestAgent.json b/arena/MyTestAgent.json deleted file mode 100644 index a4c28dc7e8fb..000000000000 --- a/arena/MyTestAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Penguin-N/AutoGPT.git", - "timestamp": "2023-10-18T14:01:28.986850", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/N.json b/arena/N.json deleted file mode 100644 index 1d8b2dd9f11a..000000000000 --- a/arena/N.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gentaag/AutoGPT", - "timestamp": "2023-10-28T15:16:15.189228", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NASAssistant2.json b/arena/NASAssistant2.json deleted file mode 100644 index 1359a3332975..000000000000 --- a/arena/NASAssistant2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/IHIaadj/AutoGPT", - "timestamp": "2023-10-07T22:06:59.410391", - "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NHAN_BOT.json b/arena/NHAN_BOT.json 
deleted file mode 100644 index a0e649b0842f..000000000000 --- a/arena/NHAN_BOT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Vannhanhk12/AutoGPT", - "timestamp": "2023-09-28T07:18:38.959135", - "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NadeemAgent.json b/arena/NadeemAgent.json deleted file mode 100644 index 9898b7c19323..000000000000 --- a/arena/NadeemAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kiyanwang/AutoGPT", - "timestamp": "2023-10-19T14:11:40.660035", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NanAutoGPT.json b/arena/NanAutoGPT.json deleted file mode 100644 index 8dd47a13047a..000000000000 --- a/arena/NanAutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/open-nan/NanAutoGPT", - "timestamp": "2023-10-30T10:25:02.617275", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NoobSupreme.json b/arena/NoobSupreme.json deleted file mode 100644 index 42208e3d9bdd..000000000000 --- a/arena/NoobSupreme.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Ch0daboy/NoobSupreme.git", - "timestamp": "2023-10-01T08:08:13.753099", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/NumberOne.json b/arena/NumberOne.json deleted file mode 100644 index 36c626ca0e03..000000000000 --- a/arena/NumberOne.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/micwin/AutoGPT", - "timestamp": "2023-10-05T17:01:11.784397", - "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Orange.json b/arena/Orange.json deleted file mode 100644 index 4a344241a6d9..000000000000 --- a/arena/Orange.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/lewispeel/AutoGPT", - "timestamp": "2023-10-27T22:57:16.348948", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/PAgentAI.json b/arena/PAgentAI.json deleted file mode 100644 index 55e7333e7026..000000000000 --- a/arena/PAgentAI.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/plopez10/GPT", - "timestamp": "2023-10-26T03:25:27.221299", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Pacific.json b/arena/Pacific.json deleted file mode 100644 index f7f8d5a3a9c4..000000000000 --- a/arena/Pacific.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/yifeng-qiu/AutoGPTAgent", - "timestamp": "2023-10-04T18:25:34.925806", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ParalegalAgent.json b/arena/ParalegalAgent.json deleted file mode 100644 index 92e4c2513542..000000000000 --- a/arena/ParalegalAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": 
"https://github.com/bRitch022/Auto-GPT", - "timestamp": "2023-10-06T18:48:23.644236", - "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Pelle.json b/arena/Pelle.json deleted file mode 100644 index 598c0708d2e0..000000000000 --- a/arena/Pelle.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/pilotniq/AutoGPT", - "timestamp": "2023-10-23T19:14:27.176891", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Portalen.json b/arena/Portalen.json deleted file mode 100644 index 6e4aa879f45d..000000000000 --- a/arena/Portalen.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/erlendjones/AutoGPT", "timestamp": "2023-09-22T20:39:08Z", "commit_hash_to_benchmark": "58d5b0d4a2fcc1bc12ed667db9d62a427a89c1a4", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/Pumu2_agent.json b/arena/Pumu2_agent.json deleted file mode 100644 index 52510f0b035f..000000000000 --- a/arena/Pumu2_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git", - "timestamp": "2023-10-20T09:26:07.885410", - "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Q.json b/arena/Q.json deleted file mode 100644 index 9fad0c9cf8de..000000000000 --- a/arena/Q.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/CopsGit/AutoGPT", - "timestamp": "2023-10-27T19:07:51.053794", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/QA_AGENT.json b/arena/QA_AGENT.json deleted file mode 100644 index 14816293f854..000000000000 --- a/arena/QA_AGENT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ada-lovecraft/Ada-GPT", - "timestamp": "2023-09-20T08:14:19.186952", - "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/QuantumQuill.json b/arena/QuantumQuill.json deleted file mode 100644 index 32e78e5eac8a..000000000000 --- a/arena/QuantumQuill.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dleidisch/AutoAgent", - "timestamp": "2023-10-23T18:49:58.499309", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/RAGOptimizer.json b/arena/RAGOptimizer.json deleted file mode 100644 index f87cc692a9f7..000000000000 --- a/arena/RAGOptimizer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/nel349/AutoGPT", - "timestamp": "2023-10-07T22:51:51.507768", - "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/RFPScanner.json b/arena/RFPScanner.json deleted file mode 100644 index bc4ba260d79e..000000000000 --- a/arena/RFPScanner.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/vidhatanand/AutoRFP", - "timestamp": "2023-10-09T12:37:08.692968", - "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", - "branch_to_benchmark": "master" -} \ No newline at 
end of file diff --git a/arena/RONNIN.json b/arena/RONNIN.json deleted file mode 100644 index 5e1b0ecc8acc..000000000000 --- a/arena/RONNIN.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Huarada/AutoGPT", - "timestamp": "2023-10-06T18:11:56.450481", - "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/RagsToRiches.json b/arena/RagsToRiches.json deleted file mode 100644 index 7a3669733cab..000000000000 --- a/arena/RagsToRiches.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/faichele/AutoGPT", - "timestamp": "2023-09-28T11:01:12.962590", - "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/RandomVampirePictureBot.json b/arena/RandomVampirePictureBot.json deleted file mode 100644 index 0c8b8dc48bcb..000000000000 --- a/arena/RandomVampirePictureBot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/KleinerCodeDrago/AutoGPT", - "timestamp": "2023-09-29T14:06:38.055747", - "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Raslebot.json b/arena/Raslebot.json deleted file mode 100644 index 11169825d966..000000000000 --- a/arena/Raslebot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Jimcau/AutoGPT.git", - "timestamp": "2023-10-16T10:50:47.524483", - "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ResearchAgent.json b/arena/ResearchAgent.json deleted file mode 100644 index 94855c1ceaa3..000000000000 --- a/arena/ResearchAgent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/Umar-Azam/AutoGPT-ResearchAgent", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/RosterAgent.json b/arena/RosterAgent.json deleted file mode 100644 index 172d48e27773..000000000000 --- a/arena/RosterAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ricowong0730/AutoGPT", - "timestamp": "2023-10-17T01:17:01.540294", - "commit_hash_to_benchmark": "265255120b1a64d1dd0a3a92ae3a7e697a103ecb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SaasWebDev.json b/arena/SaasWebDev.json deleted file mode 100644 index 98324aa371c8..000000000000 --- a/arena/SaasWebDev.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/giggei/AutoGPT", - "timestamp": "2023-10-02T15:44:54.390181", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SaveAsPDF2.json b/arena/SaveAsPDF2.json deleted file mode 100644 index 6024d173b95a..000000000000 --- a/arena/SaveAsPDF2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/LFarmbot/AutoFarm", - "timestamp": "2023-10-28T04:32:40.914756", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ShiviBot.json b/arena/ShiviBot.json deleted file mode 100644 index c9ce171beeba..000000000000 --- 
a/arena/ShiviBot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kshivang/DabblerGPT", - "timestamp": "2023-10-07T01:30:06.292423", - "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SkorkobaniecAgent.json b/arena/SkorkobaniecAgent.json deleted file mode 100644 index 7b99a9e78d43..000000000000 --- a/arena/SkorkobaniecAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/RafalSkorka/AutoGPT", - "timestamp": "2023-10-30T19:05:24.676797", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SmartAgent.json b/arena/SmartAgent.json deleted file mode 100644 index bc2f1563e8a3..000000000000 --- a/arena/SmartAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/AgentService/AutoGPT", - "timestamp": "2023-10-25T20:06:46.743984", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SmartGPT.json b/arena/SmartGPT.json deleted file mode 100644 index fb27875a23f9..000000000000 --- a/arena/SmartGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Mertkaann/AutoGPT.git", - "timestamp": "2023-09-29T21:46:29.940080", - "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/SouAgent.json b/arena/SouAgent.json deleted file mode 100644 index 6a35c3699078..000000000000 --- a/arena/SouAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/SouSingh/AutoGPT.git", - "timestamp": "2023-10-01T07:26:31.428044", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Stragegy_Steve.json b/arena/Stragegy_Steve.json deleted file mode 100644 index fc4aa7aaea42..000000000000 --- a/arena/Stragegy_Steve.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/talumo/AutoGPT", - "timestamp": "2023-09-28T14:31:36.771515", - "commit_hash_to_benchmark": "e374e516633b0afca1ab644b378fe1973c455782", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Susan.json b/arena/Susan.json deleted file mode 100644 index 4689ef84e2b2..000000000000 --- a/arena/Susan.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/CodeZeno/Susan", - "timestamp": "2023-11-03T11:29:28.704822", - "commit_hash_to_benchmark": "82fecfae1b4fb5d64050eefa77d8f028292aa8f3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TEST_TPK.json b/arena/TEST_TPK.json deleted file mode 100644 index ec2967892521..000000000000 --- a/arena/TEST_TPK.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/seeseesky/AutoGPT", - "timestamp": "2023-10-31T04:31:39.337182", - "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TLGPT.json b/arena/TLGPT.json deleted file mode 100644 index a402fcc6a02b..000000000000 --- a/arena/TLGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/TheoLawrence86/AutoGPT", - "timestamp": "2023-10-09T14:34:30.182635", - 
"commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TMarafon.json b/arena/TMarafon.json deleted file mode 100644 index 9828a895bc64..000000000000 --- a/arena/TMarafon.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/TMarafon/AutoGPT", - "timestamp": "2023-10-28T05:34:54.785662", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TRAVIS.json b/arena/TRAVIS.json deleted file mode 100644 index 0e73f8841ca4..000000000000 --- a/arena/TRAVIS.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/tskaggs/AutoGPT", - "timestamp": "2023-10-14T02:33:28.089406", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TeslaBot.json b/arena/TeslaBot.json deleted file mode 100644 index e55ae0cd3f85..000000000000 --- a/arena/TeslaBot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/momokrunic/AutoGPT", - "timestamp": "2023-11-02T17:17:06.663164", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Tessa_AutoGPT_agent.json b/arena/Tessa_AutoGPT_agent.json deleted file mode 100644 index 3f12f4959666..000000000000 --- a/arena/Tessa_AutoGPT_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/DelicaTessa/AutoGPT_hackathon", - "timestamp": "2023-10-03T14:10:19.975796", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TestAgent.json b/arena/TestAgent.json deleted file mode 100644 index 02c5b1b84047..000000000000 --- a/arena/TestAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Nilllas/AutoGPT", - "timestamp": "2023-10-20T11:27:15.343842", - "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TestLbAgent.json b/arena/TestLbAgent.json deleted file mode 100644 index 9c57304508cb..000000000000 --- a/arena/TestLbAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", - "timestamp": "2023-10-31T03:25:23.064470", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TheAgency.json b/arena/TheAgency.json deleted file mode 100644 index 8470fe1b9982..000000000000 --- a/arena/TheAgency.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/shamantechnology/TheAgency", "timestamp": "2023-10-26T09:22:18Z", "commit_hash_to_benchmark": "3eef81f2579e3ab4822fb9155ee412c597fda9c2", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/TheAgent.json b/arena/TheAgent.json deleted file mode 100644 index 4a515aaa1013..000000000000 --- a/arena/TheAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT", - "timestamp": "2023-10-31T03:07:04.629241", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/TraceLLMAgent.json 
b/arena/TraceLLMAgent.json deleted file mode 100644 index d25ff491b0d5..000000000000 --- a/arena/TraceLLMAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/vmwsree/AutoGPT", - "timestamp": "2023-10-15T21:48:38.027553", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/UGYUJI.json b/arena/UGYUJI.json deleted file mode 100644 index 2d0abc304080..000000000000 --- a/arena/UGYUJI.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ugyuji/AutoGPT", - "timestamp": "2023-10-20T04:42:28.397067", - "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/UTC-Crew.json b/arena/UTC-Crew.json deleted file mode 100644 index 832d484f1b56..000000000000 --- a/arena/UTC-Crew.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/isayahc/AutoGPT.git", - "timestamp": "2023-10-04T17:06:48.154911", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/UmaruAgent.json b/arena/UmaruAgent.json deleted file mode 100644 index f3168d47a817..000000000000 --- a/arena/UmaruAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/g1331/Auto-GPT", - "timestamp": "2023-10-16T13:51:10.464650", - "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/UniAgent.json b/arena/UniAgent.json deleted file mode 100644 index 19d710fa21bf..000000000000 --- a/arena/UniAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JovanKanevche/AutoGPT", - "timestamp": "2023-10-19T17:04:49.626683", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Verkiezingsprogrammas.json b/arena/Verkiezingsprogrammas.json deleted file mode 100644 index 4a18be40c74e..000000000000 --- a/arena/Verkiezingsprogrammas.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bergje0810/AutoGPT", - "timestamp": "2023-10-11T11:47:16.993332", - "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/WRITER.json b/arena/WRITER.json deleted file mode 100644 index 63849f43f4fc..000000000000 --- a/arena/WRITER.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ezirmusitua/AutoGPT", - "timestamp": "2023-10-27T09:43:35.725996", - "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/WYC.json b/arena/WYC.json deleted file mode 100644 index 0620b0aab264..000000000000 --- a/arena/WYC.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/131250208/AutoGPT_YC", - "timestamp": "2023-10-20T07:42:11.493899", - "commit_hash_to_benchmark": "9219bfba0e028a557109b8e39c0fd91c1df243f8", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/WarlockAgent.json b/arena/WarlockAgent.json deleted file mode 100644 index 55977a9f343e..000000000000 --- a/arena/WarlockAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": 
"https://github.com/warlockee/AutoGPT-wl", - "timestamp": "2023-10-27T21:30:11.455084", - "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/WeatherInformer.json b/arena/WeatherInformer.json deleted file mode 100644 index 4cc94787f168..000000000000 --- a/arena/WeatherInformer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/colour-me-bibi/Auto-GPT", - "timestamp": "2023-09-19T14:11:53.195135", - "commit_hash_to_benchmark": "2098e192da0ec8eecf0010ae62704e6727dfa42a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/WiseAgent.json b/arena/WiseAgent.json deleted file mode 100644 index 02c03c0a2043..000000000000 --- a/arena/WiseAgent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/Ashish-Soni08/SoniGPT", "timestamp": "2023-10-08T18:39:38Z", "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/XXY.json b/arena/XXY.json deleted file mode 100644 index 849438def548..000000000000 --- a/arena/XXY.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xuxiangyang/AutoGPT", - "timestamp": "2023-10-14T04:40:39.828483", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/YOU.json b/arena/YOU.json deleted file mode 100644 index 64629cf403d1..000000000000 --- a/arena/YOU.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gentaag/AutoGPT", - "timestamp": "2023-10-28T14:03:12.555466", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/YoudaoAutoGPT.json b/arena/YoudaoAutoGPT.json deleted file mode 100644 index 8e81970eb093..000000000000 --- a/arena/YoudaoAutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jiezhangGt/AutoGPT", - "timestamp": "2023-10-20T03:02:17.342168", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/YoutubePost_agent.json b/arena/YoutubePost_agent.json deleted file mode 100644 index 46b7d81b798f..000000000000 --- a/arena/YoutubePost_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ramirez3rg/Auto-GPT", - "timestamp": "2023-09-21T20:35:24.266598", - "commit_hash_to_benchmark": "c72a35e92e4f95aca25221e216c3a49d0dbc739b", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Yui3.json b/arena/Yui3.json deleted file mode 100644 index 439183005801..000000000000 --- a/arena/Yui3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/YuiChan04233/AutoGPT1", - "timestamp": "2023-10-08T02:03:48.189959", - "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Yutan_agent.json b/arena/Yutan_agent.json deleted file mode 100644 index 468f5f37352b..000000000000 --- a/arena/Yutan_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/zyt329/AutoGPT", - "timestamp": "2023-09-29T21:47:23.741942", - "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce", - "branch_to_benchmark": 
"master" -} \ No newline at end of file diff --git a/arena/ZJgpt.json b/arena/ZJgpt.json deleted file mode 100644 index 0ac3d2567454..000000000000 --- a/arena/ZJgpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jiezhangGt/AutoGPT", - "timestamp": "2023-10-20T04:04:28.198603", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Zeus.json b/arena/Zeus.json deleted file mode 100644 index 0529b52c4421..000000000000 --- a/arena/Zeus.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/MerlimarCode/ZeusGPT", - "timestamp": "2023-10-08T02:31:50.347357", - "commit_hash_to_benchmark": "0d5c2a98c071336e1bb48716cc25d85df2656ced", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ZhaoJianAutoGPT.json b/arena/ZhaoJianAutoGPT.json deleted file mode 100644 index b2aa60f7ba43..000000000000 --- a/arena/ZhaoJianAutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/zhaojianchn/AutoGPT", - "timestamp": "2023-10-17T09:41:06.331671", - "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ZoeyGPT.json b/arena/ZoeyGPT.json deleted file mode 100644 index c2be10804ce2..000000000000 --- a/arena/ZoeyGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/carylaw/FatGPT", - "timestamp": "2023-10-25T10:03:47.295810", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/Zoidberg.json b/arena/Zoidberg.json deleted file mode 100644 index a56f26d43e1b..000000000000 --- a/arena/Zoidberg.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/KapitanFernand/Zoidberg", - "timestamp": "2023-10-24T09:09:27.540179", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/aWOL.json b/arena/aWOL.json deleted file mode 100644 index 62dc8026138b..000000000000 --- a/arena/aWOL.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aodrasa/aWOL", - "timestamp": "2023-10-11T01:24:01.516559", - "commit_hash_to_benchmark": "0856f6806177b30989b2be78004e059658efbbb4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/a_reverent_heart.json b/arena/a_reverent_heart.json deleted file mode 100644 index c0233bc389d3..000000000000 --- a/arena/a_reverent_heart.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/iamlockelightning/IAutoGPT", - "timestamp": "2023-10-08T08:03:31.352877", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/accidental-agent.json b/arena/accidental-agent.json deleted file mode 100644 index 853068771b43..000000000000 --- a/arena/accidental-agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/siddharthsarda/accidental-agent", - "timestamp": "2023-09-20T08:07:08.337479", - "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/actor_tester.json b/arena/actor_tester.json deleted file mode 100644 index 
ec1f0138e944..000000000000 --- a/arena/actor_tester.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/calmglow/mbtiagent", - "timestamp": "2023-10-25T13:15:04.296302", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/admariner.json b/arena/admariner.json deleted file mode 100644 index 2811c5d5c011..000000000000 --- a/arena/admariner.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/admariner/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/ag1.json b/arena/ag1.json deleted file mode 100644 index 0dcfe64d43c8..000000000000 --- a/arena/ag1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gensy421/AutoGensy", - "timestamp": "2023-10-26T06:31:27.588150", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/agent2.json b/arena/agent2.json deleted file mode 100644 index 54b1247ca944..000000000000 --- a/arena/agent2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/lukadumancic/AutoGPT", - "timestamp": "2023-10-28T16:08:43.603669", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/agentSmith.json b/arena/agentSmith.json deleted file mode 100644 index 805e720e8fb3..000000000000 --- a/arena/agentSmith.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Nameless8243/AgentSmith", - "timestamp": "2023-10-28T20:05:53.168061", - "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/agent_2.json b/arena/agent_2.json deleted file mode 100644 index 1e169e0eecea..000000000000 --- a/arena/agent_2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Exstor/AutoGPT", - "timestamp": "2023-10-31T20:56:49.313875", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/agentgpt.json b/arena/agentgpt.json deleted file mode 100644 index 15aed81c4a37..000000000000 --- a/arena/agentgpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/UdaySagar-Git/AutoGPT.git", - "timestamp": "2023-10-24T05:24:58.972720", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/agsCehAgent.json b/arena/agsCehAgent.json deleted file mode 100644 index e628e79a3b99..000000000000 --- a/arena/agsCehAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/albags/AutoGPT.git", - "timestamp": "2023-10-19T11:30:12.759675", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ai_assistant.json b/arena/ai_assistant.json deleted file mode 100644 index 2a0d85dee973..000000000000 --- a/arena/ai_assistant.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xhjxhj001/AutoGPT", - "timestamp": "2023-10-23T12:05:13.923218", - 
"commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/aiaudit.json b/arena/aiaudit.json deleted file mode 100644 index e1ecbb1dd719..000000000000 --- a/arena/aiaudit.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bigsml/AutoGPT.git", - "timestamp": "2023-10-12T07:05:18.886183", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/aiwowo.json b/arena/aiwowo.json deleted file mode 100644 index 3412ba3cd364..000000000000 --- a/arena/aiwowo.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jeffxtang/AutoGPT", - "timestamp": "2023-10-09T05:25:37.720553", - "commit_hash_to_benchmark": "027054ae02657c37be0d28502bb5a22823eae9d9", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/aixiaoxin.json b/arena/aixiaoxin.json deleted file mode 100644 index a6fe001c5738..000000000000 --- a/arena/aixiaoxin.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aixiaoxin123/AutoGPT", - "timestamp": "2023-10-27T05:44:49.265845", - "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/akela.json b/arena/akela.json deleted file mode 100644 index 9c811d288316..000000000000 --- a/arena/akela.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/SarahGrevy/AutoGPT", - "timestamp": "2023-10-20T18:56:31.210825", - "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/analystgpt.json b/arena/analystgpt.json deleted file mode 100644 index 9227c97a1ed7..000000000000 --- a/arena/analystgpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/vleonidas/AutoGPT", - "timestamp": "2023-10-20T16:46:11.806635", - "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/arbetsformedlingen.json b/arena/arbetsformedlingen.json deleted file mode 100644 index 5afc4316e335..000000000000 --- a/arena/arbetsformedlingen.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/esaiaswestberg/AutoGPT", - "timestamp": "2023-11-02T12:35:40.378520", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/assistant1.json b/arena/assistant1.json deleted file mode 100644 index 8bb51d2fea0e..000000000000 --- a/arena/assistant1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/BarneyChambers/AutoGPT", - "timestamp": "2023-10-16T18:35:05.779206", - "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autoai.json b/arena/autoai.json deleted file mode 100644 index 5197905241d7..000000000000 --- a/arena/autoai.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/maanvithag/AutoGPT", - "timestamp": "2023-10-09T16:19:12.986257", - "commit_hash_to_benchmark": "3bd8ae48433fa46552719de050ded576a3bef4b9", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autocoder.json 
b/arena/autocoder.json deleted file mode 100644 index 8d1fd33e6ce9..000000000000 --- a/arena/autocoder.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mtx-light/AutoGPT", - "timestamp": "2023-10-29T07:33:17.228393", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autogbd.json b/arena/autogbd.json deleted file mode 100644 index 77f7f4b5ddaa..000000000000 --- a/arena/autogbd.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kylaro/AutoGBD", - "timestamp": "2023-10-09T11:45:26.637129", - "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autogpt-hackathon2.json b/arena/autogpt-hackathon2.json deleted file mode 100644 index 41960393bd46..000000000000 --- a/arena/autogpt-hackathon2.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt.json b/arena/autogpt.json deleted file mode 100644 index 931aa3aa5cf8..000000000000 --- a/arena/autogpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", - "timestamp": "2023-11-15T07:22:09.723393", - "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autogpt_hackathon.json b/arena/autogpt_hackathon.json deleted file mode 100644 index 41960393bd46..000000000000 --- a/arena/autogpt_hackathon.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt_hackathon1.json b/arena/autogpt_hackathon1.json deleted file mode 100644 index 41960393bd46..000000000000 --- a/arena/autogpt_hackathon1.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/autogpt_warlock.json b/arena/autogpt_warlock.json deleted file mode 100644 index 5f6e9c0a52e8..000000000000 --- a/arena/autogpt_warlock.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/warlockee/AutoGPT-wl", - "timestamp": "2023-10-27T00:46:05.266939", - "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/autogptagent.json b/arena/autogptagent.json deleted file mode 100644 index 589001597df6..000000000000 --- a/arena/autogptagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/YasienDwieb/AutoGPT", - "timestamp": "2023-11-04T21:13:17.223261", - "commit_hash_to_benchmark": "0b55de62dc61a33ccf944d80b6d55c730286e07d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/avengaGPT.json b/arena/avengaGPT.json deleted file mode 100644 index f95163865726..000000000000 --- 
a/arena/avengaGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sebabetz/AutoGPT", - "timestamp": "2023-10-24T05:25:26.059512", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/babe_perphorator_.json b/arena/babe_perphorator_.json deleted file mode 100644 index ed3396907e02..000000000000 --- a/arena/babe_perphorator_.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/beavishead/automaton.git", - "timestamp": "2023-10-11T09:43:19.859956", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/baby_agent.json b/arena/baby_agent.json deleted file mode 100644 index ee8f386cc338..000000000000 --- a/arena/baby_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/keli-61/AutoK", - "timestamp": "2023-10-19T07:39:13.300108", - "commit_hash_to_benchmark": "1a30d00194b46f8b923bab191404ce9123e34bdf", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bait.json b/arena/bait.json deleted file mode 100644 index 9c886bfba9bc..000000000000 --- a/arena/bait.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/a0balaton/AutoGPT", - "timestamp": "2023-11-03T07:38:34.616504", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/beyond.json b/arena/beyond.json deleted file mode 100644 index dd51cc2febfa..000000000000 --- a/arena/beyond.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sn0wdown/AutoGPT", - "timestamp": "2023-10-25T07:22:09.723393", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bigman.json b/arena/bigman.json deleted file mode 100644 index 00d4395820f3..000000000000 --- a/arena/bigman.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bathrobe/autogpt", - "timestamp": "2023-10-04T18:32:29.402925", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/billy.json b/arena/billy.json deleted file mode 100644 index 44253ededb99..000000000000 --- a/arena/billy.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/khelil/AutoGPT", - "timestamp": "2023-10-14T17:51:54.044334", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bingoTesting.json b/arena/bingoTesting.json deleted file mode 100644 index a8fd1e210e0c..000000000000 --- a/arena/bingoTesting.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bingotyty/AutoGPT", - "timestamp": "2023-11-06T04:16:38.612948", - "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bosaeed_agent.json b/arena/bosaeed_agent.json deleted file mode 100644 index e2a1dcc97c97..000000000000 --- a/arena/bosaeed_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bosaeed/AutoGPT.git", - "timestamp": "2023-10-03T15:31:04.721867", - "commit_hash_to_benchmark": 
"3da29eae45683457131ee8736bedae7e2a74fbba", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bot.json b/arena/bot.json deleted file mode 100644 index 3552e7447346..000000000000 --- a/arena/bot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Sampson2016/AutoGPT", - "timestamp": "2023-09-26T07:44:15.563183", - "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/bot01.json b/arena/bot01.json deleted file mode 100644 index eca05f793a85..000000000000 --- a/arena/bot01.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Arthur-Heng/AutoGPT", - "timestamp": "2023-10-12T04:16:30.658280", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/buddy.json b/arena/buddy.json deleted file mode 100644 index 3b2653f9d065..000000000000 --- a/arena/buddy.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/danhamilt/AutoGPT", - "timestamp": "2023-10-09T01:07:11.246485", - "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/burt.json b/arena/burt.json deleted file mode 100644 index 7f9acb5ef2c9..000000000000 --- a/arena/burt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/D4t4DrU1d/burt", - "timestamp": "2023-10-05T14:00:59.740170", - "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/business.json b/arena/business.json deleted file mode 100644 index c086daeaad61..000000000000 --- a/arena/business.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/danielfebrero/AutoGPT", - "timestamp": "2023-10-21T16:12:05.424875", - "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/byl.json b/arena/byl.json deleted file mode 100644 index c57a574d51b8..000000000000 --- a/arena/byl.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/yoonh12/byl", - "timestamp": "2023-10-01T08:36:20.309716", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/career-agent.json b/arena/career-agent.json deleted file mode 100644 index ba2877abffc6..000000000000 --- a/arena/career-agent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/asifdotpy/CareerGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/caud.json b/arena/caud.json deleted file mode 100644 index 63dcaeef4241..000000000000 --- a/arena/caud.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Swiftyos/CAUD", - "timestamp": "2023-10-07T15:44:40.526955", - "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ccace.json b/arena/ccace.json deleted file mode 100644 index ae1628cd8383..000000000000 --- a/arena/ccace.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": 
"https://github.com/ccsnow127/AutoGPT", - "timestamp": "2023-10-23T08:28:38.119283", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/chappigpt.json b/arena/chappigpt.json deleted file mode 100644 index a136db128551..000000000000 --- a/arena/chappigpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Wiradjuri/chappi.git", - "timestamp": "2023-10-08T06:20:43.527806", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/chappyAi.json b/arena/chappyAi.json deleted file mode 100644 index 3da98b8c727e..000000000000 --- a/arena/chappyAi.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Wiradjuri/chappi.git", - "timestamp": "2023-10-08T06:50:59.175273", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/chatgpt_taller.json b/arena/chatgpt_taller.json deleted file mode 100644 index 996c78970f46..000000000000 --- a/arena/chatgpt_taller.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/leobusar/AutoGPT", - "timestamp": "2023-10-10T04:06:42.480712", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/chengshu.json b/arena/chengshu.json deleted file mode 100644 index e4cffdb81d2f..000000000000 --- a/arena/chengshu.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/skadai/AutoGPT", - "timestamp": "2023-10-26T06:54:04.511066", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/chenzo.json b/arena/chenzo.json deleted file mode 100644 index 9717e91a74f2..000000000000 --- a/arena/chenzo.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/chenzino/AutoGPT", - "timestamp": "2023-10-05T00:25:37.141373", - "commit_hash_to_benchmark": "7f89b8aae8748bc88b29ca94c3604ba540bbef94", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/cislerk2.json b/arena/cislerk2.json deleted file mode 100644 index 3d4c9dd1009f..000000000000 --- a/arena/cislerk2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/cislerk/AutoGPT", - "timestamp": "2023-10-10T21:05:38.064647", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/codebutler.json b/arena/codebutler.json deleted file mode 100644 index c42fae82705f..000000000000 --- a/arena/codebutler.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/AJV009/AutoGPT", "timestamp": "2023-10-26T05:03:09Z", "commit_hash_to_benchmark": "03a95a5333db52ac5b129306e47423b638d649b0", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/coder_first.json b/arena/coder_first.json deleted file mode 100644 index 5e8048a328b0..000000000000 --- a/arena/coder_first.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mtx-light/AutoGPT", - "timestamp": "2023-10-29T07:22:26.774555", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at 
end of file diff --git a/arena/contentstrategy.json b/arena/contentstrategy.json deleted file mode 100644 index d1b9dd5aff77..000000000000 --- a/arena/contentstrategy.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/banderson12/AutoGPT", "timestamp": "2023-10-21T04:13:13Z", "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/cssupdater.json b/arena/cssupdater.json deleted file mode 100644 index 91959adcbe8d..000000000000 --- a/arena/cssupdater.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mplummeridge/AutoGPT", - "timestamp": "2023-10-24T01:25:47.059251", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/da-agent.json b/arena/da-agent.json deleted file mode 100644 index 78bce3e7e029..000000000000 --- a/arena/da-agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/rayzh-lab/AutoGPT", - "timestamp": "2023-10-12T13:37:26.964846", - "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/date-buffer.json b/arena/date-buffer.json deleted file mode 100644 index ea91442b8099..000000000000 --- a/arena/date-buffer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jackbullen/AutoGPT", - "timestamp": "2023-10-14T03:55:27.817045", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/davidtest1.json b/arena/davidtest1.json deleted file mode 100644 index fbaa9445129e..000000000000 --- a/arena/davidtest1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ningzero/AutoGPTTest", - "timestamp": "2023-11-01T10:08:15.790059", - "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/davidtestagent.json b/arena/davidtestagent.json deleted file mode 100644 index 0fd27d2b58f3..000000000000 --- a/arena/davidtestagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ningzero/AutoGPTTest", - "timestamp": "2023-11-01T09:29:35.474709", - "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/dda.json b/arena/dda.json deleted file mode 100644 index 3f628dd87ae3..000000000000 --- a/arena/dda.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aiherrera1/AutoGPT", - "timestamp": "2023-10-15T18:03:04.765167", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/decision-maker.json b/arena/decision-maker.json deleted file mode 100644 index 623522fe247b..000000000000 --- a/arena/decision-maker.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/vishnub1626/AutoGPT", - "timestamp": "2023-09-28T11:33:39.045838", - "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/dev_agent.json b/arena/dev_agent.json deleted file mode 100644 index 25aec8ac7d7d..000000000000 --- a/arena/dev_agent.json +++ 
/dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/pedrovvitor/AutoGPT", - "timestamp": "2023-10-15T14:25:07.534330", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/devagent.json b/arena/devagent.json deleted file mode 100644 index f65809e14687..000000000000 --- a/arena/devagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/w6m6/kkgpt", - "timestamp": "2023-10-20T08:29:25.708364", - "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/dive2code.json b/arena/dive2code.json deleted file mode 100644 index 2280c1bef980..000000000000 --- a/arena/dive2code.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/qwdqwqdwqd/autogpt", "timestamp": "2023-10-25T17:55:18Z", "commit_hash_to_benchmark": "c8d239ef6492d7fe30c099909e01a2eede678b70", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/dndagent.json b/arena/dndagent.json deleted file mode 100644 index 9617293dbe72..000000000000 --- a/arena/dndagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xSudoNymx/AutoGPT", - "timestamp": "2023-10-13T04:48:12.424344", - "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/dy_agent.json b/arena/dy_agent.json deleted file mode 100644 index fd5c981b1322..000000000000 --- a/arena/dy_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dyabel/AutoGPT", - "timestamp": "2023-09-24T07:25:55.818276", - "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/dy_agent2.json b/arena/dy_agent2.json deleted file mode 100644 index c6ae45ee69bb..000000000000 --- a/arena/dy_agent2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dyabel/AutoGPT", - "timestamp": "2023-09-24T09:30:13.885689", - "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/easn.json b/arena/easn.json deleted file mode 100644 index c7ba6bcad731..000000000000 --- a/arena/easn.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/qazwsxdshb/AutoGPT", - "timestamp": "2023-10-21T08:00:39.287093", - "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/eddy.json b/arena/eddy.json deleted file mode 100644 index 12e625b4c049..000000000000 --- a/arena/eddy.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ltxctdbnn/AutoGPT", - "timestamp": "2023-10-17T08:42:59.396592", - "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ekc911_agent.json b/arena/ekc911_agent.json deleted file mode 100644 index f755e78eadbc..000000000000 --- a/arena/ekc911_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/seacrest/ekc911GPT.git", - "timestamp": "2023-10-05T03:09:36.845932", - "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d", - "branch_to_benchmark": "master" -} \ 
No newline at end of file diff --git a/arena/engineer.json b/arena/engineer.json deleted file mode 100644 index ef0a2f12eafe..000000000000 --- a/arena/engineer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bary12/AutoGPT", - "timestamp": "2023-10-18T07:21:47.127207", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/evlyn.json b/arena/evlyn.json deleted file mode 100644 index 115c41e113a2..000000000000 --- a/arena/evlyn.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/TimothyZhang/AutoGPT", - "timestamp": "2023-09-26T04:13:50.107902", - "commit_hash_to_benchmark": "e8aae7731919ee37444fd0871d05bff38f03ab66", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/evo-ninja.json b/arena/evo-ninja.json deleted file mode 100644 index e7ec02cb9e01..000000000000 --- a/arena/evo-ninja.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/polywrap/evo.ninja", - "timestamp": "2023-10-26T09:05:21.013962", - "commit_hash_to_benchmark": "8832a1008607ab8a27de81fbea69bc73c3febb6f", - "branch_to_benchmark": "dev" -} \ No newline at end of file diff --git a/arena/evo.json b/arena/evo.json deleted file mode 100644 index 48de830feaf5..000000000000 --- a/arena/evo.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/abdllahdev/evo", - "timestamp": "2023-09-24T04:36:48.363989", - "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/faran.json b/arena/faran.json deleted file mode 100644 index d67d39544caa..000000000000 --- a/arena/faran.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/faranbutt/AutoGPT", - "timestamp": "2023-10-03T11:37:15.047378", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/first-agent.json b/arena/first-agent.json deleted file mode 100644 index 34eb08d44108..000000000000 --- a/arena/first-agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/DG1202/AutoGPT.git", - "timestamp": "2023-10-22T15:08:00.869208", - "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/foobar.json b/arena/foobar.json deleted file mode 100644 index e502066763c3..000000000000 --- a/arena/foobar.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sosthoff/AutoGPT", - "timestamp": "2023-10-07T17:23:59.763991", - "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/frankgarcia.json b/arena/frankgarcia.json deleted file mode 100644 index b02dd557dd99..000000000000 --- a/arena/frankgarcia.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/variavista/AutoGPT", - "timestamp": "2023-09-28T07:03:33.140557", - "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/fritzgpt.json b/arena/fritzgpt.json deleted file mode 100644 index e9bae34ffb8f..000000000000 --- a/arena/fritzgpt.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": 
"https://github.com/bsenst/FritzGPT", "timestamp": "2023-10-07T11:54:36Z", "commit_hash_to_benchmark": "bb960ffb9fadc45fe4fb5277053caa831f196578", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/fst.json b/arena/fst.json deleted file mode 100644 index 97216c4dd2ff..000000000000 --- a/arena/fst.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/onewesong/AutoGPT", - "timestamp": "2023-10-10T07:04:45.268630", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/fuzz_gen.json b/arena/fuzz_gen.json deleted file mode 100644 index 87273ae48207..000000000000 --- a/arena/fuzz_gen.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/stplaydog/AutoGPT", - "timestamp": "2023-09-29T16:15:33.360163", - "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/gaby_agent.json b/arena/gaby_agent.json deleted file mode 100644 index 7b57d6dae83c..000000000000 --- a/arena/gaby_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://ggonza156: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@github.com/ggonza156/AutoGPT", - "timestamp": "2023-10-21T23:52:39.199690", - "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/gen_fuzz.json b/arena/gen_fuzz.json deleted file mode 100644 index c6486156ccaa..000000000000 --- a/arena/gen_fuzz.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/stplaydog/AutoGPT", - "timestamp": "2023-09-29T17:45:56.921760", - "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ghostcoder.json b/arena/ghostcoder.json deleted file mode 100644 index 738061238484..000000000000 --- a/arena/ghostcoder.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/aorwall/AutoGPT", "timestamp": "2023-10-26T07:02:18Z", "commit_hash_to_benchmark": "580b4467851b879ef6ce369128e8c7a0399f8877", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gipity.json b/arena/gipity.json deleted file mode 100644 index 84d2d893e19f..000000000000 --- a/arena/gipity.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/koad/gpt", - "timestamp": "2023-10-02T19:47:45.668048", - "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/gpt-dev-engineer-agent.json b/arena/gpt-dev-engineer-agent.json deleted file mode 100644 index 080c9ab046b6..000000000000 --- a/arena/gpt-dev-engineer-agent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gpt-eng-forge.json b/arena/gpt-eng-forge.json deleted file mode 100644 index 348120b3a452..000000000000 --- a/arena/gpt-eng-forge.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/pbharrin/AutoGPT", - "timestamp": "2023-09-26T17:55:18.530567", - "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021", - 
"branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/gpt-engineer.json b/arena/gpt-engineer.json deleted file mode 100644 index 080c9ab046b6..000000000000 --- a/arena/gpt-engineer.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/gpt_for_beans.json b/arena/gpt_for_beans.json deleted file mode 100644 index 5f9e89282d83..000000000000 --- a/arena/gpt_for_beans.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xiazaiba7/AutoGPT.git", - "timestamp": "2023-11-02T06:07:34.435957", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/hall_oto.json b/arena/hall_oto.json deleted file mode 100644 index 09928183c37f..000000000000 --- a/arena/hall_oto.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/helloworld4774/AutoGPT.git", - "timestamp": "2023-10-01T17:47:00.644268", - "commit_hash_to_benchmark": "26cf7c2e3f7b8f61ecda9e301f7a4b36f2b14f2f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/han.json b/arena/han.json deleted file mode 100644 index 8cf8cb54c963..000000000000 --- a/arena/han.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Hanhan0831/AutoGPT", - "timestamp": "2023-10-14T01:01:58.300995", - "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/happy_guy.json b/arena/happy_guy.json deleted file mode 100644 index d1df91da3f71..000000000000 --- a/arena/happy_guy.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jianglonghui/AutoGPT", - "timestamp": "2023-11-03T08:54:39.949387", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/hello.json b/arena/hello.json deleted file mode 100644 index 44d8836c8f67..000000000000 --- a/arena/hello.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ldnvnbl/AutoGPT", - "timestamp": "2023-10-20T09:37:16.860422", - "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/hodri.json b/arena/hodri.json deleted file mode 100644 index 32e489bfc565..000000000000 --- a/arena/hodri.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bilisim1995/AutoGPT", - "timestamp": "2023-10-27T10:51:20.447157", - "commit_hash_to_benchmark": "f4985395a94da84b79252bd4d88e040472e1bf6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/houxe.json b/arena/houxe.json deleted file mode 100644 index ab5a7072cc2e..000000000000 --- a/arena/houxe.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jiarung/AutoGPTTest", - "timestamp": "2023-10-30T08:30:59.320850", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/icode.json b/arena/icode.json deleted file mode 100644 index d71f8df81452..000000000000 --- a/arena/icode.json +++ /dev/null @@ -1,6 +0,0 
@@ -{ - "github_repo_url": "https://github.com/krishnaji/AutoGPT", - "timestamp": "2023-10-13T01:09:31.395541", - "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/iku2.json b/arena/iku2.json deleted file mode 100644 index 63b33adfbc18..000000000000 --- a/arena/iku2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/IkuOhama/AutoGPT", - "timestamp": "2023-09-27T22:46:33.754238", - "commit_hash_to_benchmark": "793ff1c163bb0f9bd3e0c788b4978b8dc193ba6a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/illynet.json b/arena/illynet.json deleted file mode 100644 index 269222fc6900..000000000000 --- a/arena/illynet.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/illyx1/AutoGPT.git", - "timestamp": "2023-10-26T06:51:32.589776", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/illynetV2.json b/arena/illynetV2.json deleted file mode 100644 index 005672b39def..000000000000 --- a/arena/illynetV2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/illyx1/AutoGPT.git", - "timestamp": "2023-10-26T13:14:45.725000", - "commit_hash_to_benchmark": "19175badeefc1325f3fa1a7797ddcfb913c23076", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/illyx1.json b/arena/illyx1.json deleted file mode 100644 index 9cedd5c60b71..000000000000 --- a/arena/illyx1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/illyx1/AutoGPT.git", - "timestamp": "2023-10-26T12:36:26.810636", - "commit_hash_to_benchmark": "19175badeefc1325f3fa1a7797ddcfb913c23076", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/info-retrieval.json b/arena/info-retrieval.json deleted file mode 100644 index 1aa51aac7043..000000000000 --- a/arena/info-retrieval.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/paperMoose/AutoGPT", - "timestamp": "2023-10-07T21:38:11.070180", - "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ivangpt_agent.json b/arena/ivangpt_agent.json deleted file mode 100644 index edf940b2236e..000000000000 --- a/arena/ivangpt_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ivanliu1989/Auto-GPT", - "timestamp": "2023-10-29T11:24:30.873532", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/jarvis2.json b/arena/jarvis2.json deleted file mode 100644 index c628f8f54a8c..000000000000 --- a/arena/jarvis2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/MissingDLL/AutoGPT", - "timestamp": "2023-10-08T15:23:46.256775", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/jarvis3.json b/arena/jarvis3.json deleted file mode 100644 index c54000f16456..000000000000 --- a/arena/jarvis3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/MissingDLL/AutoGPT", - "timestamp": "2023-10-08T15:58:33.790030", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - 
"branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/jaxbob1.json b/arena/jaxbob1.json deleted file mode 100644 index db115ceb2be3..000000000000 --- a/arena/jaxbob1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/redthing1/AutoGPT", - "timestamp": "2023-10-05T20:02:22.372414", - "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/job_apply.json b/arena/job_apply.json deleted file mode 100644 index afbeed4e911c..000000000000 --- a/arena/job_apply.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/tkbeili/AutoGPT", - "timestamp": "2023-10-01T04:49:20.239338", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/jonesyboi.json b/arena/jonesyboi.json deleted file mode 100644 index 93b617c172ae..000000000000 --- a/arena/jonesyboi.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/neilrjones/AutoGPT", - "timestamp": "2023-10-18T02:39:02.039894", - "commit_hash_to_benchmark": "d173dd772dfbcce1b75148271857092bc8c22b5c", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/justwondering.json b/arena/justwondering.json deleted file mode 100644 index 0d27545a785c..000000000000 --- a/arena/justwondering.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/tbxy09/JustWondering", "timestamp": "2023-10-26T09:48:15Z", "commit_hash_to_benchmark": "b52fea9ba7510adb8c1e7e5cfb83f5fa181d73cf", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/kingmitch.json b/arena/kingmitch.json deleted file mode 100644 index 304ea0521581..000000000000 --- a/arena/kingmitch.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mitch11223/AutoGPT.git", - "timestamp": "2023-10-20T17:15:31.044252", - "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/lawk.json b/arena/lawk.json deleted file mode 100644 index 09d5cab74629..000000000000 --- a/arena/lawk.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/xiaolongtuan-yuan/AutoGPT", - "timestamp": "2023-10-26T06:18:01.049166", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/lcdegpt.json b/arena/lcdegpt.json deleted file mode 100644 index 637e1e1fa8cd..000000000000 --- a/arena/lcdegpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/tablc/lcdegpt", - "timestamp": "2023-10-17T07:00:24.125505", - "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/letst.json b/arena/letst.json deleted file mode 100644 index 0a0d582afa1a..000000000000 --- a/arena/letst.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest", - "timestamp": "2023-10-16T19:07:43.009481", - "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/letstest.json b/arena/letstest.json deleted file mode 100644 index 5862da1a7907..000000000000 --- a/arena/letstest.json +++ /dev/null @@ 
-1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest", - "timestamp": "2023-10-16T18:38:28.787259", - "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/lilAgent.json b/arena/lilAgent.json deleted file mode 100644 index cbd9f2fb0e96..000000000000 --- a/arena/lilAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mvuthegoat/AutoGPT.git", - "timestamp": "2023-10-29T17:17:08.476300", - "commit_hash_to_benchmark": "9a30e0f9a43fe05005e36f0bad8531e3a92fd9e6", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/linggong.json b/arena/linggong.json deleted file mode 100644 index c89fd2fe4c50..000000000000 --- a/arena/linggong.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/linggong2023/AutoGPT", - "timestamp": "2023-10-24T12:40:35.679665", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/liuzh.json b/arena/liuzh.json deleted file mode 100644 index 5b95e218d6b0..000000000000 --- a/arena/liuzh.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Hanzhang-lang/AutoGPT_zh", - "timestamp": "2023-10-24T10:25:02.790189", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ltzAgent.json b/arena/ltzAgent.json deleted file mode 100644 index 59635f03c100..000000000000 --- a/arena/ltzAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ltzmaxwell/AutoGPT", - "timestamp": "2023-10-25T08:58:41.646491", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/martingpt.json b/arena/martingpt.json deleted file mode 100644 index 849f42003589..000000000000 --- a/arena/martingpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/martinpeng/AutoGPT", - "timestamp": "2023-10-18T05:30:19.072793", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/medical-agent.json b/arena/medical-agent.json deleted file mode 100644 index 47e0a6a08d16..000000000000 --- a/arena/medical-agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/daviddhc20120601/AutoGPT", - "timestamp": "2023-11-02T02:08:34.264727", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/metware.json b/arena/metware.json deleted file mode 100644 index 8f433581c401..000000000000 --- a/arena/metware.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/chenxuya/AutoGPT", - "timestamp": "2023-10-23T02:23:48.775561", - "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/miniAgent.json b/arena/miniAgent.json deleted file mode 100644 index ad71b21b92d0..000000000000 --- a/arena/miniAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bigzz/AutoGPT", - "timestamp": "2023-10-23T02:41:41.828607", - "commit_hash_to_benchmark": 
"1a30d00194b46f8b923bab191404ce9123e34bdf", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/minister_agent.json b/arena/minister_agent.json deleted file mode 100644 index b66f0b76a608..000000000000 --- a/arena/minister_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/miandai/AutoGPT", - "timestamp": "2023-10-25T11:58:34.781500", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/misslu.json b/arena/misslu.json deleted file mode 100644 index 21dc02a45e4c..000000000000 --- a/arena/misslu.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/JasonZhang95/AutoGPT", - "timestamp": "2023-10-02T11:37:30.488121", - "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/mljar-agent.json b/arena/mljar-agent.json deleted file mode 100644 index 70c2e7b6d8d8..000000000000 --- a/arena/mljar-agent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/mljar/mljar-agent", "timestamp": "2023-10-25T14:04:51Z", "commit_hash_to_benchmark": "2fbc4d6ef48f0201c046b649e7bc74b9d11ae4e5", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/momo.json b/arena/momo.json deleted file mode 100644 index b2045bccc607..000000000000 --- a/arena/momo.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/UICJohn/AutoGPT", "timestamp": "2023-10-19T09:52:19Z", "commit_hash_to_benchmark": "3aa92c082ac6912b45583b39d59a13cfda665322", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/monthly_summary.json b/arena/monthly_summary.json deleted file mode 100644 index 3f222a61eaaa..000000000000 --- a/arena/monthly_summary.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/corpetty/AutoGPT", - "timestamp": "2023-09-26T19:43:56.005780", - "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/mrSabelotodo.json b/arena/mrSabelotodo.json deleted file mode 100644 index 4d8a49f6cf0e..000000000000 --- a/arena/mrSabelotodo.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/joslangarica/AutoGPT.git", - "timestamp": "2023-10-03T01:11:32.290733", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/myGPT.json b/arena/myGPT.json deleted file mode 100644 index f5592ec06fc9..000000000000 --- a/arena/myGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Tianxu-Jia/AutoGPT.git", - "timestamp": "2023-10-03T10:59:48.149445", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/my_AutoGPT.json b/arena/my_AutoGPT.json deleted file mode 100644 index 2b48e64bdb67..000000000000 --- a/arena/my_AutoGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Tianxu-Jia/AutoGPT.git", - "timestamp": "2023-10-03T08:57:28.681756", - "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/my_fx_agent.json b/arena/my_fx_agent.json deleted file mode 100644 index 
314e63482591..000000000000 --- a/arena/my_fx_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT.git", - "timestamp": "2023-10-18T07:09:36.565783", - "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/my_gpt.json b/arena/my_gpt.json deleted file mode 100644 index 2eb7006726ce..000000000000 --- a/arena/my_gpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dawnchen123/AutoGPT", - "timestamp": "2023-11-01T02:08:06.032041", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/mygent.json b/arena/mygent.json deleted file mode 100644 index 5eda9ff63128..000000000000 --- a/arena/mygent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/prashanthi-instalily/AutoGPT", - "timestamp": "2023-10-24T13:31:28.287257", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/nawalj.json b/arena/nawalj.json deleted file mode 100644 index 0506380f1732..000000000000 --- a/arena/nawalj.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/nawaljunaid/AutoGPT.git", - "timestamp": "2023-10-03T18:41:12.930097", - "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/newAgent.json b/arena/newAgent.json deleted file mode 100644 index 9ace7df0a0e1..000000000000 --- a/arena/newAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/alexsoshnikov/AutoGPT", - "timestamp": "2023-10-10T09:27:10.249840", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/northfork.json b/arena/northfork.json deleted file mode 100644 index 0b5076ce738f..000000000000 --- a/arena/northfork.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Sewen/AutoGPT", - "timestamp": "2023-09-26T07:18:29.975526", - "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/od_agent_1.json b/arena/od_agent_1.json deleted file mode 100644 index 068becf683dc..000000000000 --- a/arena/od_agent_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mattsinnock/AutoGPT", - "timestamp": "2023-10-05T01:13:15.930770", - "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/operationAgent.json b/arena/operationAgent.json deleted file mode 100644 index f4587aaa07cf..000000000000 --- a/arena/operationAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/KMing-L/AutoGPT", - "timestamp": "2023-10-09T02:21:56.002832", - "commit_hash_to_benchmark": "2d865cc9e6d0b3c7f10777849adf9492b6400904", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/personal-al-website.json b/arena/personal-al-website.json deleted file mode 100644 index 905ae4ade427..000000000000 --- a/arena/personal-al-website.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": 
"https://github.com/Hazzari/AutoGPT", - "timestamp": "2023-10-01T11:59:23.504561", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/piGPT.json b/arena/piGPT.json deleted file mode 100644 index a01cb6c4a5bc..000000000000 --- a/arena/piGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/pihanya/AutoGPT", - "timestamp": "2023-10-06T20:37:37.445255", - "commit_hash_to_benchmark": "abf88fe5097770b1da3383a19208b5a23e2371f3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/pipeline.json b/arena/pipeline.json deleted file mode 100644 index 4ce4eed21dff..000000000000 --- a/arena/pipeline.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/geesugar/AutoGPT", - "timestamp": "2023-09-26T04:52:08.379642", - "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/podcast_agent.json b/arena/podcast_agent.json deleted file mode 100644 index 6b7487bfab3d..000000000000 --- a/arena/podcast_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aliumujib/AutoGPT", - "timestamp": "2023-10-28T06:03:18.488676", - "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/potato.json b/arena/potato.json deleted file mode 100644 index c78cec9d60c6..000000000000 --- a/arena/potato.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/volkov/AutoGPT", "timestamp": "2023-10-23T05:24:11Z", "commit_hash_to_benchmark": "7d2532c1814d624725e7a1fce8831dc0def27fb8", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/project_assitant.json b/arena/project_assitant.json deleted file mode 100644 index 239a7c92198e..000000000000 --- a/arena/project_assitant.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/milog1994/AutoGPT.git", - "timestamp": "2023-10-30T21:08:25.083221", - "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/project_master.json b/arena/project_master.json deleted file mode 100644 index 79e0f5a234cd..000000000000 --- a/arena/project_master.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/milog1994/AutoGPT.git", - "timestamp": "2023-10-30T21:14:18.974130", - "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/project_review.json b/arena/project_review.json deleted file mode 100644 index e5889d49a1b5..000000000000 --- a/arena/project_review.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/oneforce/AutoGPT", - "timestamp": "2023-10-24T09:51:05.658251", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/prometheus.json b/arena/prometheus.json deleted file mode 100644 index bcd8f6660358..000000000000 --- a/arena/prometheus.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/yashrahurikar23/prometheus", - "timestamp": "2023-10-04T15:21:16.474459", - "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065", - 
"branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/proudgpt.json b/arena/proudgpt.json deleted file mode 100644 index 383a4a2f8707..000000000000 --- a/arena/proudgpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/OmarHory/Star-Agent", - "timestamp": "2023-10-01T22:11:15.978902", - "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/qinghu3.json b/arena/qinghu3.json deleted file mode 100644 index 06b4a4d943de..000000000000 --- a/arena/qinghu3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/QingHu1227/AutoGPT.git", - "timestamp": "2023-11-06T04:11:34.227212", - "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ra.json b/arena/ra.json deleted file mode 100644 index b29e96cecd28..000000000000 --- a/arena/ra.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", - "timestamp": "2023-10-23T18:03:39.069151", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/ra1.json b/arena/ra1.json deleted file mode 100644 index 4b50158c6468..000000000000 --- a/arena/ra1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", - "timestamp": "2023-10-23T18:12:20.095032", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/rachael.json b/arena/rachael.json deleted file mode 100644 index fe57a0c5ddfb..000000000000 --- a/arena/rachael.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dotdust/rachael.git", - "timestamp": "2023-10-08T13:18:35.946639", - "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/raindrop.json b/arena/raindrop.json deleted file mode 100644 index 10decc9c878d..000000000000 --- a/arena/raindrop.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/tianbinraindrop/AutoGPT", - "timestamp": "2023-10-01T02:24:57.822495", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/researchGPT.json b/arena/researchGPT.json deleted file mode 100644 index 3784933f0b7e..000000000000 --- a/arena/researchGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gty3310/AutoGPT", - "timestamp": "2023-10-09T23:36:29.771968", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/researchGPT2.json b/arena/researchGPT2.json deleted file mode 100644 index eadb82df0a15..000000000000 --- a/arena/researchGPT2.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/gty3310/AutoGPT", - "timestamp": "2023-10-17T15:22:36.628578", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/research_analyst.json b/arena/research_analyst.json deleted file mode 100644 index 675df1ad8dcf..000000000000 --- 
a/arena/research_analyst.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT", - "timestamp": "2023-10-23T17:53:54.235178", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/robita.json b/arena/robita.json deleted file mode 100644 index 15f3d44ac75e..000000000000 --- a/arena/robita.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/atetsuka/AutoGPT", - "timestamp": "2023-10-02T07:16:13.845473", - "commit_hash_to_benchmark": "7ec92d8c063fc041eefd9522450e4ef52e5a34da", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/robot.json b/arena/robot.json deleted file mode 100644 index 3f1eded5cf8e..000000000000 --- a/arena/robot.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/brisklad/AutoGPT", - "timestamp": "2023-10-15T13:49:47.384228", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/searchagent.json b/arena/searchagent.json deleted file mode 100644 index 8136c1345685..000000000000 --- a/arena/searchagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sanjeevsaara/AutoGPT", - "timestamp": "2023-10-16T00:01:53.051453", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/set.json b/arena/set.json deleted file mode 100644 index 14efa0819c50..000000000000 --- a/arena/set.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/victorrica/AutoGPT", - "timestamp": "2023-10-24T05:12:51.971269", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/sgpt.json b/arena/sgpt.json deleted file mode 100644 index cf2ab22c0ee6..000000000000 --- a/arena/sgpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ya5has/sgpt", - "timestamp": "2023-11-02T05:51:01.446153", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/shivi.json b/arena/shivi.json deleted file mode 100644 index e7ed40a85015..000000000000 --- a/arena/shivi.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kshivang/DabblerGPT", - "timestamp": "2023-10-07T01:39:16.601657", - "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/sky.json b/arena/sky.json deleted file mode 100644 index 49690196df75..000000000000 --- a/arena/sky.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/hmslsky/Auto-GPT", - "timestamp": "2023-10-31T15:48:50.123435", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/smith.json b/arena/smith.json deleted file mode 100644 index c3bfd5978fd3..000000000000 --- a/arena/smith.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/kevinboudot/AutoGPT", - "timestamp": "2023-10-11T12:25:09.516293", - "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", - "branch_to_benchmark": "master" -} \ 
No newline at end of file diff --git a/arena/songyalei.json b/arena/songyalei.json deleted file mode 100644 index 2c3b7dcc3032..000000000000 --- a/arena/songyalei.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/songyalei/AutoGPT", - "timestamp": "2023-11-16T07:11:39.746384", - "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/sql.json b/arena/sql.json deleted file mode 100644 index a9b357a8038c..000000000000 --- a/arena/sql.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/monsterooo/AutoGPT", - "timestamp": "2023-09-26T06:46:35.721082", - "commit_hash_to_benchmark": "bec207568a93e38bff971525c53612813aa60730", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/stefan.json b/arena/stefan.json deleted file mode 100644 index 96987be6bad8..000000000000 --- a/arena/stefan.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sutefu23/AutoGPT", - "timestamp": "2023-10-21T01:03:06.362579", - "commit_hash_to_benchmark": "03e56fece5008d119dd5ae97da57eb4db3d14a1d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/stockAgent.json b/arena/stockAgent.json deleted file mode 100644 index b4a9c5d3d492..000000000000 --- a/arena/stockAgent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/SnowYoung/StockAgent", - "timestamp": "2023-10-19T09:49:44.372589", - "commit_hash_to_benchmark": "f62651ff3f1ece5520916bee7ee441e1949855f9", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/swarms.json b/arena/swarms.json deleted file mode 100644 index 7bd572350e01..000000000000 --- a/arena/swarms.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/ZackBradshaw/Auto-Swarms", "timestamp": "2023-10-16T15:03:21Z", "commit_hash_to_benchmark": "96b591c6f0918265e2256cb9c76ca2ff50f3983f", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/tdev.json b/arena/tdev.json deleted file mode 100644 index 68518c814bab..000000000000 --- a/arena/tdev.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/miguelcas12/tdev.git", - "timestamp": "2023-09-26T17:36:53.829436", - "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/teacher.json b/arena/teacher.json deleted file mode 100644 index 0e0291c006c5..000000000000 --- a/arena/teacher.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/miaowacao/AutoGPT1", - "timestamp": "2023-10-16T07:21:48.209351", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/test-tpk.json b/arena/test-tpk.json deleted file mode 100644 index 87f4f4e2c42f..000000000000 --- a/arena/test-tpk.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/seeseesky/AutoGPT", - "timestamp": "2023-10-27T04:06:10.599340", - "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/test.json b/arena/test.json deleted file mode 100644 index 00b762a09b78..000000000000 --- a/arena/test.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Nivek92/AutoGPT", - 
"timestamp": "2023-10-01T15:46:07.871808", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/test1.json b/arena/test1.json deleted file mode 100644 index e9f9ff00a30d..000000000000 --- a/arena/test1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mplummeridge/AutoGPT", - "timestamp": "2023-10-24T01:06:24.100385", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/testGPT.json b/arena/testGPT.json deleted file mode 100644 index f1078ed65a14..000000000000 --- a/arena/testGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/PZON2/testGPT", - "timestamp": "2023-10-15T12:06:56.373935", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/thebestagent.json b/arena/thebestagent.json deleted file mode 100644 index 0003b82b62cf..000000000000 --- a/arena/thebestagent.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/theone.json b/arena/theone.json deleted file mode 100644 index 0003b82b62cf..000000000000 --- a/arena/theone.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/tiffGPT.json b/arena/tiffGPT.json deleted file mode 100644 index 84833b637f37..000000000000 --- a/arena/tiffGPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT", - "timestamp": "2023-10-31T18:25:58.281391", - "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/trend_agent.json b/arena/trend_agent.json deleted file mode 100644 index ba7d6839c524..000000000000 --- a/arena/trend_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Asmedeus998/AutoGPT.git", - "timestamp": "2023-10-01T23:04:42.429686", - "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/umiuni_agent.json b/arena/umiuni_agent.json deleted file mode 100644 index 0dd76a137ef0..000000000000 --- a/arena/umiuni_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/umiuni-community/AutoGPT.git", - "timestamp": "2023-10-01T11:37:00.284821", - "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/uply.json b/arena/uply.json deleted file mode 100644 index f3058753ef9b..000000000000 --- a/arena/uply.json +++ /dev/null @@ -1 +0,0 @@ -{"github_repo_url": "https://github.com/uply23333/Uply-GPT", "timestamp": "2023-10-20T00:48:01Z", "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", "branch_to_benchmark": "master"} \ No newline at end of file diff --git a/arena/url-to-lead.json b/arena/url-to-lead.json deleted 
file mode 100644 index f7564d8636ca..000000000000 --- a/arena/url-to-lead.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/nikolajlovenhardt/AutoGPT", - "timestamp": "2023-11-01T15:18:00.402718", - "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/v-gpt.json b/arena/v-gpt.json deleted file mode 100644 index 1537194575d0..000000000000 --- a/arena/v-gpt.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Varun565/AutoGPT", - "timestamp": "2023-10-05T03:17:36.972978", - "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/victor2-0.json b/arena/victor2-0.json deleted file mode 100644 index b984c1bcca5e..000000000000 --- a/arena/victor2-0.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/victorleduc/AutoGPT", - "timestamp": "2023-10-23T23:35:53.044545", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/web_developer.json b/arena/web_developer.json deleted file mode 100644 index 7f1f9c4afb38..000000000000 --- a/arena/web_developer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/paul726/AutoGPT", - "timestamp": "2023-10-15T13:36:03.387061", - "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/webagent.json b/arena/webagent.json deleted file mode 100644 index f1bccc9f71c0..000000000000 --- a/arena/webagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/ddNTP/myagent.git", - "timestamp": "2023-09-20T11:21:05.331950", - "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/webgeek.json b/arena/webgeek.json deleted file mode 100644 index 33789db6b0f0..000000000000 --- a/arena/webgeek.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/webgeeksai/AutoGPT.git", - "timestamp": "2023-10-13T06:22:22.056151", - "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/wedding-planner.json b/arena/wedding-planner.json deleted file mode 100644 index b2acfa68685b..000000000000 --- a/arena/wedding-planner.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/mogronalol/AutoGPT", - "timestamp": "2023-10-08T20:31:43.422977", - "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/woohoo_agent.json b/arena/woohoo_agent.json deleted file mode 100644 index a805c34986a7..000000000000 --- a/arena/woohoo_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/FIresInWind/AutoGPT", - "timestamp": "2023-10-19T15:14:59.786203", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/wyjagent.json b/arena/wyjagent.json deleted file mode 100644 index e96772536dc7..000000000000 --- a/arena/wyjagent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": 
"https://github.com/wangyijunlyy/AutoGPT", - "timestamp": "2023-11-03T09:21:36.143887", - "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/xmly.json b/arena/xmly.json deleted file mode 100644 index 23cf046e52e3..000000000000 --- a/arena/xmly.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/dongdaoguang/AutoGPT", - "timestamp": "2023-10-11T06:30:06.866694", - "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/xq_agent.json b/arena/xq_agent.json deleted file mode 100644 index cccf5586bb04..000000000000 --- a/arena/xq_agent.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/emptykid/AutoGPT", - "timestamp": "2023-10-24T10:37:55.170776", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/xt0m-GPT.json b/arena/xt0m-GPT.json deleted file mode 100644 index 130bbae2fc35..000000000000 --- a/arena/xt0m-GPT.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/jcartes/xt0m-GPT", - "timestamp": "2023-10-15T01:31:05.785913", - "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/xtest.json b/arena/xtest.json deleted file mode 100644 index e189babe38a4..000000000000 --- a/arena/xtest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/popperxu/AutoGPT", - "timestamp": "2023-10-31T06:25:36.338549", - "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/yarbis.json b/arena/yarbis.json deleted file mode 100644 index 65d6c50f23ae..000000000000 --- a/arena/yarbis.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/sintecba/AutoGPT", - "timestamp": "2023-10-10T18:11:07.473738", - "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zaheer.json b/arena/zaheer.json deleted file mode 100644 index 01e4e72c8781..000000000000 --- a/arena/zaheer.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/zaheerahmad33/AutoGPT", - "timestamp": "2023-10-22T21:48:48.414779", - "commit_hash_to_benchmark": "b4ee485906c1d8da71ce9b3093996383322980fe", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zcb.json b/arena/zcb.json deleted file mode 100644 index c1892107073e..000000000000 --- a/arena/zcb.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/asasasheshou/AutoGPT", - "timestamp": "2023-10-25T09:15:30.114147", - "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zczc.json b/arena/zczc.json deleted file mode 100644 index b484f0bef80b..000000000000 --- a/arena/zczc.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/Howard-Cheung/AutoGPT", - "timestamp": "2023-10-26T12:48:30.729105", - "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zhizhi.json 
b/arena/zhizhi.json deleted file mode 100644 index 58d86008e690..000000000000 --- a/arena/zhizhi.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/bolyage/zhizhi", - "timestamp": "2023-10-19T11:38:51.332966", - "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zlipknot_1.json b/arena/zlipknot_1.json deleted file mode 100644 index 0532417963a3..000000000000 --- a/arena/zlipknot_1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/zlipknot/AutoGPT.git", - "timestamp": "2023-10-25T19:20:38.529540", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zlipknot_test_agent_4.json b/arena/zlipknot_test_agent_4.json deleted file mode 100644 index 2096d67b560c..000000000000 --- a/arena/zlipknot_test_agent_4.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/zlipknot/AutoGPT.git", - "timestamp": "2023-10-25T19:13:02.418676", - "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/arena/zze.json b/arena/zze.json deleted file mode 100644 index 7b69f1872b6a..000000000000 --- a/arena/zze.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "github_repo_url": "https://github.com/quasimodo7614/AutoGPT", - "timestamp": "2023-10-16T07:49:29.399457", - "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a", - "branch_to_benchmark": "master" -} \ No newline at end of file diff --git a/assets/gpt_dark_RGB.icns b/assets/gpt_dark_RGB.icns new file mode 100644 index 000000000000..c2efadc4e9e5 Binary files /dev/null and b/assets/gpt_dark_RGB.icns differ diff --git a/assets/gpt_dark_RGB.ico b/assets/gpt_dark_RGB.ico new file mode 100644 index 000000000000..3fc58e36fc5c Binary files /dev/null and b/assets/gpt_dark_RGB.ico differ diff --git a/assets/gpt_dark_RGB.png b/assets/gpt_dark_RGB.png new file mode 100644 index 000000000000..e99d5ec6c9f8 Binary files /dev/null and b/assets/gpt_dark_RGB.png differ diff --git a/autogpt_platform/.gitignore b/autogpt_platform/.gitignore new file mode 100644 index 000000000000..a5b214c8ee30 --- /dev/null +++ b/autogpt_platform/.gitignore @@ -0,0 +1,2 @@ +*.ignore.* +*.ign.* \ No newline at end of file diff --git a/autogpt_platform/Contributor License Agreement (CLA).md b/autogpt_platform/Contributor License Agreement (CLA).md new file mode 100644 index 000000000000..b15bd52e5698 --- /dev/null +++ b/autogpt_platform/Contributor License Agreement (CLA).md @@ -0,0 +1,21 @@ +**Determinist Ltd** + +**Contributor License Agreement (“Agreement”)** + +Thank you for your interest in the AutoGPT open source project at [https://github.com/Significant-Gravitas/AutoGPT](https://github.com/Significant-Gravitas/AutoGPT) stewarded by Determinist Ltd (“**Determinist**”), with offices at 3rd Floor 1 Ashley Road, Altrincham, Cheshire, WA14 2DT, United Kingdom. The form of license below is a document that clarifies the terms under which You, the person listed below, may contribute software code described below (the “**Contribution**”) to the project. We appreciate your participation in our project, and your help in improving our products, so we want you to understand what will be done with the Contributions. 
This license is for your protection as well as the protection of Determinist and its licensees; it does not change your rights to use your own Contributions for any other purpose. + +By submitting a Pull Request which modifies the content of the “autogpt\_platform” folder at [https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt\_platform](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpt_platform), You hereby agree: + +1\. **You grant us the ability to use the Contributions in any way**. You hereby grant to Determinist a non-exclusive, irrevocable, worldwide, royalty-free, sublicenseable, transferable license under all of Your relevant intellectual property rights (including copyright, patent, and any other rights), to use, copy, prepare derivative works of, distribute and publicly perform and display the Contributions on any licensing terms, including without limitation: (a) open source licenses like the GNU General Public License (GPL), the GNU Lesser General Public License (LGPL), the Common Public License, or the Berkeley Science Division license (BSD); and (b) binary, proprietary, or commercial licenses. + +2\. **Grant of Patent License**. You hereby grant to Determinist a worldwide, non-exclusive, royalty-free, irrevocable license, under any rights you may have, now or in the future, in any patents or patent applications, to make, have made, use, offer to sell, sell, and import products containing the Contribution or portions of the Contribution. This license extends to patent claims that are infringed by the Contribution alone or by combination of the Contribution with other inventions. + +3\. **Limitations on Licenses**. The licenses granted in this Agreement will continue for the duration of the applicable patent or intellectual property right under which such license is granted. The licenses granted in this Agreement will include the right to grant and authorize sublicenses, so long as the sublicenses are within the scope of the licenses granted in this Agreement. Except for the licenses granted herein, You reserve all right, title, and interest in and to the Contribution. + +4\. **You are able to grant us these rights**. You represent that You are legally entitled to grant the above license. If Your employer has rights to intellectual property that You create, You represent that You are authorized to make the Contributions on behalf of that employer, or that Your employer has waived such rights for the Contributions. + +5\. **The Contributions are your original work**. You represent that the Contributions are Your original works of authorship, and to Your knowledge, no other person claims, or has the right to claim, any right in any invention or patent related to the Contributions. You also represent that You are not legally obligated, whether by entering into an agreement or otherwise, in any way that conflicts with the terms of this license. For example, if you have signed an agreement requiring you to assign the intellectual property rights in the Contributions to an employer or customer, that would conflict with the terms of this license. + +6\. **We determine the code that is in our products**. You understand that the decision to include the Contribution in any product or source repository is entirely that of Determinist, and this agreement does not guarantee that the Contributions will be included in any product. + +7\.
**No Implied Warranties.** Determinist acknowledges that, except as explicitly described in this Agreement, the Contribution is provided on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. diff --git a/autogpt_platform/LICENCE.txt b/autogpt_platform/LICENCE.txt new file mode 100644 index 000000000000..3f09b052c0f4 --- /dev/null +++ b/autogpt_platform/LICENCE.txt @@ -0,0 +1,164 @@ +# PolyForm Shield License 1.0.0 + + + +## Acceptance + +In order to get any license under these terms, you must agree +to them as both strict obligations and conditions to all +your licenses. + +## Copyright License + +The licensor grants you a copyright license for the +software to do everything you might do with the software +that would otherwise infringe the licensor's copyright +in it for any permitted purpose. However, you may +only distribute the software according to [Distribution +License](#distribution-license) and make changes or new works +based on the software according to [Changes and New Works +License](#changes-and-new-works-license). + +## Distribution License + +The licensor grants you an additional copyright license +to distribute copies of the software. Your license +to distribute covers distributing the software with +changes and new works permitted by [Changes and New Works +License](#changes-and-new-works-license). + +## Notices + +You must ensure that anyone who gets a copy of any part of +the software from you also gets a copy of these terms or the +URL for them above, as well as copies of any plain-text lines +beginning with `Required Notice:` that the licensor provided +with the software. For example: + +> Required Notice: Copyright Yoyodyne, Inc. (http://example.com) + +## Changes and New Works License + +The licensor grants you an additional copyright license to +make changes and new works based on the software for any +permitted purpose. + +## Patent License + +The licensor grants you a patent license for the software that +covers patent claims the licensor can license, or becomes able +to license, that you would infringe by using the software. + +## Noncompete + +Any purpose is a permitted purpose, except for providing any +product that competes with the software or any product the +licensor or any of its affiliates provides using the software. + +## Competition + +Goods and services compete even when they provide functionality +through different kinds of interfaces or for different technical +platforms. Applications can compete with services, libraries +with plugins, frameworks with development tools, and so on, +even if they're written in different programming languages +or for different computer architectures. Goods and services +compete even when provided free of charge. If you market a +product as a practical substitute for the software or another +product, it definitely competes. + +## New Products + +If you are using the software to provide a product that does +not compete, but the licensor or any of its affiliates brings +your product into competition by providing a new version of +the software or another product using the software, you may +continue using versions of the software available under these +terms beforehand to provide your competing product, but not +any later versions. 
+ +## Discontinued Products + +You may begin using the software to compete with a product +or service that the licensor or any of its affiliates has +stopped providing, unless the licensor includes a plain-text +line beginning with `Licensor Line of Business:` with the +software that mentions that line of business. For example: + +> Licensor Line of Business: YoyodyneCMS Content Management +System (http://example.com/cms) + +## Sales of Business + +If the licensor or any of its affiliates sells a line of +business developing the software or using the software +to provide a product, the buyer can also enforce +[Noncompete](#noncompete) for that product. + +## Fair Use + +You may have "fair use" rights for the software under the +law. These terms do not limit them. + +## No Other Rights + +These terms do not allow you to sublicense or transfer any of +your licenses to anyone else, or prevent the licensor from +granting licenses to anyone else. These terms do not imply +any other licenses. + +## Patent Defense + +If you make any written claim that the software infringes or +contributes to infringement of any patent, your patent license +for the software granted under these terms ends immediately. If +your company makes such a claim, your patent license ends +immediately for work on behalf of your company. + +## Violations + +The first time you are notified in writing that you have +violated any of these terms, or done anything with the software +not covered by your licenses, your licenses can nonetheless +continue if you come into full compliance with these terms, +and take practical steps to correct past violations, within +32 days of receiving notice. Otherwise, all your licenses +end immediately. + +## No Liability + +***As far as the law allows, the software comes as is, without +any warranty or condition, and the licensor will not be liable +to you for any damages arising out of these terms or the use +or nature of the software, under any kind of legal claim.*** + +## Definitions + +The **licensor** is the individual or entity offering these +terms, and the **software** is the software the licensor makes +available under these terms. + +A **product** can be a good or service, or a combination +of them. + +**You** refers to the individual or entity agreeing to these +terms. + +**Your company** is any legal entity, sole proprietorship, +or other kind of organization that you work for, plus all +its affiliates. + +**Affiliates** means the other organizations than an +organization has control over, is under the control of, or is +under common control with. + +**Control** means ownership of substantially all the assets of +an entity, or the power to direct its management and policies +by vote, contract, or otherwise. Control can be direct or +indirect. + +**Your licenses** are all the licenses granted to you for the +software under these terms. + +**Use** means anything you do with the software requiring one +of your licenses. diff --git a/autogpt_platform/README.md b/autogpt_platform/README.md new file mode 100644 index 000000000000..64e61e880c06 --- /dev/null +++ b/autogpt_platform/README.md @@ -0,0 +1,151 @@ +# AutoGPT Platform + +Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization. 
+ +## Getting Started + +### Prerequisites + +- Docker +- Docker Compose V2 (comes with Docker Desktop, or can be installed separately) +- Node.js & NPM (for running the frontend application) + +### Running the System + +To run the AutoGPT Platform, follow these steps: + +1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository: + ``` + git clone https://github.com/Significant-Gravitas/AutoGPT.git + cd AutoGPT/autogpt_platform + ``` + +2. Run the following command: + ``` + git submodule update --init --recursive + ``` + This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned into the `autogpt_platform` directory. + +3. Run the following command: + ``` + cp supabase/docker/.env.example .env + ``` + This command will copy `supabase/docker/.env.example` to a new `.env` file in the `autogpt_platform` directory. You can modify this `.env` file to add your own environment variables. + +4. Run the following command: + ``` + docker compose up -d + ``` + This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode. + +5. Navigate to `frontend` within the `autogpt_platform` directory: + ``` + cd frontend + ``` + You will need to run your frontend application separately on your local machine. + +6. Run the following command: + ``` + cp .env.example .env.local + ``` + This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` file in this folder to add your own environment variables for the frontend application. + +7. Run the following commands: + ``` + npm install + npm run dev + ``` + These commands will install the necessary dependencies and start the frontend application in development mode. + If you are using Yarn, you can run the following commands instead: + ``` + yarn install && yarn dev + ``` + +8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend. + +### Docker Compose Commands + +Here are some useful Docker Compose commands for managing your AutoGPT Platform: + +- `docker compose up -d`: Start the services in detached mode. +- `docker compose stop`: Stop the running services without removing them. +- `docker compose rm`: Remove stopped service containers. +- `docker compose build`: Build or rebuild services. +- `docker compose down`: Stop and remove containers, networks, and volumes. +- `docker compose watch`: Watch for changes in your services and automatically update them. + + +### Sample Scenarios + +Here are some common scenarios where you might use multiple Docker Compose commands: + +1. Updating and restarting a specific service: + ``` + docker compose build api_srv + docker compose up -d --no-deps api_srv + ``` + This rebuilds the `api_srv` service and restarts it without affecting other services. + +2. Viewing logs for troubleshooting: + ``` + docker compose logs -f api_srv ws_srv + ``` + This shows and follows the logs for both `api_srv` and `ws_srv` services. + +3. Scaling a service for increased load: + ``` + docker compose up -d --scale executor=3 + ``` + This scales the `executor` service to 3 instances to handle increased load. + +4. Stopping the entire system for maintenance: + ``` + docker compose stop + docker compose rm -f + docker compose pull + docker compose up -d + ``` + This stops all services, removes containers, pulls the latest images, and restarts the system. + +5.
Developing with live updates: + ``` + docker compose watch + ``` + This watches for changes in your code and automatically updates the relevant services. + +6. Checking the status of services: + ``` + docker compose ps + ``` + This shows the current status of all services defined in your docker-compose.yml file. + +These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively. + + +### Persisting Data + +To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how: + +1. Open the `docker-compose.yml` file in a text editor. +2. Add volume configurations for PostgreSQL and Redis services: + + ```yaml + services: + postgres: + # ... other configurations ... + volumes: + - postgres_data:/var/lib/postgresql/data + + redis: + # ... other configurations ... + volumes: + - redis_data:/data + + volumes: + postgres_data: + redis_data: + ``` + +3. Save the file and run `docker compose up -d` to apply the changes. + +This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts. diff --git a/autogpts/autogpt/agbenchmark_config/__init__.py b/autogpt_platform/__init__.py similarity index 100% rename from autogpts/autogpt/agbenchmark_config/__init__.py rename to autogpt_platform/__init__.py diff --git a/autogpt_platform/autogpt_libs/README.md b/autogpt_platform/autogpt_libs/README.md new file mode 100644 index 000000000000..e2d6a5e67e20 --- /dev/null +++ b/autogpt_platform/autogpt_libs/README.md @@ -0,0 +1,3 @@ +# AutoGPT Libs + +This is a new project to store shared functionality across different services in NextGen AutoGPT (e.g. authentication) diff --git a/autogpts/autogpt/autogpt/core/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/core/__init__.py rename to autogpt_platform/autogpt_libs/autogpt_libs/__init__.py diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/api_key/key_manager.py b/autogpt_platform/autogpt_libs/autogpt_libs/api_key/key_manager.py new file mode 100644 index 000000000000..257250a75393 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/api_key/key_manager.py @@ -0,0 +1,34 @@ +import hashlib +import secrets +from typing import NamedTuple + + +class APIKeyContainer(NamedTuple): + """Container for API key parts.""" + + raw: str + prefix: str + postfix: str + hash: str + + +class APIKeyManager: + PREFIX: str = "agpt_" + PREFIX_LENGTH: int = 8 + POSTFIX_LENGTH: int = 8 + + def generate_api_key(self) -> APIKeyContainer: + """Generate a new API key with all its parts.""" + raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}" + return APIKeyContainer( + raw=raw_key, + prefix=raw_key[: self.PREFIX_LENGTH], + postfix=raw_key[-self.POSTFIX_LENGTH :], + hash=hashlib.sha256(raw_key.encode()).hexdigest(), + ) + + def verify_api_key(self, provided_key: str, stored_hash: str) -> bool: + """Verify if a provided API key matches the stored hash.""" + if not provided_key.startswith(self.PREFIX): + return False + return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/__init__.py new file mode 100644 index 000000000000..5090cb4f035d --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/__init__.py @@ -0,0 +1,14 @@ +from .config import Settings +from .depends import 
requires_admin_user, requires_user +from .jwt_utils import parse_jwt_token +from .middleware import auth_middleware +from .models import User + +__all__ = [ + "Settings", + "parse_jwt_token", + "requires_user", + "requires_admin_user", + "auth_middleware", + "User", +] diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/config.py new file mode 100644 index 000000000000..1c7bc182908f --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/config.py @@ -0,0 +1,18 @@ +import os + +from dotenv import load_dotenv + +load_dotenv() + + +class Settings: + JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "") + ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true" + JWT_ALGORITHM: str = "HS256" + + @property + def is_configured(self) -> bool: + return bool(self.JWT_SECRET_KEY) + + +settings = Settings() diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends.py new file mode 100644 index 000000000000..88409e413cea --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends.py @@ -0,0 +1,46 @@ +import fastapi + +from .config import Settings +from .middleware import auth_middleware +from .models import DEFAULT_USER_ID, User + + +def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User: + return verify_user(payload, admin_only=False) + + +def requires_admin_user( + payload: dict = fastapi.Depends(auth_middleware), +) -> User: + return verify_user(payload, admin_only=True) + + +def verify_user(payload: dict | None, admin_only: bool) -> User: + if not payload: + if Settings.ENABLE_AUTH: + raise fastapi.HTTPException( + status_code=401, detail="Authorization header is missing" + ) + # This handles the case when authentication is disabled + payload = {"sub": DEFAULT_USER_ID, "role": "admin"} + + user_id = payload.get("sub") + + if not user_id: + raise fastapi.HTTPException( + status_code=401, detail="User ID not found in token" + ) + + if admin_only and payload["role"] != "admin": + raise fastapi.HTTPException(status_code=403, detail="Admin access required") + + return User.from_payload(payload) + + +def get_user_id(payload: dict = fastapi.Depends(auth_middleware)) -> str: + user_id = payload.get("sub") + if not user_id: + raise fastapi.HTTPException( + status_code=401, detail="User ID not found in token" + ) + return user_id diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends_tests.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends_tests.py new file mode 100644 index 000000000000..b6ac4d2a7bd9 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/depends_tests.py @@ -0,0 +1,68 @@ +import pytest + +from .depends import requires_admin_user, requires_user, verify_user + + +def test_verify_user_no_payload(): + user = verify_user(None, admin_only=False) + assert user.user_id == "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + assert user.role == "admin" + + +def test_verify_user_no_user_id(): + with pytest.raises(Exception): + verify_user({"role": "admin"}, admin_only=False) + + +def test_verify_user_not_admin(): + with pytest.raises(Exception): + verify_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "user"}, + admin_only=True, + ) + + +def test_verify_user_with_admin_role(): + user = verify_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"}, + admin_only=True, + ) + assert user.user_id == "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + 
assert user.role == "admin" + + +def test_verify_user_with_user_role(): + user = verify_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "user"}, + admin_only=False, + ) + assert user.user_id == "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + assert user.role == "user" + + +def test_requires_user(): + user = requires_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "user"} + ) + assert user.user_id == "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + assert user.role == "user" + + +def test_requires_user_no_user_id(): + with pytest.raises(Exception): + requires_user({"role": "user"}) + + +def test_requires_admin_user(): + user = requires_admin_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"} + ) + assert user.user_id == "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + assert user.role == "admin" + + +def test_requires_admin_user_not_admin(): + with pytest.raises(Exception): + requires_admin_user( + {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "user"} + ) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/jwt_utils.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/jwt_utils.py new file mode 100644 index 000000000000..900275f0bbb1 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/jwt_utils.py @@ -0,0 +1,27 @@ +from typing import Any, Dict + +import jwt + +from .config import settings + + +def parse_jwt_token(token: str) -> Dict[str, Any]: + """ + Parse and validate a JWT token. + + :param token: The token to parse + :return: The decoded payload + :raises ValueError: If the token is invalid or expired + """ + try: + payload = jwt.decode( + token, + settings.JWT_SECRET_KEY, + algorithms=[settings.JWT_ALGORITHM], + audience="authenticated", + ) + return payload + except jwt.ExpiredSignatureError: + raise ValueError("Token has expired") + except jwt.InvalidTokenError as e: + raise ValueError(f"Invalid token: {str(e)}") diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/middleware.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/middleware.py new file mode 100644 index 000000000000..783e1b35beab --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/middleware.py @@ -0,0 +1,31 @@ +import logging + +from fastapi import HTTPException, Request +from fastapi.security import HTTPBearer + +from .config import settings +from .jwt_utils import parse_jwt_token + +security = HTTPBearer() +logger = logging.getLogger(__name__) + + +async def auth_middleware(request: Request): + if not settings.ENABLE_AUTH: + # If authentication is disabled, allow the request to proceed + logger.warn("Auth disabled") + return {} + + security = HTTPBearer() + credentials = await security(request) + + if not credentials: + raise HTTPException(status_code=401, detail="Authorization header is missing") + + try: + payload = parse_jwt_token(credentials.credentials) + request.state.user = payload + logger.debug("Token decoded successfully") + except ValueError as e: + raise HTTPException(status_code=401, detail=str(e)) + return payload diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/auth/models.py b/autogpt_platform/autogpt_libs/autogpt_libs/auth/models.py new file mode 100644 index 000000000000..1bb8fe179b13 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/auth/models.py @@ -0,0 +1,22 @@ +from dataclasses import dataclass + +DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a" +DEFAULT_EMAIL = "default@example.com" + + +# Using dataclass here to avoid adding dependency on pydantic +@dataclass(frozen=True) 
+class User: + user_id: str + email: str + phone_number: str + role: str + + @classmethod + def from_payload(cls, payload): + return cls( + user_id=payload["sub"], + email=payload.get("email", ""), + phone_number=payload.get("phone", ""), + role=payload["role"], + ) diff --git a/autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py rename to autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/__init__.py diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py new file mode 100644 index 000000000000..dde516c1d8fa --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client.py @@ -0,0 +1,167 @@ +import asyncio +import contextlib +import logging +from functools import wraps +from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast + +import ldclient +from fastapi import HTTPException +from ldclient import Context, LDClient +from ldclient.config import Config +from typing_extensions import ParamSpec + +from .config import SETTINGS + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.DEBUG) + +P = ParamSpec("P") +T = TypeVar("T") + + +def get_client() -> LDClient: + """Get the LaunchDarkly client singleton.""" + return ldclient.get() + + +def initialize_launchdarkly() -> None: + sdk_key = SETTINGS.launch_darkly_sdk_key + logger.debug( + f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}" + ) + + if not sdk_key: + logger.warning("LaunchDarkly SDK key not configured") + return + + config = Config(sdk_key) + ldclient.set_config(config) + + if ldclient.get().is_initialized(): + logger.info("LaunchDarkly client initialized successfully") + else: + logger.error("LaunchDarkly client failed to initialize") + + +def shutdown_launchdarkly() -> None: + """Shutdown the LaunchDarkly client.""" + if ldclient.get().is_initialized(): + ldclient.get().close() + logger.info("LaunchDarkly client closed successfully") + + +def create_context( + user_id: str, additional_attributes: Optional[Dict[str, Any]] = None +) -> Context: + """Create LaunchDarkly context with optional additional attributes.""" + builder = Context.builder(str(user_id)).kind("user") + if additional_attributes: + for key, value in additional_attributes.items(): + builder.set(key, value) + return builder.build() + + +def feature_flag( + flag_key: str, + default: bool = False, +) -> Callable[ + [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]] +]: + """ + Decorator for feature flag protected endpoints. 
+ """ + + def decorator( + func: Callable[P, Union[T, Awaitable[T]]], + ) -> Callable[P, Union[T, Awaitable[T]]]: + @wraps(func) + async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + try: + user_id = kwargs.get("user_id") + if not user_id: + raise ValueError("user_id is required") + + if not get_client().is_initialized(): + logger.warning( + f"LaunchDarkly not initialized, using default={default}" + ) + is_enabled = default + else: + context = create_context(str(user_id)) + is_enabled = get_client().variation(flag_key, context, default) + + if not is_enabled: + raise HTTPException(status_code=404, detail="Feature not available") + + result = func(*args, **kwargs) + if asyncio.iscoroutine(result): + return await result + return cast(T, result) + except Exception as e: + logger.error(f"Error evaluating feature flag {flag_key}: {e}") + raise + + @wraps(func) + def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + try: + user_id = kwargs.get("user_id") + if not user_id: + raise ValueError("user_id is required") + + if not get_client().is_initialized(): + logger.warning( + f"LaunchDarkly not initialized, using default={default}" + ) + is_enabled = default + else: + context = create_context(str(user_id)) + is_enabled = get_client().variation(flag_key, context, default) + + if not is_enabled: + raise HTTPException(status_code=404, detail="Feature not available") + + return cast(T, func(*args, **kwargs)) + except Exception as e: + logger.error(f"Error evaluating feature flag {flag_key}: {e}") + raise + + return cast( + Callable[P, Union[T, Awaitable[T]]], + async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper, + ) + + return decorator + + +def percentage_rollout( + flag_key: str, + default: bool = False, +) -> Callable[ + [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]] +]: + """Decorator for percentage-based rollouts.""" + return feature_flag(flag_key, default) + + +def beta_feature( + flag_key: Optional[str] = None, + unauthorized_response: Any = {"message": "Not available in beta"}, +) -> Callable[ + [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]] +]: + """Decorator for beta features.""" + actual_key = f"beta-{flag_key}" if flag_key else "beta" + return feature_flag(actual_key, False) + + +@contextlib.contextmanager +def mock_flag_variation(flag_key: str, return_value: Any): + """Context manager for testing feature flags.""" + original_variation = get_client().variation + get_client().variation = lambda key, context, default: ( + return_value if key == flag_key else original_variation(key, context, default) + ) + try: + yield + finally: + get_client().variation = original_variation diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py new file mode 100644 index 000000000000..8fccfb28b501 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/client_test.py @@ -0,0 +1,45 @@ +import pytest +from ldclient import LDClient + +from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation + + +@pytest.fixture +def ld_client(mocker): + client = mocker.Mock(spec=LDClient) + mocker.patch("ldclient.get", return_value=client) + client.is_initialized.return_value = True + return client + + +@pytest.mark.asyncio +async def test_feature_flag_enabled(ld_client): + ld_client.variation.return_value = True + + @feature_flag("test-flag") + async def test_function(user_id: str): + return 
"success" + + result = test_function(user_id="test-user") + assert result == "success" + ld_client.variation.assert_called_once() + + +@pytest.mark.asyncio +async def test_feature_flag_unauthorized_response(ld_client): + ld_client.variation.return_value = False + + @feature_flag("test-flag") + async def test_function(user_id: str): + return "success" + + result = test_function(user_id="test-user") + assert result == {"error": "disabled"} + + +def test_mock_flag_variation(ld_client): + with mock_flag_variation("test-flag", True): + assert ld_client.variation("test-flag", None, False) + + with mock_flag_variation("test-flag", False): + assert ld_client.variation("test-flag", None, False) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py new file mode 100644 index 000000000000..e01c285d1e66 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/feature_flag/config.py @@ -0,0 +1,15 @@ +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class Settings(BaseSettings): + launch_darkly_sdk_key: str = Field( + default="", + description="The Launch Darkly SDK key", + validation_alias="LAUNCH_DARKLY_SDK_KEY", + ) + + model_config = SettingsConfigDict(case_sensitive=True, extra="ignore") + + +SETTINGS = Settings() diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/__init__.py new file mode 100644 index 000000000000..cf327efde222 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/__init__.py @@ -0,0 +1,9 @@ +from .config import configure_logging +from .filters import BelowLevelFilter +from .formatters import FancyConsoleFormatter + +__all__ = [ + "configure_logging", + "BelowLevelFilter", + "FancyConsoleFormatter", +] diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py new file mode 100644 index 000000000000..eae811231fdd --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py @@ -0,0 +1,166 @@ +"""Logging module for Auto-GPT.""" + +import logging +import sys +from pathlib import Path + +from pydantic import Field, field_validator +from pydantic_settings import BaseSettings, SettingsConfigDict + +from .filters import BelowLevelFilter +from .formatters import AGPTFormatter, StructuredLoggingFormatter + +LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs" +LOG_FILE = "activity.log" +DEBUG_LOG_FILE = "debug.log" +ERROR_LOG_FILE = "error.log" + +SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s" + +DEBUG_LOG_FORMAT = ( + "%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s" +) + + +class LoggingConfig(BaseSettings): + level: str = Field( + default="INFO", + description="Logging level", + validation_alias="LOG_LEVEL", + ) + + enable_cloud_logging: bool = Field( + default=False, + description="Enable logging to Google Cloud Logging", + ) + + enable_file_logging: bool = Field( + default=False, + description="Enable logging to file", + ) + # File output + log_dir: Path = Field( + default=LOG_DIR, + description="Log directory", + ) + + model_config = SettingsConfigDict( + env_prefix="", + env_file=".env", + env_file_encoding="utf-8", + extra="ignore", + ) + + @field_validator("level", mode="before") + @classmethod + def parse_log_level(cls, v): + if isinstance(v, str): + v = v.upper() + if v not in ["DEBUG", 
"INFO", "WARNING", "ERROR", "CRITICAL"]: + raise ValueError(f"Invalid log level: {v}") + return v + return v + + +def configure_logging(force_cloud_logging: bool = False) -> None: + """Configure the native logging module based on the LoggingConfig settings. + + This function sets up logging handlers and formatters according to the + configuration specified in the LoggingConfig object. It supports various + logging outputs including console, file, cloud, and JSON logging. + + The function uses the LoggingConfig object to determine which logging + features to enable and how to configure them. This includes setting + log levels, log formats, and output destinations. + + No arguments are required as the function creates its own LoggingConfig + instance internally. + + Note: This function is typically called at the start of the application + to set up the logging infrastructure. + """ + + config = LoggingConfig() + + log_handlers: list[logging.Handler] = [] + + # Cloud logging setup + if config.enable_cloud_logging or force_cloud_logging: + import google.cloud.logging + from google.cloud.logging.handlers import CloudLoggingHandler + from google.cloud.logging_v2.handlers.transports.sync import SyncTransport + + client = google.cloud.logging.Client() + cloud_handler = CloudLoggingHandler( + client, + name="autogpt_logs", + transport=SyncTransport, + ) + cloud_handler.setLevel(config.level) + cloud_handler.setFormatter(StructuredLoggingFormatter()) + log_handlers.append(cloud_handler) + print("Cloud logging enabled") + else: + # Console output handlers + stdout = logging.StreamHandler(stream=sys.stdout) + stdout.setLevel(config.level) + stdout.addFilter(BelowLevelFilter(logging.WARNING)) + if config.level == logging.DEBUG: + stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT)) + else: + stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT)) + + stderr = logging.StreamHandler() + stderr.setLevel(logging.WARNING) + if config.level == logging.DEBUG: + stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT)) + else: + stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT)) + + log_handlers += [stdout, stderr] + print("Console logging enabled") + + # File logging setup + if config.enable_file_logging: + # create log directory if it doesn't exist + if not config.log_dir.exists(): + config.log_dir.mkdir(parents=True, exist_ok=True) + + print(f"Log directory: {config.log_dir}") + + # Activity log handler (INFO and above) + activity_log_handler = logging.FileHandler( + config.log_dir / LOG_FILE, "a", "utf-8" + ) + activity_log_handler.setLevel(config.level) + activity_log_handler.setFormatter( + AGPTFormatter(SIMPLE_LOG_FORMAT, no_color=True) + ) + log_handlers.append(activity_log_handler) + + if config.level == logging.DEBUG: + # Debug log handler (all levels) + debug_log_handler = logging.FileHandler( + config.log_dir / DEBUG_LOG_FILE, "a", "utf-8" + ) + debug_log_handler.setLevel(logging.DEBUG) + debug_log_handler.setFormatter( + AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True) + ) + log_handlers.append(debug_log_handler) + + # Error log handler (ERROR and above) + error_log_handler = logging.FileHandler( + config.log_dir / ERROR_LOG_FILE, "a", "utf-8" + ) + error_log_handler.setLevel(logging.ERROR) + error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True)) + log_handlers.append(error_log_handler) + print("File logging enabled") + + # Configure the root logger + logging.basicConfig( + format=DEBUG_LOG_FORMAT if config.level == logging.DEBUG else SIMPLE_LOG_FORMAT, + level=config.level, + 
handlers=log_handlers, + ) diff --git a/autogpts/autogpt/autogpt/logs/filters.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/filters.py similarity index 100% rename from autogpts/autogpt/autogpt/logs/filters.py rename to autogpt_platform/autogpt_libs/autogpt_libs/logging/filters.py diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/formatters.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/formatters.py new file mode 100644 index 000000000000..cf27bc4667fd --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/formatters.py @@ -0,0 +1,95 @@ +import logging + +from colorama import Fore, Style +from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler + +from .utils import remove_color_codes + + +class FancyConsoleFormatter(logging.Formatter): + """ + A custom logging formatter designed for console output. + + This formatter enhances the standard logging output with color coding. The color + coding is based on the level of the log message, making it easier to distinguish + between different types of messages in the console output. + + The color for each level is defined in the LEVEL_COLOR_MAP class attribute. + """ + + # level -> (level & text color, title color) + LEVEL_COLOR_MAP = { + logging.DEBUG: Fore.LIGHTBLACK_EX, + logging.INFO: Fore.BLUE, + logging.WARNING: Fore.YELLOW, + logging.ERROR: Fore.RED, + logging.CRITICAL: Fore.RED + Style.BRIGHT, + } + + def format(self, record: logging.LogRecord) -> str: + # Make sure `msg` is a string + if not hasattr(record, "msg"): + record.msg = "" + elif type(record.msg) is not str: + record.msg = str(record.msg) + + # Determine default color based on error level + level_color = "" + if record.levelno in self.LEVEL_COLOR_MAP: + level_color = self.LEVEL_COLOR_MAP[record.levelno] + record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}" + + # Determine color for message + color = getattr(record, "color", level_color) + color_is_specified = hasattr(record, "color") + + # Don't color INFO messages unless the color is explicitly specified. 
+ if color and (record.levelno != logging.INFO or color_is_specified): + record.msg = f"{color}{record.msg}{Style.RESET_ALL}" + + return super().format(record) + + +class AGPTFormatter(FancyConsoleFormatter): + def __init__(self, *args, no_color: bool = False, **kwargs): + super().__init__(*args, **kwargs) + self.no_color = no_color + + def format(self, record: logging.LogRecord) -> str: + # Make sure `msg` is a string + if not hasattr(record, "msg"): + record.msg = "" + elif type(record.msg) is not str: + record.msg = str(record.msg) + + # Strip color from the message to prevent color spoofing + if record.msg and not getattr(record, "preserve_color", False): + record.msg = remove_color_codes(record.msg) + + # Determine color for title + title = getattr(record, "title", "") + title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get( + record.levelno, "" + ) + if title and title_color: + title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}" + # Make sure record.title is set, and padded with a space if not empty + record.title = f"{title} " if title else "" + + if self.no_color: + return remove_color_codes(super().format(record)) + else: + return super().format(record) + + +class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter): + def __init__(self): + # Set up CloudLoggingFilter to add diagnostic info to the log records + self.cloud_logging_filter = CloudLoggingFilter() + + # Init StructuredLogHandler + super().__init__() + + def format(self, record: logging.LogRecord) -> str: + self.cloud_logging_filter.filter(record) + return super().format(record) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/handlers.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/handlers.py new file mode 100644 index 000000000000..57d1fb630892 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/handlers.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +import json +import logging + + +class JsonFileHandler(logging.FileHandler): + def format(self, record: logging.LogRecord) -> str: + record.json_data = json.loads(record.getMessage()) + return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4) + + def emit(self, record: logging.LogRecord) -> None: + with open(self.baseFilename, "w", encoding="utf-8") as f: + f.write(self.format(record)) diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py new file mode 100644 index 000000000000..24e20986549c --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/test_utils.py @@ -0,0 +1,36 @@ +import pytest + +from .utils import remove_color_codes + + +@pytest.mark.parametrize( + "raw_text, clean_text", + [ + ( + "COMMAND = \x1b[36mbrowse_website\x1b[0m " + "ARGUMENTS = \x1b[36m{'url': 'https://www.google.com'," + " 'question': 'What is the capital of France?'}\x1b[0m", + "COMMAND = browse_website " + "ARGUMENTS = {'url': 'https://www.google.com'," + " 'question': 'What is the capital of France?'}", + ), + ( + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': " + "'https://github.com/Significant-Gravitas/AutoGPT," + " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}", + "{'Schaue dir meine Projekte auf github () an, als auch meine Webseiten': " + "'https://github.com/Significant-Gravitas/AutoGPT," + " https://discord.gg/autogpt und https://twitter.com/Auto_GPT'}", + ), + ("", ""), + ("hello", "hello"), + ("hello\x1b[31m 
world", "hello world"), + ("\x1b[36mHello,\x1b[32m World!", "Hello, World!"), + ( + "\x1b[1m\x1b[31mError:\x1b[0m\x1b[31m file not found", + "Error: file not found", + ), + ], +) +def test_remove_color_codes(raw_text, clean_text): + assert remove_color_codes(raw_text) == clean_text diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/logging/utils.py b/autogpt_platform/autogpt_libs/autogpt_libs/logging/utils.py new file mode 100644 index 000000000000..5c5c09221c32 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/logging/utils.py @@ -0,0 +1,27 @@ +import logging +import re +from typing import Any + +from colorama import Fore + + +def remove_color_codes(s: str) -> str: + return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s) + + +def fmt_kwargs(kwargs: dict) -> str: + return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items()) + + +def print_attribute( + title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = "" +) -> None: + logger = logging.getLogger() + logger.info( + str(value), + extra={ + "title": f"{title.rstrip(':')}:", + "title_color": title_color, + "color": value_color, + }, + ) diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py rename to autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/__init__.py diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py new file mode 100644 index 000000000000..76c9abaa0729 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/config.py @@ -0,0 +1,31 @@ +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class RateLimitSettings(BaseSettings): + redis_host: str = Field( + default="redis://localhost:6379", + description="Redis host", + validation_alias="REDIS_HOST", + ) + + redis_port: str = Field( + default="6379", description="Redis port", validation_alias="REDIS_PORT" + ) + + redis_password: str = Field( + default="password", + description="Redis password", + validation_alias="REDIS_PASSWORD", + ) + + requests_per_minute: int = Field( + default=60, + description="Maximum number of requests allowed per minute per API key", + validation_alias="RATE_LIMIT_REQUESTS_PER_MINUTE", + ) + + model_config = SettingsConfigDict(case_sensitive=True, extra="ignore") + + +RATE_LIMIT_SETTINGS = RateLimitSettings() diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py new file mode 100644 index 000000000000..7ac122429c22 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/limiter.py @@ -0,0 +1,51 @@ +import time +from typing import Tuple + +from redis import Redis + +from .config import RATE_LIMIT_SETTINGS + + +class RateLimiter: + def __init__( + self, + redis_host: str = RATE_LIMIT_SETTINGS.redis_host, + redis_port: str = RATE_LIMIT_SETTINGS.redis_port, + redis_password: str = RATE_LIMIT_SETTINGS.redis_password, + requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute, + ): + self.redis = Redis( + host=redis_host, + port=int(redis_port), + password=redis_password, + decode_responses=True, + ) + self.window = 60 + self.max_requests = requests_per_minute + + async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]: + """ + Check if request is 
within rate limits. + + Args: + api_key_id: The API key identifier to check + + Returns: + Tuple of (is_allowed, remaining_requests, reset_time) + """ + now = time.time() + window_start = now - self.window + key = f"ratelimit:{api_key_id}:1min" + + pipe = self.redis.pipeline() + pipe.zremrangebyscore(key, 0, window_start) + pipe.zadd(key, {str(now): now}) + pipe.zcount(key, window_start, now) + pipe.expire(key, self.window) + + _, _, request_count, _ = pipe.execute() + + remaining = max(0, self.max_requests - request_count) + reset_time = int(now + self.window) + + return request_count <= self.max_requests, remaining, reset_time diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py new file mode 100644 index 000000000000..496697d8b1e2 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/rate_limit/middleware.py @@ -0,0 +1,32 @@ +from fastapi import HTTPException, Request +from starlette.middleware.base import RequestResponseEndpoint + +from .limiter import RateLimiter + + +async def rate_limit_middleware(request: Request, call_next: RequestResponseEndpoint): + """FastAPI middleware for rate limiting API requests.""" + limiter = RateLimiter() + + if not request.url.path.startswith("/api"): + return await call_next(request) + + api_key = request.headers.get("Authorization") + if not api_key: + return await call_next(request) + + api_key = api_key.replace("Bearer ", "") + + is_allowed, remaining, reset_time = await limiter.check_rate_limit(api_key) + + if not is_allowed: + raise HTTPException( + status_code=429, detail="Rate limit exceeded. Please try again later." + ) + + response = await call_next(request) + response.headers["X-RateLimit-Limit"] = str(limiter.max_requests) + response.headers["X-RateLimit-Remaining"] = str(remaining) + response.headers["X-RateLimit-Reset"] = str(reset_time) + + return response diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py new file mode 100644 index 000000000000..04c6fa2a7728 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py @@ -0,0 +1,76 @@ +from typing import Annotated, Any, Literal, Optional, TypedDict +from uuid import uuid4 + +from pydantic import BaseModel, Field, SecretStr, field_serializer + + +class _BaseCredentials(BaseModel): + id: str = Field(default_factory=lambda: str(uuid4())) + provider: str + title: Optional[str] + + @field_serializer("*") + def dump_secret_strings(value: Any, _info): + if isinstance(value, SecretStr): + return value.get_secret_value() + return value + + +class OAuth2Credentials(_BaseCredentials): + type: Literal["oauth2"] = "oauth2" + username: Optional[str] + """Username of the third-party service user that these credentials belong to""" + access_token: SecretStr + access_token_expires_at: Optional[int] + """Unix timestamp (seconds) indicating when the access token expires (if at all)""" + refresh_token: Optional[SecretStr] + refresh_token_expires_at: Optional[int] + """Unix timestamp (seconds) indicating when the refresh token expires (if at all)""" + scopes: list[str] + metadata: dict[str, Any] = Field(default_factory=dict) + + def bearer(self) -> str: + return f"Bearer {self.access_token.get_secret_value()}" + + +class APIKeyCredentials(_BaseCredentials): + type: Literal["api_key"] = "api_key" + api_key: 
SecretStr + expires_at: Optional[int] + """Unix timestamp (seconds) indicating when the API key expires (if at all)""" + + def bearer(self) -> str: + return f"Bearer {self.api_key.get_secret_value()}" + + +Credentials = Annotated[ + OAuth2Credentials | APIKeyCredentials, + Field(discriminator="type"), +] + + +CredentialsType = Literal["api_key", "oauth2"] + + +class OAuthState(BaseModel): + token: str + provider: str + expires_at: int + code_verifier: Optional[str] = None + scopes: list[str] + """Unix timestamp (seconds) indicating when this OAuth state expires""" + + +class UserMetadata(BaseModel): + integration_credentials: list[Credentials] = Field(default_factory=list) + integration_oauth_states: list[OAuthState] = Field(default_factory=list) + + +class UserMetadataRaw(TypedDict, total=False): + integration_credentials: list[dict] + integration_oauth_states: list[dict] + + +class UserIntegrations(BaseModel): + credentials: list[Credentials] = Field(default_factory=list) + oauth_states: list[OAuthState] = Field(default_factory=list) diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py rename to autogpt_platform/autogpt_libs/autogpt_libs/utils/__init__.py diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py new file mode 100644 index 000000000000..5c0241c258cb --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py @@ -0,0 +1,20 @@ +import threading +from typing import Callable, ParamSpec, TypeVar + +P = ParamSpec("P") +R = TypeVar("R") + + +def thread_cached(func: Callable[P, R]) -> Callable[P, R]: + thread_local = threading.local() + + def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + cache = getattr(thread_local, "cache", None) + if cache is None: + cache = thread_local.cache = {} + key = (args, tuple(sorted(kwargs.items()))) + if key not in cache: + cache[key] = func(*args, **kwargs) + return cache[key] + + return wrapper diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py new file mode 100644 index 000000000000..ca44b1ce74b1 --- /dev/null +++ b/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py @@ -0,0 +1,57 @@ +from contextlib import contextmanager +from threading import Lock +from typing import TYPE_CHECKING, Any + +from expiringdict import ExpiringDict + +if TYPE_CHECKING: + from redis import Redis + from redis.lock import Lock as RedisLock + + +class RedisKeyedMutex: + """ + This class provides a mutex that can be locked and unlocked by a specific key, + using Redis as a distributed locking provider. + It uses an ExpiringDict to automatically clear the mutex after a specified timeout, + in case the key is not unlocked for a specified duration, to prevent memory leaks. 
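
The credential models above hold tokens as `SecretStr`, which masks the value in `repr()` and logs; the wildcard `field_serializer` unwraps secrets only when the model is dumped, and the `Annotated` union discriminates on the `type` literal. A small sketch with made-up values (the import path is an assumption):

```python
# Sketch with made-up values: the secret stays masked in memory and repr, but
# the "*" field serializer unwraps it when the model is dumped for storage.
from autogpt_libs.supabase_integration_credentials_store.types import (
    APIKeyCredentials,
)

creds = APIKeyCredentials(
    provider="openai",
    title="My OpenAI key",
    api_key="sk-made-up-example",  # plain str is coerced to SecretStr
    expires_at=None,
)

print(creds.api_key)                  # **********  (masked)
print(creds.bearer())                 # Bearer sk-made-up-example
print(creds.model_dump()["api_key"])  # sk-made-up-example (plain string)
```
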
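
`thread_cached` memoizes per thread: the cache dict lives in `threading.local()` and is keyed by the positional args plus sorted kwargs, so concurrent threads never share (or contend for) cached values. A minimal sketch with hypothetical names:

```python
# Minimal usage sketch (hypothetical names): the cache lives in
# threading.local(), so each thread computes its own value exactly once.
import threading

from autogpt_libs.utils.cache import thread_cached

call_counts: dict[str, int] = {}


@thread_cached
def expensive(x: int) -> int:
    name = threading.current_thread().name
    call_counts[name] = call_counts.get(name, 0) + 1  # each thread writes its own key
    return x * x


def worker() -> None:
    expensive(2)  # computed in this thread
    expensive(2)  # served from this thread's local cache


threads = [threading.Thread(target=worker, name=f"w{i}") for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Each of the three worker threads computed once, despite two calls each.
print(call_counts)  # e.g. {'w0': 1, 'w1': 1, 'w2': 1}
```
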
+ """ + + def __init__(self, redis: "Redis", timeout: int | None = 60): + self.redis = redis + self.timeout = timeout + self.locks: dict[Any, "RedisLock"] = ExpiringDict( + max_len=6000, max_age_seconds=self.timeout + ) + self.locks_lock = Lock() + + @contextmanager + def locked(self, key: Any): + lock = self.acquire(key) + try: + yield + finally: + if lock.locked(): + lock.release() + + def acquire(self, key: Any) -> "RedisLock": + """Acquires and returns a lock with the given key""" + with self.locks_lock: + if key not in self.locks: + self.locks[key] = self.redis.lock( + str(key), self.timeout, thread_local=False + ) + lock = self.locks[key] + lock.acquire() + return lock + + def release(self, key: Any): + if (lock := self.locks.get(key)) and lock.locked() and lock.owned(): + lock.release() + + def release_all_locks(self): + """Call this on process termination to ensure all locks are released""" + self.locks_lock.acquire(blocking=False) + for lock in self.locks.values(): + if lock.locked() and lock.owned(): + lock.release() diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock new file mode 100644 index 000000000000..3c8b01b1f335 --- /dev/null +++ b/autogpt_platform/autogpt_libs/poetry.lock @@ -0,0 +1,1931 @@ +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.0" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, + {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, +] + +[[package]] +name = "aiohttp" +version = "3.10.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f1f1c75c395991ce9c94d3e4aa96e5c59c8356a15b1c9231e783865e2772699"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7acae3cf1a2a2361ec4c8e787eaaa86a94171d2417aae53c0cca6ca3118ff6"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94c4381ffba9cc508b37d2e536b418d5ea9cfdc2848b9a7fea6aebad4ec6aac1"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c31ad0c0c507894e3eaa843415841995bf8de4d6b2d24c6e33099f4bc9fc0d4f"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0912b8a8fadeb32ff67a3ed44249448c20148397c1ed905d5dac185b4ca547bb"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d93400c18596b7dc4794d48a63fb361b01a0d8eb39f28800dc900c8fbdaca91"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d00f3c5e0d764a5c9aa5a62d99728c56d455310bcc288a79cab10157b3af426f"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:d742c36ed44f2798c8d3f4bc511f479b9ceef2b93f348671184139e7d708042c"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:814375093edae5f1cb31e3407997cf3eacefb9010f96df10d64829362ae2df69"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8224f98be68a84b19f48e0bdc14224b5a71339aff3a27df69989fa47d01296f3"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d9a487ef090aea982d748b1b0d74fe7c3950b109df967630a20584f9a99c0683"}, + {file = "aiohttp-3.10.5-cp310-cp310-win32.whl", hash = "sha256:d9ef084e3dc690ad50137cc05831c52b6ca428096e6deb3c43e95827f531d5ef"}, + {file = "aiohttp-3.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:66bf9234e08fe561dccd62083bf67400bdbf1c67ba9efdc3dac03650e97c6088"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058"}, + {file = "aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072"}, + {file = "aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6"}, + {file = "aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12"}, + {file = "aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987"}, + {file = "aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04"}, + {file = "aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f6f18898ace4bcd2d41a122916475344a87f1dfdec626ecde9ee802a711bc569"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5ede29d91a40ba22ac1b922ef510aab871652f6c88ef60b9dcdf773c6d32ad7a"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:673f988370f5954df96cc31fd99c7312a3af0a97f09e407399f61583f30da9bc"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58718e181c56a3c02d25b09d4115eb02aafe1a732ce5714ab70326d9776457c3"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b38b1570242fbab8d86a84128fb5b5234a2f70c2e32f3070143a6d94bc854cf"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:074d1bff0163e107e97bd48cad9f928fa5a3eb4b9d33366137ffce08a63e37fe"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd31f176429cecbc1ba499d4aba31aaccfea488f418d60376b911269d3b883c5"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7384d0b87d4635ec38db9263e6a3f1eb609e2e06087f0aa7f63b76833737b471"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8989f46f3d7ef79585e98fa991e6ded55d2f48ae56d2c9fa5e491a6e4effb589"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c83f7a107abb89a227d6c454c613e7606c12a42b9a4ca9c5d7dad25d47c776ae"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cde98f323d6bf161041e7627a5fd763f9fd829bcfcd089804a5fdce7bb6e1b7d"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:676f94c5480d8eefd97c0c7e3953315e4d8c2b71f3b49539beb2aa676c58272f"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2d21ac12dc943c68135ff858c3a989f2194a709e6e10b4c8977d7fcd67dfd511"}, + {file = "aiohttp-3.10.5-cp38-cp38-win32.whl", hash = "sha256:17e997105bd1a260850272bfb50e2a328e029c941c2708170d9d978d5a30ad9a"}, + {file = "aiohttp-3.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:1c19de68896747a2aa6257ae4cf6ef59d73917a36a35ee9d0a6f48cff0f94db8"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7e2fe37ac654032db1f3499fe56e77190282534810e2a8e833141a021faaab0e"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5bf3ead3cb66ab990ee2561373b009db5bc0e857549b6c9ba84b20bc462e172"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b2c16a919d936ca87a3c5f0e43af12a89a3ce7ccbce59a2d6784caba945b68b"}, + {file = 
"aiohttp-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad146dae5977c4dd435eb31373b3fe9b0b1bf26858c6fc452bf6af394067e10b"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c5c6fa16412b35999320f5c9690c0f554392dc222c04e559217e0f9ae244b92"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95c4dc6f61d610bc0ee1edc6f29d993f10febfe5b76bb470b486d90bbece6b22"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da452c2c322e9ce0cfef392e469a26d63d42860f829026a63374fde6b5c5876f"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:898715cf566ec2869d5cb4d5fb4be408964704c46c96b4be267442d265390f32"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:391cc3a9c1527e424c6865e087897e766a917f15dddb360174a70467572ac6ce"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:380f926b51b92d02a34119d072f178d80bbda334d1a7e10fa22d467a66e494db"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce91db90dbf37bb6fa0997f26574107e1b9d5ff939315247b7e615baa8ec313b"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9093a81e18c45227eebe4c16124ebf3e0d893830c6aca7cc310bfca8fe59d857"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ee40b40aa753d844162dcc80d0fe256b87cba48ca0054f64e68000453caead11"}, + {file = "aiohttp-3.10.5-cp39-cp39-win32.whl", hash = "sha256:03f2645adbe17f274444953bdea69f8327e9d278d961d85657cb0d06864814c1"}, + {file = "aiohttp-3.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:d17920f18e6ee090bdd3d0bfffd769d9f2cb4c8ffde3eb203777a3895c128862"}, + {file = "aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = 
"sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] +markers = {main = "python_version < \"3.11\"", dev = "python_full_version < \"3.11.3\""} + +[[package]] +name = "attrs" +version = "24.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] + +[[package]] +name = "cachetools" +version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. 
Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +groups = ["main"] +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = 
"charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "expiringdict" +version = "1.2.2" +description = "Dictionary with auto-expiring values for caching purposes" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"}, + {file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"}, +] + +[package.extras] 
+tests = ["coverage", "coveralls", "dill", "mock", "nose"] + +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = 
"sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = 
"frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + +[[package]] +name = "google-api-core" +version = "2.19.2" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_api_core-2.19.2-py3-none-any.whl", hash = "sha256:53ec0258f2837dd53bbd3d3df50f5359281b3cc13f800c941dd15a9b5a415af4"}, + {file = "google_api_core-2.19.2.tar.gz", hash = "sha256:ca07de7e8aa1c98a8bfca9321890ad2340ef7f2eb136e558cee68f24b94b0a8f"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = [ + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +grpcio-status = [ + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp 
(>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-auth" +version = "2.34.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"}, + {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-cloud-appengine-logging" +version = "1.4.5" +description = "Google Cloud Appengine Logging API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_appengine_logging-1.4.5-py2.py3-none-any.whl", hash = "sha256:344e0244404049b42164e4d6dc718ca2c81b393d066956e7cb85fd9407ed9c48"}, + {file = "google_cloud_appengine_logging-1.4.5.tar.gz", hash = "sha256:de7d766e5d67b19fc5833974b505b32d2a5bbdfb283fd941e320e7cfdae4cb83"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-audit-log" +version = "0.3.0" +description = "Google Cloud Audit Protos" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = "sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"}, + {file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.56.2,<2.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-logging" +version = "3.11.3" +description = "Stackdriver Logging API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"}, + {file = "google_cloud_logging-3.11.3.tar.gz", hash = 
"sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +google-cloud-appengine-logging = ">=0.1.3,<2.0.0dev" +google-cloud-audit-log = ">=0.2.4,<1.0.0dev" +google-cloud-core = ">=2.0.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +opentelemetry-api = ">=1.9.0" +proto-plus = [ + {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, +] +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "googleapis-common-protos" +version = "1.65.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, + {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, +] + +[package.dependencies] +grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""} +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "gotrue" +version = "2.11.1" +description = "Python Client Library for Supabase Auth" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "gotrue-2.11.1-py3-none-any.whl", hash = "sha256:1b2d915bdc65fd0ad608532759ce9c72fa2e910145c1e6901f2188519e7bcd2d"}, + {file = "gotrue-2.11.1.tar.gz", hash = "sha256:5594ceee60bd873e5f4fdd028b08dece3906f6013b6ed08e7786b71c0092fed0"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +pydantic = ">=1.10,<3" + +[[package]] +name = "grpc-google-iam-v1" +version = "0.13.1" +description = "IAM API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "grpc-google-iam-v1-0.13.1.tar.gz", hash = "sha256:3ff4b2fd9d990965e410965253c0da6f66205d5a8291c4c31c6ebecca18a9001"}, + {file = "grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl", hash = "sha256:c3e86151a981811f30d5e7330f271cee53e73bb87755e88cc3b6f0c7b5fe374e"}, +] + +[package.dependencies] +googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]} +grpcio = ">=1.44.0,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "grpcio" +version = "1.66.1" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, + {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, + {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, + {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, + {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, + {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, + {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, + {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, + {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, + {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, + {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, + {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, + {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, + {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, + {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, + {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, + {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, + {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, + {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = 
"sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, + {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, + {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, + {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, + {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, + {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, + {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, + {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, + {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, + {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, + {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, + {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, + {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.66.1)"] + +[[package]] +name = "grpcio-status" +version = "1.66.1" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "grpcio_status-1.66.1-py3-none-any.whl", hash = "sha256:cf9ed0b4a83adbe9297211c95cb5488b0cd065707e812145b842c85c4782ff02"}, + {file = "grpcio_status-1.66.1.tar.gz", hash = "sha256:b3f7d34ccc46d83fea5261eea3786174459f763c31f6e34f1d24eba6d515d024"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.66.1" +protobuf = ">=5.26.1,<6.0dev" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = 
"sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.1.0" +description = "HTTP/2 State-Machine based protocol implementation" +optional = false +python-versions = ">=3.6.1" +groups = ["main"] +files = [ + {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, + {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, +] + +[package.dependencies] +hpack = ">=4.0,<5" +hyperframe = ">=6.0,<7" + +[[package]] +name = "hpack" +version = "4.0.0" +description = "Pure-Python HPACK header compression" +optional = false +python-versions = ">=3.6.1" +groups = ["main"] +files = [ + {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, + {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "hyperframe" +version = "6.0.1" +description = "HTTP/2 framing layer for Python" +optional = false +python-versions = ">=3.6.1" +groups = ["main"] +files = [ + {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, + {file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"}, +] + +[[package]] +name = "idna" +version = "3.8" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, +] + +[[package]] +name = "importlib-metadata" +version = "8.4.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = 
"multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = 
"multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = 
"multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = 
"multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "opentelemetry-api" +version = "1.27.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = 
[ + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.4.0" + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "postgrest" +version = "0.19.1" +description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "postgrest-0.19.1-py3-none-any.whl", hash = "sha256:a8e7be4e1abc69fd8eee5a49d7dc3a76dfbffbd778beed0b2bd7accb3f4f3a2a"}, + {file = "postgrest-0.19.1.tar.gz", hash = "sha256:d8fa88953cced4f45efa0f412056c364f64ece8a35b5b35f458a7e58c133fbca"}, +] + +[package.dependencies] +deprecation = ">=2.1.0,<3.0.0" +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +pydantic = ">=1.9,<3.0" +strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "5.28.0" +description = "" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "protobuf-5.28.0-cp310-abi3-win32.whl", hash = "sha256:66c3edeedb774a3508ae70d87b3a19786445fe9a068dd3585e0cefa8a77b83d0"}, + {file = "protobuf-5.28.0-cp310-abi3-win_amd64.whl", hash = "sha256:6d7cc9e60f976cf3e873acb9a40fed04afb5d224608ed5c1a105db4a3f09c5b6"}, + {file = "protobuf-5.28.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:532627e8fdd825cf8767a2d2b94d77e874d5ddb0adefb04b237f7cc296748681"}, + {file = "protobuf-5.28.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:018db9056b9d75eb93d12a9d35120f97a84d9a919bcab11ed56ad2d399d6e8dd"}, + {file = "protobuf-5.28.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:6206afcb2d90181ae8722798dcb56dc76675ab67458ac24c0dd7d75d632ac9bd"}, + {file = "protobuf-5.28.0-cp38-cp38-win32.whl", hash = "sha256:eef7a8a2f4318e2cb2dee8666d26e58eaf437c14788f3a2911d0c3da40405ae8"}, + {file = "protobuf-5.28.0-cp38-cp38-win_amd64.whl", hash = "sha256:d001a73c8bc2bf5b5c1360d59dd7573744e163b3607fa92788b7f3d5fefbd9a5"}, + {file = "protobuf-5.28.0-cp39-cp39-win32.whl", hash = "sha256:dde9fcaa24e7a9654f4baf2a55250b13a5ea701493d904c54069776b99a8216b"}, + {file = "protobuf-5.28.0-cp39-cp39-win_amd64.whl", hash = "sha256:853db610214e77ee817ecf0514e0d1d052dff7f63a0c157aa6eabae98db8a8de"}, + {file = "protobuf-5.28.0-py3-none-any.whl", hash = "sha256:510ed78cd0980f6d3218099e874714cdf0d8a95582e7b059b06cabad855ed0a0"}, + {file = "protobuf-5.28.0.tar.gz", hash = "sha256:dde74af0fa774fa98892209992295adbfb91da3fa98c8f67a88afe8f5a349add"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pydantic" +version = "2.10.5" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"}, + {file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"}, +] + +[package.dependencies] +annotated-types = 
">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = 
"pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = 
"sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = 
"pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = 
"pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.7.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_settings-2.7.1-py3-none-any.whl", hash = "sha256:590be9e6e24d06db33a4262829edef682500ef008565a969c73d39d5f8bfb3fd"}, + {file = "pydantic_settings-2.7.1.tar.gz", hash = "sha256:10c9caad35e64bfb3c2fbf70a078c0e25cc92499782e5200747f942a065dec93"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" + +[package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pyjwt" +version = "2.10.1" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"}, + {file = "pyjwt-2.10.1.tar.gz", hash = 
"sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.25.2" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pytest_asyncio-0.25.2-py3-none-any.whl", hash = "sha256:0d0bb693f7b99da304a0634afc0a4b19e49d5e0de2d670f38dc4bfa5727c5075"}, + {file = "pytest_asyncio-0.25.2.tar.gz", hash = "sha256:3f8ef9a98f45948ea91a0ed3dc4268b5326c0e7bce73892acc654df4262ad45f"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = 
["click (>=5.0)"] + +[[package]] +name = "realtime" +version = "2.0.2" +description = "" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "realtime-2.0.2-py3-none-any.whl", hash = "sha256:2634c915bc38807f2013f21e8bcc4d2f79870dfd81460ddb9393883d0489928a"}, + {file = "realtime-2.0.2.tar.gz", hash = "sha256:519da9325b3b8102139d51785013d592f6b2403d81fa21d838a0b0234723ed7d"}, +] + +[package.dependencies] +aiohttp = ">=3.10.2,<4.0.0" +python-dateutil = ">=2.8.1,<3.0.0" +typing-extensions = ">=4.12.2,<5.0.0" +websockets = ">=11,<13" + +[[package]] +name = "redis" +version = "5.2.1" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"}, + {file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} + +[package.extras] +hiredis = ["hiredis (>=3.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +groups = ["main"] +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "ruff" +version = "0.9.2" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"}, + {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"}, + {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"}, + {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"}, + {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"}, + {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"}, + {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"}, + {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"}, + {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = 
"sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "storage3" +version = "0.11.0" +description = "Supabase Storage client for Python." +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "storage3-0.11.0-py3-none-any.whl", hash = "sha256:de2d8f9c9103ca91a9a9d0d69d80b07a3ab6f647b93e023e6a1a97d3607b9728"}, + {file = "storage3-0.11.0.tar.gz", hash = "sha256:243583f2180686c0f0a19e6117d8a9796fd60c0ca72ec567d62b75a5af0d57a1"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +python-dateutil = ">=2.8.2,<3.0.0" + +[[package]] +name = "strenum" +version = "0.4.15" +description = "An Enum that inherits from str." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"}, + {file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"}, +] + +[package.extras] +docs = ["myst-parser[linkify]", "sphinx", "sphinx-rtd-theme"] +release = ["twine"] +test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"] + +[[package]] +name = "supabase" +version = "2.11.0" +description = "Supabase client for Python." +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "supabase-2.11.0-py3-none-any.whl", hash = "sha256:67a0da498895f4cd6554935e2854b4c41f87b297b78fb9c9414902a382041406"}, + {file = "supabase-2.11.0.tar.gz", hash = "sha256:2a906f7909fd9a50f944cd9332ce66c684e2d37c0864284d34c5815e6c63cc01"}, +] + +[package.dependencies] +gotrue = ">=2.11.0,<3.0.0" +httpx = ">=0.26,<0.29" +postgrest = ">=0.19,<0.20" +realtime = ">=2.0.0,<3.0.0" +storage3 = ">=0.10,<0.12" +supafunc = ">=0.9,<0.10" + +[[package]] +name = "supafunc" +version = "0.9.2" +description = "Library for Supabase Functions" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "supafunc-0.9.2-py3-none-any.whl", hash = "sha256:be5ee9f53842c4b0ba5f4abfb5bddf9f9e37e69e755ec0526852bb15af9d2ff5"}, + {file = "supafunc-0.9.2.tar.gz", hash = "sha256:f5164114a3e65e7e552539f3f1050aa3d4970885abdd7405555c17fd216e2da1"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +strenum = ">=0.4.15,<0.5.0" + +[[package]] +name = "tomli" +version = "2.1.0" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "websockets" +version = "12.0" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, + {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, + {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, + {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, + {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, + {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"}, + {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"}, + {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, + {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, + {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, + {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"}, + {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"}, + {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"}, + {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"}, + {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"}, + {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"}, + {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"}, + {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"}, + {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"}, + {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, + {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, + {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"}, + {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"}, + {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"}, + {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"}, + {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, + {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, +] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "yarl" +version = "1.11.1" +description = "Yet another URL library" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:400cd42185f92de559d29eeb529e71d80dfbd2f45c36844914a4a34297ca6f00"}, + {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8258c86f47e080a258993eed877d579c71da7bda26af86ce6c2d2d072c11320d"}, + {file = "yarl-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2164cd9725092761fed26f299e3f276bb4b537ca58e6ff6b252eae9631b5c96e"}, + {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08ea567c16f140af8ddc7cb58e27e9138a1386e3e6e53982abaa6f2377b38cc"}, + {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:768ecc550096b028754ea28bf90fde071c379c62c43afa574edc6f33ee5daaec"}, + {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2909fa3a7d249ef64eeb2faa04b7957e34fefb6ec9966506312349ed8a7e77bf"}, + {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01a8697ec24f17c349c4f655763c4db70eebc56a5f82995e5e26e837c6eb0e49"}, + {file = "yarl-1.11.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e286580b6511aac7c3268a78cdb861ec739d3e5a2a53b4809faef6b49778eaff"}, + {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4179522dc0305c3fc9782549175c8e8849252fefeb077c92a73889ccbcd508ad"}, + {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:27fcb271a41b746bd0e2a92182df507e1c204759f460ff784ca614e12dd85145"}, + {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f61db3b7e870914dbd9434b560075e0366771eecbe6d2b5561f5bc7485f39efd"}, + {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:c92261eb2ad367629dc437536463dc934030c9e7caca861cc51990fe6c565f26"}, + {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d95b52fbef190ca87d8c42f49e314eace4fc52070f3dfa5f87a6594b0c1c6e46"}, + {file = "yarl-1.11.1-cp310-cp310-win32.whl", hash = "sha256:489fa8bde4f1244ad6c5f6d11bb33e09cf0d1d0367edb197619c3e3fc06f3d91"}, + {file = "yarl-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:476e20c433b356e16e9a141449f25161e6b69984fb4cdbd7cd4bd54c17844998"}, + {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:946eedc12895873891aaceb39bceb484b4977f70373e0122da483f6c38faaa68"}, + {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21a7c12321436b066c11ec19c7e3cb9aec18884fe0d5b25d03d756a9e654edfe"}, + {file = "yarl-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c35f493b867912f6fda721a59cc7c4766d382040bdf1ddaeeaa7fa4d072f4675"}, + {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25861303e0be76b60fddc1250ec5986c42f0a5c0c50ff57cc30b1be199c00e63"}, + {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b53f73077e839b3f89c992223f15b1d2ab314bdbdf502afdc7bb18e95eae27"}, + 
{file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:327c724b01b8641a1bf1ab3b232fb638706e50f76c0b5bf16051ab65c868fac5"}, + {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4307d9a3417eea87715c9736d050c83e8c1904e9b7aada6ce61b46361b733d92"}, + {file = "yarl-1.11.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a28bed68ab8fb7e380775f0029a079f08a17799cb3387a65d14ace16c12e2b"}, + {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:067b961853c8e62725ff2893226fef3d0da060656a9827f3f520fb1d19b2b68a"}, + {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8215f6f21394d1f46e222abeb06316e77ef328d628f593502d8fc2a9117bde83"}, + {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:498442e3af2a860a663baa14fbf23fb04b0dd758039c0e7c8f91cb9279799bff"}, + {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:69721b8effdb588cb055cc22f7c5105ca6fdaa5aeb3ea09021d517882c4a904c"}, + {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e969fa4c1e0b1a391f3fcbcb9ec31e84440253325b534519be0d28f4b6b533e"}, + {file = "yarl-1.11.1-cp311-cp311-win32.whl", hash = "sha256:7d51324a04fc4b0e097ff8a153e9276c2593106a811704025bbc1d6916f45ca6"}, + {file = "yarl-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:15061ce6584ece023457fb8b7a7a69ec40bf7114d781a8c4f5dcd68e28b5c53b"}, + {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a4264515f9117be204935cd230fb2a052dd3792789cc94c101c535d349b3dab0"}, + {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f41fa79114a1d2eddb5eea7b912d6160508f57440bd302ce96eaa384914cd265"}, + {file = "yarl-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:02da8759b47d964f9173c8675710720b468aa1c1693be0c9c64abb9d8d9a4867"}, + {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9361628f28f48dcf8b2f528420d4d68102f593f9c2e592bfc842f5fb337e44fd"}, + {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b91044952da03b6f95fdba398d7993dd983b64d3c31c358a4c89e3c19b6f7aef"}, + {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74db2ef03b442276d25951749a803ddb6e270d02dda1d1c556f6ae595a0d76a8"}, + {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e975a2211952a8a083d1b9d9ba26472981ae338e720b419eb50535de3c02870"}, + {file = "yarl-1.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aef97ba1dd2138112890ef848e17d8526fe80b21f743b4ee65947ea184f07a2"}, + {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7915ea49b0c113641dc4d9338efa9bd66b6a9a485ffe75b9907e8573ca94b84"}, + {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:504cf0d4c5e4579a51261d6091267f9fd997ef58558c4ffa7a3e1460bd2336fa"}, + {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3de5292f9f0ee285e6bd168b2a77b2a00d74cbcfa420ed078456d3023d2f6dff"}, + {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a34e1e30f1774fa35d37202bbeae62423e9a79d78d0874e5556a593479fdf239"}, + {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66b63c504d2ca43bf7221a1f72fbe981ff56ecb39004c70a94485d13e37ebf45"}, + {file = 
"yarl-1.11.1-cp312-cp312-win32.whl", hash = "sha256:a28b70c9e2213de425d9cba5ab2e7f7a1c8ca23a99c4b5159bf77b9c31251447"}, + {file = "yarl-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:17b5a386d0d36fb828e2fb3ef08c8829c1ebf977eef88e5367d1c8c94b454639"}, + {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1fa2e7a406fbd45b61b4433e3aa254a2c3e14c4b3186f6e952d08a730807fa0c"}, + {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:750f656832d7d3cb0c76be137ee79405cc17e792f31e0a01eee390e383b2936e"}, + {file = "yarl-1.11.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b8486f322d8f6a38539136a22c55f94d269addb24db5cb6f61adc61eabc9d93"}, + {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fce4da3703ee6048ad4138fe74619c50874afe98b1ad87b2698ef95bf92c96d"}, + {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed653638ef669e0efc6fe2acb792275cb419bf9cb5c5049399f3556995f23c7"}, + {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18ac56c9dd70941ecad42b5a906820824ca72ff84ad6fa18db33c2537ae2e089"}, + {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:688654f8507464745ab563b041d1fb7dab5d9912ca6b06e61d1c4708366832f5"}, + {file = "yarl-1.11.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4973eac1e2ff63cf187073cd4e1f1148dcd119314ab79b88e1b3fad74a18c9d5"}, + {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:964a428132227edff96d6f3cf261573cb0f1a60c9a764ce28cda9525f18f7786"}, + {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6d23754b9939cbab02c63434776df1170e43b09c6a517585c7ce2b3d449b7318"}, + {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c2dc4250fe94d8cd864d66018f8344d4af50e3758e9d725e94fecfa27588ff82"}, + {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09696438cb43ea6f9492ef237761b043f9179f455f405279e609f2bc9100212a"}, + {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:999bfee0a5b7385a0af5ffb606393509cfde70ecca4f01c36985be6d33e336da"}, + {file = "yarl-1.11.1-cp313-cp313-win32.whl", hash = "sha256:ce928c9c6409c79e10f39604a7e214b3cb69552952fbda8d836c052832e6a979"}, + {file = "yarl-1.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:501c503eed2bb306638ccb60c174f856cc3246c861829ff40eaa80e2f0330367"}, + {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dae7bd0daeb33aa3e79e72877d3d51052e8b19c9025ecf0374f542ea8ec120e4"}, + {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3ff6b1617aa39279fe18a76c8d165469c48b159931d9b48239065767ee455b2b"}, + {file = "yarl-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3257978c870728a52dcce8c2902bf01f6c53b65094b457bf87b2644ee6238ddc"}, + {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f351fa31234699d6084ff98283cb1e852270fe9e250a3b3bf7804eb493bd937"}, + {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8aef1b64da41d18026632d99a06b3fefe1d08e85dd81d849fa7c96301ed22f1b"}, + {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7175a87ab8f7fbde37160a15e58e138ba3b2b0e05492d7351314a250d61b1591"}, + {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:ba444bdd4caa2a94456ef67a2f383710928820dd0117aae6650a4d17029fa25e"}, + {file = "yarl-1.11.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ea9682124fc062e3d931c6911934a678cb28453f957ddccf51f568c2f2b5e05"}, + {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8418c053aeb236b20b0ab8fa6bacfc2feaaf7d4683dd96528610989c99723d5f"}, + {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:61a5f2c14d0a1adfdd82258f756b23a550c13ba4c86c84106be4c111a3a4e413"}, + {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f3a6d90cab0bdf07df8f176eae3a07127daafcf7457b997b2bf46776da2c7eb7"}, + {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:077da604852be488c9a05a524068cdae1e972b7dc02438161c32420fb4ec5e14"}, + {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:15439f3c5c72686b6c3ff235279630d08936ace67d0fe5c8d5bbc3ef06f5a420"}, + {file = "yarl-1.11.1-cp38-cp38-win32.whl", hash = "sha256:238a21849dd7554cb4d25a14ffbfa0ef380bb7ba201f45b144a14454a72ffa5a"}, + {file = "yarl-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:67459cf8cf31da0e2cbdb4b040507e535d25cfbb1604ca76396a3a66b8ba37a6"}, + {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:884eab2ce97cbaf89f264372eae58388862c33c4f551c15680dd80f53c89a269"}, + {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a336eaa7ee7e87cdece3cedb395c9657d227bfceb6781295cf56abcd3386a26"}, + {file = "yarl-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87f020d010ba80a247c4abc335fc13421037800ca20b42af5ae40e5fd75e7909"}, + {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:637c7ddb585a62d4469f843dac221f23eec3cbad31693b23abbc2c366ad41ff4"}, + {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48dfd117ab93f0129084577a07287376cc69c08138694396f305636e229caa1a"}, + {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e0ae31fb5ccab6eda09ba1494e87eb226dcbd2372dae96b87800e1dcc98804"}, + {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f46f81501160c28d0c0b7333b4f7be8983dbbc161983b6fb814024d1b4952f79"}, + {file = "yarl-1.11.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04293941646647b3bfb1719d1d11ff1028e9c30199509a844da3c0f5919dc520"}, + {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:250e888fa62d73e721f3041e3a9abf427788a1934b426b45e1b92f62c1f68366"}, + {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e8f63904df26d1a66aabc141bfd258bf738b9bc7bc6bdef22713b4f5ef789a4c"}, + {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:aac44097d838dda26526cffb63bdd8737a2dbdf5f2c68efb72ad83aec6673c7e"}, + {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:267b24f891e74eccbdff42241c5fb4f974de2d6271dcc7d7e0c9ae1079a560d9"}, + {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6907daa4b9d7a688063ed098c472f96e8181733c525e03e866fb5db480a424df"}, + {file = "yarl-1.11.1-cp39-cp39-win32.whl", hash = "sha256:14438dfc5015661f75f85bc5adad0743678eefee266ff0c9a8e32969d5d69f74"}, + {file = "yarl-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:94d0caaa912bfcdc702a4204cd5e2bb01eb917fc4f5ea2315aa23962549561b0"}, + {file = "yarl-1.11.1-py3-none-any.whl", hash = 
"sha256:72bf26f66456baa0584eff63e44545c9f0eaed9b73cb6601b647c91f14c11f38"}, + {file = "yarl-1.11.1.tar.gz", hash = "sha256:1bb2d9e212fb7449b8fb73bc461b51eaa17cc8430b4a87d87be7b25052d92f53"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + +[[package]] +name = "zipp" +version = "3.20.1" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, + {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.10,<4.0" +content-hash = "53a31ce3d94999d9267f2a229c53a9d97d96c9413843bfdcb7ef0c0c21723e49" diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml new file mode 100644 index 000000000000..dcb06475f521 --- /dev/null +++ b/autogpt_platform/autogpt_libs/pyproject.toml @@ -0,0 +1,34 @@ +[tool.poetry] +name = "autogpt-libs" +version = "0.2.0" +description = "Shared libraries across NextGen AutoGPT" +authors = ["Aarushi "] +readme = "README.md" +packages = [{ include = "autogpt_libs" }] + +[tool.poetry.dependencies] +colorama = "^0.4.6" +expiringdict = "^1.2.2" +google-cloud-logging = "^3.11.3" +pydantic = "^2.10.5" +pydantic-settings = "^2.7.1" +pyjwt = "^2.10.1" +pytest-asyncio = "^0.25.2" +pytest-mock = "^3.14.0" +python = ">=3.10,<4.0" +python-dotenv = "^1.0.1" +supabase = "^2.11.0" + +[tool.poetry.group.dev.dependencies] +redis = "^5.2.1" +ruff = "^0.9.2" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.ruff] +line-length = 88 + +[tool.ruff.lint] +extend-select = ["I"] # sort dependencies diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example new file mode 100644 index 000000000000..f0804681b5df --- /dev/null +++ b/autogpt_platform/backend/.env.example @@ -0,0 +1,155 @@ +DB_USER=postgres +DB_PASS=your-super-secret-and-long-postgres-password +DB_NAME=postgres +DB_PORT=5432 +DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}?connect_timeout=60&schema=platform" +PRISMA_SCHEMA="postgres/schema.prisma" + +BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] + +# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()` +ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=' + +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD=password + +ENABLE_CREDIT=false +STRIPE_API_KEY= +STRIPE_WEBHOOK_SECRET= + +# What environment things should be logged under: local dev or prod +APP_ENV=local +# What environment to behave as: "local" or "cloud" +BEHAVE_AS=local +PYRO_HOST=localhost +SENTRY_DSN= + +## User auth with Supabase is required for any of the 3rd party integrations with auth to work. 
+ENABLE_AUTH=true +SUPABASE_URL=http://localhost:8000 +SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long + +## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow +## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set. +# FRONTEND_BASE_URL=http://localhost:3000 + +## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend +## to use the platform's webhook-related functionality. +## If you are developing locally, you can use something like ngrok to get a public URL +## and tunnel it to your locally running backend. +PLATFORM_BASE_URL=http://localhost:3000 + +## == INTEGRATION CREDENTIALS == ## +# Each set of server-side credentials is required for the corresponding 3rd party +# integration to work. + +# For the OAuth callback URL, use /auth/integrations/oauth_callback, +# e.g. http://localhost:3000/auth/integrations/oauth_callback + +# GitHub OAuth App server credentials - https://github.com/settings/developers +GITHUB_CLIENT_ID= +GITHUB_CLIENT_SECRET= + +# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable the Gmail API and set scopes +# https://console.cloud.google.com/apis/credentials/consent ?project= + +# You'll need to add/enable the following scopes (minimum): +# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project= +# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project= +GOOGLE_CLIENT_ID= +GOOGLE_CLIENT_SECRET= + +# Twitter (X) OAuth 2.0 with PKCE Configuration +# 1. Create a Twitter Developer Account: +# - Visit https://developer.x.com/en and sign up +# 2. Set up your application: +# - Navigate to Developer Portal > Projects > Create Project +# - Add a new app to your project +# 3. Configure app settings: +# - App Permissions: Read + Write + Direct Messages +# - App Type: Web App, Automated App or Bot +# - OAuth 2.0 Callback URL: http://localhost:3000/auth/integrations/oauth_callback +# - Save your Client ID and Client Secret below +TWITTER_CLIENT_ID= +TWITTER_CLIENT_SECRET= + +# Linear App +# Make a new workspace for your OAuth app -- trust me +# https://linear.app/settings/api/applications/new +# Callback URL: http://localhost:3000/auth/integrations/oauth_callback +LINEAR_CLIENT_ID= +LINEAR_CLIENT_SECRET= + +## ===== OPTIONAL API KEYS ===== ## + +# LLM +OPENAI_API_KEY= +ANTHROPIC_API_KEY= +GROQ_API_KEY= +OPEN_ROUTER_API_KEY= + +# Reddit +# Go to https://www.reddit.com/prefs/apps and create a new app +# Choose "script" for the type +# Fill in the redirect URI as /auth/integrations/oauth_callback, e.g.
http://localhost:3000/auth/integrations/oauth_callback +REDDIT_CLIENT_ID= +REDDIT_CLIENT_SECRET= +REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)" + +# Discord +DISCORD_BOT_TOKEN= + +# SMTP/Email +SMTP_SERVER= +SMTP_PORT= +SMTP_USERNAME= +SMTP_PASSWORD= + +# D-ID +DID_API_KEY= + +# Open Weather Map +OPENWEATHERMAP_API_KEY= + +# Medium +MEDIUM_API_KEY= +MEDIUM_AUTHOR_ID= + +# Google Maps +GOOGLE_MAPS_API_KEY= + +# Replicate +REPLICATE_API_KEY= + +# Ideogram +IDEOGRAM_API_KEY= + +# Fal +FAL_API_KEY= + +# Exa +EXA_API_KEY= + +# E2B +E2B_API_KEY= + +# Mem0 +MEM0_API_KEY= + +# Nvidia +NVIDIA_API_KEY= + +# Logging Configuration +LOG_LEVEL=INFO +ENABLE_CLOUD_LOGGING=false +ENABLE_FILE_LOGGING=false +# Use to manually set the log directory +# LOG_DIR=./logs diff --git a/autogpt_platform/backend/.gitignore b/autogpt_platform/backend/.gitignore new file mode 100644 index 000000000000..1ce7f628ee1b --- /dev/null +++ b/autogpt_platform/backend/.gitignore @@ -0,0 +1,11 @@ +database.db +database.db-journal +dev.db +dev.db-journal +build/ +config.json +secrets/* +!secrets/.gitkeep + +*.ignore.* +*.ign.* \ No newline at end of file diff --git a/autogpt_platform/backend/Dockerfile b/autogpt_platform/backend/Dockerfile new file mode 100644 index 000000000000..91a136026557 --- /dev/null +++ b/autogpt_platform/backend/Dockerfile @@ -0,0 +1,79 @@ +FROM python:3.11.10-slim-bookworm AS builder + +# Set environment variables +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 + +WORKDIR /app + +RUN echo 'Acquire::http::Pipeline-Depth 0;\nAcquire::http::No-Cache true;\nAcquire::BrokenProxy true;\n' > /etc/apt/apt.conf.d/99fixbadproxy + +RUN apt-get update --allow-releaseinfo-change --fix-missing + +# Install build dependencies +RUN apt-get install -y build-essential +RUN apt-get install -y libpq5 +RUN apt-get install -y libz-dev +RUN apt-get install -y libssl-dev +RUN apt-get install -y postgresql-client + +ENV POETRY_HOME=/opt/poetry +ENV POETRY_NO_INTERACTION=1 +ENV POETRY_VIRTUALENVS_CREATE=false +ENV PATH=/opt/poetry/bin:$PATH + +# Upgrade pip and setuptools to fix security vulnerabilities +RUN pip3 install --upgrade pip setuptools + +RUN pip3 install poetry + +# Copy and install dependencies +COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs +COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/ +WORKDIR /app/autogpt_platform/backend +RUN poetry install --no-ansi --no-root + +# Generate Prisma client +COPY autogpt_platform/backend/schema.prisma ./ +RUN poetry run prisma generate + +FROM python:3.11.10-slim-bookworm AS server_dependencies + +WORKDIR /app + +ENV POETRY_HOME=/opt/poetry \ + POETRY_NO_INTERACTION=1 \ + POETRY_VIRTUALENVS_CREATE=false +ENV PATH=/opt/poetry/bin:$PATH + +# Upgrade pip and setuptools to fix security vulnerabilities +RUN pip3 install --upgrade pip setuptools + +# Copy only necessary files from builder +COPY --from=builder /app /app +COPY --from=builder /usr/local/lib/python3.11 /usr/local/lib/python3.11 +COPY --from=builder /usr/local/bin /usr/local/bin +# Copy Prisma binaries +COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries + + +ENV PATH="/app/.venv/bin:$PATH" + +RUN mkdir -p /app/autogpt_platform/autogpt_libs +RUN mkdir -p /app/autogpt_platform/backend + +COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs + +COPY autogpt_platform/backend/poetry.lock
autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/ + +WORKDIR /app/autogpt_platform/backend + +FROM server_dependencies AS server + +COPY autogpt_platform/backend /app/autogpt_platform/backend +RUN poetry install --no-ansi --only-root + +ENV DATABASE_URL="" +ENV PORT=8000 + +CMD ["poetry", "run", "rest"] diff --git a/autogpt_platform/backend/README.advanced.md b/autogpt_platform/backend/README.advanced.md new file mode 100644 index 000000000000..09e0f90fcc25 --- /dev/null +++ b/autogpt_platform/backend/README.advanced.md @@ -0,0 +1,75 @@ +# AutoGPT Agent Server Advanced Setup + +This guide walks you through a dockerized setup with an external database (Postgres). + +## Setup + +We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory: + +0. Install Poetry + ```sh + pip install poetry + ``` + +1. Configure Poetry to use .venv in your project directory + ```sh + poetry config virtualenvs.in-project true + ``` + +2. Enter the poetry shell + + ```sh + poetry shell + ``` + +3. Install dependencies + + ```sh + poetry install + ``` + +4. Copy .env.example to .env + + ```sh + cp .env.example .env + ``` + +5. Generate the Prisma client + + ```sh + poetry run prisma generate + ``` + + + > In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package: + > + > ```sh + > pip uninstall prisma + > ``` + > + > Then run the generation again. The path *should* look something like this: + > `/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma` + +6. Run the Postgres database from the `autogpt_platform` folder + + ```sh + cd autogpt_platform/ + docker compose up -d + ``` + +7. Run the migrations (from the backend folder) + + ```sh + cd ../backend + prisma migrate deploy + ``` + +## Running The Server + +### Starting the server directly + +Run the following command: + +```sh +poetry run app +``` diff --git a/autogpt_platform/backend/README.md b/autogpt_platform/backend/README.md new file mode 100644 index 000000000000..c76fe40ba419 --- /dev/null +++ b/autogpt_platform/backend/README.md @@ -0,0 +1,203 @@ +# AutoGPT Agent Server + +This is an initial project for creating the next generation of agent execution: the AutoGPT Agent Server. +The agent server will enable the creation of composite multi-agent systems that utilize AutoGPT agents and other non-agent components as their primitives. + +## Docs + +You can access the [AutoGPT Agent Server docs here](https://docs.agpt.co/server/setup). + +## Setup + +We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory: + +0. Install Poetry + ```sh + pip install poetry + ``` + +1. Configure Poetry to use .venv in your project directory + ```sh + poetry config virtualenvs.in-project true + ``` + +2. Enter the poetry shell + + ```sh + poetry shell + ``` + +3. Install dependencies + + ```sh + poetry install + ``` + +4. Copy .env.example to .env + + ```sh + cp .env.example .env + ``` + +5. Generate the Prisma client + + ```sh + poetry run prisma generate + ``` + + + > In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package: + > + > ```sh + > pip uninstall prisma + > ``` + > + > Then run the generation again. The path *should* look something like this: + > `/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma` + +6.
Migrate the database. Be careful because this deletes current data in the database. + + ```sh + docker compose up db -d + poetry run prisma migrate deploy + ``` + +## Running The Server + +### Starting the server without Docker + +Run the following command to run database in docker but the application locally: + +```sh +docker compose --profile local up deps --build --detach +poetry run app +``` + +### Starting the server with Docker + +Run the following command to build the dockerfiles: + +```sh +docker compose build +``` + +Run the following command to run the app: + +```sh +docker compose up +``` + +Run the following to automatically rebuild when code changes, in another terminal: + +```sh +docker compose watch +``` + +Run the following command to shut down: + +```sh +docker compose down +``` + +If you run into issues with dangling orphans, try: + +```sh +docker compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans +``` + +## Testing + +To run the tests: + +```sh +poetry run test +``` + +## Development + +### Formatting & Linting +Auto formatter and linter are set up in the project. To run them: + +Install: +```sh +poetry install --with dev +``` + +Format the code: +```sh +poetry run format +``` + +Lint the code: +```sh +poetry run lint +``` + +## Project Outline + +The current project has the following main modules: + +### **blocks** + +This module stores all the Agent Blocks, which are reusable components to build a graph that represents the agent's behavior. + +### **data** + +This module stores the logical model that is persisted in the database. +It abstracts the database operations into functions that can be called by the service layer. +Any code that interacts with Prisma objects or the database should reside in this module. +The main models are: +* `block`: anything related to the block used in the graph +* `execution`: anything related to the execution graph execution +* `graph`: anything related to the graph, node, and its relations + +### **execution** + +This module stores the business logic of executing the graph. +It currently has the following main modules: +* `manager`: A service that consumes the queue of the graph execution and executes the graph. It contains both pieces of logic. +* `scheduler`: A service that triggers scheduled graph execution based on a cron expression. It pushes an execution request to the manager. + +### **server** + +This module stores the logic for the server API. +It contains all the logic used for the API that allows the client to create, execute, and monitor the graph and its execution. +This API service interacts with other services like those defined in `manager` and `scheduler`. + +### **utils** + +This module stores utility functions that are used across the project. +Currently, it has two main modules: +* `process`: A module that contains the logic to spawn a new process. +* `service`: A module that serves as a parent class for all the services in the project. + +## Service Communication + +Currently, there are only 3 active services: + +- AgentServer (the API, defined in `server.py`) +- ExecutionManager (the executor, defined in `manager.py`) +- ExecutionScheduler (the scheduler, defined in `scheduler.py`) + +The services run in independent Python processes and communicate through an IPC. +A communication layer (`service.py`) is created to decouple the communication library from the implementation. 
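+
+For illustration, here is a minimal sketch of how one process calls another through this layer. It follows the pattern used by `backend/blocks/agent.py` elsewhere in this change set; the concrete IDs and input data below are placeholders, and the real signatures live in `backend/util/service.py` and the executor service.
+
+```python
+from backend.executor import ExecutionManager
+from backend.util.service import get_service_client
+
+# Obtain a client proxy for the ExecutionManager service. Method calls on the
+# proxy are routed over IPC to the process that actually runs the service.
+executor_manager = get_service_client(ExecutionManager)
+
+# `add_execution` is exposed by the ExecutionManager service; the argument
+# values here are placeholders for illustration only.
+graph_exec = executor_manager.add_execution(
+    graph_id="<graph-id>",
+    graph_version=1,
+    user_id="<user-id>",
+    data={"input_1": "Hello, World!"},
+)
+```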
+ +Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process. + + +By default the daemons run on the following ports: + +Execution Manager Daemon: 8002 +Execution Scheduler Daemon: 8003 +Rest Server Daemon: 8004 + +## Adding a New Agent Block + +To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information: +* All the block code should live in the `blocks` (`backend.blocks`) module. +* `input_schema`: the schema of the input data, represented by a Pydantic object. +* `output_schema`: the schema of the output data, represented by a Pydantic object. +* `run` method: the main logic of the block. +* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block. +* You can mock the functions declared in the block using the `test_mock` field for your unit tests. +* Once you finish creating the block, you can test it by running `poetry run pytest -s test/block/test_block.py`. diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py b/autogpt_platform/backend/backend/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py rename to autogpt_platform/backend/backend/__init__.py diff --git a/autogpt_platform/backend/backend/app.py b/autogpt_platform/backend/backend/app.py new file mode 100644 index 000000000000..5d77ea9632b3 --- /dev/null +++ b/autogpt_platform/backend/backend/app.py @@ -0,0 +1,42 @@ +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from backend.util.process import AppProcess + + +def run_processes(*processes: "AppProcess", **kwargs): + """ + Execute all processes in the app. The last process is run in the foreground. + """ + try: + for process in processes[:-1]: + process.start(background=True, **kwargs) + + # Run the last process in the foreground + processes[-1].start(background=False, **kwargs) + finally: + for process in processes: + process.stop() + + +def main(**kwargs): + """ + Run all the processes required for the AutoGPT-server (REST and WebSocket APIs). + """ + + from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler + from backend.server.rest_api import AgentServer + from backend.server.ws_api import WebsocketServer + + run_processes( + DatabaseManager(), + ExecutionManager(), + ExecutionScheduler(), + WebsocketServer(), + AgentServer(), + **kwargs, + ) + + +if __name__ == "__main__": + main() diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py new file mode 100644 index 000000000000..11c5f3d5d6d2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/__init__.py @@ -0,0 +1,89 @@ +import importlib +import os +import re +from pathlib import Path +from typing import Type, TypeVar + +from backend.data.block import Block + +# Dynamically load all modules under backend.blocks +AVAILABLE_MODULES = [] +current_dir = Path(__file__).parent +modules = [ + str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".") + for f in current_dir.rglob("*.py") + if f.is_file() and f.name != "__init__.py" +] +for module in modules: + if not re.match("^[a-z0-9_.]+$", module): + raise ValueError( + f"Block module {module} error: module name must be lowercase, " + "and contain only alphanumeric characters and underscores." 
+ ) + + importlib.import_module(f".{module}", package=__name__) + AVAILABLE_MODULES.append(module) + +# Load all Block instances from the available modules +AVAILABLE_BLOCKS: dict[str, Type[Block]] = {} + + +T = TypeVar("T") + + +def all_subclasses(cls: Type[T]) -> list[Type[T]]: + subclasses = cls.__subclasses__() + for subclass in subclasses: + subclasses += all_subclasses(subclass) + return subclasses + + +for block_cls in all_subclasses(Block): + name = block_cls.__name__ + + if block_cls.__name__.endswith("Base"): + continue + + if not block_cls.__name__.endswith("Block"): + raise ValueError( + f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end" + ) + + block = block_cls.create() + + if not isinstance(block.id, str) or len(block.id) != 36: + raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID") + + if block.id in AVAILABLE_BLOCKS: + raise ValueError(f"Block ID {block.name} error: {block.id} is already in use") + + input_schema = block.input_schema.model_fields + output_schema = block.output_schema.model_fields + + # Make sure `error` field is a string in the output schema + if "error" in output_schema and output_schema["error"].annotation is not str: + raise ValueError( + f"{block.name} `error` field in output_schema must be a string" + ) + + # Make sure all fields in input_schema and output_schema are annotated and has a value + for field_name, field in [*input_schema.items(), *output_schema.items()]: + if field.annotation is None: + raise ValueError( + f"{block.name} has a field {field_name} that is not annotated" + ) + if field.json_schema_extra is None: + raise ValueError( + f"{block.name} has a field {field_name} not defined as SchemaField" + ) + + for field in block.input_schema.model_fields.values(): + if field.annotation is bool and field.default not in (True, False): + raise ValueError(f"{block.name} has a boolean field with no default value") + + if block.disabled: + continue + + AVAILABLE_BLOCKS[block.id] = block_cls + +__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"] diff --git a/autogpt_platform/backend/backend/blocks/agent.py b/autogpt_platform/backend/backend/blocks/agent.py new file mode 100644 index 000000000000..427de51106d8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/agent.py @@ -0,0 +1,104 @@ +import logging + +from autogpt_libs.utils.cache import thread_cached + +from backend.data.block import ( + Block, + BlockCategory, + BlockInput, + BlockOutput, + BlockSchema, + BlockType, + get_block, +) +from backend.data.execution import ExecutionStatus +from backend.data.model import SchemaField + +logger = logging.getLogger(__name__) + + +@thread_cached +def get_executor_manager_client(): + from backend.executor import ExecutionManager + from backend.util.service import get_service_client + + return get_service_client(ExecutionManager) + + +@thread_cached +def get_event_bus(): + from backend.data.execution import RedisExecutionEventBus + + return RedisExecutionEventBus() + + +class AgentExecutorBlock(Block): + class Input(BlockSchema): + user_id: str = SchemaField(description="User ID") + graph_id: str = SchemaField(description="Graph ID") + graph_version: int = SchemaField(description="Graph Version") + + data: BlockInput = SchemaField(description="Input data for the graph") + input_schema: dict = SchemaField(description="Input schema for the graph") + output_schema: dict = SchemaField(description="Output schema for the graph") + + 
class Output(BlockSchema): + pass + + def __init__(self): + super().__init__( + id="e189baac-8c20-45a1-94a7-55177ea42565", + description="Executes an existing agent inside your agent", + input_schema=AgentExecutorBlock.Input, + output_schema=AgentExecutorBlock.Output, + block_type=BlockType.AGENT, + categories={BlockCategory.AGENT}, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + executor_manager = get_executor_manager_client() + event_bus = get_event_bus() + + graph_exec = executor_manager.add_execution( + graph_id=input_data.graph_id, + graph_version=input_data.graph_version, + user_id=input_data.user_id, + data=input_data.data, + ) + log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}" + logger.info(f"Starting execution of {log_id}") + + for event in event_bus.listen( + graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id + ): + logger.info( + f"Execution {log_id} produced input {event.input_data} output {event.output_data}" + ) + + if not event.node_id: + if event.status in [ + ExecutionStatus.COMPLETED, + ExecutionStatus.TERMINATED, + ExecutionStatus.FAILED, + ]: + logger.info(f"Execution {log_id} ended with status {event.status}") + break + else: + continue + + if not event.block_id: + logger.warning(f"{log_id} received event without block_id {event}") + continue + + block = get_block(event.block_id) + if not block or block.block_type != BlockType.OUTPUT: + continue + + output_name = event.input_data.get("name") + if not output_name: + logger.warning(f"{log_id} produced an output with no name {event}") + continue + + for output_data in event.output_data.get("output", []): + logger.info(f"Execution {log_id} produced {output_name}: {output_data}") + yield output_name, output_data diff --git a/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py new file mode 100644 index 000000000000..ebd79dda9ac9 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/ai_image_generator_block.py @@ -0,0 +1,325 @@ +from enum import Enum +from typing import Literal + +import replicate +from pydantic import SecretStr +from replicate.helpers import FileOutput + +from backend.data.block import Block, BlockCategory, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + + +class ImageSize(str, Enum): + """ + Semantic sizes that map reliably across all models + """ + + SQUARE = "square" # For profile pictures, icons, etc. 
+ LANDSCAPE = "landscape" # For traditional photos, scenes + PORTRAIT = "portrait" # For vertical photos, portraits + WIDE = "wide" # For cinematic, desktop wallpapers + TALL = "tall" # For mobile wallpapers, stories + + +# Mapping semantic sizes to model-specific formats +SIZE_TO_SD_RATIO = { + ImageSize.SQUARE: "1:1", + ImageSize.LANDSCAPE: "4:3", + ImageSize.PORTRAIT: "3:4", + ImageSize.WIDE: "16:9", + ImageSize.TALL: "9:16", +} + +SIZE_TO_FLUX_RATIO = { + ImageSize.SQUARE: "1:1", + ImageSize.LANDSCAPE: "4:3", + ImageSize.PORTRAIT: "3:4", + ImageSize.WIDE: "16:9", + ImageSize.TALL: "9:16", +} + +SIZE_TO_FLUX_DIMENSIONS = { + ImageSize.SQUARE: (1024, 1024), + ImageSize.LANDSCAPE: (1365, 1024), + ImageSize.PORTRAIT: (1024, 1365), + ImageSize.WIDE: (1440, 810), # Adjusted to maintain 16:9 within 1440 limit + ImageSize.TALL: (810, 1440), # Adjusted to maintain 9:16 within 1440 limit +} + +SIZE_TO_RECRAFT_DIMENSIONS = { + ImageSize.SQUARE: "1024x1024", + ImageSize.LANDSCAPE: "1365x1024", + ImageSize.PORTRAIT: "1024x1365", + ImageSize.WIDE: "1536x1024", + ImageSize.TALL: "1024x1536", +} + + +class ImageStyle(str, Enum): + """ + Complete set of supported styles + """ + + ANY = "any" + # Realistic image styles + REALISTIC = "realistic_image" + REALISTIC_BW = "realistic_image/b_and_w" + REALISTIC_HDR = "realistic_image/hdr" + REALISTIC_NATURAL = "realistic_image/natural_light" + REALISTIC_STUDIO = "realistic_image/studio_portrait" + REALISTIC_ENTERPRISE = "realistic_image/enterprise" + REALISTIC_HARD_FLASH = "realistic_image/hard_flash" + REALISTIC_MOTION_BLUR = "realistic_image/motion_blur" + # Digital illustration styles + DIGITAL_ART = "digital_illustration" + PIXEL_ART = "digital_illustration/pixel_art" + HAND_DRAWN = "digital_illustration/hand_drawn" + GRAIN = "digital_illustration/grain" + SKETCH = "digital_illustration/infantile_sketch" + POSTER = "digital_illustration/2d_art_poster" + POSTER_2 = "digital_illustration/2d_art_poster_2" + HANDMADE_3D = "digital_illustration/handmade_3d" + HAND_DRAWN_OUTLINE = "digital_illustration/hand_drawn_outline" + ENGRAVING_COLOR = "digital_illustration/engraving_color" + + +class ImageGenModel(str, Enum): + """ + Available model providers + """ + + FLUX = "Flux 1.1 Pro" + FLUX_ULTRA = "Flux 1.1 Pro Ultra" + RECRAFT = "Recraft v3" + SD3_5 = "Stable Diffusion 3.5 Medium" + + +class AIImageGeneratorBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.REPLICATE], Literal["api_key"] + ] = CredentialsField( + description="Enter your Replicate API key to access the image generation API. 
You can obtain an API key from https://replicate.com/account/api-tokens.", + ) + prompt: str = SchemaField( + description="Text prompt for image generation", + placeholder="e.g., 'A red panda using a laptop in a snowy forest'", + title="Prompt", + ) + model: ImageGenModel = SchemaField( + description="The AI model to use for image generation", + default=ImageGenModel.SD3_5, + title="Model", + ) + size: ImageSize = SchemaField( + description=( + "Format of the generated image:\n" + "- Square: Perfect for profile pictures, icons\n" + "- Landscape: Traditional photo format\n" + "- Portrait: Vertical photos, portraits\n" + "- Wide: Cinematic format, desktop wallpapers\n" + "- Tall: Mobile wallpapers, social media stories" + ), + default=ImageSize.SQUARE, + title="Image Format", + ) + style: ImageStyle = SchemaField( + description="Visual style for the generated image", + default=ImageStyle.ANY, + title="Image Style", + ) + + class Output(BlockSchema): + image_url: str = SchemaField(description="URL of the generated image") + error: str = SchemaField(description="Error message if generation failed") + + def __init__(self): + super().__init__( + id="ed1ae7a0-b770-4089-b520-1f0005fad19a", + description="Generate images using various AI models through a unified interface", + categories={BlockCategory.AI}, + input_schema=AIImageGeneratorBlock.Input, + output_schema=AIImageGeneratorBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "prompt": "An octopus using a laptop in a snowy forest with 'AutoGPT' clearly visible on the screen", + "model": ImageGenModel.RECRAFT, + "size": ImageSize.SQUARE, + "style": ImageStyle.REALISTIC, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "image_url", + "https://replicate.delivery/generated-image.webp", + ), + ], + test_mock={ + "_run_client": lambda *args, **kwargs: "https://replicate.delivery/generated-image.webp" + }, + ) + + def _run_client( + self, credentials: APIKeyCredentials, model_name: str, input_params: dict + ): + try: + # Initialize Replicate client + client = replicate.Client(api_token=credentials.api_key.get_secret_value()) + + # Run the model with input parameters + output = client.run(model_name, input=input_params, wait=False) + + # Process output + if isinstance(output, list) and len(output) > 0: + if isinstance(output[0], FileOutput): + result_url = output[0].url + else: + result_url = output[0] + elif isinstance(output, FileOutput): + result_url = output.url + elif isinstance(output, str): + result_url = output + else: + result_url = None + + return result_url + + except TypeError as e: + raise TypeError(f"Error during model execution: {e}") + except Exception as e: + raise RuntimeError(f"Unexpected error during model execution: {e}") + + def generate_image(self, input_data: Input, credentials: APIKeyCredentials): + try: + # Handle style-based prompt modification for models without native style support + modified_prompt = input_data.prompt + if input_data.model not in [ImageGenModel.RECRAFT]: + style_prefix = self._style_to_prompt_prefix(input_data.style) + modified_prompt = f"{style_prefix} {modified_prompt}".strip() + + if input_data.model == ImageGenModel.SD3_5: + # Use Stable Diffusion 3.5 with aspect ratio + input_params = { + "prompt": modified_prompt, + "aspect_ratio": SIZE_TO_SD_RATIO[input_data.size], + "output_format": "webp", + "output_quality": 90, + "steps": 40, + "cfg_scale": 7.0, + } + output = self._run_client( + credentials, + "stability-ai/stable-diffusion-3.5-medium", + input_params, + ) + 
return output + + elif input_data.model == ImageGenModel.FLUX: + # Use Flux-specific dimensions with 'jpg' format to avoid ReplicateError + width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size] + input_params = { + "prompt": modified_prompt, + "width": width, + "height": height, + "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size], + "output_format": "jpg", # Set to jpg for Flux models + "output_quality": 90, + } + output = self._run_client( + credentials, "black-forest-labs/flux-1.1-pro", input_params + ) + return output + + elif input_data.model == ImageGenModel.FLUX_ULTRA: + width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size] + input_params = { + "prompt": modified_prompt, + "width": width, + "height": height, + "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size], + "output_format": "jpg", + "output_quality": 90, + } + output = self._run_client( + credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params + ) + return output + + elif input_data.model == ImageGenModel.RECRAFT: + input_params = { + "prompt": input_data.prompt, + "size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size], + "style": input_data.style.value, + } + output = self._run_client( + credentials, "recraft-ai/recraft-v3", input_params + ) + return output + + except Exception as e: + raise RuntimeError(f"Failed to generate image: {str(e)}") + + def _style_to_prompt_prefix(self, style: ImageStyle) -> str: + """ + Convert a style enum to a prompt prefix for models without native style support. + """ + if style == ImageStyle.ANY: + return "" + + style_map = { + ImageStyle.REALISTIC: "photorealistic", + ImageStyle.REALISTIC_BW: "black and white photograph", + ImageStyle.REALISTIC_HDR: "HDR photograph", + ImageStyle.REALISTIC_NATURAL: "natural light photograph", + ImageStyle.REALISTIC_STUDIO: "studio portrait photograph", + ImageStyle.REALISTIC_ENTERPRISE: "enterprise photograph", + ImageStyle.REALISTIC_HARD_FLASH: "hard flash photograph", + ImageStyle.REALISTIC_MOTION_BLUR: "motion blur photograph", + ImageStyle.DIGITAL_ART: "digital art", + ImageStyle.PIXEL_ART: "pixel art", + ImageStyle.HAND_DRAWN: "hand drawn illustration", + ImageStyle.GRAIN: "grainy digital illustration", + ImageStyle.SKETCH: "sketchy illustration", + ImageStyle.POSTER: "2D art poster", + ImageStyle.POSTER_2: "alternate 2D art poster", + ImageStyle.HANDMADE_3D: "handmade 3D illustration", + ImageStyle.HAND_DRAWN_OUTLINE: "hand drawn outline illustration", + ImageStyle.ENGRAVING_COLOR: "color engraving illustration", + } + + style_text = style_map.get(style, "") + return f"{style_text} of" if style_text else "" + + def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs): + try: + url = self.generate_image(input_data, credentials) + if url: + yield "image_url", url + else: + yield "error", "Image generation returned an empty result." 
+ except Exception as e: + # Capture and return only the message of the exception, avoiding serialization of non-serializable objects + yield "error", str(e) + + +# Test credentials stay the same +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="replicate", + api_key=SecretStr("mock-replicate-api-key"), + title="Mock Replicate API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} diff --git a/autogpt_platform/backend/backend/blocks/ai_music_generator.py b/autogpt_platform/backend/backend/blocks/ai_music_generator.py new file mode 100644 index 000000000000..708203510877 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/ai_music_generator.py @@ -0,0 +1,227 @@ +import logging +import time +from enum import Enum +from typing import Literal + +import replicate +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +logger = logging.getLogger(__name__) + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="replicate", + api_key=SecretStr("mock-replicate-api-key"), + title="Mock Replicate API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +# Model version enum +class MusicGenModelVersion(str, Enum): + STEREO_LARGE = "stereo-large" + MELODY_LARGE = "melody-large" + LARGE = "large" + + +# Audio format enum +class AudioFormat(str, Enum): + WAV = "wav" + MP3 = "mp3" + + +# Normalization strategy enum +class NormalizationStrategy(str, Enum): + LOUDNESS = "loudness" + CLIP = "clip" + PEAK = "peak" + RMS = "rms" + + +class AIMusicGeneratorBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.REPLICATE], Literal["api_key"] + ] = CredentialsField( + description="The Replicate integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + prompt: str = SchemaField( + description="A description of the music you want to generate", + placeholder="e.g., 'An upbeat electronic dance track with heavy bass'", + title="Prompt", + ) + music_gen_model_version: MusicGenModelVersion = SchemaField( + description="Model to use for generation", + default=MusicGenModelVersion.STEREO_LARGE, + title="Model Version", + ) + duration: int = SchemaField( + description="Duration of the generated audio in seconds", + default=8, + title="Duration", + ) + temperature: float = SchemaField( + description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity", + default=1.0, + title="Temperature", + ) + top_k: int = SchemaField( + description="Reduces sampling to the k most likely tokens", + default=250, + title="Top K", + ) + top_p: float = SchemaField( + description="Reduces sampling to tokens with cumulative probability of p. When set to 0 (default), top_k sampling is used", + default=0.0, + title="Top P", + ) + classifier_free_guidance: int = SchemaField( + description="Increases the influence of inputs on the output. 
Higher values produce lower-variance outputs that adhere more closely to inputs", + default=3, + title="Classifier Free Guidance", + ) + output_format: AudioFormat = SchemaField( + description="Output format for generated audio", + default=AudioFormat.WAV, + title="Output Format", + ) + normalization_strategy: NormalizationStrategy = SchemaField( + description="Strategy for normalizing audio", + default=NormalizationStrategy.LOUDNESS, + title="Normalization Strategy", + ) + + class Output(BlockSchema): + result: str = SchemaField(description="URL of the generated audio file") + error: str = SchemaField(description="Error message if the model run failed") + + def __init__(self): + super().__init__( + id="44f6c8ad-d75c-4ae1-8209-aad1c0326928", + description="This block generates music using Meta's MusicGen model on Replicate.", + categories={BlockCategory.AI}, + input_schema=AIMusicGeneratorBlock.Input, + output_schema=AIMusicGeneratorBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "prompt": "An upbeat electronic dance track with heavy bass", + "music_gen_model_version": MusicGenModelVersion.STEREO_LARGE, + "duration": 8, + "temperature": 1.0, + "top_k": 250, + "top_p": 0.0, + "classifier_free_guidance": 3, + "output_format": AudioFormat.WAV, + "normalization_strategy": NormalizationStrategy.LOUDNESS, + }, + test_output=[ + ( + "result", + "https://replicate.com/output/generated-audio-url.wav", + ), + ], + test_mock={ + "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav", + }, + test_credentials=TEST_CREDENTIALS, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + max_retries = 3 + retry_delay = 5 # seconds + last_error = None + + for attempt in range(max_retries): + try: + logger.debug( + f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})" + ) + result = self.run_model( + api_key=credentials.api_key, + music_gen_model_version=input_data.music_gen_model_version, + prompt=input_data.prompt, + duration=input_data.duration, + temperature=input_data.temperature, + top_k=input_data.top_k, + top_p=input_data.top_p, + classifier_free_guidance=input_data.classifier_free_guidance, + output_format=input_data.output_format, + normalization_strategy=input_data.normalization_strategy, + ) + if result and result != "No output received": + yield "result", result + return + else: + last_error = "Model returned empty or invalid response" + raise ValueError(last_error) + except Exception as e: + last_error = f"Unexpected error: {str(e)}" + logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}") + if attempt < max_retries - 1: + time.sleep(retry_delay) + continue + + # If we've exhausted all retries, yield the error + yield "error", f"Failed after {max_retries} attempts. 
Last error: {last_error}" + + def run_model( + self, + api_key: SecretStr, + music_gen_model_version: MusicGenModelVersion, + prompt: str, + duration: int, + temperature: float, + top_k: int, + top_p: float, + classifier_free_guidance: int, + output_format: AudioFormat, + normalization_strategy: NormalizationStrategy, + ): + # Initialize Replicate client with the API key + client = replicate.Client(api_token=api_key.get_secret_value()) + + # Run the model with parameters + output = client.run( + "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb", + input={ + "prompt": prompt, + "music_gen_model_version": music_gen_model_version, + "duration": duration, + "temperature": temperature, + "top_k": top_k, + "top_p": top_p, + "classifier_free_guidance": classifier_free_guidance, + "output_format": output_format, + "normalization_strategy": normalization_strategy, + }, + ) + + # Handle the output + if isinstance(output, list) and len(output) > 0: + result_url = output[0] # If output is a list, get the first element + elif isinstance(output, str): + result_url = output # If output is a string, use it directly + else: + result_url = ( + "No output received" # Fallback message if output is not as expected + ) + + return result_url diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py new file mode 100644 index 000000000000..df2b3a27263c --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py @@ -0,0 +1,323 @@ +import logging +import time +from enum import Enum +from typing import Literal + +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="revid", + api_key=SecretStr("mock-revid-api-key"), + title="Mock Revid API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class AudioTrack(str, Enum): + OBSERVER = ("Observer",) + FUTURISTIC_BEAT = ("Futuristic Beat",) + SCIENCE_DOCUMENTARY = ("Science Documentary",) + HOTLINE = ("Hotline",) + BLADERUNNER_2049 = ("Bladerunner 2049",) + A_FUTURE = ("A Future",) + ELYSIAN_EMBERS = ("Elysian Embers",) + INSPIRING_CINEMATIC = ("Inspiring Cinematic",) + BLADERUNNER_REMIX = ("Bladerunner Remix",) + IZZAMUZZIC = ("Izzamuzzic",) + NAS = ("Nas",) + PARIS_ELSE = ("Paris - Else",) + SNOWFALL = ("Snowfall",) + BURLESQUE = ("Burlesque",) + CORNY_CANDY = ("Corny Candy",) + HIGHWAY_NOCTURNE = ("Highway Nocturne",) + I_DONT_THINK_SO = ("I Don't Think So",) + LOSING_YOUR_MARBLES = ("Losing Your Marbles",) + REFRESHER = ("Refresher",) + TOURIST = ("Tourist",) + TWIN_TYCHES = ("Twin Tyches",) + + @property + def audio_url(self): + audio_urls = { + AudioTrack.OBSERVER: "https://cdn.tfrv.xyz/audio/observer.mp3", + AudioTrack.FUTURISTIC_BEAT: "https://cdn.tfrv.xyz/audio/_futuristic-beat.mp3", + AudioTrack.SCIENCE_DOCUMENTARY: "https://cdn.tfrv.xyz/audio/_science-documentary.mp3", + AudioTrack.HOTLINE: "https://cdn.tfrv.xyz/audio/_hotline.mp3", + AudioTrack.BLADERUNNER_2049: 
"https://cdn.tfrv.xyz/audio/_bladerunner-2049.mp3", + AudioTrack.A_FUTURE: "https://cdn.tfrv.xyz/audio/a-future.mp3", + AudioTrack.ELYSIAN_EMBERS: "https://cdn.tfrv.xyz/audio/elysian-embers.mp3", + AudioTrack.INSPIRING_CINEMATIC: "https://cdn.tfrv.xyz/audio/inspiring-cinematic-ambient.mp3", + AudioTrack.BLADERUNNER_REMIX: "https://cdn.tfrv.xyz/audio/bladerunner-remix.mp3", + AudioTrack.IZZAMUZZIC: "https://cdn.tfrv.xyz/audio/_izzamuzzic.mp3", + AudioTrack.NAS: "https://cdn.tfrv.xyz/audio/_nas.mp3", + AudioTrack.PARIS_ELSE: "https://cdn.tfrv.xyz/audio/_paris-else.mp3", + AudioTrack.SNOWFALL: "https://cdn.tfrv.xyz/audio/_snowfall.mp3", + AudioTrack.BURLESQUE: "https://cdn.tfrv.xyz/audio/burlesque.mp3", + AudioTrack.CORNY_CANDY: "https://cdn.tfrv.xyz/audio/corny-candy.mp3", + AudioTrack.HIGHWAY_NOCTURNE: "https://cdn.tfrv.xyz/audio/highway-nocturne.mp3", + AudioTrack.I_DONT_THINK_SO: "https://cdn.tfrv.xyz/audio/i-dont-think-so.mp3", + AudioTrack.LOSING_YOUR_MARBLES: "https://cdn.tfrv.xyz/audio/losing-your-marbles.mp3", + AudioTrack.REFRESHER: "https://cdn.tfrv.xyz/audio/refresher.mp3", + AudioTrack.TOURIST: "https://cdn.tfrv.xyz/audio/tourist.mp3", + AudioTrack.TWIN_TYCHES: "https://cdn.tfrv.xyz/audio/twin-tynches.mp3", + } + return audio_urls[self] + + +class GenerationPreset(str, Enum): + LEONARDO = ("Default",) + ANIME = ("Anime",) + REALISM = ("Realist",) + ILLUSTRATION = ("Illustration",) + SKETCH_COLOR = ("Sketch Color",) + SKETCH_BW = ("Sketch B&W",) + PIXAR = ("Pixar",) + INK = ("Japanese Ink",) + RENDER_3D = ("3D Render",) + LEGO = ("Lego",) + SCIFI = ("Sci-Fi",) + RECRO_CARTOON = ("Retro Cartoon",) + PIXEL_ART = ("Pixel Art",) + CREATIVE = ("Creative",) + PHOTOGRAPHY = ("Photography",) + RAYTRACED = ("Raytraced",) + ENVIRONMENT = ("Environment",) + FANTASY = ("Fantasy",) + ANIME_SR = ("Anime Realism",) + MOVIE = ("Movie",) + STYLIZED_ILLUSTRATION = ("Stylized Illustration",) + MANGA = ("Manga",) + + +class Voice(str, Enum): + LILY = "Lily" + DANIEL = "Daniel" + BRIAN = "Brian" + JESSICA = "Jessica" + CHARLOTTE = "Charlotte" + CALLUM = "Callum" + + @property + def voice_id(self): + voice_id_map = { + Voice.LILY: "pFZP5JQG7iQjIQuC4Bku", + Voice.DANIEL: "onwK4e9ZLuTAKqWW03F9", + Voice.BRIAN: "nPczCjzI2devNBz1zQrb", + Voice.JESSICA: "cgSgspJ2msm6clMCkdW9", + Voice.CHARLOTTE: "XB0fDUnXU5powFXDhCwa", + Voice.CALLUM: "N2lVS1w4EtoT3dr4eOWO", + } + return voice_id_map[self] + + def __str__(self): + return self.value + + +class VisualMediaType(str, Enum): + STOCK_VIDEOS = ("stockVideo",) + MOVING_AI_IMAGES = ("movingImage",) + AI_VIDEO = ("aiVideo",) + + +logger = logging.getLogger(__name__) + + +class AIShortformVideoCreatorBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.REVID], Literal["api_key"] + ] = CredentialsField( + description="The revid.ai integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + script: str = SchemaField( + description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. 
For example, [close-up of a cat] will show a close-up of a cat.""", + placeholder="[close-up of a cat] Meow!", + ) + ratio: str = SchemaField( + description="Aspect ratio of the video", default="9 / 16" + ) + resolution: str = SchemaField( + description="Resolution of the video", default="720p" + ) + frame_rate: int = SchemaField(description="Frame rate of the video", default=60) + generation_preset: GenerationPreset = SchemaField( + description="Generation preset for visual style - only effects AI generated visuals", + default=GenerationPreset.LEONARDO, + placeholder=GenerationPreset.LEONARDO, + ) + background_music: AudioTrack = SchemaField( + description="Background music track", + default=AudioTrack.HIGHWAY_NOCTURNE, + placeholder=AudioTrack.HIGHWAY_NOCTURNE, + ) + voice: Voice = SchemaField( + description="AI voice to use for narration", + default=Voice.LILY, + placeholder=Voice.LILY, + ) + video_style: VisualMediaType = SchemaField( + description="Type of visual media to use for the video", + default=VisualMediaType.STOCK_VIDEOS, + placeholder=VisualMediaType.STOCK_VIDEOS, + ) + + class Output(BlockSchema): + video_url: str = SchemaField(description="The URL of the created video") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="361697fb-0c4f-4feb-aed3-8320c88c771b", + description="Creates a shortform video using revid.ai", + categories={BlockCategory.SOCIAL, BlockCategory.AI}, + input_schema=AIShortformVideoCreatorBlock.Input, + output_schema=AIShortformVideoCreatorBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "script": "[close-up of a cat] Meow!", + "ratio": "9 / 16", + "resolution": "720p", + "frame_rate": 60, + "generation_preset": GenerationPreset.LEONARDO, + "background_music": AudioTrack.HIGHWAY_NOCTURNE, + "voice": Voice.LILY, + "video_style": VisualMediaType.STOCK_VIDEOS, + }, + test_output=( + "video_url", + "https://example.com/video.mp4", + ), + test_mock={ + "create_webhook": lambda: ( + "test_uuid", + "https://webhook.site/test_uuid", + ), + "create_video": lambda api_key, payload: {"pid": "test_pid"}, + "wait_for_video": lambda api_key, pid, webhook_token, max_wait_time=1000: "https://example.com/video.mp4", + }, + test_credentials=TEST_CREDENTIALS, + ) + + def create_webhook(self): + url = "https://webhook.site/token" + headers = {"Accept": "application/json", "Content-Type": "application/json"} + response = requests.post(url, headers=headers) + webhook_data = response.json() + return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}" + + def create_video(self, api_key: SecretStr, payload: dict) -> dict: + url = "https://www.revid.ai/api/public/v2/render" + headers = {"key": api_key.get_secret_value()} + response = requests.post(url, json=payload, headers=headers) + logger.debug( + f"API Response Status Code: {response.status_code}, Content: {response.text}" + ) + return response.json() + + def check_video_status(self, api_key: SecretStr, pid: str) -> dict: + url = f"https://www.revid.ai/api/public/v2/status?pid={pid}" + headers = {"key": api_key.get_secret_value()} + response = requests.get(url, headers=headers) + return response.json() + + def wait_for_video( + self, + api_key: SecretStr, + pid: str, + webhook_token: str, + max_wait_time: int = 1000, + ) -> str: + start_time = time.time() + while time.time() - start_time < max_wait_time: + status = self.check_video_status(api_key, pid) + logger.debug(f"Video status: {status}") + + if 
status.get("status") == "ready" and "videoUrl" in status: + return status["videoUrl"] + elif status.get("status") == "error": + error_message = status.get("error", "Unknown error occurred") + logger.error(f"Video creation failed: {error_message}") + raise ValueError(f"Video creation failed: {error_message}") + elif status.get("status") in ["FAILED", "CANCELED"]: + logger.error(f"Video creation failed: {status.get('message')}") + raise ValueError(f"Video creation failed: {status.get('message')}") + + time.sleep(10) + + logger.error("Video creation timed out") + raise TimeoutError("Video creation timed out") + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + # Create a new Webhook.site URL + webhook_token, webhook_url = self.create_webhook() + logger.debug(f"Webhook URL: {webhook_url}") + + audio_url = input_data.background_music.audio_url + + payload = { + "frameRate": input_data.frame_rate, + "resolution": input_data.resolution, + "frameDurationMultiplier": 18, + "webhook": webhook_url, + "creationParams": { + "mediaType": input_data.video_style, + "captionPresetName": "Wrap 1", + "selectedVoice": input_data.voice.voice_id, + "hasEnhancedGeneration": True, + "generationPreset": input_data.generation_preset.name, + "selectedAudio": input_data.background_music, + "origin": "/create", + "inputText": input_data.script, + "flowType": "text-to-video", + "slug": "create-tiktok-video", + "hasToGenerateVoice": True, + "hasToTranscript": False, + "hasToSearchMedia": True, + "hasAvatar": False, + "hasWebsiteRecorder": False, + "hasTextSmallAtBottom": False, + "ratio": input_data.ratio, + "sourceType": "contentScraping", + "selectedStoryStyle": {"value": "custom", "label": "Custom"}, + "hasToGenerateVideos": input_data.video_style + != VisualMediaType.STOCK_VIDEOS, + "audioUrl": audio_url, + }, + } + + logger.debug("Creating video...") + response = self.create_video(credentials.api_key, payload) + pid = response.get("pid") + + if not pid: + logger.error( + f"Failed to create video: No project ID returned. API Response: {response}" + ) + raise RuntimeError("Failed to create video: No project ID returned") + else: + logger.debug( + f"Video created with project ID: {pid}. Waiting for completion..." + ) + video_url = self.wait_for_video(credentials.api_key, pid, webhook_token) + logger.debug(f"Video ready: {video_url}") + yield "video_url", video_url diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py new file mode 100644 index 000000000000..63aea34883e7 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -0,0 +1,717 @@ +import enum +from typing import Any, List + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType +from backend.data.model import SchemaField +from backend.util.file import MediaFile, store_media_file +from backend.util.mock import MockObject +from backend.util.text import TextFormatter +from backend.util.type import convert + +formatter = TextFormatter() + + +class FileStoreBlock(Block): + class Input(BlockSchema): + file_in: MediaFile = SchemaField( + description="The file to store in the temporary directory, it can be a URL, data URI, or local path." + ) + + class Output(BlockSchema): + file_out: MediaFile = SchemaField( + description="The relative path to the stored file in the temporary directory." 
+ ) + + def __init__(self): + super().__init__( + id="cbb50872-625b-42f0-8203-a2ae78242d8a", + description="Stores the input file in the temporary directory.", + categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA}, + input_schema=FileStoreBlock.Input, + output_schema=FileStoreBlock.Output, + static_output=True, + ) + + def run( + self, + input_data: Input, + *, + graph_exec_id: str, + **kwargs, + ) -> BlockOutput: + file_path = store_media_file( + graph_exec_id=graph_exec_id, + file=input_data.file_in, + return_content=False, + ) + yield "file_out", file_path + + +class StoreValueBlock(Block): + """ + This block allows you to provide a constant value as a block, in a stateless manner. + The common use-case is simply pass the `input` data, it will `output` the same data. + The block output will be static, the output can be consumed multiple times. + """ + + class Input(BlockSchema): + input: Any = SchemaField( + description="Trigger the block to produce the output. " + "The value is only used when `data` is None." + ) + data: Any = SchemaField( + description="The constant data to be retained in the block. " + "This value is passed as `output`.", + default=None, + ) + + class Output(BlockSchema): + output: Any = SchemaField(description="The stored data retained in the block.") + + def __init__(self): + super().__init__( + id="1ff065e9-88e8-4358-9d82-8dc91f622ba9", + description="This block forwards an input value as output, allowing reuse without change.", + categories={BlockCategory.BASIC}, + input_schema=StoreValueBlock.Input, + output_schema=StoreValueBlock.Output, + test_input=[ + {"input": "Hello, World!"}, + {"input": "Hello, World!", "data": "Existing Data"}, + ], + test_output=[ + ("output", "Hello, World!"), # No data provided, so trigger is returned + ("output", "Existing Data"), # Data is provided, so data is returned. 
+ ], + static_output=True, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "output", input_data.data or input_data.input + + +class PrintToConsoleBlock(Block): + class Input(BlockSchema): + text: str = SchemaField(description="The text to print to the console.") + + class Output(BlockSchema): + status: str = SchemaField(description="The status of the print operation.") + + def __init__(self): + super().__init__( + id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c", + description="Print the given text to the console, this is used for a debugging purpose.", + categories={BlockCategory.BASIC}, + input_schema=PrintToConsoleBlock.Input, + output_schema=PrintToConsoleBlock.Output, + test_input={"text": "Hello, World!"}, + test_output=("status", "printed"), + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + print(">>>>> Print: ", input_data.text) + yield "status", "printed" + + +class FindInDictionaryBlock(Block): + class Input(BlockSchema): + input: Any = SchemaField(description="Dictionary to lookup from") + key: str | int = SchemaField(description="Key to lookup in the dictionary") + + class Output(BlockSchema): + output: Any = SchemaField(description="Value found for the given key") + missing: Any = SchemaField( + description="Value of the input that missing the key" + ) + + def __init__(self): + super().__init__( + id="0e50422c-6dee-4145-83d6-3a5a392f65de", + description="Lookup the given key in the input dictionary/object/list and return the value.", + input_schema=FindInDictionaryBlock.Input, + output_schema=FindInDictionaryBlock.Output, + test_input=[ + {"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"}, + {"input": {"x": 10, "y": 20, "z": 30}, "key": "w"}, + {"input": [1, 2, 3], "key": 1}, + {"input": [1, 2, 3], "key": 3}, + {"input": MockObject(value="!!", key="key"), "key": "key"}, + {"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"}, + ], + test_output=[ + ("output", 2), + ("missing", {"x": 10, "y": 20, "z": 30}), + ("output", 2), + ("missing", [1, 2, 3]), + ("output", "key"), + ("output", ["v1", "v3"]), + ], + categories={BlockCategory.BASIC}, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + obj = input_data.input + key = input_data.key + + if isinstance(obj, dict) and key in obj: + yield "output", obj[key] + elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj): + yield "output", obj[key] + elif isinstance(obj, list) and isinstance(key, str): + if len(obj) == 0: + yield "output", [] + elif isinstance(obj[0], dict) and key in obj[0]: + yield "output", [item[key] for item in obj if key in item] + else: + yield "output", [getattr(val, key) for val in obj if hasattr(val, key)] + elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key): + yield "output", getattr(obj, key) + else: + yield "missing", input_data.input + + +class AgentInputBlock(Block): + """ + This block is used to provide input to the graph. + + It takes in a value, name, description, default values list and bool to limit selection to default values. + + It Outputs the value passed as input. 
+ """ + + class Input(BlockSchema): + name: str = SchemaField(description="The name of the input.") + value: Any = SchemaField( + description="The value to be passed as input.", + default=None, + ) + title: str | None = SchemaField( + description="The title of the input.", default=None, advanced=True + ) + description: str | None = SchemaField( + description="The description of the input.", + default=None, + advanced=True, + ) + placeholder_values: List[Any] = SchemaField( + description="The placeholder values to be passed as input.", + default=[], + advanced=True, + ) + limit_to_placeholder_values: bool = SchemaField( + description="Whether to limit the selection to placeholder values.", + default=False, + advanced=True, + ) + advanced: bool = SchemaField( + description="Whether to show the input in the advanced section, if the field is not required.", + default=False, + advanced=True, + ) + secret: bool = SchemaField( + description="Whether the input should be treated as a secret.", + default=False, + advanced=True, + ) + + class Output(BlockSchema): + result: Any = SchemaField(description="The value passed as input.") + + def __init__(self): + super().__init__( + id="c0a8e994-ebf1-4a9c-a4d8-89d09c86741b", + description="This block is used to provide input to the graph.", + input_schema=AgentInputBlock.Input, + output_schema=AgentInputBlock.Output, + test_input=[ + { + "value": "Hello, World!", + "name": "input_1", + "description": "This is a test input.", + "placeholder_values": [], + "limit_to_placeholder_values": False, + }, + { + "value": "Hello, World!", + "name": "input_2", + "description": "This is a test input.", + "placeholder_values": ["Hello, World!"], + "limit_to_placeholder_values": True, + }, + ], + test_output=[ + ("result", "Hello, World!"), + ("result", "Hello, World!"), + ], + categories={BlockCategory.INPUT, BlockCategory.BASIC}, + block_type=BlockType.INPUT, + static_output=True, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "result", input_data.value + + +class AgentOutputBlock(Block): + """ + Records the output of the graph for users to see. + + Behavior: + If `format` is provided and the `value` is of a type that can be formatted, + the block attempts to format the recorded_value using the `format`. + If formatting fails or no `format` is provided, the raw `value` is output. + """ + + class Input(BlockSchema): + value: Any = SchemaField( + description="The value to be recorded as output.", + default=None, + advanced=False, + ) + name: str = SchemaField(description="The name of the output.") + title: str | None = SchemaField( + description="The title of the output.", + default=None, + advanced=True, + ) + description: str | None = SchemaField( + description="The description of the output.", + default=None, + advanced=True, + ) + format: str = SchemaField( + description="The format string to be used to format the recorded_value. 
Use Jinja2 syntax.", + default="", + advanced=True, + ) + advanced: bool = SchemaField( + description="Whether to treat the output as advanced.", + default=False, + advanced=True, + ) + secret: bool = SchemaField( + description="Whether the output should be treated as a secret.", + default=False, + advanced=True, + ) + + class Output(BlockSchema): + output: Any = SchemaField(description="The value recorded as output.") + + def __init__(self): + super().__init__( + id="363ae599-353e-4804-937e-b2ee3cef3da4", + description="Stores the output of the graph for users to see.", + input_schema=AgentOutputBlock.Input, + output_schema=AgentOutputBlock.Output, + test_input=[ + { + "value": "Hello, World!", + "name": "output_1", + "description": "This is a test output.", + "format": "{{ output_1 }}!!", + }, + { + "value": "42", + "name": "output_2", + "description": "This is another test output.", + "format": "{{ output_2 }}", + }, + { + "value": MockObject(value="!!", key="key"), + "name": "output_3", + "description": "This is a test output with a mock object.", + "format": "{{ output_3 }}", + }, + ], + test_output=[ + ("output", "Hello, World!!!"), + ("output", "42"), + ("output", MockObject(value="!!", key="key")), + ], + categories={BlockCategory.OUTPUT, BlockCategory.BASIC}, + block_type=BlockType.OUTPUT, + static_output=True, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + """ + Attempts to format the recorded_value using the fmt_string if provided. + If formatting fails or no fmt_string is given, returns the original recorded_value. + """ + if input_data.format: + try: + yield "output", formatter.format_string( + input_data.format, {input_data.name: input_data.value} + ) + except Exception as e: + yield "output", f"Error: {e}, {input_data.value}" + else: + yield "output", input_data.value + + +class AddToDictionaryBlock(Block): + class Input(BlockSchema): + dictionary: dict[Any, Any] = SchemaField( + default={}, + description="The dictionary to add the entry to. If not provided, a new dictionary will be created.", + ) + key: str = SchemaField( + default="", + description="The key for the new entry.", + placeholder="new_key", + advanced=False, + ) + value: Any = SchemaField( + default=None, + description="The value for the new entry.", + placeholder="new_value", + advanced=False, + ) + entries: dict[Any, Any] = SchemaField( + default={}, + description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.", + advanced=True, + ) + + class Output(BlockSchema): + updated_dictionary: dict = SchemaField( + description="The dictionary with the new entry added." + ) + error: str = SchemaField(description="Error message if the operation failed.") + + def __init__(self): + super().__init__( + id="31d1064e-7446-4693-a7d4-65e5ca1180d1", + description="Adds a new key-value pair to a dictionary. 
If no dictionary is provided, a new one is created.", + categories={BlockCategory.BASIC}, + input_schema=AddToDictionaryBlock.Input, + output_schema=AddToDictionaryBlock.Output, + test_input=[ + { + "dictionary": {"existing_key": "existing_value"}, + "key": "new_key", + "value": "new_value", + }, + {"key": "first_key", "value": "first_value"}, + { + "dictionary": {"existing_key": "existing_value"}, + "entries": {"new_key": "new_value", "first_key": "first_value"}, + }, + ], + test_output=[ + ( + "updated_dictionary", + {"existing_key": "existing_value", "new_key": "new_value"}, + ), + ("updated_dictionary", {"first_key": "first_value"}), + ( + "updated_dictionary", + { + "existing_key": "existing_value", + "new_key": "new_value", + "first_key": "first_value", + }, + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + updated_dict = input_data.dictionary.copy() + + if input_data.value is not None and input_data.key: + updated_dict[input_data.key] = input_data.value + + for key, value in input_data.entries.items(): + updated_dict[key] = value + + yield "updated_dictionary", updated_dict + + +class AddToListBlock(Block): + class Input(BlockSchema): + list: List[Any] = SchemaField( + default=[], + advanced=False, + description="The list to add the entry to. If not provided, a new list will be created.", + ) + entry: Any = SchemaField( + description="The entry to add to the list. Can be of any type (string, int, dict, etc.).", + advanced=False, + default=None, + ) + entries: List[Any] = SchemaField( + default=[], + description="The entries to add to the list. This is the batch version of the `entry` field.", + advanced=True, + ) + position: int | None = SchemaField( + default=None, + description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.", + ) + + class Output(BlockSchema): + updated_list: List[Any] = SchemaField( + description="The list with the new entry added." + ) + error: str = SchemaField(description="Error message if the operation failed.") + + def __init__(self): + super().__init__( + id="aeb08fc1-2fc1-4141-bc8e-f758f183a822", + description="Adds a new entry to a list. The entry can be of any type. 
If no list is provided, a new one is created.", + categories={BlockCategory.BASIC}, + input_schema=AddToListBlock.Input, + output_schema=AddToListBlock.Output, + test_input=[ + { + "list": [1, "string", {"existing_key": "existing_value"}], + "entry": {"new_key": "new_value"}, + "position": 1, + }, + {"entry": "first_entry"}, + {"list": ["a", "b", "c"], "entry": "d"}, + { + "entry": "e", + "entries": ["f", "g"], + "list": ["a", "b"], + "position": 1, + }, + ], + test_output=[ + ( + "updated_list", + [ + 1, + {"new_key": "new_value"}, + "string", + {"existing_key": "existing_value"}, + ], + ), + ("updated_list", ["first_entry"]), + ("updated_list", ["a", "b", "c", "d"]), + ("updated_list", ["a", "f", "g", "e", "b"]), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + entries_added = input_data.entries.copy() + if input_data.entry: + entries_added.append(input_data.entry) + + updated_list = input_data.list.copy() + if (pos := input_data.position) is not None: + updated_list = updated_list[:pos] + entries_added + updated_list[pos:] + else: + updated_list += entries_added + + yield "updated_list", updated_list + + +class FindInListBlock(Block): + class Input(BlockSchema): + list: List[Any] = SchemaField(description="The list to search in.") + value: Any = SchemaField(description="The value to search for.") + + class Output(BlockSchema): + index: int = SchemaField(description="The index of the value in the list.") + found: bool = SchemaField( + description="Whether the value was found in the list." + ) + not_found_value: Any = SchemaField( + description="The value that was not found in the list." + ) + + def __init__(self): + super().__init__( + id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333", + description="Finds the index of the value in the list.", + categories={BlockCategory.BASIC}, + input_schema=FindInListBlock.Input, + output_schema=FindInListBlock.Output, + test_input=[ + {"list": [1, 2, 3, 4, 5], "value": 3}, + {"list": [1, 2, 3, 4, 5], "value": 6}, + ], + test_output=[ + ("index", 2), + ("found", True), + ("found", False), + ("not_found_value", 6), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + yield "index", input_data.list.index(input_data.value) + yield "found", True + except ValueError: + yield "found", False + yield "not_found_value", input_data.value + + +class NoteBlock(Block): + class Input(BlockSchema): + text: str = SchemaField(description="The text to display in the sticky note.") + + class Output(BlockSchema): + output: str = SchemaField(description="The text to display in the sticky note.") + + def __init__(self): + super().__init__( + id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc", + description="This block is used to display a sticky note with the given text.", + categories={BlockCategory.BASIC}, + input_schema=NoteBlock.Input, + output_schema=NoteBlock.Output, + test_input={"text": "Hello, World!"}, + test_output=[ + ("output", "Hello, World!"), + ], + block_type=BlockType.NOTE, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "output", input_data.text + + +class CreateDictionaryBlock(Block): + class Input(BlockSchema): + values: dict[str, Any] = SchemaField( + description="Key-value pairs to create the dictionary with", + placeholder="e.g., {'name': 'Alice', 'age': 25}", + ) + + class Output(BlockSchema): + dictionary: dict[str, Any] = SchemaField( + description="The created dictionary containing the specified key-value pairs" + ) + error: str = SchemaField( + description="Error message if 
dictionary creation failed" + ) + + def __init__(self): + super().__init__( + id="b924ddf4-de4f-4b56-9a85-358930dcbc91", + description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.", + categories={BlockCategory.DATA}, + input_schema=CreateDictionaryBlock.Input, + output_schema=CreateDictionaryBlock.Output, + test_input=[ + { + "values": {"name": "Alice", "age": 25, "city": "New York"}, + }, + { + "values": {"numbers": [1, 2, 3], "active": True, "score": 95.5}, + }, + ], + test_output=[ + ( + "dictionary", + {"name": "Alice", "age": 25, "city": "New York"}, + ), + ( + "dictionary", + {"numbers": [1, 2, 3], "active": True, "score": 95.5}, + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + # The values are already validated by Pydantic schema + yield "dictionary", input_data.values + except Exception as e: + yield "error", f"Failed to create dictionary: {str(e)}" + + +class CreateListBlock(Block): + class Input(BlockSchema): + values: List[Any] = SchemaField( + description="A list of values to be combined into a new list.", + placeholder="e.g., ['Alice', 25, True]", + ) + + class Output(BlockSchema): + list: List[Any] = SchemaField( + description="The created list containing the specified values." + ) + error: str = SchemaField(description="Error message if list creation failed.") + + def __init__(self): + super().__init__( + id="a912d5c7-6e00-4542-b2a9-8034136930e4", + description="Creates a list with the specified values. Use this when you know all the values you want to add upfront.", + categories={BlockCategory.DATA}, + input_schema=CreateListBlock.Input, + output_schema=CreateListBlock.Output, + test_input=[ + { + "values": ["Alice", 25, True], + }, + { + "values": [1, 2, 3, "four", {"key": "value"}], + }, + ], + test_output=[ + ( + "list", + ["Alice", 25, True], + ), + ( + "list", + [1, 2, 3, "four", {"key": "value"}], + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + # The values are already validated by Pydantic schema + yield "list", input_data.values + except Exception as e: + yield "error", f"Failed to create list: {str(e)}" + + +class TypeOptions(enum.Enum): + STRING = "string" + NUMBER = "number" + BOOLEAN = "boolean" + LIST = "list" + DICTIONARY = "dictionary" + + +class UniversalTypeConverterBlock(Block): + class Input(BlockSchema): + value: Any = SchemaField( + description="The value to convert to a universal type." 
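+            # For example (illustrative values): passing value="42" with type=NUMBER is
+            # intended to return 42.0, since the run method below hands the value to
+            # convert() together with float as the target type; LIST and DICTIONARY map
+            # to list and dict in the same way.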
+ ) + type: TypeOptions = SchemaField(description="The type to convert the value to.") + + class Output(BlockSchema): + value: Any = SchemaField(description="The converted value.") + + def __init__(self): + super().__init__( + id="95d1b990-ce13-4d88-9737-ba5c2070c97b", + description="This block is used to convert a value to a universal type.", + categories={BlockCategory.BASIC}, + input_schema=UniversalTypeConverterBlock.Input, + output_schema=UniversalTypeConverterBlock.Output, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + converted_value = convert( + input_data.value, + { + TypeOptions.STRING: str, + TypeOptions.NUMBER: float, + TypeOptions.BOOLEAN: bool, + TypeOptions.LIST: list, + TypeOptions.DICTIONARY: dict, + }[input_data.type], + ) + yield "value", converted_value + except Exception as e: + yield "error", f"Failed to convert value: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py new file mode 100644 index 000000000000..01e8af7238ea --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/block.py @@ -0,0 +1,71 @@ +import os +import re +from typing import Type + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class BlockInstallationBlock(Block): + """ + This block allows the verification and installation of other blocks in the system. + + NOTE: + This block allows remote code execution on the server, and it should be used + for development purposes only. + """ + + class Input(BlockSchema): + code: str = SchemaField( + description="Python code of the block to be installed", + ) + + class Output(BlockSchema): + success: str = SchemaField( + description="Success message if the block is installed successfully", + ) + error: str = SchemaField( + description="Error message if the block installation fails", + ) + + def __init__(self): + super().__init__( + id="45e78db5-03e9-447f-9395-308d712f5f08", + description="Given a code string, this block allows the verification and installation of a block code into the system.", + categories={BlockCategory.BASIC}, + input_schema=BlockInstallationBlock.Input, + output_schema=BlockInstallationBlock.Output, + disabled=True, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + code = input_data.code + + if search := re.search(r"class (\w+)\(Block\):", code): + class_name = search.group(1) + else: + raise RuntimeError("No class found in the code.") + + if search := re.search(r"id=\"(\w+-\w+-\w+-\w+-\w+)\"", code): + file_name = search.group(1) + else: + raise RuntimeError("No UUID found in the code.") + + block_dir = os.path.dirname(__file__) + file_path = f"{block_dir}/{file_name}.py" + module_name = f"backend.blocks.{file_name}" + with open(file_path, "w") as f: + f.write(code) + + try: + module = __import__(module_name, fromlist=[class_name]) + block_class: Type[Block] = getattr(module, class_name) + block = block_class() + + from backend.util.test import execute_block_test + + execute_block_test(block) + yield "success", "Block installed successfully." 
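+            # Illustrative sketch (placeholder class name and UUID) of the kind of code
+            # string this block expects; the two regexes above recover the class name
+            # and the UUID from exactly this shape:
+            #
+            #     class MyCustomBlock(Block):
+            #         class Input(BlockSchema):
+            #             text: str = SchemaField(description="Some input")
+            #
+            #         class Output(BlockSchema):
+            #             result: str = SchemaField(description="Some output")
+            #
+            #         def __init__(self):
+            #             super().__init__(
+            #                 id="11111111-2222-3333-4444-555555555555",
+            #                 input_schema=MyCustomBlock.Input,
+            #                 output_schema=MyCustomBlock.Output,
+            #             )
+            #
+            #         def run(self, input_data: Input, **kwargs) -> BlockOutput:
+            #             yield "result", input_data.text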
+ except Exception as e: + os.remove(file_path) + raise RuntimeError(f"[Code]\n{code}\n\n[Error]\n{str(e)}") diff --git a/autogpt_platform/backend/backend/blocks/branching.py b/autogpt_platform/backend/backend/blocks/branching.py new file mode 100644 index 000000000000..a3424d3374d8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/branching.py @@ -0,0 +1,189 @@ +from enum import Enum +from typing import Any + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class ComparisonOperator(Enum): + EQUAL = "==" + NOT_EQUAL = "!=" + GREATER_THAN = ">" + LESS_THAN = "<" + GREATER_THAN_OR_EQUAL = ">=" + LESS_THAN_OR_EQUAL = "<=" + + +class ConditionBlock(Block): + class Input(BlockSchema): + value1: Any = SchemaField( + description="Enter the first value for comparison", + placeholder="For example: 10 or 'hello' or True", + ) + operator: ComparisonOperator = SchemaField( + description="Choose the comparison operator", + placeholder="Select an operator", + ) + value2: Any = SchemaField( + description="Enter the second value for comparison", + placeholder="For example: 20 or 'world' or False", + ) + yes_value: Any = SchemaField( + description="(Optional) Value to output if the condition is true. If not provided, value1 will be used.", + placeholder="Leave empty to use value1, or enter a specific value", + default=None, + ) + no_value: Any = SchemaField( + description="(Optional) Value to output if the condition is false. If not provided, value1 will be used.", + placeholder="Leave empty to use value1, or enter a specific value", + default=None, + ) + + class Output(BlockSchema): + result: bool = SchemaField( + description="The result of the condition evaluation (True or False)" + ) + yes_output: Any = SchemaField( + description="The output value if the condition is true" + ) + no_output: Any = SchemaField( + description="The output value if the condition is false" + ) + + def __init__(self): + super().__init__( + id="715696a0-e1da-45c8-b209-c2fa9c3b0be6", + input_schema=ConditionBlock.Input, + output_schema=ConditionBlock.Output, + description="Handles conditional logic based on comparison operators", + categories={BlockCategory.LOGIC}, + test_input={ + "value1": 10, + "operator": ComparisonOperator.GREATER_THAN.value, + "value2": 5, + "yes_value": "Greater", + "no_value": "Not greater", + }, + test_output=[ + ("result", True), + ("yes_output", "Greater"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + operator = input_data.operator + + value1 = input_data.value1 + if isinstance(value1, str): + try: + value1 = float(value1.strip()) + except ValueError: + value1 = value1.strip() + + value2 = input_data.value2 + if isinstance(value2, str): + try: + value2 = float(value2.strip()) + except ValueError: + value2 = value2.strip() + + yes_value = input_data.yes_value if input_data.yes_value is not None else value1 + no_value = input_data.no_value if input_data.no_value is not None else value2 + + comparison_funcs = { + ComparisonOperator.EQUAL: lambda a, b: a == b, + ComparisonOperator.NOT_EQUAL: lambda a, b: a != b, + ComparisonOperator.GREATER_THAN: lambda a, b: a > b, + ComparisonOperator.LESS_THAN: lambda a, b: a < b, + ComparisonOperator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b, + ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b, + } + + result = comparison_funcs[operator](value1, value2) + + yield "result", result + + if result: + yield "yes_output", yes_value + else: + yield 
"no_output", no_value + + +class IfInputMatchesBlock(Block): + class Input(BlockSchema): + input: Any = SchemaField( + description="The input to match against", + placeholder="For example: 10 or 'hello' or True", + ) + value: Any = SchemaField( + description="The value to output if the input matches", + placeholder="For example: 'Greater' or 20 or False", + ) + yes_value: Any = SchemaField( + description="The value to output if the input matches", + placeholder="For example: 'Greater' or 20 or False", + default=None, + ) + no_value: Any = SchemaField( + description="The value to output if the input does not match", + placeholder="For example: 'Greater' or 20 or False", + default=None, + ) + + class Output(BlockSchema): + result: bool = SchemaField( + description="The result of the condition evaluation (True or False)" + ) + yes_output: Any = SchemaField( + description="The output value if the condition is true" + ) + no_output: Any = SchemaField( + description="The output value if the condition is false" + ) + + def __init__(self): + super().__init__( + id="6dbbc4b3-ca6c-42b6-b508-da52d23e13f2", + input_schema=IfInputMatchesBlock.Input, + output_schema=IfInputMatchesBlock.Output, + description="Handles conditional logic based on comparison operators", + categories={BlockCategory.LOGIC}, + test_input=[ + { + "input": 10, + "value": 10, + "yes_value": "Greater", + "no_value": "Not greater", + }, + { + "input": 10, + "value": 20, + "yes_value": "Greater", + "no_value": "Not greater", + }, + { + "input": 10, + "value": None, + "yes_value": "Yes", + "no_value": "No", + }, + ], + test_output=[ + ("result", True), + ("yes_output", "Greater"), + ("result", False), + ("no_output", "Not greater"), + ("result", False), + ("no_output", "No"), + # ("result", True), + # ("yes_output", "Yes"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + if input_data.input == input_data.value or input_data.input is input_data.value: + yield "result", True + yield "yes_output", input_data.yes_value + else: + yield "result", False + yield "no_output", input_data.no_value diff --git a/autogpt_platform/backend/backend/blocks/code_executor.py b/autogpt_platform/backend/backend/blocks/code_executor.py new file mode 100644 index 000000000000..29028b27688e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/code_executor.py @@ -0,0 +1,457 @@ +from enum import Enum +from typing import Literal + +from e2b_code_interpreter import Sandbox +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="e2b", + api_key=SecretStr("mock-e2b-api-key"), + title="Mock E2B API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class ProgrammingLanguage(Enum): + PYTHON = "python" + JAVASCRIPT = "js" + BASH = "bash" + R = "r" + JAVA = "java" + + +class CodeExecutionBlock(Block): + # TODO : Add support to upload and download files + # Currently, You can customized the CPU and Memory, only by creating a pre customized sandbox template + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.E2B], Literal["api_key"] 
+ ] = CredentialsField( + description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs", + ) + + # Todo : Option to run commond in background + setup_commands: list[str] = SchemaField( + description=( + "Shell commands to set up the sandbox before running the code. " + "You can use `curl` or `git` to install your desired Debian based " + "package manager. `pip` and `npm` are pre-installed.\n\n" + "These commands are executed with `sh`, in the foreground." + ), + placeholder="pip install cowsay", + default=[], + advanced=False, + ) + + code: str = SchemaField( + description="Code to execute in the sandbox", + placeholder="print('Hello, World!')", + default="", + advanced=False, + ) + + language: ProgrammingLanguage = SchemaField( + description="Programming language to execute", + default=ProgrammingLanguage.PYTHON, + advanced=False, + ) + + timeout: int = SchemaField( + description="Execution timeout in seconds", default=300 + ) + + template_id: str = SchemaField( + description=( + "You can use an E2B sandbox template by entering its ID here. " + "Check out the E2B docs for more details: " + "[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)" + ), + default="", + advanced=True, + ) + + class Output(BlockSchema): + response: str = SchemaField(description="Response from code execution") + stdout_logs: str = SchemaField( + description="Standard output logs from execution" + ) + stderr_logs: str = SchemaField(description="Standard error logs from execution") + error: str = SchemaField(description="Error message if execution failed") + + def __init__(self): + super().__init__( + id="0b02b072-abe7-11ef-8372-fb5d162dd712", + description="Executes code in an isolated sandbox environment with internet access.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=CodeExecutionBlock.Input, + output_schema=CodeExecutionBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "code": "print('Hello World')", + "language": ProgrammingLanguage.PYTHON.value, + "setup_commands": [], + "timeout": 300, + "template_id": "", + }, + test_output=[ + ("response", "Hello World"), + ("stdout_logs", "Hello World\n"), + ], + test_mock={ + "execute_code": lambda code, language, setup_commands, timeout, api_key, template_id: ( + "Hello World", + "Hello World\n", + "", + ), + }, + ) + + def execute_code( + self, + code: str, + language: ProgrammingLanguage, + setup_commands: list[str], + timeout: int, + api_key: str, + template_id: str, + ): + try: + sandbox = None + if template_id: + sandbox = Sandbox( + template=template_id, api_key=api_key, timeout=timeout + ) + else: + sandbox = Sandbox(api_key=api_key, timeout=timeout) + + if not sandbox: + raise Exception("Sandbox not created") + + # Running setup commands + for cmd in setup_commands: + sandbox.commands.run(cmd) + + # Executing the code + execution = sandbox.run_code( + code, + language=language.value, + on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error + ) + + if execution.error: + raise Exception(execution.error) + + response = execution.text + stdout_logs = "".join(execution.logs.stdout) + stderr_logs = "".join(execution.logs.stderr) + + return response, stdout_logs, stderr_logs + + except Exception as e: + raise e + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + response, stdout_logs, stderr_logs = self.execute_code( + input_data.code, + input_data.language, + 
input_data.setup_commands, + input_data.timeout, + credentials.api_key.get_secret_value(), + input_data.template_id, + ) + + if response: + yield "response", response + if stdout_logs: + yield "stdout_logs", stdout_logs + if stderr_logs: + yield "stderr_logs", stderr_logs + except Exception as e: + yield "error", str(e) + + +class InstantiationBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.E2B], Literal["api_key"] + ] = CredentialsField( + description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs", + ) + + # Todo : Option to run commond in background + setup_commands: list[str] = SchemaField( + description=( + "Shell commands to set up the sandbox before running the code. " + "You can use `curl` or `git` to install your desired Debian based " + "package manager. `pip` and `npm` are pre-installed.\n\n" + "These commands are executed with `sh`, in the foreground." + ), + placeholder="pip install cowsay", + default=[], + advanced=False, + ) + + setup_code: str = SchemaField( + description="Code to execute in the sandbox", + placeholder="print('Hello, World!')", + default="", + advanced=False, + ) + + language: ProgrammingLanguage = SchemaField( + description="Programming language to execute", + default=ProgrammingLanguage.PYTHON, + advanced=False, + ) + + timeout: int = SchemaField( + description="Execution timeout in seconds", default=300 + ) + + template_id: str = SchemaField( + description=( + "You can use an E2B sandbox template by entering its ID here. " + "Check out the E2B docs for more details: " + "[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)" + ), + default="", + advanced=True, + ) + + class Output(BlockSchema): + sandbox_id: str = SchemaField(description="ID of the sandbox instance") + response: str = SchemaField(description="Response from code execution") + stdout_logs: str = SchemaField( + description="Standard output logs from execution" + ) + stderr_logs: str = SchemaField(description="Standard error logs from execution") + error: str = SchemaField(description="Error message if execution failed") + + def __init__(self): + super().__init__( + id="ff0861c9-1726-4aec-9e5b-bf53f3622112", + description="Instantiate an isolated sandbox environment with internet access where to execute code in.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=InstantiationBlock.Input, + output_schema=InstantiationBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "setup_code": "print('Hello World')", + "language": ProgrammingLanguage.PYTHON.value, + "setup_commands": [], + "timeout": 300, + "template_id": "", + }, + test_output=[ + ("sandbox_id", str), + ("response", "Hello World"), + ("stdout_logs", "Hello World\n"), + ], + test_mock={ + "execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: ( + "sandbox_id", + "Hello World", + "Hello World\n", + "", + ), + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + sandbox_id, response, stdout_logs, stderr_logs = self.execute_code( + input_data.setup_code, + input_data.language, + input_data.setup_commands, + input_data.timeout, + credentials.api_key.get_secret_value(), + input_data.template_id, + ) + if sandbox_id: + yield "sandbox_id", sandbox_id + else: + yield "error", "Sandbox ID not found" + if response: + yield "response", response + if stdout_logs: + yield 
"stdout_logs", stdout_logs + if stderr_logs: + yield "stderr_logs", stderr_logs + except Exception as e: + yield "error", str(e) + + def execute_code( + self, + code: str, + language: ProgrammingLanguage, + setup_commands: list[str], + timeout: int, + api_key: str, + template_id: str, + ): + try: + sandbox = None + if template_id: + sandbox = Sandbox( + template=template_id, api_key=api_key, timeout=timeout + ) + else: + sandbox = Sandbox(api_key=api_key, timeout=timeout) + + if not sandbox: + raise Exception("Sandbox not created") + + # Running setup commands + for cmd in setup_commands: + sandbox.commands.run(cmd) + + # Executing the code + execution = sandbox.run_code( + code, + language=language.value, + on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error + ) + + if execution.error: + raise Exception(execution.error) + + response = execution.text + stdout_logs = "".join(execution.logs.stdout) + stderr_logs = "".join(execution.logs.stderr) + + return sandbox.sandbox_id, response, stdout_logs, stderr_logs + + except Exception as e: + raise e + + +class StepExecutionBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.E2B], Literal["api_key"] + ] = CredentialsField( + description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs", + ) + + sandbox_id: str = SchemaField( + description="ID of the sandbox instance to execute the code in", + advanced=False, + ) + + step_code: str = SchemaField( + description="Code to execute in the sandbox", + placeholder="print('Hello, World!')", + default="", + advanced=False, + ) + + language: ProgrammingLanguage = SchemaField( + description="Programming language to execute", + default=ProgrammingLanguage.PYTHON, + advanced=False, + ) + + class Output(BlockSchema): + response: str = SchemaField(description="Response from code execution") + stdout_logs: str = SchemaField( + description="Standard output logs from execution" + ) + stderr_logs: str = SchemaField(description="Standard error logs from execution") + error: str = SchemaField(description="Error message if execution failed") + + def __init__(self): + super().__init__( + id="82b59b8e-ea10-4d57-9161-8b169b0adba6", + description="Execute code in a previously instantiated sandbox environment.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=StepExecutionBlock.Input, + output_schema=StepExecutionBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "sandbox_id": "sandbox_id", + "step_code": "print('Hello World')", + "language": ProgrammingLanguage.PYTHON.value, + }, + test_output=[ + ("response", "Hello World"), + ("stdout_logs", "Hello World\n"), + ], + test_mock={ + "execute_step_code": lambda sandbox_id, step_code, language, api_key: ( + "Hello World", + "Hello World\n", + "", + ), + }, + ) + + def execute_step_code( + self, + sandbox_id: str, + code: str, + language: ProgrammingLanguage, + api_key: str, + ): + try: + sandbox = Sandbox.connect(sandbox_id=sandbox_id, api_key=api_key) + if not sandbox: + raise Exception("Sandbox not found") + + # Executing the code + execution = sandbox.run_code(code, language=language.value) + + if execution.error: + raise Exception(execution.error) + + response = execution.text + stdout_logs = "".join(execution.logs.stdout) + stderr_logs = "".join(execution.logs.stderr) + + return response, stdout_logs, stderr_logs + + except Exception as e: + raise e + + def run( + self, input_data: Input, *, 
credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + response, stdout_logs, stderr_logs = self.execute_step_code( + input_data.sandbox_id, + input_data.step_code, + input_data.language, + credentials.api_key.get_secret_value(), + ) + + if response: + yield "response", response + if stdout_logs: + yield "stdout_logs", stdout_logs + if stderr_logs: + yield "stderr_logs", stderr_logs + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/code_extraction_block.py b/autogpt_platform/backend/backend/blocks/code_extraction_block.py new file mode 100644 index 000000000000..ab1e35aa5dca --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/code_extraction_block.py @@ -0,0 +1,110 @@ +import re + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class CodeExtractionBlock(Block): + class Input(BlockSchema): + text: str = SchemaField( + description="Text containing code blocks to extract (e.g., AI response)", + placeholder="Enter text containing code blocks", + ) + + class Output(BlockSchema): + html: str = SchemaField(description="Extracted HTML code") + css: str = SchemaField(description="Extracted CSS code") + javascript: str = SchemaField(description="Extracted JavaScript code") + python: str = SchemaField(description="Extracted Python code") + sql: str = SchemaField(description="Extracted SQL code") + java: str = SchemaField(description="Extracted Java code") + cpp: str = SchemaField(description="Extracted C++ code") + csharp: str = SchemaField(description="Extracted C# code") + json_code: str = SchemaField(description="Extracted JSON code") + bash: str = SchemaField(description="Extracted Bash code") + php: str = SchemaField(description="Extracted PHP code") + ruby: str = SchemaField(description="Extracted Ruby code") + yaml: str = SchemaField(description="Extracted YAML code") + markdown: str = SchemaField(description="Extracted Markdown code") + typescript: str = SchemaField(description="Extracted TypeScript code") + xml: str = SchemaField(description="Extracted XML code") + remaining_text: str = SchemaField( + description="Remaining text after code extraction" + ) + + def __init__(self): + super().__init__( + id="d3a7d896-3b78-4f44-8b4b-48fbf4f0bcd8", + description="Extracts code blocks from text and identifies their programming languages", + categories={BlockCategory.TEXT}, + input_schema=CodeExtractionBlock.Input, + output_schema=CodeExtractionBlock.Output, + test_input={ + "text": "Here's a Python example:\n```python\nprint('Hello World')\n```\nAnd some HTML:\n```html\n

<h1>Title</h1>
\n```" + }, + test_output=[ + ("html", "

<h1>Title</h1>
"), + ("python", "print('Hello World')"), + ("remaining_text", "Here's a Python example:\nAnd some HTML:"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + # List of supported programming languages with mapped aliases + language_aliases = { + "html": ["html", "htm"], + "css": ["css"], + "javascript": ["javascript", "js"], + "python": ["python", "py"], + "sql": ["sql"], + "java": ["java"], + "cpp": ["cpp", "c++"], + "csharp": ["csharp", "c#", "cs"], + "json_code": ["json"], + "bash": ["bash", "shell", "sh"], + "php": ["php"], + "ruby": ["ruby", "rb"], + "yaml": ["yaml", "yml"], + "markdown": ["markdown", "md"], + "typescript": ["typescript", "ts"], + "xml": ["xml"], + } + + # Extract code for each language + for canonical_name, aliases in language_aliases.items(): + code = "" + # Try each alias for the language + for alias in aliases: + code_for_alias = self.extract_code(input_data.text, alias) + if code_for_alias: + code = code + "\n\n" + code_for_alias if code else code_for_alias + + if code: # Only yield if there's actual code content + yield canonical_name, code + + # Remove all code blocks from the text to get remaining text + pattern = ( + r"```(?:" + + "|".join( + re.escape(alias) + for aliases in language_aliases.values() + for alias in aliases + ) + + r")\s+[\s\S]*?```" + ) + + remaining_text = re.sub(pattern, "", input_data.text).strip() + remaining_text = re.sub(r"\n\s*\n", "\n", remaining_text) + + if remaining_text: # Only yield if there's remaining text + yield "remaining_text", remaining_text + + def extract_code(self, text: str, language: str) -> str: + # Escape special regex characters in the language string + language = re.escape(language) + # Extract all code blocks enclosed in ```language``` blocks + pattern = re.compile(rf"```{language}\s+(.*?)```", re.DOTALL | re.IGNORECASE) + matches = pattern.finditer(text) + # Combine all code blocks for this language with newlines between them + code_blocks = [match.group(1).strip() for match in matches] + return "\n\n".join(code_blocks) if code_blocks else "" diff --git a/autogpt_platform/backend/backend/blocks/compass/triggers.py b/autogpt_platform/backend/backend/blocks/compass/triggers.py new file mode 100644 index 000000000000..c17becd9acbd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/compass/triggers.py @@ -0,0 +1,59 @@ +from pydantic import BaseModel + +from backend.data.block import ( + Block, + BlockCategory, + BlockManualWebhookConfig, + BlockOutput, + BlockSchema, +) +from backend.data.model import SchemaField +from backend.integrations.webhooks.compass import CompassWebhookType + + +class Transcription(BaseModel): + text: str + speaker: str + end: float + start: float + duration: float + + +class TranscriptionDataModel(BaseModel): + date: str + transcription: str + transcriptions: list[Transcription] + + +class CompassAITriggerBlock(Block): + class Input(BlockSchema): + payload: TranscriptionDataModel = SchemaField(hidden=True) + + class Output(BlockSchema): + transcription: str = SchemaField( + description="The contents of the compass transcription." 
+ ) + + def __init__(self): + super().__init__( + id="9464a020-ed1d-49e1-990f-7f2ac924a2b7", + description="This block will output the contents of the compass transcription.", + categories={BlockCategory.HARDWARE}, + input_schema=CompassAITriggerBlock.Input, + output_schema=CompassAITriggerBlock.Output, + webhook_config=BlockManualWebhookConfig( + provider="compass", + webhook_type=CompassWebhookType.TRANSCRIPTION, + ), + test_input=[ + {"input": "Hello, World!"}, + {"input": "Hello, World!", "data": "Existing Data"}, + ], + # test_output=[ + # ("output", "Hello, World!"), # No data provided, so trigger is returned + # ("output", "Existing Data"), # Data is provided, so data is returned. + # ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "transcription", input_data.payload.transcription diff --git a/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py b/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py new file mode 100644 index 000000000000..13f9e3977941 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/count_words_and_char_block.py @@ -0,0 +1,43 @@ +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class WordCharacterCountBlock(Block): + class Input(BlockSchema): + text: str = SchemaField( + description="Input text to count words and characters", + placeholder="Enter your text here", + advanced=False, + ) + + class Output(BlockSchema): + word_count: int = SchemaField(description="Number of words in the input text") + character_count: int = SchemaField( + description="Number of characters in the input text" + ) + error: str = SchemaField( + description="Error message if the counting operation failed" + ) + + def __init__(self): + super().__init__( + id="ab2a782d-22cf-4587-8a70-55b59b3f9f90", + description="Counts the number of words and characters in a given text.", + categories={BlockCategory.TEXT}, + input_schema=WordCharacterCountBlock.Input, + output_schema=WordCharacterCountBlock.Output, + test_input={"text": "Hello, how are you?"}, + test_output=[("word_count", 4), ("character_count", 19)], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + text = input_data.text + word_count = len(text.split()) + character_count = len(text) + + yield "word_count", word_count + yield "character_count", character_count + + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/csv.py b/autogpt_platform/backend/backend/blocks/csv.py new file mode 100644 index 000000000000..e78c8994737a --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/csv.py @@ -0,0 +1,109 @@ +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ContributorDetails, SchemaField + + +class ReadCsvBlock(Block): + class Input(BlockSchema): + contents: str = SchemaField( + description="The contents of the CSV file to read", + placeholder="a, b, c\n1,2,3\n4,5,6", + ) + delimiter: str = SchemaField( + description="The delimiter used in the CSV file", + default=",", + ) + quotechar: str = SchemaField( + description="The character used to quote fields", + default='"', + ) + escapechar: str = SchemaField( + description="The character used to escape the delimiter", + default="\\", + ) + has_header: bool = SchemaField( + description="Whether the CSV file has a header row", + default=True, + ) + skip_rows: int = SchemaField( + description="The number of rows to 
skip from the start of the file", + default=0, + ) + strip: bool = SchemaField( + description="Whether to strip whitespace from the values", + default=True, + ) + skip_columns: list[str] = SchemaField( + description="The columns to skip from the start of the row", + default=[], + ) + + class Output(BlockSchema): + row: dict[str, str] = SchemaField( + description="The data produced from each row in the CSV file" + ) + all_data: list[dict[str, str]] = SchemaField( + description="All the data in the CSV file as a list of rows" + ) + + def __init__(self): + super().__init__( + id="acf7625e-d2cb-4941-bfeb-2819fc6fc015", + input_schema=ReadCsvBlock.Input, + output_schema=ReadCsvBlock.Output, + description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.", + contributors=[ContributorDetails(name="Nicholas Tindle")], + categories={BlockCategory.TEXT, BlockCategory.DATA}, + test_input={ + "contents": "a, b, c\n1,2,3\n4,5,6", + }, + test_output=[ + ("row", {"a": "1", "b": "2", "c": "3"}), + ("row", {"a": "4", "b": "5", "c": "6"}), + ( + "all_data", + [ + {"a": "1", "b": "2", "c": "3"}, + {"a": "4", "b": "5", "c": "6"}, + ], + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + import csv + from io import StringIO + + csv_file = StringIO(input_data.contents) + reader = csv.reader( + csv_file, + delimiter=input_data.delimiter, + quotechar=input_data.quotechar, + escapechar=input_data.escapechar, + ) + + header = None + if input_data.has_header: + header = next(reader) + if input_data.strip: + header = [h.strip() for h in header] + + for _ in range(input_data.skip_rows): + next(reader) + + def process_row(row): + data = {} + for i, value in enumerate(row): + if i not in input_data.skip_columns: + if input_data.has_header and header: + data[header[i]] = value.strip() if input_data.strip else value + else: + data[str(i)] = value.strip() if input_data.strip else value + return data + + all_data = [] + for row in reader: + processed_row = process_row(row) + all_data.append(processed_row) + yield "row", processed_row + + yield "all_data", all_data diff --git a/autogpt_platform/backend/backend/blocks/decoder_block.py b/autogpt_platform/backend/backend/blocks/decoder_block.py new file mode 100644 index 000000000000..033cdfb0b355 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/decoder_block.py @@ -0,0 +1,39 @@ +import codecs + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TextDecoderBlock(Block): + class Input(BlockSchema): + text: str = SchemaField( + description="A string containing escaped characters to be decoded", + placeholder='Your entire text block with \\n and \\" escaped characters', + ) + + class Output(BlockSchema): + decoded_text: str = SchemaField( + description="The decoded text with escape sequences processed" + ) + + def __init__(self): + super().__init__( + id="2570e8fe-8447-43ed-84c7-70d657923231", + description="Decodes a string containing escape sequences into actual text", + categories={BlockCategory.TEXT}, + input_schema=TextDecoderBlock.Input, + output_schema=TextDecoderBlock.Output, + test_input={"text": """Hello\nWorld!\nThis is a \"quoted\" string."""}, + test_output=[ + ( + "decoded_text", + """Hello +World! 
+This is a "quoted" string.""", + ) + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + decoded_text = codecs.decode(input_data.text, "unicode_escape") + yield "decoded_text", decoded_text diff --git a/autogpt_platform/backend/backend/blocks/discord.py b/autogpt_platform/backend/backend/blocks/discord.py new file mode 100644 index 000000000000..08ba8af074cd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/discord.py @@ -0,0 +1,254 @@ +import asyncio +from typing import Literal + +import aiohttp +import discord +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +DiscordCredentials = CredentialsMetaInput[ + Literal[ProviderName.DISCORD], Literal["api_key"] +] + + +def DiscordCredentialsField() -> DiscordCredentials: + return CredentialsField(description="Discord bot token") + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="discord", + api_key=SecretStr("test_api_key"), + title="Mock Discord API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class ReadDiscordMessagesBlock(Block): + class Input(BlockSchema): + credentials: DiscordCredentials = DiscordCredentialsField() + + class Output(BlockSchema): + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( + description="The name of the channel the message was received from" + ) + username: str = SchemaField( + description="The username of the user who sent the message" + ) + + def __init__(self): + super().__init__( + id="df06086a-d5ac-4abb-9996-2ad0acb2eff7", + input_schema=ReadDiscordMessagesBlock.Input, # Assign input schema + output_schema=ReadDiscordMessagesBlock.Output, # Assign output schema + description="Reads messages from a Discord channel using a bot token.", + categories={BlockCategory.SOCIAL}, + test_input={ + "continuous_read": False, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "message_content", + "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.", + ), + ("channel_name", "general"), + ("username", "test_user"), + ], + test_mock={ + "run_bot": lambda token: asyncio.Future() # Create a Future object for mocking + }, + ) + + async def run_bot(self, token: SecretStr): + intents = discord.Intents.default() + intents.message_content = True + + client = discord.Client(intents=intents) + + self.output_data = None + self.channel_name = None + self.username = None + + @client.event + async def on_ready(): + print(f"Logged in as {client.user}") + + @client.event + async def on_message(message): + if message.author == client.user: + return + + self.output_data = message.content + self.channel_name = message.channel.name + self.username = message.author.name + + if message.attachments: + attachment = message.attachments[0] # Process the first attachment + if attachment.filename.endswith((".txt", ".py")): + async with aiohttp.ClientSession() as session: + async with session.get(attachment.url) as response: + file_content = await response.text() + self.output_data += f"\n\nFile from user: 
{attachment.filename}\nContent: {file_content}" + + await client.close() + + await client.start(token.get_secret_value()) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + while True: + for output_name, output_value in self.__run(input_data, credentials): + yield output_name, output_value + break + + def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput: + try: + loop = asyncio.get_event_loop() + future = self.run_bot(credentials.api_key) + + # If it's a Future (mock), set the result + if isinstance(future, asyncio.Future): + future.set_result( + { + "output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.", + "channel_name": "general", + "username": "test_user", + } + ) + + result = loop.run_until_complete(future) + + # For testing purposes, use the mocked result + if isinstance(result, dict): + self.output_data = result.get("output_data") + self.channel_name = result.get("channel_name") + self.username = result.get("username") + + if ( + self.output_data is None + or self.channel_name is None + or self.username is None + ): + raise ValueError("No message, channel name, or username received.") + + yield "message_content", self.output_data + yield "channel_name", self.channel_name + yield "username", self.username + + except discord.errors.LoginFailure as login_err: + raise ValueError(f"Login error occurred: {login_err}") + except Exception as e: + raise ValueError(f"An error occurred: {e}") + + +class SendDiscordMessageBlock(Block): + class Input(BlockSchema): + credentials: DiscordCredentials = DiscordCredentialsField() + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( + description="The name of the channel the message was received from" + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="The status of the operation (e.g., 'Message sent', 'Error')" + ) + + def __init__(self): + super().__init__( + id="d0822ab5-9f8a-44a3-8971-531dd0178b6b", + input_schema=SendDiscordMessageBlock.Input, # Assign input schema + output_schema=SendDiscordMessageBlock.Output, # Assign output schema + description="Sends a message to a Discord channel using a bot token.", + categories={BlockCategory.SOCIAL}, + test_input={ + "channel_name": "general", + "message_content": "Hello, Discord!", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[("status", "Message sent")], + test_mock={ + "send_message": lambda token, channel_name, message_content: asyncio.Future() + }, + test_credentials=TEST_CREDENTIALS, + ) + + async def send_message(self, token: str, channel_name: str, message_content: str): + intents = discord.Intents.default() + intents.guilds = True # Required for fetching guild/channel information + client = discord.Client(intents=intents) + + @client.event + async def on_ready(): + print(f"Logged in as {client.user}") + for guild in client.guilds: + for channel in guild.text_channels: + if channel.name == channel_name: + # Split message into chunks if it exceeds 2000 characters + for chunk in self.chunk_message(message_content): + await channel.send(chunk) + self.output_data = "Message sent" + await client.close() + return + + self.output_data = "Channel not found" + await client.close() + + await client.start(token) + + def chunk_message(self, message: str, limit: int = 2000) -> list: + """Splits a message into chunks not exceeding the Discord limit.""" + return 
[message[i : i + limit] for i in range(0, len(message), limit)] + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + loop = asyncio.get_event_loop() + future = self.send_message( + credentials.api_key.get_secret_value(), + input_data.channel_name, + input_data.message_content, + ) + + # If it's a Future (mock), set the result + if isinstance(future, asyncio.Future): + future.set_result("Message sent") + + result = loop.run_until_complete(future) + + # For testing purposes, use the mocked result + if isinstance(result, str): + self.output_data = result + + if self.output_data is None: + raise ValueError("No status message received.") + + yield "status", self.output_data + + except discord.errors.LoginFailure as login_err: + raise ValueError(f"Login error occurred: {login_err}") + except Exception as e: + raise ValueError(f"An error occurred: {e}") diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py new file mode 100644 index 000000000000..4159886cee29 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -0,0 +1,133 @@ +import smtplib +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from typing import Literal + +from pydantic import BaseModel, ConfigDict, SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + CredentialsField, + CredentialsMetaInput, + SchemaField, + UserPasswordCredentials, +) +from backend.integrations.providers import ProviderName + +TEST_CREDENTIALS = UserPasswordCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="smtp", + username=SecretStr("mock-smtp-username"), + password=SecretStr("mock-smtp-password"), + title="Mock SMTP credentials", +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} +SMTPCredentials = UserPasswordCredentials +SMTPCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.SMTP], + Literal["user_password"], +] + + +def SMTPCredentialsField() -> SMTPCredentialsInput: + return CredentialsField( + description="The SMTP integration requires a username and password.", + ) + + +class SMTPConfig(BaseModel): + smtp_server: str = SchemaField( + default="smtp.example.com", description="SMTP server address" + ) + smtp_port: int = SchemaField(default=25, description="SMTP port number") + + model_config = ConfigDict(title="SMTP Config") + + +class SendEmailBlock(Block): + class Input(BlockSchema): + to_email: str = SchemaField( + description="Recipient email address", placeholder="recipient@example.com" + ) + subject: str = SchemaField( + description="Subject of the email", placeholder="Enter the email subject" + ) + body: str = SchemaField( + description="Body of the email", placeholder="Enter the email body" + ) + config: SMTPConfig = SchemaField( + description="SMTP Config", + default=SMTPConfig(), + ) + credentials: SMTPCredentialsInput = SMTPCredentialsField() + + class Output(BlockSchema): + status: str = SchemaField(description="Status of the email sending operation") + error: str = SchemaField( + description="Error message if the email sending failed" + ) + + def __init__(self): + super().__init__( + id="4335878a-394e-4e67-adf2-919877ff49ae", + description="This block sends an email using the provided SMTP credentials.", + 
categories={BlockCategory.OUTPUT}, + input_schema=SendEmailBlock.Input, + output_schema=SendEmailBlock.Output, + test_input={ + "to_email": "recipient@example.com", + "subject": "Test Email", + "body": "This is a test email.", + "config": { + "smtp_server": "smtp.gmail.com", + "smtp_port": 25, + }, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Email sent successfully")], + test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"}, + ) + + @staticmethod + def send_email( + config: SMTPConfig, + to_email: str, + subject: str, + body: str, + credentials: SMTPCredentials, + ) -> str: + smtp_server = config.smtp_server + smtp_port = config.smtp_port + smtp_username = credentials.username.get_secret_value() + smtp_password = credentials.password.get_secret_value() + + msg = MIMEMultipart() + msg["From"] = smtp_username + msg["To"] = to_email + msg["Subject"] = subject + msg.attach(MIMEText(body, "plain")) + + with smtplib.SMTP(smtp_server, smtp_port) as server: + server.starttls() + server.login(smtp_username, smtp_password) + server.sendmail(smtp_username, to_email, msg.as_string()) + + return "Email sent successfully" + + def run( + self, input_data: Input, *, credentials: SMTPCredentials, **kwargs + ) -> BlockOutput: + yield "status", self.send_email( + config=input_data.config, + to_email=input_data.to_email, + subject=input_data.subject, + body=input_data.body, + credentials=credentials, + ) diff --git a/autogpt_platform/backend/backend/blocks/exa/_auth.py b/autogpt_platform/backend/backend/blocks/exa/_auth.py new file mode 100644 index 000000000000..7b826ef408b2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/exa/_auth.py @@ -0,0 +1,32 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +ExaCredentials = APIKeyCredentials +ExaCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.EXA], + Literal["api_key"], +] + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="exa", + api_key=SecretStr("mock-exa-api-key"), + title="Mock Exa API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def ExaCredentialsField() -> ExaCredentialsInput: + """Creates an Exa credentials input on a block.""" + return CredentialsField(description="The Exa integration requires an API Key.") diff --git a/autogpt_platform/backend/backend/blocks/exa/contents.py b/autogpt_platform/backend/backend/blocks/exa/contents.py new file mode 100644 index 000000000000..a65de53acf69 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/exa/contents.py @@ -0,0 +1,87 @@ +from typing import List + +from pydantic import BaseModel + +from backend.blocks.exa._auth import ( + ExaCredentials, + ExaCredentialsField, + ExaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class ContentRetrievalSettings(BaseModel): + text: dict = SchemaField( + description="Text content settings", + default={"maxCharacters": 1000, "includeHtmlTags": False}, + advanced=True, + ) + highlights: dict = SchemaField( + description="Highlight settings", + default={ + 
"numSentences": 3, + "highlightsPerUrl": 3, + "query": "", + }, + advanced=True, + ) + summary: dict = SchemaField( + description="Summary settings", + default={"query": ""}, + advanced=True, + ) + + +class ExaContentsBlock(Block): + class Input(BlockSchema): + credentials: ExaCredentialsInput = ExaCredentialsField() + ids: List[str] = SchemaField( + description="Array of document IDs obtained from searches", + ) + contents: ContentRetrievalSettings = SchemaField( + description="Content retrieval settings", + default=ContentRetrievalSettings(), + advanced=True, + ) + + class Output(BlockSchema): + results: list = SchemaField( + description="List of document contents", + default=[], + ) + + def __init__(self): + super().__init__( + id="c52be83f-f8cd-4180-b243-af35f986b461", + description="Retrieves document contents using Exa's contents API", + categories={BlockCategory.SEARCH}, + input_schema=ExaContentsBlock.Input, + output_schema=ExaContentsBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: ExaCredentials, **kwargs + ) -> BlockOutput: + url = "https://api.exa.ai/contents" + headers = { + "Content-Type": "application/json", + "x-api-key": credentials.api_key.get_secret_value(), + } + + payload = { + "ids": input_data.ids, + "text": input_data.contents.text, + "highlights": input_data.contents.highlights, + "summary": input_data.contents.summary, + } + + try: + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + data = response.json() + yield "results", data.get("results", []) + except Exception as e: + yield "error", str(e) + yield "results", [] diff --git a/autogpt_platform/backend/backend/blocks/exa/helpers.py b/autogpt_platform/backend/backend/blocks/exa/helpers.py new file mode 100644 index 000000000000..a392d5367fed --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/exa/helpers.py @@ -0,0 +1,54 @@ +from typing import Optional + +from pydantic import BaseModel + +from backend.data.model import SchemaField + + +class TextSettings(BaseModel): + max_characters: int = SchemaField( + default=1000, + description="Maximum number of characters to return", + placeholder="1000", + ) + include_html_tags: bool = SchemaField( + default=False, + description="Whether to include HTML tags in the text", + placeholder="False", + ) + + +class HighlightSettings(BaseModel): + num_sentences: int = SchemaField( + default=3, + description="Number of sentences per highlight", + placeholder="3", + ) + highlights_per_url: int = SchemaField( + default=3, + description="Number of highlights per URL", + placeholder="3", + ) + + +class SummarySettings(BaseModel): + query: Optional[str] = SchemaField( + default="", + description="Query string for summarization", + placeholder="Enter query", + ) + + +class ContentSettings(BaseModel): + text: TextSettings = SchemaField( + default=TextSettings(), + description="Text content settings", + ) + highlights: HighlightSettings = SchemaField( + default=HighlightSettings(), + description="Highlight settings", + ) + summary: SummarySettings = SchemaField( + default=SummarySettings(), + description="Summary settings", + ) diff --git a/autogpt_platform/backend/backend/blocks/exa/search.py b/autogpt_platform/backend/backend/blocks/exa/search.py new file mode 100644 index 000000000000..f06962e52718 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/exa/search.py @@ -0,0 +1,143 @@ +from datetime import datetime +from typing import List + +from backend.blocks.exa._auth import ( + ExaCredentials, + 
ExaCredentialsField, + ExaCredentialsInput, +) +from backend.blocks.exa.helpers import ContentSettings +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class ExaSearchBlock(Block): + class Input(BlockSchema): + credentials: ExaCredentialsInput = ExaCredentialsField() + query: str = SchemaField(description="The search query") + use_auto_prompt: bool = SchemaField( + description="Whether to use autoprompt", + default=True, + advanced=True, + ) + type: str = SchemaField( + description="Type of search", + default="", + advanced=True, + ) + category: str = SchemaField( + description="Category to search within", + default="", + advanced=True, + ) + number_of_results: int = SchemaField( + description="Number of results to return", + default=10, + advanced=True, + ) + include_domains: List[str] = SchemaField( + description="Domains to include in search", + default=[], + ) + exclude_domains: List[str] = SchemaField( + description="Domains to exclude from search", + default=[], + advanced=True, + ) + start_crawl_date: datetime = SchemaField( + description="Start date for crawled content", + ) + end_crawl_date: datetime = SchemaField( + description="End date for crawled content", + ) + start_published_date: datetime = SchemaField( + description="Start date for published content", + ) + end_published_date: datetime = SchemaField( + description="End date for published content", + ) + include_text: List[str] = SchemaField( + description="Text patterns to include", + default=[], + advanced=True, + ) + exclude_text: List[str] = SchemaField( + description="Text patterns to exclude", + default=[], + advanced=True, + ) + contents: ContentSettings = SchemaField( + description="Content retrieval settings", + default=ContentSettings(), + advanced=True, + ) + + class Output(BlockSchema): + results: list = SchemaField( + description="List of search results", + default=[], + ) + + def __init__(self): + super().__init__( + id="996cec64-ac40-4dde-982f-b0dc60a5824d", + description="Searches the web using Exa's advanced search API", + categories={BlockCategory.SEARCH}, + input_schema=ExaSearchBlock.Input, + output_schema=ExaSearchBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: ExaCredentials, **kwargs + ) -> BlockOutput: + url = "https://api.exa.ai/search" + headers = { + "Content-Type": "application/json", + "x-api-key": credentials.api_key.get_secret_value(), + } + + payload = { + "query": input_data.query, + "useAutoprompt": input_data.use_auto_prompt, + "numResults": input_data.number_of_results, + "contents": input_data.contents.dict(), + } + + date_field_mapping = { + "start_crawl_date": "startCrawlDate", + "end_crawl_date": "endCrawlDate", + "start_published_date": "startPublishedDate", + "end_published_date": "endPublishedDate", + } + + # Add dates if they exist + for input_field, api_field in date_field_mapping.items(): + value = getattr(input_data, input_field, None) + if value: + payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z") + + optional_field_mapping = { + "type": "type", + "category": "category", + "include_domains": "includeDomains", + "exclude_domains": "excludeDomains", + "include_text": "includeText", + "exclude_text": "excludeText", + } + + # Add other fields + for input_field, api_field in optional_field_mapping.items(): + value = getattr(input_data, input_field) + if value: # Only add non-empty values + payload[api_field] = value + + 
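+        # For illustration (hypothetical values), a query such as "latest AI research"
+        # restricted to arxiv.org with number_of_results=5 produces a payload roughly
+        # of the form:
+        #
+        #     {
+        #         "query": "latest AI research",
+        #         "useAutoprompt": True,
+        #         "numResults": 5,
+        #         "contents": {"text": {...}, "highlights": {...}, "summary": {...}},
+        #         "includeDomains": ["arxiv.org"],
+        #     }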
try: + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + data = response.json() + # Extract just the results array from the response + yield "results", data.get("results", []) + except Exception as e: + yield "error", str(e) + yield "results", [] diff --git a/autogpt_platform/backend/backend/blocks/exa/similar.py b/autogpt_platform/backend/backend/blocks/exa/similar.py new file mode 100644 index 000000000000..b9a44fe9023d --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/exa/similar.py @@ -0,0 +1,128 @@ +from datetime import datetime +from typing import Any, List + +from backend.blocks.exa._auth import ( + ExaCredentials, + ExaCredentialsField, + ExaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + +from .helpers import ContentSettings + + +class ExaFindSimilarBlock(Block): + class Input(BlockSchema): + credentials: ExaCredentialsInput = ExaCredentialsField() + url: str = SchemaField( + description="The url for which you would like to find similar links" + ) + number_of_results: int = SchemaField( + description="Number of results to return", + default=10, + advanced=True, + ) + include_domains: List[str] = SchemaField( + description="Domains to include in search", + default=[], + advanced=True, + ) + exclude_domains: List[str] = SchemaField( + description="Domains to exclude from search", + default=[], + advanced=True, + ) + start_crawl_date: datetime = SchemaField( + description="Start date for crawled content", + ) + end_crawl_date: datetime = SchemaField( + description="End date for crawled content", + ) + start_published_date: datetime = SchemaField( + description="Start date for published content", + ) + end_published_date: datetime = SchemaField( + description="End date for published content", + ) + include_text: List[str] = SchemaField( + description="Text patterns to include (max 1 string, up to 5 words)", + default=[], + advanced=True, + ) + exclude_text: List[str] = SchemaField( + description="Text patterns to exclude (max 1 string, up to 5 words)", + default=[], + advanced=True, + ) + contents: ContentSettings = SchemaField( + description="Content retrieval settings", + default=ContentSettings(), + advanced=True, + ) + + class Output(BlockSchema): + results: List[Any] = SchemaField( + description="List of similar documents with title, URL, published date, author, and score", + default=[], + ) + + def __init__(self): + super().__init__( + id="5e7315d1-af61-4a0c-9350-7c868fa7438a", + description="Finds similar links using Exa's findSimilar API", + categories={BlockCategory.SEARCH}, + input_schema=ExaFindSimilarBlock.Input, + output_schema=ExaFindSimilarBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: ExaCredentials, **kwargs + ) -> BlockOutput: + url = "https://api.exa.ai/findSimilar" + headers = { + "Content-Type": "application/json", + "x-api-key": credentials.api_key.get_secret_value(), + } + + payload = { + "url": input_data.url, + "numResults": input_data.number_of_results, + "contents": input_data.contents.dict(), + } + + optional_field_mapping = { + "include_domains": "includeDomains", + "exclude_domains": "excludeDomains", + "include_text": "includeText", + "exclude_text": "excludeText", + } + + # Add optional fields if they have values + for input_field, api_field in optional_field_mapping.items(): + value = getattr(input_data, input_field) + if value: # Only 
add non-empty values + payload[api_field] = value + + date_field_mapping = { + "start_crawl_date": "startCrawlDate", + "end_crawl_date": "endCrawlDate", + "start_published_date": "startPublishedDate", + "end_published_date": "endPublishedDate", + } + + # Add dates if they exist + for input_field, api_field in date_field_mapping.items(): + value = getattr(input_data, input_field, None) + if value: + payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z") + + try: + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + data = response.json() + yield "results", data.get("results", []) + except Exception as e: + yield "error", str(e) + yield "results", [] diff --git a/autogpt_platform/backend/backend/blocks/fal/_auth.py b/autogpt_platform/backend/backend/blocks/fal/_auth.py new file mode 100644 index 000000000000..5d02186e5797 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/fal/_auth.py @@ -0,0 +1,35 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +FalCredentials = APIKeyCredentials +FalCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.FAL], + Literal["api_key"], +] + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="fal", + api_key=SecretStr("mock-fal-api-key"), + title="Mock FAL API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def FalCredentialsField() -> FalCredentialsInput: + """ + Creates a FAL credentials input on a block. + """ + return CredentialsField( + description="The FAL integration can be used with an API Key.", + ) diff --git a/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py new file mode 100644 index 000000000000..e52e2ba5b245 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/fal/ai_video_generator.py @@ -0,0 +1,199 @@ +import logging +import time +from enum import Enum +from typing import Any + +import httpx + +from backend.blocks.fal._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + FalCredentials, + FalCredentialsField, + FalCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +logger = logging.getLogger(__name__) + + +class FalModel(str, Enum): + MOCHI = "fal-ai/mochi-v1" + LUMA = "fal-ai/luma-dream-machine" + + +class AIVideoGeneratorBlock(Block): + class Input(BlockSchema): + prompt: str = SchemaField( + description="Description of the video to generate.", + placeholder="A dog running in a field.", + ) + model: FalModel = SchemaField( + title="FAL Model", + default=FalModel.MOCHI, + description="The FAL model to use for video generation.", + ) + credentials: FalCredentialsInput = FalCredentialsField() + + class Output(BlockSchema): + video_url: str = SchemaField(description="The URL of the generated video.") + error: str = SchemaField( + description="Error message if video generation failed." 
+ ) + logs: list[str] = SchemaField( + description="Generation progress logs.", optional=True + ) + + def __init__(self): + super().__init__( + id="530cf046-2ce0-4854-ae2c-659db17c7a46", + description="Generate videos using FAL AI models.", + categories={BlockCategory.AI}, + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "prompt": "A dog running in a field.", + "model": FalModel.MOCHI, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("video_url", "https://fal.media/files/example/video.mp4")], + test_mock={ + "generate_video": lambda *args, **kwargs: "https://fal.media/files/example/video.mp4" + }, + ) + + def _get_headers(self, api_key: str) -> dict[str, str]: + """Get headers for FAL API requests.""" + return { + "Authorization": f"Key {api_key}", + "Content-Type": "application/json", + } + + def _submit_request( + self, url: str, headers: dict[str, str], data: dict[str, Any] + ) -> dict[str, Any]: + """Submit a request to the FAL API.""" + try: + response = httpx.post(url, headers=headers, json=data) + response.raise_for_status() + return response.json() + except httpx.HTTPError as e: + logger.error(f"FAL API request failed: {str(e)}") + raise RuntimeError(f"Failed to submit request: {str(e)}") + + def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]: + """Poll the status endpoint until completion or failure.""" + try: + response = httpx.get(status_url, headers=headers) + response.raise_for_status() + return response.json() + except httpx.HTTPError as e: + logger.error(f"Failed to get status: {str(e)}") + raise RuntimeError(f"Failed to get status: {str(e)}") + + def generate_video(self, input_data: Input, credentials: FalCredentials) -> str: + """Generate video using the specified FAL model.""" + base_url = "https://queue.fal.run" + api_key = credentials.api_key.get_secret_value() + headers = self._get_headers(api_key) + + # Submit generation request + submit_url = f"{base_url}/{input_data.model.value}" + submit_data = {"prompt": input_data.prompt} + + seen_logs = set() + + try: + # Submit request to queue + submit_response = httpx.post(submit_url, headers=headers, json=submit_data) + submit_response.raise_for_status() + request_data = submit_response.json() + + # Get request_id and urls from initial response + request_id = request_data.get("request_id") + status_url = request_data.get("status_url") + result_url = request_data.get("response_url") + + if not all([request_id, status_url, result_url]): + raise ValueError("Missing required data in submission response") + + # Poll for status with exponential backoff + max_attempts = 30 + attempt = 0 + base_wait_time = 5 + + while attempt < max_attempts: + status_response = httpx.get(f"{status_url}?logs=1", headers=headers) + status_response.raise_for_status() + status_data = status_response.json() + + # Process new logs only + logs = status_data.get("logs", []) + if logs and isinstance(logs, list): + for log in logs: + if isinstance(log, dict): + # Create a unique key for this log entry + log_key = ( + f"{log.get('timestamp', '')}-{log.get('message', '')}" + ) + if log_key not in seen_logs: + seen_logs.add(log_key) + message = log.get("message", "") + if message: + logger.debug( + f"[FAL Generation] [{log.get('level', 'INFO')}] [{log.get('source', '')}] [{log.get('timestamp', '')}] {message}" + ) + + status = status_data.get("status") + if status == "COMPLETED": + # Get the final result + result_response = httpx.get(result_url, 
headers=headers) + result_response.raise_for_status() + result_data = result_response.json() + + if "video" not in result_data or not isinstance( + result_data["video"], dict + ): + raise ValueError("Invalid response format - missing video data") + + video_url = result_data["video"].get("url") + if not video_url: + raise ValueError("No video URL in response") + + return video_url + + elif status == "FAILED": + error_msg = status_data.get("error", "No error details provided") + raise RuntimeError(f"Video generation failed: {error_msg}") + elif status == "IN_QUEUE": + position = status_data.get("queue_position", "unknown") + logger.debug( + f"[FAL Generation] Status: In queue, position: {position}" + ) + elif status == "IN_PROGRESS": + logger.debug( + "[FAL Generation] Status: Request is being processed..." + ) + else: + logger.info(f"[FAL Generation] Status: Unknown status: {status}") + + wait_time = min(base_wait_time * (2**attempt), 60) # Cap at 60 seconds + time.sleep(wait_time) + attempt += 1 + + raise RuntimeError("Maximum polling attempts reached") + + except httpx.HTTPError as e: + raise RuntimeError(f"API request failed: {str(e)}") + + def run( + self, input_data: Input, *, credentials: FalCredentials, **kwargs + ) -> BlockOutput: + try: + video_url = self.generate_video(input_data, credentials) + yield "video_url", video_url + except Exception as e: + error_message = str(e) + yield "error", error_message diff --git a/autogpt_platform/backend/backend/blocks/github/_api.py b/autogpt_platform/backend/backend/blocks/github/_api.py new file mode 100644 index 000000000000..92436e865292 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/_api.py @@ -0,0 +1,49 @@ +from urllib.parse import urlparse + +from backend.blocks.github._auth import ( + GithubCredentials, + GithubFineGrainedAPICredentials, +) +from backend.util.request import Requests + + +def _convert_to_api_url(url: str) -> str: + """ + Converts a standard GitHub URL to the corresponding GitHub API URL. + Handles repository URLs, issue URLs, pull request URLs, and more. 
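+ Example: "https://github.com/owner/repo/issues/1" -> "https://api.github.com/repos/owner/repo/issues/1"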
+ """ + parsed_url = urlparse(url) + path_parts = parsed_url.path.strip("/").split("/") + + if len(path_parts) >= 2: + owner, repo = path_parts[0], path_parts[1] + api_base = f"https://api.github.com/repos/{owner}/{repo}" + + if len(path_parts) > 2: + additional_path = "/".join(path_parts[2:]) + api_url = f"{api_base}/{additional_path}" + else: + # Repository base URL + api_url = api_base + else: + raise ValueError("Invalid GitHub URL format.") + + return api_url + + +def _get_headers(credentials: GithubCredentials) -> dict[str, str]: + return { + "Authorization": credentials.auth_header(), + "Accept": "application/vnd.github.v3+json", + } + + +def get_api( + credentials: GithubCredentials | GithubFineGrainedAPICredentials, + convert_urls: bool = True, +) -> Requests: + return Requests( + trusted_origins=["https://api.github.com", "https://github.com"], + extra_url_validator=_convert_to_api_url if convert_urls else None, + extra_headers=_get_headers(credentials), + ) diff --git a/autogpt_platform/backend/backend/blocks/github/_auth.py b/autogpt_platform/backend/backend/blocks/github/_auth.py new file mode 100644 index 000000000000..3109024abf0e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/_auth.py @@ -0,0 +1,82 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + OAuth2Credentials, +) +from backend.integrations.providers import ProviderName +from backend.util.settings import Secrets + +secrets = Secrets() +GITHUB_OAUTH_IS_CONFIGURED = bool( + secrets.github_client_id and secrets.github_client_secret +) + +GithubCredentials = APIKeyCredentials | OAuth2Credentials +GithubCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.GITHUB], + Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"], +] + +GithubFineGrainedAPICredentials = APIKeyCredentials +GithubFineGrainedAPICredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.GITHUB], Literal["api_key"] +] + + +def GithubCredentialsField(scope: str) -> GithubCredentialsInput: + """ + Creates a GitHub credentials input on a block. + + Params: + scope: The authorization scope needed for the block to work. 
([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes)) + """ # noqa + return CredentialsField( + required_scopes={scope}, + description="The GitHub integration can be used with OAuth, " + "or any API key with sufficient permissions for the blocks it is used on.", + ) + + +def GithubFineGrainedAPICredentialsField( + scope: str, +) -> GithubFineGrainedAPICredentialsInput: + return CredentialsField( + required_scopes={scope}, + description="The GitHub integration can be used with OAuth, " + "or any API key with sufficient permissions for the blocks it is used on.", + ) + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="github", + api_key=SecretStr("mock-github-api-key"), + title="Mock GitHub API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + +TEST_FINE_GRAINED_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="github", + api_key=SecretStr("mock-github-api-key"), + title="Mock GitHub API key", + expires_at=None, +) + +TEST_FINE_GRAINED_CREDENTIALS_INPUT = { + "provider": TEST_FINE_GRAINED_CREDENTIALS.provider, + "id": TEST_FINE_GRAINED_CREDENTIALS.id, + "type": TEST_FINE_GRAINED_CREDENTIALS.type, + "title": TEST_FINE_GRAINED_CREDENTIALS.type, +} diff --git a/autogpt_platform/backend/backend/blocks/github/checks.py b/autogpt_platform/backend/backend/blocks/github/checks.py new file mode 100644 index 000000000000..6d9ac1897c05 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/checks.py @@ -0,0 +1,356 @@ +from enum import Enum +from typing import Optional + +from pydantic import BaseModel + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._api import get_api +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubCredentials, + GithubCredentialsField, + GithubCredentialsInput, +) + + +# queued, in_progress, completed, waiting, requested, pending +class ChecksStatus(Enum): + QUEUED = "queued" + IN_PROGRESS = "in_progress" + COMPLETED = "completed" + WAITING = "waiting" + REQUESTED = "requested" + PENDING = "pending" + + +class ChecksConclusion(Enum): + SUCCESS = "success" + FAILURE = "failure" + NEUTRAL = "neutral" + CANCELLED = "cancelled" + TIMED_OUT = "timed_out" + ACTION_REQUIRED = "action_required" + SKIPPED = "skipped" + + +class GithubCreateCheckRunBlock(Block): + """Block for creating a new check run on a GitHub repository.""" + + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo:status") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + name: str = SchemaField( + description="The name of the check run (e.g., 'code-coverage')", + ) + head_sha: str = SchemaField( + description="The SHA of the commit to check", + ) + status: ChecksStatus = SchemaField( + description="Current status of the check run", + default=ChecksStatus.QUEUED, + ) + conclusion: Optional[ChecksConclusion] = SchemaField( + description="The final conclusion of the check (required if status is completed)", + default=None, + ) + details_url: str = SchemaField( + description="The URL for the full details of the check", + default="", + ) + output_title: str = SchemaField( + 
description="Title of the check run output", + default="", + ) + output_summary: str = SchemaField( + description="Summary of the check run output", + default="", + ) + output_text: str = SchemaField( + description="Detailed text of the check run output", + default="", + ) + + class Output(BlockSchema): + class CheckRunResult(BaseModel): + id: int + html_url: str + status: str + + check_run: CheckRunResult = SchemaField( + description="Details of the created check run" + ) + error: str = SchemaField( + description="Error message if check run creation failed" + ) + + def __init__(self): + super().__init__( + id="2f45e89a-3b7d-4f22-b89e-6c4f5c7e1234", + description="Creates a new check run for a specific commit in a GitHub repository", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubCreateCheckRunBlock.Input, + output_schema=GithubCreateCheckRunBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "name": "test-check", + "head_sha": "ce587453ced02b1526dfb4cb910479d431683101", + "status": ChecksStatus.COMPLETED.value, + "conclusion": ChecksConclusion.SUCCESS.value, + "output_title": "Test Results", + "output_summary": "All tests passed", + "credentials": TEST_CREDENTIALS_INPUT, + }, + # requires a github app not available to oauth in our current system + disabled=True, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "check_run", + { + "id": 4, + "html_url": "https://github.com/owner/repo/runs/4", + "status": "completed", + }, + ), + ], + test_mock={ + "create_check_run": lambda *args, **kwargs: { + "id": 4, + "html_url": "https://github.com/owner/repo/runs/4", + "status": "completed", + } + }, + ) + + @staticmethod + def create_check_run( + credentials: GithubCredentials, + repo_url: str, + name: str, + head_sha: str, + status: ChecksStatus, + conclusion: Optional[ChecksConclusion] = None, + details_url: Optional[str] = None, + output_title: Optional[str] = None, + output_summary: Optional[str] = None, + output_text: Optional[str] = None, + ) -> dict: + api = get_api(credentials) + + class CheckRunData(BaseModel): + name: str + head_sha: str + status: str + conclusion: Optional[str] = None + details_url: Optional[str] = None + output: Optional[dict[str, str]] = None + + data = CheckRunData( + name=name, + head_sha=head_sha, + status=status.value, + ) + + if conclusion: + data.conclusion = conclusion.value + + if details_url: + data.details_url = details_url + + if output_title or output_summary or output_text: + output_data = { + "title": output_title or "", + "summary": output_summary or "", + "text": output_text or "", + } + data.output = output_data + + check_runs_url = f"{repo_url}/check-runs" + response = api.post(check_runs_url) + result = response.json() + + return { + "id": result["id"], + "html_url": result["html_url"], + "status": result["status"], + } + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + result = self.create_check_run( + credentials=credentials, + repo_url=input_data.repo_url, + name=input_data.name, + head_sha=input_data.head_sha, + status=input_data.status, + conclusion=input_data.conclusion, + details_url=input_data.details_url, + output_title=input_data.output_title, + output_summary=input_data.output_summary, + output_text=input_data.output_text, + ) + yield "check_run", result + except Exception as e: + yield "error", str(e) + + +class GithubUpdateCheckRunBlock(Block): + """Block for updating an existing check run on a GitHub 
repository.""" + + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo:status") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + check_run_id: int = SchemaField( + description="The ID of the check run to update", + ) + status: ChecksStatus = SchemaField( + description="New status of the check run", + ) + conclusion: ChecksConclusion = SchemaField( + description="The final conclusion of the check (required if status is completed)", + ) + output_title: Optional[str] = SchemaField( + description="New title of the check run output", + default=None, + ) + output_summary: Optional[str] = SchemaField( + description="New summary of the check run output", + default=None, + ) + output_text: Optional[str] = SchemaField( + description="New detailed text of the check run output", + default=None, + ) + + class Output(BlockSchema): + class CheckRunResult(BaseModel): + id: int + html_url: str + status: str + conclusion: Optional[str] + + check_run: CheckRunResult = SchemaField( + description="Details of the updated check run" + ) + error: str = SchemaField(description="Error message if check run update failed") + + def __init__(self): + super().__init__( + id="8a23c567-9d01-4e56-b789-0c12d3e45678", # Generated UUID + description="Updates an existing check run in a GitHub repository", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubUpdateCheckRunBlock.Input, + output_schema=GithubUpdateCheckRunBlock.Output, + # requires a github app not available to oauth in our current system + disabled=True, + test_input={ + "repo_url": "https://github.com/owner/repo", + "check_run_id": 4, + "status": ChecksStatus.COMPLETED.value, + "conclusion": ChecksConclusion.SUCCESS.value, + "output_title": "Updated Results", + "output_summary": "All tests passed after retry", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "check_run", + { + "id": 4, + "html_url": "https://github.com/owner/repo/runs/4", + "status": "completed", + "conclusion": "success", + }, + ), + ], + test_mock={ + "update_check_run": lambda *args, **kwargs: { + "id": 4, + "html_url": "https://github.com/owner/repo/runs/4", + "status": "completed", + "conclusion": "success", + } + }, + ) + + @staticmethod + def update_check_run( + credentials: GithubCredentials, + repo_url: str, + check_run_id: int, + status: ChecksStatus, + conclusion: Optional[ChecksConclusion] = None, + output_title: Optional[str] = None, + output_summary: Optional[str] = None, + output_text: Optional[str] = None, + ) -> dict: + api = get_api(credentials) + + class UpdateCheckRunData(BaseModel): + status: str + conclusion: Optional[str] = None + output: Optional[dict[str, str]] = None + + data = UpdateCheckRunData( + status=status.value, + ) + + if conclusion: + data.conclusion = conclusion.value + + if output_title or output_summary or output_text: + output_data = { + "title": output_title or "", + "summary": output_summary or "", + "text": output_text or "", + } + data.output = output_data + + check_run_url = f"{repo_url}/check-runs/{check_run_id}" + response = api.patch(check_run_url) + result = response.json() + + return { + "id": result["id"], + "html_url": result["html_url"], + "status": result["status"], + "conclusion": result.get("conclusion"), + } + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + result = 
self.update_check_run( + credentials=credentials, + repo_url=input_data.repo_url, + check_run_id=input_data.check_run_id, + status=input_data.status, + conclusion=input_data.conclusion, + output_title=input_data.output_title, + output_summary=input_data.output_summary, + output_text=input_data.output_text, + ) + yield "check_run", result + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json b/autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json new file mode 100644 index 000000000000..7d8f8efbe054 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/example_payloads/pull_request.synchronize.json @@ -0,0 +1,700 @@ +{ + "action": "synchronize", + "number": 8358, + "pull_request": { + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358", + "id": 2128918491, + "node_id": "PR_kwDOJKSTjM5-5Lfb", + "html_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358", + "diff_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.diff", + "patch_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.patch", + "issue_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358", + "number": 8358, + "state": "open", + "locked": false, + "title": "feat(platform, blocks): Webhook-triggered blocks", + "user": { + "login": "Pwuts", + "id": 12185583, + "node_id": "MDQ6VXNlcjEyMTg1NTgz", + "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Pwuts", + "html_url": "https://github.com/Pwuts", + "followers_url": "https://api.github.com/users/Pwuts/followers", + "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", + "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", + "organizations_url": "https://api.github.com/users/Pwuts/orgs", + "repos_url": "https://api.github.com/users/Pwuts/repos", + "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", + "received_events_url": "https://api.github.com/users/Pwuts/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + }, + "body": "- Resolves #8352\r\n\r\n## Changes 🏗️\r\n\r\n- feat(blocks): Add GitHub Pull Request Trigger block\r\n\r\n### feat(platform): Add support for Webhook-triggered blocks\r\n- ⚠️ Add `PLATFORM_BASE_URL` setting\r\n\r\n- Add webhook config option and `BlockType.WEBHOOK` to `Block`\r\n - Add check to `Block.__init__` to enforce type and shape of webhook event filter\r\n - Add check to `Block.__init__` to enforce `payload` input on webhook blocks\r\n\r\n- Add `Webhook` model + CRUD functions in `backend.data.integrations` to represent webhooks created by our system\r\n - Add `IntegrationWebhook` to DB schema + reference `AgentGraphNode.webhook_id`\r\n - Add `set_node_webhook(..)` in `backend.data.graph`\r\n\r\n- Add webhook-related endpoints:\r\n - `POST /integrations/{provider}/webhooks/{webhook_id}/ingress` endpoint, to receive webhook payloads, and for all associated nodes create graph executions\r\n - Add `Node.is_triggered_by_event_type(..)` helper method\r\n - `POST /integrations/{provider}/webhooks/{webhook_id}/ping` endpoint, to allow testing a webhook\r\n - Add `WebhookEvent` + pub/sub functions in 
`backend.data.integrations`\r\n\r\n- Add `backend.integrations.webhooks` module, including:\r\n - `graph_lifecycle_hooks`, e.g. `on_graph_activate(..)`, to handle corresponding webhook creation etc.\r\n - Add calls to these hooks in the graph create/update endpoints\r\n - `BaseWebhooksManager` + `GithubWebhooksManager` to handle creating + registering, removing + deregistering, and retrieving existing webhooks, and validating incoming payloads\r\n\r\n### Other improvements\r\n- fix(blocks): Allow having an input and output pin with the same name\r\n- feat(blocks): Allow hiding inputs (e.g. `payload`) with `SchemaField(hidden=True)`\r\n- feat(backend/data): Add `graph_id`, `graph_version` to `Node`; `user_id` to `GraphMeta`\r\n - Add `Creatable` versions of `Node`, `GraphMeta` and `Graph` without these properties\r\n - Add `graph_from_creatable(..)` helper function in `backend.data.graph`\r\n- refactor(backend/data): Make `RedisEventQueue` generic\r\n- refactor(frontend): Deduplicate & clean up code for different block types in `generateInputHandles(..)` in `CustomNode`\r\n- refactor(backend): Remove unused subgraph functionality\r\n\r\n## How it works\r\n- When a graph is created, the `on_graph_activate` and `on_node_activate` hooks are called on the graph and its nodes\r\n- If a webhook-triggered node has presets for all the relevant inputs, `on_node_activate` will get/create a suitable webhook and link it by setting `AgentGraphNode.webhook_id`\r\n - `on_node_activate` uses `webhook_manager.get_suitable_webhook(..)`, which tries to find a suitable webhook (with matching requirements) or creates it if none exists yet\r\n- When a graph is deactivated (in favor of a newer/other version) or deleted, `on_graph_deactivate` and `on_node_deactivate` are called on the graph and its nodes to clean up webhooks that are no longer in use\r\n- When a valid webhook payload is received, two things happen:\r\n 1. It is broadcast on the Redis channel `webhooks/{webhook_id}/{event_type}`\r\n 2. Graph executions are initiated for all nodes triggered by this webhook\r\n\r\n## TODO\r\n- [ ] #8537\r\n- [x] #8538\r\n- [ ] #8357\r\n- [ ] ~~#8554~~ can be done in a follow-up PR\r\n- [ ] Test test test!\r\n- [ ] Add note on `repo` input of webhook blocks that the credentials used must have the right permissions for the given organization/repo\r\n- [x] Implement proper detection and graceful handling of webhook creation failing due to insufficient permissions. This should give a clear message to the user to e.g. \"give the app access to this organization in your settings\".\r\n- [ ] Nice-to-have: make a button on webhook blocks to trigger a ping and check its result. 
The API endpoints for this is already implemented.", + "created_at": "2024-10-16T22:13:47Z", + "updated_at": "2024-11-11T18:34:54Z", + "closed_at": null, + "merged_at": null, + "merge_commit_sha": "cbfd0cdd8db52cdd5a3b7ce088fc0ab4617a652e", + "assignee": { + "login": "Pwuts", + "id": 12185583, + "node_id": "MDQ6VXNlcjEyMTg1NTgz", + "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Pwuts", + "html_url": "https://github.com/Pwuts", + "followers_url": "https://api.github.com/users/Pwuts/followers", + "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", + "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", + "organizations_url": "https://api.github.com/users/Pwuts/orgs", + "repos_url": "https://api.github.com/users/Pwuts/repos", + "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", + "received_events_url": "https://api.github.com/users/Pwuts/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + }, + "assignees": [ + { + "login": "Pwuts", + "id": 12185583, + "node_id": "MDQ6VXNlcjEyMTg1NTgz", + "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Pwuts", + "html_url": "https://github.com/Pwuts", + "followers_url": "https://api.github.com/users/Pwuts/followers", + "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", + "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", + "organizations_url": "https://api.github.com/users/Pwuts/orgs", + "repos_url": "https://api.github.com/users/Pwuts/repos", + "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", + "received_events_url": "https://api.github.com/users/Pwuts/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + } + ], + "requested_reviewers": [ + { + "login": "kcze", + "id": 34861343, + "node_id": "MDQ6VXNlcjM0ODYxMzQz", + "avatar_url": "https://avatars.githubusercontent.com/u/34861343?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/kcze", + "html_url": "https://github.com/kcze", + "followers_url": "https://api.github.com/users/kcze/followers", + "following_url": "https://api.github.com/users/kcze/following{/other_user}", + "gists_url": "https://api.github.com/users/kcze/gists{/gist_id}", + "starred_url": "https://api.github.com/users/kcze/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/kcze/subscriptions", + "organizations_url": "https://api.github.com/users/kcze/orgs", + "repos_url": "https://api.github.com/users/kcze/repos", + "events_url": "https://api.github.com/users/kcze/events{/privacy}", + "received_events_url": "https://api.github.com/users/kcze/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + } + ], + "requested_teams": [ + { + "name": "DevOps", + "id": 9547361, + "node_id": "T_kwDOB8roIc4Aka5h", + "slug": "devops", + "description": "", + "privacy": "closed", + "notification_setting": "notifications_enabled", + "url": "https://api.github.com/organizations/130738209/team/9547361", + "html_url": 
"https://github.com/orgs/Significant-Gravitas/teams/devops", + "members_url": "https://api.github.com/organizations/130738209/team/9547361/members{/member}", + "repositories_url": "https://api.github.com/organizations/130738209/team/9547361/repos", + "permission": "pull", + "parent": null + } + ], + "labels": [ + { + "id": 5272676214, + "node_id": "LA_kwDOJKSTjM8AAAABOkandg", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/documentation", + "name": "documentation", + "color": "0075ca", + "default": true, + "description": "Improvements or additions to documentation" + }, + { + "id": 5410633769, + "node_id": "LA_kwDOJKSTjM8AAAABQn-4KQ", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/size/xl", + "name": "size/xl", + "color": "E751DD", + "default": false, + "description": "" + }, + { + "id": 6892322271, + "node_id": "LA_kwDOJKSTjM8AAAABmtB93w", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/Review%20effort%20[1-5]:%204", + "name": "Review effort [1-5]: 4", + "color": "d1bcf9", + "default": false, + "description": null + }, + { + "id": 7218433025, + "node_id": "LA_kwDOJKSTjM8AAAABrkCMAQ", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/frontend", + "name": "platform/frontend", + "color": "033C07", + "default": false, + "description": "AutoGPT Platform - Front end" + }, + { + "id": 7219356193, + "node_id": "LA_kwDOJKSTjM8AAAABrk6iIQ", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/backend", + "name": "platform/backend", + "color": "ededed", + "default": false, + "description": "AutoGPT Platform - Back end" + }, + { + "id": 7515330106, + "node_id": "LA_kwDOJKSTjM8AAAABv_LWOg", + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/blocks", + "name": "platform/blocks", + "color": "eb5757", + "default": false, + "description": null + } + ], + "milestone": null, + "draft": false, + "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits", + "review_comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments", + "review_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments", + "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0", + "head": { + "label": "Significant-Gravitas:reinier/open-1961-implement-github-on-pull-request-block", + "ref": "reinier/open-1961-implement-github-on-pull-request-block", + "sha": "8f708a2b60463eec10747d8f45dead35b5a45bd0", + "user": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", + "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", + "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", + "organizations_url": 
"https://api.github.com/users/Significant-Gravitas/orgs", + "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", + "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", + "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", + "type": "Organization", + "user_view_type": "public", + "site_admin": false + }, + "repo": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", + "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", + "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", + "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", + "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", + "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", + "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", + "type": "Organization", + "user_view_type": "public", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. 
Our mission is to provide the tools, so that you can focus on what matters.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks", + "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams", + "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks", + "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}", + "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events", + "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}", + "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}", + "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags", + "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages", + "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers", + "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors", + "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers", + "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription", + "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}", + "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges", + "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads", + "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}", + "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}", + "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}", + "deployments_url": 
"https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-11-11T18:16:29Z", + "pushed_at": "2024-11-11T18:34:52Z", + "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git", + "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git", + "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git", + "svn_url": "https://github.com/Significant-Gravitas/AutoGPT", + "homepage": "https://agpt.co", + "size": 181894, + "stargazers_count": 168203, + "watchers_count": 168203, + "language": "Python", + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "has_discussions": true, + "forks_count": 44376, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 189, + "license": { + "key": "other", + "name": "Other", + "spdx_id": "NOASSERTION", + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "allow_forking": true, + "is_template": false, + "web_commit_signoff_required": false, + "topics": [ + "ai", + "artificial-intelligence", + "autonomous-agents", + "gpt-4", + "openai", + "python" + ], + "visibility": "public", + "forks": 44376, + "open_issues": 189, + "watchers": 168203, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": false, + "allow_rebase_merge": false, + "allow_auto_merge": true, + "delete_branch_on_merge": true, + "allow_update_branch": true, + "use_squash_pr_title_as_default": true, + "squash_merge_commit_message": "COMMIT_MESSAGES", + "squash_merge_commit_title": "PR_TITLE", + "merge_commit_message": "BLANK", + "merge_commit_title": "PR_TITLE" + } + }, + "base": { + "label": "Significant-Gravitas:dev", + "ref": "dev", + "sha": "0b5b95eff5e18c1e162d2b30b66a7be2bed1cbc2", + "user": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", + "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", + "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", + "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", + "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", + "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", + "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", + "type": "Organization", + "user_view_type": "public", + "site_admin": false + }, + "repo": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", + "following_url": 
"https://api.github.com/users/Significant-Gravitas/following{/other_user}", + "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", + "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", + "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", + "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", + "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", + "type": "Organization", + "user_view_type": "public", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks", + "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams", + "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks", + "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}", + "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events", + "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}", + "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}", + "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags", + "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages", + "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers", + "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors", + "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers", + "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription", + "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}", + "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}", + "merges_url": 
"https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges", + "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads", + "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}", + "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}", + "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}", + "deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-11-11T18:16:29Z", + "pushed_at": "2024-11-11T18:34:52Z", + "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git", + "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git", + "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git", + "svn_url": "https://github.com/Significant-Gravitas/AutoGPT", + "homepage": "https://agpt.co", + "size": 181894, + "stargazers_count": 168203, + "watchers_count": 168203, + "language": "Python", + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "has_discussions": true, + "forks_count": 44376, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 189, + "license": { + "key": "other", + "name": "Other", + "spdx_id": "NOASSERTION", + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "allow_forking": true, + "is_template": false, + "web_commit_signoff_required": false, + "topics": [ + "ai", + "artificial-intelligence", + "autonomous-agents", + "gpt-4", + "openai", + "python" + ], + "visibility": "public", + "forks": 44376, + "open_issues": 189, + "watchers": 168203, + "default_branch": "master", + "allow_squash_merge": true, + "allow_merge_commit": false, + "allow_rebase_merge": false, + "allow_auto_merge": true, + "delete_branch_on_merge": true, + "allow_update_branch": true, + "use_squash_pr_title_as_default": true, + "squash_merge_commit_message": "COMMIT_MESSAGES", + "squash_merge_commit_title": "PR_TITLE", + "merge_commit_message": "BLANK", + "merge_commit_title": "PR_TITLE" + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358" + }, + "html": { + "href": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358" + }, + "issue": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358" + }, + "comments": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments" + }, + "review_comments": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0" + } + }, + "author_association": "MEMBER", + "auto_merge": null, + "active_lock_reason": 
null, + "merged": false, + "mergeable": null, + "rebaseable": null, + "mergeable_state": "unknown", + "merged_by": null, + "comments": 12, + "review_comments": 29, + "maintainer_can_modify": false, + "commits": 62, + "additions": 1674, + "deletions": 331, + "changed_files": 36 + }, + "before": "f40aef87672203f47bbbd53f83fae0964c5624da", + "after": "8f708a2b60463eec10747d8f45dead35b5a45bd0", + "repository": { + "id": 614765452, + "node_id": "R_kgDOJKSTjA", + "name": "AutoGPT", + "full_name": "Significant-Gravitas/AutoGPT", + "private": false, + "owner": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Significant-Gravitas", + "html_url": "https://github.com/Significant-Gravitas", + "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", + "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", + "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", + "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", + "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", + "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", + "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", + "type": "Organization", + "user_view_type": "public", + "site_admin": false + }, + "html_url": "https://github.com/Significant-Gravitas/AutoGPT", + "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. 
Our mission is to provide the tools, so that you can focus on what matters.", + "fork": false, + "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT", + "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks", + "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams", + "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks", + "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}", + "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events", + "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}", + "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}", + "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags", + "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}", + "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages", + "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers", + "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors", + "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers", + "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription", + "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}", + "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges", + "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads", + "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}", + "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}", + "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}", + "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}", + "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}", + "deployments_url": 
"https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments", + "created_at": "2023-03-16T09:21:07Z", + "updated_at": "2024-11-11T18:16:29Z", + "pushed_at": "2024-11-11T18:34:52Z", + "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git", + "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git", + "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git", + "svn_url": "https://github.com/Significant-Gravitas/AutoGPT", + "homepage": "https://agpt.co", + "size": 181894, + "stargazers_count": 168203, + "watchers_count": 168203, + "language": "Python", + "has_issues": true, + "has_projects": true, + "has_downloads": true, + "has_wiki": true, + "has_pages": false, + "has_discussions": true, + "forks_count": 44376, + "mirror_url": null, + "archived": false, + "disabled": false, + "open_issues_count": 189, + "license": { + "key": "other", + "name": "Other", + "spdx_id": "NOASSERTION", + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "allow_forking": true, + "is_template": false, + "web_commit_signoff_required": false, + "topics": [ + "ai", + "artificial-intelligence", + "autonomous-agents", + "gpt-4", + "openai", + "python" + ], + "visibility": "public", + "forks": 44376, + "open_issues": 189, + "watchers": 168203, + "default_branch": "master", + "custom_properties": { + + } + }, + "organization": { + "login": "Significant-Gravitas", + "id": 130738209, + "node_id": "O_kgDOB8roIQ", + "url": "https://api.github.com/orgs/Significant-Gravitas", + "repos_url": "https://api.github.com/orgs/Significant-Gravitas/repos", + "events_url": "https://api.github.com/orgs/Significant-Gravitas/events", + "hooks_url": "https://api.github.com/orgs/Significant-Gravitas/hooks", + "issues_url": "https://api.github.com/orgs/Significant-Gravitas/issues", + "members_url": "https://api.github.com/orgs/Significant-Gravitas/members{/member}", + "public_members_url": "https://api.github.com/orgs/Significant-Gravitas/public_members{/member}", + "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", + "description": "" + }, + "enterprise": { + "id": 149607, + "slug": "significant-gravitas", + "name": "Significant Gravitas", + "node_id": "E_kgDOAAJIZw", + "avatar_url": "https://avatars.githubusercontent.com/b/149607?v=4", + "description": "The creators of AutoGPT", + "website_url": "discord.gg/autogpt", + "html_url": "https://github.com/enterprises/significant-gravitas", + "created_at": "2024-04-18T17:43:53Z", + "updated_at": "2024-10-23T16:59:55Z" + }, + "sender": { + "login": "Pwuts", + "id": 12185583, + "node_id": "MDQ6VXNlcjEyMTg1NTgz", + "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/Pwuts", + "html_url": "https://github.com/Pwuts", + "followers_url": "https://api.github.com/users/Pwuts/followers", + "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", + "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", + "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", + "organizations_url": "https://api.github.com/users/Pwuts/orgs", + "repos_url": "https://api.github.com/users/Pwuts/repos", + "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", + "received_events_url": "https://api.github.com/users/Pwuts/received_events", + "type": "User", + "user_view_type": "public", + "site_admin": false + } +} \ No newline at end of file diff --git 
a/autogpt_platform/backend/backend/blocks/github/issues.py b/autogpt_platform/backend/backend/blocks/github/issues.py new file mode 100644 index 000000000000..ff1d0a41ad0e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/issues.py @@ -0,0 +1,581 @@ +from urllib.parse import urlparse + +from typing_extensions import TypedDict + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._api import get_api +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubCredentials, + GithubCredentialsField, + GithubCredentialsInput, +) + + +def is_github_url(url: str) -> bool: + return urlparse(url).netloc == "github.com" + + +# --8<-- [start:GithubCommentBlockExample] +class GithubCommentBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue or pull request", + placeholder="https://github.com/owner/repo/issues/1", + ) + comment: str = SchemaField( + description="Comment to post on the issue or pull request", + placeholder="Enter your comment", + ) + + class Output(BlockSchema): + id: int = SchemaField(description="ID of the created comment") + url: str = SchemaField(description="URL to the comment on GitHub") + error: str = SchemaField( + description="Error message if the comment posting failed" + ) + + def __init__(self): + super().__init__( + id="a8db4d8d-db1c-4a25-a1b0-416a8c33602b", + description="This block posts a comment on a specified GitHub issue or pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubCommentBlock.Input, + output_schema=GithubCommentBlock.Output, + test_input=[ + { + "issue_url": "https://github.com/owner/repo/issues/1", + "comment": "This is a test comment.", + "credentials": TEST_CREDENTIALS_INPUT, + }, + { + "issue_url": "https://github.com/owner/repo/pull/1", + "comment": "This is a test comment.", + "credentials": TEST_CREDENTIALS_INPUT, + }, + ], + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", 1337), + ("url", "https://github.com/owner/repo/issues/1#issuecomment-1337"), + ("id", 1337), + ( + "url", + "https://github.com/owner/repo/issues/1#issuecomment-1337", + ), + ], + test_mock={ + "post_comment": lambda *args, **kwargs: ( + 1337, + "https://github.com/owner/repo/issues/1#issuecomment-1337", + ) + }, + ) + + @staticmethod + def post_comment( + credentials: GithubCredentials, issue_url: str, body_text: str + ) -> tuple[int, str]: + api = get_api(credentials) + data = {"body": body_text} + if "pull" in issue_url: + issue_url = issue_url.replace("pull", "issues") + comments_url = issue_url + "/comments" + response = api.post(comments_url, json=data) + comment = response.json() + return comment["id"], comment["html_url"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + id, url = self.post_comment( + credentials, + input_data.issue_url, + input_data.comment, + ) + yield "id", id + yield "url", url + + +# --8<-- [end:GithubCommentBlockExample] + + +class GithubMakeIssueBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + title: str = SchemaField( + description="Title of the issue", placeholder="Enter the issue title" + ) + body: str = 
SchemaField( + description="Body of the issue", placeholder="Enter the issue body" + ) + + class Output(BlockSchema): + number: int = SchemaField(description="Number of the created issue") + url: str = SchemaField(description="URL of the created issue") + error: str = SchemaField( + description="Error message if the issue creation failed" + ) + + def __init__(self): + super().__init__( + id="691dad47-f494-44c3-a1e8-05b7990f2dab", + description="This block creates a new issue on a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubMakeIssueBlock.Input, + output_schema=GithubMakeIssueBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "title": "Test Issue", + "body": "This is a test issue.", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("number", 1), + ("url", "https://github.com/owner/repo/issues/1"), + ], + test_mock={ + "create_issue": lambda *args, **kwargs: ( + 1, + "https://github.com/owner/repo/issues/1", + ) + }, + ) + + @staticmethod + def create_issue( + credentials: GithubCredentials, repo_url: str, title: str, body: str + ) -> tuple[int, str]: + api = get_api(credentials) + data = {"title": title, "body": body} + issues_url = repo_url + "/issues" + response = api.post(issues_url, json=data) + issue = response.json() + return issue["number"], issue["html_url"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + number, url = self.create_issue( + credentials, + input_data.repo_url, + input_data.title, + input_data.body, + ) + yield "number", number + yield "url", url + + +class GithubReadIssueBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue", + placeholder="https://github.com/owner/repo/issues/1", + ) + + class Output(BlockSchema): + title: str = SchemaField(description="Title of the issue") + body: str = SchemaField(description="Body of the issue") + user: str = SchemaField(description="User who created the issue") + error: str = SchemaField( + description="Error message if reading the issue failed" + ) + + def __init__(self): + super().__init__( + id="6443c75d-032a-4772-9c08-230c707c8acc", + description="This block reads the body, title, and user of a specified GitHub issue.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubReadIssueBlock.Input, + output_schema=GithubReadIssueBlock.Output, + test_input={ + "issue_url": "https://github.com/owner/repo/issues/1", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("title", "Title of the issue"), + ("body", "This is the body of the issue."), + ("user", "username"), + ], + test_mock={ + "read_issue": lambda *args, **kwargs: ( + "Title of the issue", + "This is the body of the issue.", + "username", + ) + }, + ) + + @staticmethod + def read_issue( + credentials: GithubCredentials, issue_url: str + ) -> tuple[str, str, str]: + api = get_api(credentials) + response = api.get(issue_url) + data = response.json() + title = data.get("title", "No title found") + body = data.get("body", "No body content found") + user = data.get("user", {}).get("login", "No user found") + return title, body, user + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + title, body, user = self.read_issue( + 
credentials, + input_data.issue_url, + ) + if title: + yield "title", title + if body: + yield "body", body + if user: + yield "user", user + + +class GithubListIssuesBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class Output(BlockSchema): + class IssueItem(TypedDict): + title: str + url: str + + issue: IssueItem = SchemaField( + title="Issue", description="Issues with their title and URL" + ) + error: str = SchemaField(description="Error message if listing issues failed") + + def __init__(self): + super().__init__( + id="c215bfd7-0e57-4573-8f8c-f7d4963dcd74", + description="This block lists all issues for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListIssuesBlock.Input, + output_schema=GithubListIssuesBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "issue", + { + "title": "Issue 1", + "url": "https://github.com/owner/repo/issues/1", + }, + ) + ], + test_mock={ + "list_issues": lambda *args, **kwargs: [ + { + "title": "Issue 1", + "url": "https://github.com/owner/repo/issues/1", + } + ] + }, + ) + + @staticmethod + def list_issues( + credentials: GithubCredentials, repo_url: str + ) -> list[Output.IssueItem]: + api = get_api(credentials) + issues_url = repo_url + "/issues" + response = api.get(issues_url) + data = response.json() + issues: list[GithubListIssuesBlock.Output.IssueItem] = [ + {"title": issue["title"], "url": issue["html_url"]} for issue in data + ] + return issues + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + issues = self.list_issues( + credentials, + input_data.repo_url, + ) + yield from (("issue", issue) for issue in issues) + + +class GithubAddLabelBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue or pull request", + placeholder="https://github.com/owner/repo/issues/1", + ) + label: str = SchemaField( + description="Label to add to the issue or pull request", + placeholder="Enter the label", + ) + + class Output(BlockSchema): + status: str = SchemaField(description="Status of the label addition operation") + error: str = SchemaField( + description="Error message if the label addition failed" + ) + + def __init__(self): + super().__init__( + id="98bd6b77-9506-43d5-b669-6b9733c4b1f1", + description="This block adds a label to a specified GitHub issue or pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubAddLabelBlock.Input, + output_schema=GithubAddLabelBlock.Output, + test_input={ + "issue_url": "https://github.com/owner/repo/issues/1", + "label": "bug", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Label added successfully")], + test_mock={"add_label": lambda *args, **kwargs: "Label added successfully"}, + ) + + @staticmethod + def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str: + api = get_api(credentials) + data = {"labels": [label]} + labels_url = issue_url + "/labels" + api.post(labels_url, json=data) + return "Label added successfully" + + def run( + self, + 
input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.add_label( + credentials, + input_data.issue_url, + input_data.label, + ) + yield "status", status + + +class GithubRemoveLabelBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue or pull request", + placeholder="https://github.com/owner/repo/issues/1", + ) + label: str = SchemaField( + description="Label to remove from the issue or pull request", + placeholder="Enter the label", + ) + + class Output(BlockSchema): + status: str = SchemaField(description="Status of the label removal operation") + error: str = SchemaField( + description="Error message if the label removal failed" + ) + + def __init__(self): + super().__init__( + id="78f050c5-3e3a-48c0-9e5b-ef1ceca5589c", + description="This block removes a label from a specified GitHub issue or pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubRemoveLabelBlock.Input, + output_schema=GithubRemoveLabelBlock.Output, + test_input={ + "issue_url": "https://github.com/owner/repo/issues/1", + "label": "bug", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Label removed successfully")], + test_mock={ + "remove_label": lambda *args, **kwargs: "Label removed successfully" + }, + ) + + @staticmethod + def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str: + api = get_api(credentials) + label_url = issue_url + f"/labels/{label}" + api.delete(label_url) + return "Label removed successfully" + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.remove_label( + credentials, + input_data.issue_url, + input_data.label, + ) + yield "status", status + + +class GithubAssignIssueBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue", + placeholder="https://github.com/owner/repo/issues/1", + ) + assignee: str = SchemaField( + description="Username to assign to the issue", + placeholder="Enter the username", + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="Status of the issue assignment operation" + ) + error: str = SchemaField( + description="Error message if the issue assignment failed" + ) + + def __init__(self): + super().__init__( + id="90507c72-b0ff-413a-886a-23bbbd66f542", + description="This block assigns a user to a specified GitHub issue.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubAssignIssueBlock.Input, + output_schema=GithubAssignIssueBlock.Output, + test_input={ + "issue_url": "https://github.com/owner/repo/issues/1", + "assignee": "username1", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Issue assigned successfully")], + test_mock={ + "assign_issue": lambda *args, **kwargs: "Issue assigned successfully" + }, + ) + + @staticmethod + def assign_issue( + credentials: GithubCredentials, + issue_url: str, + assignee: str, + ) -> str: + api = get_api(credentials) + assignees_url = issue_url + "/assignees" + data = {"assignees": [assignee]} + api.post(assignees_url, json=data) + return "Issue assigned successfully" + + def run( + self, + input_data: Input, + *, + credentials: 
GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.assign_issue( + credentials, + input_data.issue_url, + input_data.assignee, + ) + yield "status", status + + +class GithubUnassignIssueBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + issue_url: str = SchemaField( + description="URL of the GitHub issue", + placeholder="https://github.com/owner/repo/issues/1", + ) + assignee: str = SchemaField( + description="Username to unassign from the issue", + placeholder="Enter the username", + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="Status of the issue unassignment operation" + ) + error: str = SchemaField( + description="Error message if the issue unassignment failed" + ) + + def __init__(self): + super().__init__( + id="d154002a-38f4-46c2-962d-2488f2b05ece", + description="This block unassigns a user from a specified GitHub issue.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubUnassignIssueBlock.Input, + output_schema=GithubUnassignIssueBlock.Output, + test_input={ + "issue_url": "https://github.com/owner/repo/issues/1", + "assignee": "username1", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Issue unassigned successfully")], + test_mock={ + "unassign_issue": lambda *args, **kwargs: "Issue unassigned successfully" + }, + ) + + @staticmethod + def unassign_issue( + credentials: GithubCredentials, + issue_url: str, + assignee: str, + ) -> str: + api = get_api(credentials) + assignees_url = issue_url + "/assignees" + data = {"assignees": [assignee]} + api.delete(assignees_url, json=data) + return "Issue unassigned successfully" + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.unassign_issue( + credentials, + input_data.issue_url, + input_data.assignee, + ) + yield "status", status diff --git a/autogpt_platform/backend/backend/blocks/github/pull_requests.py b/autogpt_platform/backend/backend/blocks/github/pull_requests.py new file mode 100644 index 000000000000..b29db0ff3439 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/pull_requests.py @@ -0,0 +1,515 @@ +import re + +from typing_extensions import TypedDict + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._api import get_api +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubCredentials, + GithubCredentialsField, + GithubCredentialsInput, +) + + +class GithubListPullRequestsBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class Output(BlockSchema): + class PRItem(TypedDict): + title: str + url: str + + pull_request: PRItem = SchemaField( + title="Pull Request", description="PRs with their title and URL" + ) + error: str = SchemaField(description="Error message if listing issues failed") + + def __init__(self): + super().__init__( + id="ffef3c4c-6cd0-48dd-817d-459f975219f4", + description="This block lists all pull requests for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListPullRequestsBlock.Input, + output_schema=GithubListPullRequestsBlock.Output, + test_input={ + "repo_url": 
"https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "pull_request", + { + "title": "Pull request 1", + "url": "https://github.com/owner/repo/pull/1", + }, + ) + ], + test_mock={ + "list_prs": lambda *args, **kwargs: [ + { + "title": "Pull request 1", + "url": "https://github.com/owner/repo/pull/1", + } + ] + }, + ) + + @staticmethod + def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]: + api = get_api(credentials) + pulls_url = repo_url + "/pulls" + response = api.get(pulls_url) + data = response.json() + pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [ + {"title": pr["title"], "url": pr["html_url"]} for pr in data + ] + return pull_requests + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + pull_requests = self.list_prs( + credentials, + input_data.repo_url, + ) + yield from (("pull_request", pr) for pr in pull_requests) + + +class GithubMakePullRequestBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + title: str = SchemaField( + description="Title of the pull request", + placeholder="Enter the pull request title", + ) + body: str = SchemaField( + description="Body of the pull request", + placeholder="Enter the pull request body", + ) + head: str = SchemaField( + description=( + "The name of the branch where your changes are implemented. " + "For cross-repository pull requests in the same network, " + "namespace head with a user like this: username:branch." + ), + placeholder="Enter the head branch", + ) + base: str = SchemaField( + description="The name of the branch you want the changes pulled into.", + placeholder="Enter the base branch", + ) + + class Output(BlockSchema): + number: int = SchemaField(description="Number of the created pull request") + url: str = SchemaField(description="URL of the created pull request") + error: str = SchemaField( + description="Error message if the pull request creation failed" + ) + + def __init__(self): + super().__init__( + id="dfb987f8-f197-4b2e-bf19-111812afd692", + description="This block creates a new pull request on a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubMakePullRequestBlock.Input, + output_schema=GithubMakePullRequestBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "title": "Test Pull Request", + "body": "This is a test pull request.", + "head": "feature-branch", + "base": "main", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("number", 1), + ("url", "https://github.com/owner/repo/pull/1"), + ], + test_mock={ + "create_pr": lambda *args, **kwargs: ( + 1, + "https://github.com/owner/repo/pull/1", + ) + }, + ) + + @staticmethod + def create_pr( + credentials: GithubCredentials, + repo_url: str, + title: str, + body: str, + head: str, + base: str, + ) -> tuple[int, str]: + api = get_api(credentials) + pulls_url = repo_url + "/pulls" + data = {"title": title, "body": body, "head": head, "base": base} + response = api.post(pulls_url, json=data) + pr_data = response.json() + return pr_data["number"], pr_data["html_url"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> 
BlockOutput: + try: + number, url = self.create_pr( + credentials, + input_data.repo_url, + input_data.title, + input_data.body, + input_data.head, + input_data.base, + ) + yield "number", number + yield "url", url + except Exception as e: + yield "error", str(e) + + +class GithubReadPullRequestBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + pr_url: str = SchemaField( + description="URL of the GitHub pull request", + placeholder="https://github.com/owner/repo/pull/1", + ) + include_pr_changes: bool = SchemaField( + description="Whether to include the changes made in the pull request", + default=False, + advanced=False, + ) + + class Output(BlockSchema): + title: str = SchemaField(description="Title of the pull request") + body: str = SchemaField(description="Body of the pull request") + author: str = SchemaField(description="User who created the pull request") + changes: str = SchemaField(description="Changes made in the pull request") + error: str = SchemaField( + description="Error message if reading the pull request failed" + ) + + def __init__(self): + super().__init__( + id="bf94b2a4-1a30-4600-a783-a8a44ee31301", + description="This block reads the body, title, user, and changes of a specified GitHub pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubReadPullRequestBlock.Input, + output_schema=GithubReadPullRequestBlock.Output, + test_input={ + "pr_url": "https://github.com/owner/repo/pull/1", + "include_pr_changes": True, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("title", "Title of the pull request"), + ("body", "This is the body of the pull request."), + ("author", "username"), + ("changes", "List of changes made in the pull request."), + ], + test_mock={ + "read_pr": lambda *args, **kwargs: ( + "Title of the pull request", + "This is the body of the pull request.", + "username", + ), + "read_pr_changes": lambda *args, **kwargs: "List of changes made in the pull request.", + }, + ) + + @staticmethod + def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]: + api = get_api(credentials) + # Adjust the URL to access the issue endpoint for PR metadata + issue_url = pr_url.replace("/pull/", "/issues/") + response = api.get(issue_url) + data = response.json() + title = data.get("title", "No title found") + body = data.get("body", "No body content found") + author = data.get("user", {}).get("login", "No user found") + return title, body, author + + @staticmethod + def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str: + api = get_api(credentials) + files_url = prepare_pr_api_url(pr_url=pr_url, path="files") + response = api.get(files_url) + files = response.json() + changes = [] + for file in files: + filename = file.get("filename") + patch = file.get("patch") + if filename and patch: + changes.append(f"File: {filename}\n{patch}") + return "\n\n".join(changes) + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + title, body, author = self.read_pr( + credentials, + input_data.pr_url, + ) + yield "title", title + yield "body", body + yield "author", author + + if input_data.include_pr_changes: + changes = self.read_pr_changes( + credentials, + input_data.pr_url, + ) + yield "changes", changes + + +class GithubAssignPRReviewerBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = 
GithubCredentialsField("repo") + pr_url: str = SchemaField( + description="URL of the GitHub pull request", + placeholder="https://github.com/owner/repo/pull/1", + ) + reviewer: str = SchemaField( + description="Username of the reviewer to assign", + placeholder="Enter the reviewer's username", + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="Status of the reviewer assignment operation" + ) + error: str = SchemaField( + description="Error message if the reviewer assignment failed" + ) + + def __init__(self): + super().__init__( + id="c0d22c5e-e688-43e3-ba43-d5faba7927fd", + description="This block assigns a reviewer to a specified GitHub pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubAssignPRReviewerBlock.Input, + output_schema=GithubAssignPRReviewerBlock.Output, + test_input={ + "pr_url": "https://github.com/owner/repo/pull/1", + "reviewer": "reviewer_username", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Reviewer assigned successfully")], + test_mock={ + "assign_reviewer": lambda *args, **kwargs: "Reviewer assigned successfully" + }, + ) + + @staticmethod + def assign_reviewer( + credentials: GithubCredentials, pr_url: str, reviewer: str + ) -> str: + api = get_api(credentials) + reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers") + data = {"reviewers": [reviewer]} + api.post(reviewers_url, json=data) + return "Reviewer assigned successfully" + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + status = self.assign_reviewer( + credentials, + input_data.pr_url, + input_data.reviewer, + ) + yield "status", status + except Exception as e: + yield "error", str(e) + + +class GithubUnassignPRReviewerBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + pr_url: str = SchemaField( + description="URL of the GitHub pull request", + placeholder="https://github.com/owner/repo/pull/1", + ) + reviewer: str = SchemaField( + description="Username of the reviewer to unassign", + placeholder="Enter the reviewer's username", + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="Status of the reviewer unassignment operation" + ) + error: str = SchemaField( + description="Error message if the reviewer unassignment failed" + ) + + def __init__(self): + super().__init__( + id="9637945d-c602-4875-899a-9c22f8fd30de", + description="This block unassigns a reviewer from a specified GitHub pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubUnassignPRReviewerBlock.Input, + output_schema=GithubUnassignPRReviewerBlock.Output, + test_input={ + "pr_url": "https://github.com/owner/repo/pull/1", + "reviewer": "reviewer_username", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Reviewer unassigned successfully")], + test_mock={ + "unassign_reviewer": lambda *args, **kwargs: "Reviewer unassigned successfully" + }, + ) + + @staticmethod + def unassign_reviewer( + credentials: GithubCredentials, pr_url: str, reviewer: str + ) -> str: + api = get_api(credentials) + reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers") + data = {"reviewers": [reviewer]} + api.delete(reviewers_url, json=data) + return "Reviewer unassigned successfully" + + def run( + self, + input_data: Input, + *, + credentials: 
GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + status = self.unassign_reviewer( + credentials, + input_data.pr_url, + input_data.reviewer, + ) + yield "status", status + except Exception as e: + yield "error", str(e) + + +class GithubListPRReviewersBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + pr_url: str = SchemaField( + description="URL of the GitHub pull request", + placeholder="https://github.com/owner/repo/pull/1", + ) + + class Output(BlockSchema): + class ReviewerItem(TypedDict): + username: str + url: str + + reviewer: ReviewerItem = SchemaField( + title="Reviewer", + description="Reviewers with their username and profile URL", + ) + error: str = SchemaField( + description="Error message if listing reviewers failed" + ) + + def __init__(self): + super().__init__( + id="2646956e-96d5-4754-a3df-034017e7ed96", + description="This block lists all reviewers for a specified GitHub pull request.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListPRReviewersBlock.Input, + output_schema=GithubListPRReviewersBlock.Output, + test_input={ + "pr_url": "https://github.com/owner/repo/pull/1", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "reviewer", + { + "username": "reviewer1", + "url": "https://github.com/reviewer1", + }, + ) + ], + test_mock={ + "list_reviewers": lambda *args, **kwargs: [ + { + "username": "reviewer1", + "url": "https://github.com/reviewer1", + } + ] + }, + ) + + @staticmethod + def list_reviewers( + credentials: GithubCredentials, pr_url: str + ) -> list[Output.ReviewerItem]: + api = get_api(credentials) + reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers") + response = api.get(reviewers_url) + data = response.json() + reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [ + {"username": reviewer["login"], "url": reviewer["html_url"]} + for reviewer in data.get("users", []) + ] + return reviewers + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + reviewers = self.list_reviewers( + credentials, + input_data.pr_url, + ) + yield from (("reviewer", reviewer) for reviewer in reviewers) + + +def prepare_pr_api_url(pr_url: str, path: str) -> str: + # Pattern to capture the base repository URL and the pull request number + pattern = r"^(?:https?://)?([^/]+/[^/]+/[^/]+)/pull/(\d+)" + match = re.match(pattern, pr_url) + if not match: + return pr_url + + base_url, pr_number = match.groups() + return f"{base_url}/pulls/{pr_number}/{path}" diff --git a/autogpt_platform/backend/backend/blocks/github/repo.py b/autogpt_platform/backend/backend/blocks/github/repo.py new file mode 100644 index 000000000000..82bef9475bad --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/repo.py @@ -0,0 +1,1118 @@ +import base64 + +from typing_extensions import TypedDict + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._api import get_api +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubCredentials, + GithubCredentialsField, + GithubCredentialsInput, +) + + +class GithubListTagsBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class 
Output(BlockSchema): + class TagItem(TypedDict): + name: str + url: str + + tag: TagItem = SchemaField( + title="Tag", description="Tags with their name and file tree browser URL" + ) + error: str = SchemaField(description="Error message if listing tags failed") + + def __init__(self): + super().__init__( + id="358924e7-9a11-4d1a-a0f2-13c67fe59e2e", + description="This block lists all tags for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListTagsBlock.Input, + output_schema=GithubListTagsBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "tag", + { + "name": "v1.0.0", + "url": "https://github.com/owner/repo/tree/v1.0.0", + }, + ) + ], + test_mock={ + "list_tags": lambda *args, **kwargs: [ + { + "name": "v1.0.0", + "url": "https://github.com/owner/repo/tree/v1.0.0", + } + ] + }, + ) + + @staticmethod + def list_tags( + credentials: GithubCredentials, repo_url: str + ) -> list[Output.TagItem]: + api = get_api(credentials) + tags_url = repo_url + "/tags" + response = api.get(tags_url) + data = response.json() + repo_path = repo_url.replace("https://github.com/", "") + tags: list[GithubListTagsBlock.Output.TagItem] = [ + { + "name": tag["name"], + "url": f"https://github.com/{repo_path}/tree/{tag['name']}", + } + for tag in data + ] + return tags + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + tags = self.list_tags( + credentials, + input_data.repo_url, + ) + yield from (("tag", tag) for tag in tags) + + +class GithubListBranchesBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class Output(BlockSchema): + class BranchItem(TypedDict): + name: str + url: str + + branch: BranchItem = SchemaField( + title="Branch", + description="Branches with their name and file tree browser URL", + ) + error: str = SchemaField(description="Error message if listing branches failed") + + def __init__(self): + super().__init__( + id="74243e49-2bec-4916-8bf4-db43d44aead5", + description="This block lists all branches for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListBranchesBlock.Input, + output_schema=GithubListBranchesBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "branch", + { + "name": "main", + "url": "https://github.com/owner/repo/tree/main", + }, + ) + ], + test_mock={ + "list_branches": lambda *args, **kwargs: [ + { + "name": "main", + "url": "https://github.com/owner/repo/tree/main", + } + ] + }, + ) + + @staticmethod + def list_branches( + credentials: GithubCredentials, repo_url: str + ) -> list[Output.BranchItem]: + api = get_api(credentials) + branches_url = repo_url + "/branches" + response = api.get(branches_url) + data = response.json() + repo_path = repo_url.replace("https://github.com/", "") + branches: list[GithubListBranchesBlock.Output.BranchItem] = [ + { + "name": branch["name"], + "url": f"https://github.com/{repo_path}/tree/{branch['name']}", + } + for branch in data + ] + return branches + + def run( + self, + input_data: Input, + *, + credentials: 
GithubCredentials, + **kwargs, + ) -> BlockOutput: + branches = self.list_branches( + credentials, + input_data.repo_url, + ) + yield from (("branch", branch) for branch in branches) + + +class GithubListDiscussionsBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + num_discussions: int = SchemaField( + description="Number of discussions to fetch", default=5 + ) + + class Output(BlockSchema): + class DiscussionItem(TypedDict): + title: str + url: str + + discussion: DiscussionItem = SchemaField( + title="Discussion", description="Discussions with their title and URL" + ) + error: str = SchemaField( + description="Error message if listing discussions failed" + ) + + def __init__(self): + super().__init__( + id="3ef1a419-3d76-4e07-b761-de9dad4d51d7", + description="This block lists recent discussions for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListDiscussionsBlock.Input, + output_schema=GithubListDiscussionsBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "num_discussions": 3, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "discussion", + { + "title": "Discussion 1", + "url": "https://github.com/owner/repo/discussions/1", + }, + ) + ], + test_mock={ + "list_discussions": lambda *args, **kwargs: [ + { + "title": "Discussion 1", + "url": "https://github.com/owner/repo/discussions/1", + } + ] + }, + ) + + @staticmethod + def list_discussions( + credentials: GithubCredentials, repo_url: str, num_discussions: int + ) -> list[Output.DiscussionItem]: + api = get_api(credentials) + # GitHub GraphQL API endpoint is different; we'll use api.post with custom URL + repo_path = repo_url.replace("https://github.com/", "") + owner, repo = repo_path.split("/") + query = """ + query($owner: String!, $repo: String!, $num: Int!) 
{ + repository(owner: $owner, name: $repo) { + discussions(first: $num) { + nodes { + title + url + } + } + } + } + """ + variables = {"owner": owner, "repo": repo, "num": num_discussions} + response = api.post( + "https://api.github.com/graphql", + json={"query": query, "variables": variables}, + ) + data = response.json() + discussions: list[GithubListDiscussionsBlock.Output.DiscussionItem] = [ + {"title": discussion["title"], "url": discussion["url"]} + for discussion in data["data"]["repository"]["discussions"]["nodes"] + ] + return discussions + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + discussions = self.list_discussions( + credentials, input_data.repo_url, input_data.num_discussions + ) + yield from (("discussion", discussion) for discussion in discussions) + + +class GithubListReleasesBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class Output(BlockSchema): + class ReleaseItem(TypedDict): + name: str + url: str + + release: ReleaseItem = SchemaField( + title="Release", + description="Releases with their name and file tree browser URL", + ) + error: str = SchemaField(description="Error message if listing releases failed") + + def __init__(self): + super().__init__( + id="3460367a-6ba7-4645-8ce6-47b05d040b92", + description="This block lists all releases for a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListReleasesBlock.Input, + output_schema=GithubListReleasesBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "release", + { + "name": "v1.0.0", + "url": "https://github.com/owner/repo/releases/tag/v1.0.0", + }, + ) + ], + test_mock={ + "list_releases": lambda *args, **kwargs: [ + { + "name": "v1.0.0", + "url": "https://github.com/owner/repo/releases/tag/v1.0.0", + } + ] + }, + ) + + @staticmethod + def list_releases( + credentials: GithubCredentials, repo_url: str + ) -> list[Output.ReleaseItem]: + api = get_api(credentials) + releases_url = repo_url + "/releases" + response = api.get(releases_url) + data = response.json() + releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [ + {"name": release["name"], "url": release["html_url"]} for release in data + ] + return releases + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + releases = self.list_releases( + credentials, + input_data.repo_url, + ) + yield from (("release", release) for release in releases) + + +class GithubReadFileBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + file_path: str = SchemaField( + description="Path to the file in the repository", + placeholder="path/to/file", + ) + branch: str = SchemaField( + description="Branch to read from", + placeholder="branch_name", + default="master", + ) + + class Output(BlockSchema): + text_content: str = SchemaField( + description="Content of the file (decoded as UTF-8 text)" + ) + raw_content: str = SchemaField( + description="Raw base64-encoded content of the file" + ) + 
size: int = SchemaField(description="The size of the file (in bytes)") + error: str = SchemaField(description="Error message if the file reading failed") + + def __init__(self): + super().__init__( + id="87ce6c27-5752-4bbc-8e26-6da40a3dcfd3", + description="This block reads the content of a specified file from a GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubReadFileBlock.Input, + output_schema=GithubReadFileBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "file_path": "path/to/file", + "branch": "master", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("raw_content", "RmlsZSBjb250ZW50"), + ("text_content", "File content"), + ("size", 13), + ], + test_mock={"read_file": lambda *args, **kwargs: ("RmlsZSBjb250ZW50", 13)}, + ) + + @staticmethod + def read_file( + credentials: GithubCredentials, repo_url: str, file_path: str, branch: str + ) -> tuple[str, int]: + api = get_api(credentials) + content_url = repo_url + f"/contents/{file_path}?ref={branch}" + response = api.get(content_url) + content = response.json() + + if isinstance(content, list): + # Multiple entries of different types exist at this path + if not (file := next((f for f in content if f["type"] == "file"), None)): + raise TypeError("Not a file") + content = file + + if content["type"] != "file": + raise TypeError("Not a file") + + return content["content"], content["size"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + raw_content, size = self.read_file( + credentials, + input_data.repo_url, + input_data.file_path.lstrip("/"), + input_data.branch, + ) + yield "raw_content", raw_content + yield "text_content", base64.b64decode(raw_content).decode("utf-8") + yield "size", size + + +class GithubReadFolderBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + folder_path: str = SchemaField( + description="Path to the folder in the repository", + placeholder="path/to/folder", + ) + branch: str = SchemaField( + description="Branch name to read from (defaults to master)", + placeholder="branch_name", + default="master", + ) + + class Output(BlockSchema): + class DirEntry(TypedDict): + name: str + path: str + + class FileEntry(TypedDict): + name: str + path: str + size: int + + file: FileEntry = SchemaField(description="Files in the folder") + dir: DirEntry = SchemaField(description="Directories in the folder") + error: str = SchemaField( + description="Error message if reading the folder failed" + ) + + def __init__(self): + super().__init__( + id="1355f863-2db3-4d75-9fba-f91e8a8ca400", + description="This block reads the content of a specified folder from a GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubReadFolderBlock.Input, + output_schema=GithubReadFolderBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "folder_path": "path/to/folder", + "branch": "master", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "file", + { + "name": "file1.txt", + "path": "path/to/folder/file1.txt", + "size": 1337, + }, + ), + ("dir", {"name": "dir2", "path": "path/to/folder/dir2"}), + ], + test_mock={ + "read_folder": lambda *args, **kwargs: ( + [ + { + 
"name": "file1.txt", + "path": "path/to/folder/file1.txt", + "size": 1337, + } + ], + [{"name": "dir2", "path": "path/to/folder/dir2"}], + ) + }, + ) + + @staticmethod + def read_folder( + credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str + ) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]: + api = get_api(credentials) + contents_url = repo_url + f"/contents/{folder_path}?ref={branch}" + response = api.get(contents_url) + content = response.json() + + if not isinstance(content, list): + raise TypeError("Not a folder") + + files = [ + GithubReadFolderBlock.Output.FileEntry( + name=entry["name"], + path=entry["path"], + size=entry["size"], + ) + for entry in content + if entry["type"] == "file" + ] + dirs = [ + GithubReadFolderBlock.Output.DirEntry( + name=entry["name"], + path=entry["path"], + ) + for entry in content + if entry["type"] == "dir" + ] + + return files, dirs + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + files, dirs = self.read_folder( + credentials, + input_data.repo_url, + input_data.folder_path.lstrip("/"), + input_data.branch, + ) + yield from (("file", file) for file in files) + yield from (("dir", dir) for dir in dirs) + + +class GithubMakeBranchBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + new_branch: str = SchemaField( + description="Name of the new branch", + placeholder="new_branch_name", + ) + source_branch: str = SchemaField( + description="Name of the source branch", + placeholder="source_branch_name", + ) + + class Output(BlockSchema): + status: str = SchemaField(description="Status of the branch creation operation") + error: str = SchemaField( + description="Error message if the branch creation failed" + ) + + def __init__(self): + super().__init__( + id="944cc076-95e7-4d1b-b6b6-b15d8ee5448d", + description="This block creates a new branch from a specified source branch.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubMakeBranchBlock.Input, + output_schema=GithubMakeBranchBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "new_branch": "new_branch_name", + "source_branch": "source_branch_name", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Branch created successfully")], + test_mock={ + "create_branch": lambda *args, **kwargs: "Branch created successfully" + }, + ) + + @staticmethod + def create_branch( + credentials: GithubCredentials, + repo_url: str, + new_branch: str, + source_branch: str, + ) -> str: + api = get_api(credentials) + # Get the SHA of the source branch + ref_url = repo_url + f"/git/refs/heads/{source_branch}" + response = api.get(ref_url) + sha = response.json()["object"]["sha"] + + # Create the new branch + create_ref_url = repo_url + "/git/refs" + data = {"ref": f"refs/heads/{new_branch}", "sha": sha} + response = api.post(create_ref_url, json=data) + return "Branch created successfully" + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.create_branch( + credentials, + input_data.repo_url, + input_data.new_branch, + input_data.source_branch, + ) + yield "status", status + + +class GithubDeleteBranchBlock(Block): + class Input(BlockSchema): + credentials: 
GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + branch: str = SchemaField( + description="Name of the branch to delete", + placeholder="branch_name", + ) + + class Output(BlockSchema): + status: str = SchemaField(description="Status of the branch deletion operation") + error: str = SchemaField( + description="Error message if the branch deletion failed" + ) + + def __init__(self): + super().__init__( + id="0d4130f7-e0ab-4d55-adc3-0a40225e80f4", + description="This block deletes a specified branch.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubDeleteBranchBlock.Input, + output_schema=GithubDeleteBranchBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "branch": "branch_name", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Branch deleted successfully")], + test_mock={ + "delete_branch": lambda *args, **kwargs: "Branch deleted successfully" + }, + ) + + @staticmethod + def delete_branch( + credentials: GithubCredentials, repo_url: str, branch: str + ) -> str: + api = get_api(credentials) + ref_url = repo_url + f"/git/refs/heads/{branch}" + api.delete(ref_url) + return "Branch deleted successfully" + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + status = self.delete_branch( + credentials, + input_data.repo_url, + input_data.branch, + ) + yield "status", status + + +class GithubCreateFileBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + file_path: str = SchemaField( + description="Path where the file should be created", + placeholder="path/to/file.txt", + ) + content: str = SchemaField( + description="Content to write to the file", + placeholder="File content here", + ) + branch: str = SchemaField( + description="Branch where the file should be created", + default="main", + ) + commit_message: str = SchemaField( + description="Message for the commit", + default="Create new file", + ) + + class Output(BlockSchema): + url: str = SchemaField(description="URL of the created file") + sha: str = SchemaField(description="SHA of the commit") + error: str = SchemaField( + description="Error message if the file creation failed" + ) + + def __init__(self): + super().__init__( + id="8fd132ac-b917-428a-8159-d62893e8a3fe", + description="This block creates a new file in a GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubCreateFileBlock.Input, + output_schema=GithubCreateFileBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "file_path": "test/file.txt", + "content": "Test content", + "branch": "main", + "commit_message": "Create test file", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("url", "https://github.com/owner/repo/blob/main/test/file.txt"), + ("sha", "abc123"), + ], + test_mock={ + "create_file": lambda *args, **kwargs: ( + "https://github.com/owner/repo/blob/main/test/file.txt", + "abc123", + ) + }, + ) + + @staticmethod + def create_file( + credentials: GithubCredentials, + repo_url: str, + file_path: str, + content: str, + branch: str, + commit_message: str, + ) -> tuple[str, 
str]: + api = get_api(credentials) + # Convert content to base64 + content_bytes = content.encode("utf-8") + content_base64 = base64.b64encode(content_bytes).decode("utf-8") + + # Create the file using the GitHub API + contents_url = f"{repo_url}/contents/{file_path}" + data = { + "message": commit_message, + "content": content_base64, + "branch": branch, + } + response = api.put(contents_url, json=data) + result = response.json() + + return result["content"]["html_url"], result["commit"]["sha"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + url, sha = self.create_file( + credentials, + input_data.repo_url, + input_data.file_path, + input_data.content, + input_data.branch, + input_data.commit_message, + ) + yield "url", url + yield "sha", sha + except Exception as e: + yield "error", str(e) + + +class GithubUpdateFileBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + file_path: str = SchemaField( + description="Path to the file to update", + placeholder="path/to/file.txt", + ) + content: str = SchemaField( + description="New content for the file", + placeholder="Updated content here", + ) + branch: str = SchemaField( + description="Branch containing the file", + default="main", + ) + commit_message: str = SchemaField( + description="Message for the commit", + default="Update file", + ) + + class Output(BlockSchema): + url: str = SchemaField(description="URL of the updated file") + sha: str = SchemaField(description="SHA of the commit") + error: str = SchemaField(description="Error message if the file update failed") + + def __init__(self): + super().__init__( + id="30be12a4-57cb-4aa4-baf5-fcc68d136076", + description="This block updates an existing file in a GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubUpdateFileBlock.Input, + output_schema=GithubUpdateFileBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "file_path": "test/file.txt", + "content": "Updated content", + "branch": "main", + "commit_message": "Update test file", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("url", "https://github.com/owner/repo/blob/main/test/file.txt"), + ("sha", "def456"), + ], + test_mock={ + "update_file": lambda *args, **kwargs: ( + "https://github.com/owner/repo/blob/main/test/file.txt", + "def456", + ) + }, + ) + + @staticmethod + def update_file( + credentials: GithubCredentials, + repo_url: str, + file_path: str, + content: str, + branch: str, + commit_message: str, + ) -> tuple[str, str]: + api = get_api(credentials) + + # First get the current file to get its SHA + contents_url = f"{repo_url}/contents/{file_path}" + params = {"ref": branch} + response = api.get(contents_url, params=params) + current_file = response.json() + + # Convert new content to base64 + content_bytes = content.encode("utf-8") + content_base64 = base64.b64encode(content_bytes).decode("utf-8") + + # Update the file + data = { + "message": commit_message, + "content": content_base64, + "sha": current_file["sha"], + "branch": branch, + } + response = api.put(contents_url, json=data) + result = response.json() + + return result["content"]["html_url"], result["commit"]["sha"] + + def run( + self, + input_data: Input, + *, + credentials: 
GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + url, sha = self.update_file( + credentials, + input_data.repo_url, + input_data.file_path, + input_data.content, + input_data.branch, + input_data.commit_message, + ) + yield "url", url + yield "sha", sha + except Exception as e: + yield "error", str(e) + + +class GithubCreateRepositoryBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + name: str = SchemaField( + description="Name of the repository to create", + placeholder="my-new-repo", + ) + description: str = SchemaField( + description="Description of the repository", + placeholder="A description of the repository", + default="", + ) + private: bool = SchemaField( + description="Whether the repository should be private", + default=False, + ) + auto_init: bool = SchemaField( + description="Whether to initialize the repository with a README", + default=True, + ) + gitignore_template: str = SchemaField( + description="Git ignore template to use (e.g., Python, Node, Java)", + default="", + ) + + class Output(BlockSchema): + url: str = SchemaField(description="URL of the created repository") + clone_url: str = SchemaField(description="Git clone URL of the repository") + error: str = SchemaField( + description="Error message if the repository creation failed" + ) + + def __init__(self): + super().__init__( + id="029ec3b8-1cfd-46d3-b6aa-28e4a706efd1", + description="This block creates a new GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubCreateRepositoryBlock.Input, + output_schema=GithubCreateRepositoryBlock.Output, + test_input={ + "name": "test-repo", + "description": "A test repository", + "private": False, + "auto_init": True, + "gitignore_template": "Python", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("url", "https://github.com/owner/test-repo"), + ("clone_url", "https://github.com/owner/test-repo.git"), + ], + test_mock={ + "create_repository": lambda *args, **kwargs: ( + "https://github.com/owner/test-repo", + "https://github.com/owner/test-repo.git", + ) + }, + ) + + @staticmethod + def create_repository( + credentials: GithubCredentials, + name: str, + description: str, + private: bool, + auto_init: bool, + gitignore_template: str, + ) -> tuple[str, str]: + api = get_api(credentials, convert_urls=False) # Disable URL conversion + data = { + "name": name, + "description": description, + "private": private, + "auto_init": auto_init, + } + + if gitignore_template: + data["gitignore_template"] = gitignore_template + + # Create repository using the user endpoint + response = api.post("https://api.github.com/user/repos", json=data) + result = response.json() + + return result["html_url"], result["clone_url"] + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + url, clone_url = self.create_repository( + credentials, + input_data.name, + input_data.description, + input_data.private, + input_data.auto_init, + input_data.gitignore_template, + ) + yield "url", url + yield "clone_url", clone_url + except Exception as e: + yield "error", str(e) + + +class GithubListStargazersBlock(Block): + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + + class Output(BlockSchema): + class 
StargazerItem(TypedDict): + username: str + url: str + + stargazer: StargazerItem = SchemaField( + title="Stargazer", + description="Stargazers with their username and profile URL", + ) + error: str = SchemaField( + description="Error message if listing stargazers failed" + ) + + def __init__(self): + super().__init__( + id="a4b9c2d1-e5f6-4g7h-8i9j-0k1l2m3n4o5p", # Generated unique UUID + description="This block lists all users who have starred a specified GitHub repository.", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubListStargazersBlock.Input, + output_schema=GithubListStargazersBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "stargazer", + { + "username": "octocat", + "url": "https://github.com/octocat", + }, + ) + ], + test_mock={ + "list_stargazers": lambda *args, **kwargs: [ + { + "username": "octocat", + "url": "https://github.com/octocat", + } + ] + }, + ) + + @staticmethod + def list_stargazers( + credentials: GithubCredentials, repo_url: str + ) -> list[Output.StargazerItem]: + api = get_api(credentials) + # Add /stargazers to the repo URL to get stargazers endpoint + stargazers_url = f"{repo_url}/stargazers" + # Set accept header to get starred_at timestamp + headers = {"Accept": "application/vnd.github.star+json"} + response = api.get(stargazers_url, headers=headers) + data = response.json() + + stargazers: list[GithubListStargazersBlock.Output.StargazerItem] = [ + { + "username": stargazer["login"], + "url": stargazer["html_url"], + } + for stargazer in data + ] + return stargazers + + def run( + self, + input_data: Input, + *, + credentials: GithubCredentials, + **kwargs, + ) -> BlockOutput: + try: + stargazers = self.list_stargazers( + credentials, + input_data.repo_url, + ) + yield from (("stargazer", stargazer) for stargazer in stargazers) + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/github/statuses.py b/autogpt_platform/backend/backend/blocks/github/statuses.py new file mode 100644 index 000000000000..8abf27928f32 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/statuses.py @@ -0,0 +1,180 @@ +from enum import Enum +from typing import Optional + +from pydantic import BaseModel + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._api import get_api +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubFineGrainedAPICredentials, + GithubFineGrainedAPICredentialsField, + GithubFineGrainedAPICredentialsInput, +) + + +class StatusState(Enum): + ERROR = "error" + FAILURE = "failure" + PENDING = "pending" + SUCCESS = "success" + + +class GithubCreateStatusBlock(Block): + """Block for creating a commit status on a GitHub repository.""" + + class Input(BlockSchema): + credentials: GithubFineGrainedAPICredentialsInput = ( + GithubFineGrainedAPICredentialsField("repo:status") + ) + repo_url: str = SchemaField( + description="URL of the GitHub repository", + placeholder="https://github.com/owner/repo", + ) + sha: str = SchemaField( + description="The SHA of the commit to set status for", + ) + state: StatusState = SchemaField( + description="The state of the status (error, failure, pending, success)", + ) + target_url: Optional[str] = SchemaField( + description="URL with additional details about this status", + default=None, + ) + description: 
Optional[str] = SchemaField( + description="Short description of the status", + default=None, + ) + check_name: Optional[str] = SchemaField( + description="Label to differentiate this status from others", + default="AutoGPT Platform Checks", + advanced=False, + ) + + class Output(BlockSchema): + class StatusResult(BaseModel): + id: int + url: str + state: str + context: str + description: Optional[str] + target_url: Optional[str] + created_at: str + updated_at: str + + status: StatusResult = SchemaField(description="Details of the created status") + error: str = SchemaField(description="Error message if status creation failed") + + def __init__(self): + super().__init__( + id="3d67f123-a4b5-4c89-9d01-2e34f5c67890", # Generated UUID + description="Creates a new commit status in a GitHub repository", + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=GithubCreateStatusBlock.Input, + output_schema=GithubCreateStatusBlock.Output, + test_input={ + "repo_url": "https://github.com/owner/repo", + "sha": "ce587453ced02b1526dfb4cb910479d431683101", + "state": StatusState.SUCCESS.value, + "target_url": "https://example.com/build/status", + "description": "The build succeeded!", + "check_name": "continuous-integration/jenkins", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "status", + { + "id": 1234567890, + "url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101", + "state": "success", + "context": "continuous-integration/jenkins", + "description": "The build succeeded!", + "target_url": "https://example.com/build/status", + "created_at": "2024-01-21T10:00:00Z", + "updated_at": "2024-01-21T10:00:00Z", + }, + ), + ], + test_mock={ + "create_status": lambda *args, **kwargs: { + "id": 1234567890, + "url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101", + "state": "success", + "context": "continuous-integration/jenkins", + "description": "The build succeeded!", + "target_url": "https://example.com/build/status", + "created_at": "2024-01-21T10:00:00Z", + "updated_at": "2024-01-21T10:00:00Z", + } + }, + ) + + @staticmethod + def create_status( + credentials: GithubFineGrainedAPICredentials, + repo_url: str, + sha: str, + state: StatusState, + target_url: Optional[str] = None, + description: Optional[str] = None, + context: str = "default", + ) -> dict: + api = get_api(credentials) + + class StatusData(BaseModel): + state: str + target_url: Optional[str] = None + description: Optional[str] = None + context: str + + data = StatusData( + state=state.value, + context=context, + ) + + if target_url: + data.target_url = target_url + + if description: + data.description = description + + status_url = f"{repo_url}/statuses/{sha}" + response = api.post(status_url, json=data) + result = response.json() + + return { + "id": result["id"], + "url": result["url"], + "state": result["state"], + "context": result["context"], + "description": result.get("description"), + "target_url": result.get("target_url"), + "created_at": result["created_at"], + "updated_at": result["updated_at"], + } + + def run( + self, + input_data: Input, + *, + credentials: GithubFineGrainedAPICredentials, + **kwargs, + ) -> BlockOutput: + try: + result = self.create_status( + credentials=credentials, + repo_url=input_data.repo_url, + sha=input_data.sha, + state=input_data.state, + target_url=input_data.target_url, + description=input_data.description, + context=input_data.check_name or 
"AutoGPT Platform Checks", + ) + yield "status", result + except Exception as e: + yield "error", str(e) diff --git a/autogpt_platform/backend/backend/blocks/github/triggers.py b/autogpt_platform/backend/backend/blocks/github/triggers.py new file mode 100644 index 000000000000..938dce84faea --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/github/triggers.py @@ -0,0 +1,158 @@ +import json +import logging +from pathlib import Path + +from pydantic import BaseModel + +from backend.data.block import ( + Block, + BlockCategory, + BlockOutput, + BlockSchema, + BlockWebhookConfig, +) +from backend.data.model import SchemaField + +from ._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GithubCredentialsField, + GithubCredentialsInput, +) + +logger = logging.getLogger(__name__) + + +# --8<-- [start:GithubTriggerExample] +class GitHubTriggerBase: + class Input(BlockSchema): + credentials: GithubCredentialsInput = GithubCredentialsField("repo") + repo: str = SchemaField( + description=( + "Repository to subscribe to.\n\n" + "**Note:** Make sure your GitHub credentials have permissions " + "to create webhooks on this repo." + ), + placeholder="{owner}/{repo}", + ) + # --8<-- [start:example-payload-field] + payload: dict = SchemaField(hidden=True, default={}) + # --8<-- [end:example-payload-field] + + class Output(BlockSchema): + payload: dict = SchemaField( + description="The complete webhook payload that was received from GitHub. " + "Includes information about the affected resource (e.g. pull request), " + "the event, and the user who triggered the event." + ) + triggered_by_user: dict = SchemaField( + description="Object representing the GitHub user who triggered the event" + ) + error: str = SchemaField( + description="Error message if the payload could not be processed" + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "payload", input_data.payload + yield "triggered_by_user", input_data.payload["sender"] + + +class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block): + EXAMPLE_PAYLOAD_FILE = ( + Path(__file__).parent / "example_payloads" / "pull_request.synchronize.json" + ) + + # --8<-- [start:example-event-filter] + class Input(GitHubTriggerBase.Input): + class EventsFilter(BaseModel): + """ + https://docs.github.com/en/webhooks/webhook-events-and-payloads#pull_request + """ + + opened: bool = False + edited: bool = False + closed: bool = False + reopened: bool = False + synchronize: bool = False + assigned: bool = False + unassigned: bool = False + labeled: bool = False + unlabeled: bool = False + converted_to_draft: bool = False + locked: bool = False + unlocked: bool = False + enqueued: bool = False + dequeued: bool = False + milestoned: bool = False + demilestoned: bool = False + ready_for_review: bool = False + review_requested: bool = False + review_request_removed: bool = False + auto_merge_enabled: bool = False + auto_merge_disabled: bool = False + + events: EventsFilter = SchemaField( + title="Events", description="The events to subscribe to" + ) + # --8<-- [end:example-event-filter] + + class Output(GitHubTriggerBase.Output): + event: str = SchemaField( + description="The PR event that triggered the webhook (e.g. 
'opened')" + ) + number: int = SchemaField(description="The number of the affected pull request") + pull_request: dict = SchemaField( + description="Object representing the affected pull request" + ) + pull_request_url: str = SchemaField( + description="The URL of the affected pull request" + ) + + def __init__(self): + from backend.integrations.webhooks.github import GithubWebhookType + + example_payload = json.loads( + self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8") + ) + + super().__init__( + id="6c60ec01-8128-419e-988f-96a063ee2fea", + description="This block triggers on pull request events and outputs the event type and payload.", + categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT}, + input_schema=GithubPullRequestTriggerBlock.Input, + output_schema=GithubPullRequestTriggerBlock.Output, + # --8<-- [start:example-webhook_config] + webhook_config=BlockWebhookConfig( + provider="github", + webhook_type=GithubWebhookType.REPO, + resource_format="{repo}", + event_filter_input="events", + event_format="pull_request.{event}", + ), + # --8<-- [end:example-webhook_config] + test_input={ + "repo": "Significant-Gravitas/AutoGPT", + "events": {"opened": True, "synchronize": True}, + "credentials": TEST_CREDENTIALS_INPUT, + "payload": example_payload, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("payload", example_payload), + ("triggered_by_user", example_payload["sender"]), + ("event", example_payload["action"]), + ("number", example_payload["number"]), + ("pull_request", example_payload["pull_request"]), + ("pull_request_url", example_payload["pull_request"]["html_url"]), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + yield from super().run(input_data, **kwargs) + yield "event", input_data.payload["action"] + yield "number", input_data.payload["number"] + yield "pull_request", input_data.payload["pull_request"] + yield "pull_request_url", input_data.payload["pull_request"]["html_url"] + + +# --8<-- [end:GithubTriggerExample] diff --git a/autogpt_platform/backend/backend/blocks/google/_auth.py b/autogpt_platform/backend/backend/blocks/google/_auth.py new file mode 100644 index 000000000000..2b364dbd4091 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/google/_auth.py @@ -0,0 +1,54 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials +from backend.integrations.providers import ProviderName +from backend.util.settings import Secrets + +# --8<-- [start:GoogleOAuthIsConfigured] +secrets = Secrets() +GOOGLE_OAUTH_IS_CONFIGURED = bool( + secrets.google_client_id and secrets.google_client_secret +) +# --8<-- [end:GoogleOAuthIsConfigured] +GoogleCredentials = OAuth2Credentials +GoogleCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.GOOGLE], Literal["oauth2"] +] + + +def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput: + """ + Creates a Google credentials input on a block. + + Params: + scopes: The authorization scopes needed for the block to work. 
+ """ + return CredentialsField( + required_scopes=set(scopes), + description="The Google integration requires OAuth2 authentication.", + ) + + +TEST_CREDENTIALS = OAuth2Credentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="google", + access_token=SecretStr("mock-google-access-token"), + refresh_token=SecretStr("mock-google-refresh-token"), + access_token_expires_at=1234567890, + scopes=[ + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.send", + ], + title="Mock Google OAuth2 Credentials", + username="mock-google-username", + refresh_token_expires_at=1234567890, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} diff --git a/autogpt_platform/backend/backend/blocks/google/gmail.py b/autogpt_platform/backend/backend/blocks/google/gmail.py new file mode 100644 index 000000000000..d0168e4a82be --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/google/gmail.py @@ -0,0 +1,529 @@ +import base64 +from email.utils import parseaddr +from typing import List + +from google.oauth2.credentials import Credentials +from googleapiclient.discovery import build +from pydantic import BaseModel + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._auth import ( + GOOGLE_OAUTH_IS_CONFIGURED, + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GoogleCredentials, + GoogleCredentialsField, + GoogleCredentialsInput, +) + + +class Attachment(BaseModel): + filename: str + content_type: str + size: int + attachment_id: str + + +class Email(BaseModel): + id: str + subject: str + snippet: str + from_: str + to: str + date: str + body: str = "" # Default to an empty string + sizeEstimate: int + attachments: List[Attachment] + + +class GmailReadBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/gmail.readonly"] + ) + query: str = SchemaField( + description="Search query for reading emails", + default="is:unread", + ) + max_results: int = SchemaField( + description="Maximum number of emails to retrieve", + default=10, + ) + + class Output(BlockSchema): + email: Email = SchemaField( + description="Email data", + ) + emails: list[Email] = SchemaField( + description="List of email data", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="25310c70-b89b-43ba-b25c-4dfa7e2a481c", + description="This block reads emails from Gmail.", + categories={BlockCategory.COMMUNICATION}, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + input_schema=GmailReadBlock.Input, + output_schema=GmailReadBlock.Output, + test_input={ + "query": "is:unread", + "max_results": 5, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "email", + { + "id": "1", + "subject": "Test Email", + "snippet": "This is a test email", + "from_": "test@example.com", + "to": "recipient@example.com", + "date": "2024-01-01", + "body": "This is a test email", + "sizeEstimate": 100, + "attachments": [], + }, + ), + ( + "emails", + [ + { + "id": "1", + "subject": "Test Email", + "snippet": "This is a test email", + "from_": "test@example.com", + "to": "recipient@example.com", + "date": "2024-01-01", + "body": "This is a test email", + "sizeEstimate": 100, + "attachments": [], + } + ], + ), + ], + 
test_mock={ + "_read_emails": lambda *args, **kwargs: [ + { + "id": "1", + "subject": "Test Email", + "snippet": "This is a test email", + "from_": "test@example.com", + "to": "recipient@example.com", + "date": "2024-01-01", + "body": "This is a test email", + "sizeEstimate": 100, + "attachments": [], + } + ], + "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"}, + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = self._build_service(credentials, **kwargs) + messages = self._read_emails(service, input_data.query, input_data.max_results) + for email in messages: + yield "email", email + yield "emails", messages + + @staticmethod + def _build_service(credentials: GoogleCredentials, **kwargs): + creds = Credentials( + token=( + credentials.access_token.get_secret_value() + if credentials.access_token + else None + ), + refresh_token=( + credentials.refresh_token.get_secret_value() + if credentials.refresh_token + else None + ), + token_uri="https://oauth2.googleapis.com/token", + client_id=kwargs.get("client_id"), + client_secret=kwargs.get("client_secret"), + scopes=credentials.scopes, + ) + return build("gmail", "v1", credentials=creds) + + def _read_emails( + self, service, query: str | None, max_results: int | None + ) -> list[Email]: + results = ( + service.users() + .messages() + .list(userId="me", q=query or "", maxResults=max_results or 10) + .execute() + ) + messages = results.get("messages", []) + + email_data = [] + for message in messages: + msg = ( + service.users() + .messages() + .get(userId="me", id=message["id"], format="full") + .execute() + ) + + headers = { + header["name"].lower(): header["value"] + for header in msg["payload"]["headers"] + } + + attachments = self._get_attachments(service, msg) + + email = Email( + id=msg["id"], + subject=headers.get("subject", "No Subject"), + snippet=msg["snippet"], + from_=parseaddr(headers.get("from", ""))[1], + to=parseaddr(headers.get("to", ""))[1], + date=headers.get("date", ""), + body=self._get_email_body(msg), + sizeEstimate=msg["sizeEstimate"], + attachments=attachments, + ) + email_data.append(email) + + return email_data + + def _get_email_body(self, msg): + if "parts" in msg["payload"]: + for part in msg["payload"]["parts"]: + if part["mimeType"] == "text/plain": + return base64.urlsafe_b64decode(part["body"]["data"]).decode( + "utf-8" + ) + elif msg["payload"]["mimeType"] == "text/plain": + return base64.urlsafe_b64decode(msg["payload"]["body"]["data"]).decode( + "utf-8" + ) + + return "This email does not contain a text body." 
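+    # _get_attachments below only walks the top-level payload parts: for each named
+    # part it records the filename, MIME type, size and attachment_id, while the
+    # actual bytes are fetched separately via download_attachment(). Attachments
+    # nested inside multipart sub-parts are not traversed here.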
+ + def _get_attachments(self, service, message): + attachments = [] + if "parts" in message["payload"]: + for part in message["payload"]["parts"]: + if part["filename"]: + attachment = Attachment( + filename=part["filename"], + content_type=part["mimeType"], + size=int(part["body"].get("size", 0)), + attachment_id=part["body"]["attachmentId"], + ) + attachments.append(attachment) + return attachments + + # Add a new method to download attachment content + def download_attachment(self, service, message_id: str, attachment_id: str): + attachment = ( + service.users() + .messages() + .attachments() + .get(userId="me", messageId=message_id, id=attachment_id) + .execute() + ) + file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8")) + return file_data + + +class GmailSendBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/gmail.send"] + ) + to: str = SchemaField( + description="Recipient email address", + ) + subject: str = SchemaField( + description="Email subject", + ) + body: str = SchemaField( + description="Email body", + ) + + class Output(BlockSchema): + result: dict = SchemaField( + description="Send confirmation", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="6c27abc2-e51d-499e-a85f-5a0041ba94f0", + description="This block sends an email using Gmail.", + categories={BlockCategory.COMMUNICATION}, + input_schema=GmailSendBlock.Input, + output_schema=GmailSendBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "to": "recipient@example.com", + "subject": "Test Email", + "body": "This is a test email sent from GmailSendBlock.", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("result", {"id": "1", "status": "sent"}), + ], + test_mock={ + "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"}, + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = GmailReadBlock._build_service(credentials, **kwargs) + send_result = self._send_email( + service, input_data.to, input_data.subject, input_data.body + ) + yield "result", send_result + + def _send_email(self, service, to: str, subject: str, body: str) -> dict: + if not to or not subject or not body: + raise ValueError("To, subject, and body are required for sending an email") + message = self._create_message(to, subject, body) + sent_message = ( + service.users().messages().send(userId="me", body=message).execute() + ) + return {"id": sent_message["id"], "status": "sent"} + + def _create_message(self, to: str, subject: str, body: str) -> dict: + import base64 + from email.mime.text import MIMEText + + message = MIMEText(body) + message["to"] = to + message["subject"] = subject + raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8") + return {"raw": raw_message} + + +class GmailListLabelsBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/gmail.labels"] + ) + + class Output(BlockSchema): + result: list[dict] = SchemaField( + description="List of labels", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7", + description="This block lists all labels in Gmail.", + 
categories={BlockCategory.COMMUNICATION}, + input_schema=GmailListLabelsBlock.Input, + output_schema=GmailListLabelsBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + [ + {"id": "Label_1", "name": "Important"}, + {"id": "Label_2", "name": "Work"}, + ], + ), + ], + test_mock={ + "_list_labels": lambda *args, **kwargs: [ + {"id": "Label_1", "name": "Important"}, + {"id": "Label_2", "name": "Work"}, + ], + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = GmailReadBlock._build_service(credentials, **kwargs) + labels = self._list_labels(service) + yield "result", labels + + def _list_labels(self, service) -> list[dict]: + results = service.users().labels().list(userId="me").execute() + labels = results.get("labels", []) + return [{"id": label["id"], "name": label["name"]} for label in labels] + + +class GmailAddLabelBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/gmail.modify"] + ) + message_id: str = SchemaField( + description="Message ID to add label to", + ) + label_name: str = SchemaField( + description="Label name to add", + ) + + class Output(BlockSchema): + result: dict = SchemaField( + description="Label addition result", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="f884b2fb-04f4-4265-9658-14f433926ac9", + description="This block adds a label to a Gmail message.", + categories={BlockCategory.COMMUNICATION}, + input_schema=GmailAddLabelBlock.Input, + output_schema=GmailAddLabelBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "message_id": "12345", + "label_name": "Important", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + {"status": "Label added successfully", "label_id": "Label_1"}, + ), + ], + test_mock={ + "_add_label": lambda *args, **kwargs: { + "status": "Label added successfully", + "label_id": "Label_1", + }, + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = GmailReadBlock._build_service(credentials, **kwargs) + result = self._add_label(service, input_data.message_id, input_data.label_name) + yield "result", result + + def _add_label(self, service, message_id: str, label_name: str) -> dict: + label_id = self._get_or_create_label(service, label_name) + service.users().messages().modify( + userId="me", id=message_id, body={"addLabelIds": [label_id]} + ).execute() + return {"status": "Label added successfully", "label_id": label_id} + + def _get_or_create_label(self, service, label_name: str) -> str: + label_id = self._get_label_id(service, label_name) + if not label_id: + label = ( + service.users() + .labels() + .create(userId="me", body={"name": label_name}) + .execute() + ) + label_id = label["id"] + return label_id + + def _get_label_id(self, service, label_name: str) -> str | None: + results = service.users().labels().list(userId="me").execute() + labels = results.get("labels", []) + for label in labels: + if label["name"] == label_name: + return label["id"] + return None + + +class GmailRemoveLabelBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + 
["https://www.googleapis.com/auth/gmail.modify"] + ) + message_id: str = SchemaField( + description="Message ID to remove label from", + ) + label_name: str = SchemaField( + description="Label name to remove", + ) + + class Output(BlockSchema): + result: dict = SchemaField( + description="Label removal result", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="0afc0526-aba1-4b2b-888e-a22b7c3f359d", + description="This block removes a label from a Gmail message.", + categories={BlockCategory.COMMUNICATION}, + input_schema=GmailRemoveLabelBlock.Input, + output_schema=GmailRemoveLabelBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "message_id": "12345", + "label_name": "Important", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + {"status": "Label removed successfully", "label_id": "Label_1"}, + ), + ], + test_mock={ + "_remove_label": lambda *args, **kwargs: { + "status": "Label removed successfully", + "label_id": "Label_1", + }, + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = GmailReadBlock._build_service(credentials, **kwargs) + result = self._remove_label( + service, input_data.message_id, input_data.label_name + ) + yield "result", result + + def _remove_label(self, service, message_id: str, label_name: str) -> dict: + label_id = self._get_label_id(service, label_name) + if label_id: + service.users().messages().modify( + userId="me", id=message_id, body={"removeLabelIds": [label_id]} + ).execute() + return {"status": "Label removed successfully", "label_id": label_id} + else: + return {"status": "Label not found", "label_name": label_name} + + def _get_label_id(self, service, label_name: str) -> str | None: + results = service.users().labels().list(userId="me").execute() + labels = results.get("labels", []) + for label in labels: + if label["name"] == label_name: + return label["id"] + return None diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py new file mode 100644 index 000000000000..e7878ff4b606 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/google/sheets.py @@ -0,0 +1,184 @@ +from google.oauth2.credentials import Credentials +from googleapiclient.discovery import build + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +from ._auth import ( + GOOGLE_OAUTH_IS_CONFIGURED, + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + GoogleCredentials, + GoogleCredentialsField, + GoogleCredentialsInput, +) + + +class GoogleSheetsReadBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/spreadsheets.readonly"] + ) + spreadsheet_id: str = SchemaField( + description="The ID of the spreadsheet to read from", + ) + range: str = SchemaField( + description="The A1 notation of the range to read", + ) + + class Output(BlockSchema): + result: list[list[str]] = SchemaField( + description="The data read from the spreadsheet", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="5724e902-3635-47e9-a108-aaa0263a4988", + description="This block reads data from a Google Sheets spreadsheet.", + categories={BlockCategory.DATA}, + 
input_schema=GoogleSheetsReadBlock.Input, + output_schema=GoogleSheetsReadBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "range": "Sheet1!A1:B2", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + [ + ["Name", "Score"], + ["Alice", "85"], + ], + ), + ], + test_mock={ + "_read_sheet": lambda *args, **kwargs: [ + ["Name", "Score"], + ["Alice", "85"], + ], + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = self._build_service(credentials, **kwargs) + data = self._read_sheet(service, input_data.spreadsheet_id, input_data.range) + yield "result", data + + @staticmethod + def _build_service(credentials: GoogleCredentials, **kwargs): + creds = Credentials( + token=( + credentials.access_token.get_secret_value() + if credentials.access_token + else None + ), + refresh_token=( + credentials.refresh_token.get_secret_value() + if credentials.refresh_token + else None + ), + token_uri="https://oauth2.googleapis.com/token", + client_id=kwargs.get("client_id"), + client_secret=kwargs.get("client_secret"), + scopes=credentials.scopes, + ) + return build("sheets", "v4", credentials=creds) + + def _read_sheet(self, service, spreadsheet_id: str, range: str) -> list[list[str]]: + sheet = service.spreadsheets() + result = sheet.values().get(spreadsheetId=spreadsheet_id, range=range).execute() + return result.get("values", []) + + +class GoogleSheetsWriteBlock(Block): + class Input(BlockSchema): + credentials: GoogleCredentialsInput = GoogleCredentialsField( + ["https://www.googleapis.com/auth/spreadsheets"] + ) + spreadsheet_id: str = SchemaField( + description="The ID of the spreadsheet to write to", + ) + range: str = SchemaField( + description="The A1 notation of the range to write", + ) + values: list[list[str]] = SchemaField( + description="The data to write to the spreadsheet", + ) + + class Output(BlockSchema): + result: dict = SchemaField( + description="The result of the write operation", + ) + error: str = SchemaField( + description="Error message if any", + ) + + def __init__(self): + super().__init__( + id="d9291e87-301d-47a8-91fe-907fb55460e5", + description="This block writes data to a Google Sheets spreadsheet.", + categories={BlockCategory.DATA}, + input_schema=GoogleSheetsWriteBlock.Input, + output_schema=GoogleSheetsWriteBlock.Output, + disabled=not GOOGLE_OAUTH_IS_CONFIGURED, + test_input={ + "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms", + "range": "Sheet1!A1:B2", + "values": [ + ["Name", "Score"], + ["Bob", "90"], + ], + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "result", + {"updatedCells": 4, "updatedColumns": 2, "updatedRows": 2}, + ), + ], + test_mock={ + "_write_sheet": lambda *args, **kwargs: { + "updatedCells": 4, + "updatedColumns": 2, + "updatedRows": 2, + }, + }, + ) + + def run( + self, input_data: Input, *, credentials: GoogleCredentials, **kwargs + ) -> BlockOutput: + service = GoogleSheetsReadBlock._build_service(credentials, **kwargs) + result = self._write_sheet( + service, + input_data.spreadsheet_id, + input_data.range, + input_data.values, + ) + yield "result", result + + def _write_sheet( + self, service, spreadsheet_id: str, range: str, values: list[list[str]] + ) -> dict: + body = {"values": values} + result = ( + service.spreadsheets() + .values() + .update( 
+ spreadsheetId=spreadsheet_id, + range=range, + valueInputOption="USER_ENTERED", + body=body, + ) + .execute() + ) + return result diff --git a/autogpt_platform/backend/backend/blocks/google_maps.py b/autogpt_platform/backend/backend/blocks/google_maps.py new file mode 100644 index 000000000000..9e7f79353123 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/google_maps.py @@ -0,0 +1,148 @@ +from typing import Literal + +import googlemaps +from pydantic import BaseModel, SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="google_maps", + api_key=SecretStr("mock-google-maps-api-key"), + title="Mock Google Maps API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class Place(BaseModel): + name: str + address: str + phone: str + rating: float + reviews: int + website: str + + +class GoogleMapsSearchBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.GOOGLE_MAPS], Literal["api_key"] + ] = CredentialsField(description="Google Maps API Key") + query: str = SchemaField( + description="Search query for local businesses", + placeholder="e.g., 'restaurants in New York'", + ) + radius: int = SchemaField( + description="Search radius in meters (max 50000)", + default=5000, + ge=1, + le=50000, + ) + max_results: int = SchemaField( + description="Maximum number of results to return (max 60)", + default=20, + ge=1, + le=60, + ) + + class Output(BlockSchema): + place: Place = SchemaField(description="Place found") + error: str = SchemaField(description="Error message if the search failed") + + def __init__(self): + super().__init__( + id="f47ac10b-58cc-4372-a567-0e02b2c3d479", + description="This block searches for local businesses using Google Maps API.", + categories={BlockCategory.SEARCH}, + input_schema=GoogleMapsSearchBlock.Input, + output_schema=GoogleMapsSearchBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "query": "restaurants in new york", + "radius": 5000, + "max_results": 5, + }, + test_output=[ + ( + "place", + { + "name": "Test Restaurant", + "address": "123 Test St, New York, NY 10001", + "phone": "+1 (555) 123-4567", + "rating": 4.5, + "reviews": 100, + "website": "https://testrestaurant.com", + }, + ), + ], + test_mock={ + "search_places": lambda *args, **kwargs: [ + { + "name": "Test Restaurant", + "address": "123 Test St, New York, NY 10001", + "phone": "+1 (555) 123-4567", + "rating": 4.5, + "reviews": 100, + "website": "https://testrestaurant.com", + } + ] + }, + test_credentials=TEST_CREDENTIALS, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + places = self.search_places( + credentials.api_key, + input_data.query, + input_data.radius, + input_data.max_results, + ) + for place in places: + yield "place", place + + def search_places(self, api_key: SecretStr, query, radius, max_results): + client = googlemaps.Client(key=api_key.get_secret_value()) + return self._search_places(client, query, radius, max_results) + + def _search_places(self, client, query, radius, max_results): + results = 
[] + next_page_token = None + while len(results) < max_results: + response = client.places( + query=query, + radius=radius, + page_token=next_page_token, + ) + for place in response["results"]: + if len(results) >= max_results: + break + place_details = client.place(place["place_id"])["result"] + results.append( + Place( + name=place_details.get("name", ""), + address=place_details.get("formatted_address", ""), + phone=place_details.get("formatted_phone_number", ""), + rating=place_details.get("rating", 0), + reviews=place_details.get("user_ratings_total", 0), + website=place_details.get("website", ""), + ) + ) + next_page_token = response.get("next_page_token") + if not next_page_token: + break + return results diff --git a/autogpt_platform/backend/backend/blocks/helpers/http.py b/autogpt_platform/backend/backend/blocks/helpers/http.py new file mode 100644 index 000000000000..33579ba0d9c9 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/helpers/http.py @@ -0,0 +1,14 @@ +from typing import Any, Optional + +from backend.util.request import requests + + +class GetRequest: + @classmethod + def get_request( + cls, url: str, headers: Optional[dict] = None, json: bool = False + ) -> Any: + if headers is None: + headers = {} + response = requests.get(url, headers=headers) + return response.json() if json else response.text diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py new file mode 100644 index 000000000000..099e2c3c1e68 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -0,0 +1,87 @@ +import json +from enum import Enum +from typing import Any + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class HttpMethod(Enum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + PATCH = "PATCH" + OPTIONS = "OPTIONS" + HEAD = "HEAD" + + +class SendWebRequestBlock(Block): + class Input(BlockSchema): + url: str = SchemaField( + description="The URL to send the request to", + placeholder="https://api.example.com", + ) + method: HttpMethod = SchemaField( + description="The HTTP method to use for the request", + default=HttpMethod.POST, + ) + headers: dict[str, str] = SchemaField( + description="The headers to include in the request", + default={}, + ) + json_format: bool = SchemaField( + title="JSON format", + description="Whether to send and receive body as JSON", + default=True, + ) + body: Any = SchemaField( + description="The body of the request", + default=None, + ) + + class Output(BlockSchema): + response: object = SchemaField(description="The response from the server") + client_error: object = SchemaField(description="The error on 4xx status codes") + server_error: object = SchemaField(description="The error on 5xx status codes") + + def __init__(self): + super().__init__( + id="6595ae1f-b924-42cb-9a41-551a0611c4b4", + description="This block makes an HTTP request to the given URL.", + categories={BlockCategory.OUTPUT}, + input_schema=SendWebRequestBlock.Input, + output_schema=SendWebRequestBlock.Output, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + body = input_data.body + + if input_data.json_format: + if isinstance(body, str): + try: + # Try to parse as JSON first + body = json.loads(body) + except json.JSONDecodeError: + # If it's not valid JSON and just plain text, + # we should send it as plain text instead + input_data.json_format = False + + 
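+        # At this point `body` is either a parsed JSON value (sent via `json=`) or
+        # the original string/None (sent via `data=`); the status-code checks after
+        # the request route 2xx to "response", 4xx to "client_error" and 5xx to
+        # "server_error".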
response = requests.request( + input_data.method.value, + input_data.url, + headers=input_data.headers, + json=body if input_data.json_format else None, + data=body if not input_data.json_format else None, + ) + result = response.json() if input_data.json_format else response.text + + if response.status_code // 100 == 2: + yield "response", result + elif response.status_code // 100 == 4: + yield "client_error", result + elif response.status_code // 100 == 5: + yield "server_error", result + else: + raise ValueError(f"Unexpected status code: {response.status_code}") diff --git a/autogpt_platform/backend/backend/blocks/hubspot/_auth.py b/autogpt_platform/backend/backend/blocks/hubspot/_auth.py new file mode 100644 index 000000000000..b32456d5d5b2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/hubspot/_auth.py @@ -0,0 +1,35 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +HubSpotCredentials = APIKeyCredentials +HubSpotCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.HUBSPOT], + Literal["api_key"], +] + + +def HubSpotCredentialsField() -> HubSpotCredentialsInput: + """Creates a HubSpot credentials input on a block.""" + return CredentialsField( + description="The HubSpot integration requires an API Key.", + ) + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="hubspot", + api_key=SecretStr("mock-hubspot-api-key"), + title="Mock HubSpot API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} diff --git a/autogpt_platform/backend/backend/blocks/hubspot/company.py b/autogpt_platform/backend/backend/blocks/hubspot/company.py new file mode 100644 index 000000000000..3e9406103f57 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/hubspot/company.py @@ -0,0 +1,106 @@ +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class HubSpotCompanyBlock(Block): + class Input(BlockSchema): + credentials: HubSpotCredentialsInput = HubSpotCredentialsField() + operation: str = SchemaField( + description="Operation to perform (create, update, get)", default="get" + ) + company_data: dict = SchemaField( + description="Company data for create/update operations", default={} + ) + domain: str = SchemaField( + description="Company domain for get/update operations", default="" + ) + + class Output(BlockSchema): + company: dict = SchemaField(description="Company information") + status: str = SchemaField(description="Operation status") + + def __init__(self): + super().__init__( + id="3ae02219-d540-47cd-9c78-3ad6c7d9820a", + description="Manages HubSpot companies - create, update, and retrieve company information", + categories={BlockCategory.CRM}, + input_schema=HubSpotCompanyBlock.Input, + output_schema=HubSpotCompanyBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs + ) -> BlockOutput: + base_url = "https://api.hubapi.com/crm/v3/objects/companies" + headers = { + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + 
"Content-Type": "application/json", + } + + if input_data.operation == "create": + response = requests.post( + base_url, headers=headers, json={"properties": input_data.company_data} + ) + result = response.json() + yield "company", result + yield "status", "created" + + elif input_data.operation == "get": + search_url = f"{base_url}/search" + search_data = { + "filterGroups": [ + { + "filters": [ + { + "propertyName": "domain", + "operator": "EQ", + "value": input_data.domain, + } + ] + } + ] + } + response = requests.post(search_url, headers=headers, json=search_data) + result = response.json() + yield "company", result.get("results", [{}])[0] + yield "status", "retrieved" + + elif input_data.operation == "update": + # First get company ID by domain + search_response = requests.post( + f"{base_url}/search", + headers=headers, + json={ + "filterGroups": [ + { + "filters": [ + { + "propertyName": "domain", + "operator": "EQ", + "value": input_data.domain, + } + ] + } + ] + }, + ) + company_id = search_response.json().get("results", [{}])[0].get("id") + + if company_id: + response = requests.patch( + f"{base_url}/{company_id}", + headers=headers, + json={"properties": input_data.company_data}, + ) + result = response.json() + yield "company", result + yield "status", "updated" + else: + yield "company", {} + yield "status", "company_not_found" diff --git a/autogpt_platform/backend/backend/blocks/hubspot/contact.py b/autogpt_platform/backend/backend/blocks/hubspot/contact.py new file mode 100644 index 000000000000..e4a01cbb3bd4 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/hubspot/contact.py @@ -0,0 +1,106 @@ +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class HubSpotContactBlock(Block): + class Input(BlockSchema): + credentials: HubSpotCredentialsInput = HubSpotCredentialsField() + operation: str = SchemaField( + description="Operation to perform (create, update, get)", default="get" + ) + contact_data: dict = SchemaField( + description="Contact data for create/update operations", default={} + ) + email: str = SchemaField( + description="Email address for get/update operations", default="" + ) + + class Output(BlockSchema): + contact: dict = SchemaField(description="Contact information") + status: str = SchemaField(description="Operation status") + + def __init__(self): + super().__init__( + id="5267326e-c4c1-4016-9f54-4e72ad02f813", + description="Manages HubSpot contacts - create, update, and retrieve contact information", + categories={BlockCategory.CRM}, + input_schema=HubSpotContactBlock.Input, + output_schema=HubSpotContactBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs + ) -> BlockOutput: + base_url = "https://api.hubapi.com/crm/v3/objects/contacts" + headers = { + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + "Content-Type": "application/json", + } + + if input_data.operation == "create": + response = requests.post( + base_url, headers=headers, json={"properties": input_data.contact_data} + ) + result = response.json() + yield "contact", result + yield "status", "created" + + elif input_data.operation == "get": + # Search for contact by email + search_url = f"{base_url}/search" + search_data = { + "filterGroups": [ + { + "filters": [ + { + 
"propertyName": "email", + "operator": "EQ", + "value": input_data.email, + } + ] + } + ] + } + response = requests.post(search_url, headers=headers, json=search_data) + result = response.json() + yield "contact", result.get("results", [{}])[0] + yield "status", "retrieved" + + elif input_data.operation == "update": + search_response = requests.post( + f"{base_url}/search", + headers=headers, + json={ + "filterGroups": [ + { + "filters": [ + { + "propertyName": "email", + "operator": "EQ", + "value": input_data.email, + } + ] + } + ] + }, + ) + contact_id = search_response.json().get("results", [{}])[0].get("id") + + if contact_id: + response = requests.patch( + f"{base_url}/{contact_id}", + headers=headers, + json={"properties": input_data.contact_data}, + ) + result = response.json() + yield "contact", result + yield "status", "updated" + else: + yield "contact", {} + yield "status", "contact_not_found" diff --git a/autogpt_platform/backend/backend/blocks/hubspot/engagement.py b/autogpt_platform/backend/backend/blocks/hubspot/engagement.py new file mode 100644 index 000000000000..427cf051d599 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/hubspot/engagement.py @@ -0,0 +1,121 @@ +from datetime import datetime, timedelta + +from backend.blocks.hubspot._auth import ( + HubSpotCredentials, + HubSpotCredentialsField, + HubSpotCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class HubSpotEngagementBlock(Block): + class Input(BlockSchema): + credentials: HubSpotCredentialsInput = HubSpotCredentialsField() + operation: str = SchemaField( + description="Operation to perform (send_email, track_engagement)", + default="send_email", + ) + email_data: dict = SchemaField( + description="Email data including recipient, subject, content", + default={}, + ) + contact_id: str = SchemaField( + description="Contact ID for engagement tracking", default="" + ) + timeframe_days: int = SchemaField( + description="Number of days to look back for engagement", + default=30, + optional=True, + ) + + class Output(BlockSchema): + result: dict = SchemaField(description="Operation result") + status: str = SchemaField(description="Operation status") + + def __init__(self): + super().__init__( + id="c6524385-7d87-49d6-a470-248bd29ca765", + description="Manages HubSpot engagements - sends emails and tracks engagement metrics", + categories={BlockCategory.CRM, BlockCategory.COMMUNICATION}, + input_schema=HubSpotEngagementBlock.Input, + output_schema=HubSpotEngagementBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs + ) -> BlockOutput: + base_url = "https://api.hubapi.com" + headers = { + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + "Content-Type": "application/json", + } + + if input_data.operation == "send_email": + # Using the email send API + email_url = f"{base_url}/crm/v3/objects/emails" + email_data = { + "properties": { + "hs_timestamp": datetime.now().isoformat(), + "hubspot_owner_id": "1", # This should be configurable + "hs_email_direction": "OUTBOUND", + "hs_email_status": "SEND", + "hs_email_subject": input_data.email_data.get("subject"), + "hs_email_text": input_data.email_data.get("content"), + "hs_email_to_email": input_data.email_data.get("recipient"), + } + } + + response = requests.post(email_url, headers=headers, json=email_data) + result = response.json() + yield "result", 
result + yield "status", "email_sent" + + elif input_data.operation == "track_engagement": + # Get engagement events for the contact + from_date = datetime.now() - timedelta(days=input_data.timeframe_days) + engagement_url = ( + f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement" + ) + + params = {"limit": 100, "after": from_date.isoformat()} + + response = requests.get(engagement_url, headers=headers, params=params) + engagements = response.json() + + # Process engagement metrics + metrics = { + "email_opens": 0, + "email_clicks": 0, + "email_replies": 0, + "last_engagement": None, + "engagement_score": 0, + } + + for engagement in engagements.get("results", []): + eng_type = engagement.get("properties", {}).get("hs_engagement_type") + if eng_type == "EMAIL": + metrics["email_opens"] += 1 + elif eng_type == "EMAIL_CLICK": + metrics["email_clicks"] += 1 + elif eng_type == "EMAIL_REPLY": + metrics["email_replies"] += 1 + + # Update last engagement time + eng_time = engagement.get("properties", {}).get("hs_timestamp") + if eng_time and ( + not metrics["last_engagement"] + or eng_time > metrics["last_engagement"] + ): + metrics["last_engagement"] = eng_time + + # Calculate simple engagement score + metrics["engagement_score"] = ( + metrics["email_opens"] + + metrics["email_clicks"] * 2 + + metrics["email_replies"] * 3 + ) + + yield "result", metrics + yield "status", "engagement_tracked" diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py new file mode 100644 index 000000000000..178c667642c2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -0,0 +1,279 @@ +from enum import Enum +from typing import Any, Dict, Literal, Optional + +from pydantic import SecretStr +from requests.exceptions import RequestException + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="ideogram", + api_key=SecretStr("mock-ideogram-api-key"), + title="Mock Ideogram API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class IdeogramModelName(str, Enum): + V2 = "V_2" + V1 = "V_1" + V1_TURBO = "V_1_TURBO" + V2_TURBO = "V_2_TURBO" + + +class MagicPromptOption(str, Enum): + AUTO = "AUTO" + ON = "ON" + OFF = "OFF" + + +class StyleType(str, Enum): + AUTO = "AUTO" + GENERAL = "GENERAL" + REALISTIC = "REALISTIC" + DESIGN = "DESIGN" + RENDER_3D = "RENDER_3D" + ANIME = "ANIME" + + +class ColorPalettePreset(str, Enum): + NONE = "NONE" + EMBER = "EMBER" + FRESH = "FRESH" + JUNGLE = "JUNGLE" + MAGIC = "MAGIC" + MELON = "MELON" + MOSAIC = "MOSAIC" + PASTEL = "PASTEL" + ULTRAMARINE = "ULTRAMARINE" + + +class AspectRatio(str, Enum): + ASPECT_10_16 = "ASPECT_10_16" + ASPECT_16_10 = "ASPECT_16_10" + ASPECT_9_16 = "ASPECT_9_16" + ASPECT_16_9 = "ASPECT_16_9" + ASPECT_3_2 = "ASPECT_3_2" + ASPECT_2_3 = "ASPECT_2_3" + ASPECT_4_3 = "ASPECT_4_3" + ASPECT_3_4 = "ASPECT_3_4" + ASPECT_1_1 = "ASPECT_1_1" + ASPECT_1_3 = "ASPECT_1_3" + ASPECT_3_1 = "ASPECT_3_1" + + +class UpscaleOption(str, Enum): + AI_UPSCALE = "AI Upscale" + NO_UPSCALE = "No Upscale" + 
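+# Selecting AI_UPSCALE is meant to trigger an extra upscaling pass on the generated
+# image (cf. the upscale_image entry in the block's test mocks further down), while
+# NO_UPSCALE returns the freshly generated image URL unchanged.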
+ +class IdeogramModelBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.IDEOGRAM], Literal["api_key"] + ] = CredentialsField( + description="The Ideogram integration can be used with any API key with sufficient permissions for the blocks it is used on.", + ) + prompt: str = SchemaField( + description="Text prompt for image generation", + placeholder="e.g., 'A futuristic cityscape at sunset'", + title="Prompt", + ) + ideogram_model_name: IdeogramModelName = SchemaField( + description="The name of the Image Generation Model, e.g., V_2", + default=IdeogramModelName.V2, + title="Image Generation Model", + advanced=False, + ) + aspect_ratio: AspectRatio = SchemaField( + description="Aspect ratio for the generated image", + default=AspectRatio.ASPECT_1_1, + title="Aspect Ratio", + advanced=False, + ) + upscale: UpscaleOption = SchemaField( + description="Upscale the generated image", + default=UpscaleOption.NO_UPSCALE, + title="Upscale Image", + advanced=False, + ) + magic_prompt_option: MagicPromptOption = SchemaField( + description="Whether to use MagicPrompt for enhancing the request", + default=MagicPromptOption.AUTO, + title="Magic Prompt Option", + advanced=True, + ) + seed: Optional[int] = SchemaField( + description="Random seed. Set for reproducible generation", + default=None, + title="Seed", + advanced=True, + ) + style_type: StyleType = SchemaField( + description="Style type to apply, applicable for V_2 and above", + default=StyleType.AUTO, + title="Style Type", + advanced=True, + ) + negative_prompt: Optional[str] = SchemaField( + description="Description of what to exclude from the image", + default=None, + title="Negative Prompt", + advanced=True, + ) + color_palette_name: ColorPalettePreset = SchemaField( + description="Color palette preset name, choose 'None' to skip", + default=ColorPalettePreset.NONE, + title="Color Palette Preset", + advanced=True, + ) + + class Output(BlockSchema): + result: str = SchemaField(description="Generated image URL") + error: str = SchemaField(description="Error message if the model run failed") + + def __init__(self): + super().__init__( + id="6ab085e2-20b3-4055-bc3e-08036e01eca6", + description="This block runs Ideogram models with both simple and advanced settings.", + categories={BlockCategory.AI, BlockCategory.MULTIMEDIA}, + input_schema=IdeogramModelBlock.Input, + output_schema=IdeogramModelBlock.Output, + test_input={ + "ideogram_model_name": IdeogramModelName.V2, + "prompt": "A futuristic cityscape at sunset", + "aspect_ratio": AspectRatio.ASPECT_1_1, + "upscale": UpscaleOption.NO_UPSCALE, + "magic_prompt_option": MagicPromptOption.AUTO, + "seed": None, + "style_type": StyleType.AUTO, + "negative_prompt": None, + "color_palette_name": ColorPalettePreset.NONE, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ( + "result", + "https://ideogram.ai/api/images/test-generated-image-url.png", + ), + ], + test_mock={ + "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png", + "upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png", + }, + test_credentials=TEST_CREDENTIALS, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + seed = input_data.seed + + # Step 1: Generate the image + result = self.run_model( + api_key=credentials.api_key, 
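+            # The enum inputs are unwrapped to their plain string values before being
+            # sent to the API; the SecretStr API key is only revealed inside
+            # run_model() / upscale_image(), where it becomes the "Api-Key" header.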
+ model_name=input_data.ideogram_model_name.value, + prompt=input_data.prompt, + seed=seed, + aspect_ratio=input_data.aspect_ratio.value, + magic_prompt_option=input_data.magic_prompt_option.value, + style_type=input_data.style_type.value, + negative_prompt=input_data.negative_prompt, + color_palette_name=input_data.color_palette_name.value, + ) + + # Step 2: Upscale the image if requested + if input_data.upscale == UpscaleOption.AI_UPSCALE: + result = self.upscale_image( + api_key=credentials.api_key, + image_url=result, + ) + + yield "result", result + + def run_model( + self, + api_key: SecretStr, + model_name: str, + prompt: str, + seed: Optional[int], + aspect_ratio: str, + magic_prompt_option: str, + style_type: str, + negative_prompt: Optional[str], + color_palette_name: str, + ): + url = "https://api.ideogram.ai/generate" + headers = { + "Api-Key": api_key.get_secret_value(), + "Content-Type": "application/json", + } + + data: Dict[str, Any] = { + "image_request": { + "prompt": prompt, + "model": model_name, + "aspect_ratio": aspect_ratio, + "magic_prompt_option": magic_prompt_option, + "style_type": style_type, + } + } + + if seed is not None: + data["image_request"]["seed"] = seed + + if negative_prompt: + data["image_request"]["negative_prompt"] = negative_prompt + + if color_palette_name != "NONE": + data["image_request"]["color_palette"] = {"name": color_palette_name} + + try: + response = requests.post(url, json=data, headers=headers) + return response.json()["data"][0]["url"] + except RequestException as e: + raise Exception(f"Failed to fetch image: {str(e)}") + + def upscale_image(self, api_key: SecretStr, image_url: str): + url = "https://api.ideogram.ai/upscale" + headers = { + "Api-Key": api_key.get_secret_value(), + } + + try: + # Step 1: Download the image from the provided URL + image_response = requests.get(image_url) + + # Step 2: Send the downloaded image to the upscale API + files = { + "image_file": ("image.png", image_response.content, "image/png"), + } + + response = requests.post( + url, + headers=headers, + data={ + "image_request": "{}", # Empty JSON object + }, + files=files, + ) + + return response.json()["data"][0]["url"] + + except RequestException as e: + raise Exception(f"Failed to upscale image: {str(e)}") diff --git a/autogpt_platform/backend/backend/blocks/iteration.py b/autogpt_platform/backend/backend/blocks/iteration.py new file mode 100644 index 000000000000..16783af59f11 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/iteration.py @@ -0,0 +1,73 @@ +from typing import Any + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.json import json + + +class StepThroughItemsBlock(Block): + class Input(BlockSchema): + items: list = SchemaField( + advanced=False, + description="The list or dictionary of items to iterate over", + placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}", + default=[], + ) + items_object: dict = SchemaField( + advanced=False, + description="The list or dictionary of items to iterate over", + placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}", + default={}, + ) + items_str: str = SchemaField( + advanced=False, + description="The list or dictionary of items to iterate over", + placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}", + default="", + ) + + class Output(BlockSchema): + item: Any = SchemaField(description="The current item in the iteration") + key: Any = SchemaField( 
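+            # For list inputs, run() yields the integer index on this output; for dict
+            # inputs it iterates over values() and currently yields the value here too.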
+ description="The key or index of the current item in the iteration", + ) + + def __init__(self): + super().__init__( + id="f66a3543-28d3-4ab5-8945-9b336371e2ce", + input_schema=StepThroughItemsBlock.Input, + output_schema=StepThroughItemsBlock.Output, + categories={BlockCategory.LOGIC}, + description="Iterates over a list or dictionary and outputs each item.", + test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]}, + test_output=[ + ("item", 1), + ("key", 0), + ("item", 2), + ("key", 1), + ("item", 3), + ("key", 2), + ("item", {"key1": "value1", "key2": "value2"}), + ("key", 3), + ], + test_mock={}, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + for data in [input_data.items, input_data.items_object, input_data.items_str]: + if not data: + continue + if isinstance(data, str): + items = json.loads(data) + else: + items = data + if isinstance(items, dict): + # If items is a dictionary, iterate over its values + for item in items.values(): + yield "item", item + yield "key", item + else: + # If items is a list, iterate over the list + for index, item in enumerate(items): + yield "item", item + yield "key", index diff --git a/autogpt_platform/backend/backend/blocks/jina/_auth.py b/autogpt_platform/backend/backend/blocks/jina/_auth.py new file mode 100644 index 000000000000..5bf0ddd5cf4c --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/_auth.py @@ -0,0 +1,37 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +JinaCredentials = APIKeyCredentials +JinaCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.JINA], + Literal["api_key"], +] + + +def JinaCredentialsField() -> JinaCredentialsInput: + """ + Creates a Jina credentials input on a block. 
+ + """ + return CredentialsField( + description="The Jina integration can be used with an API Key.", + ) + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="jina", + api_key=SecretStr("mock-jina-api-key"), + title="Mock Jina API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} diff --git a/autogpt_platform/backend/backend/blocks/jina/chunking.py b/autogpt_platform/backend/backend/blocks/jina/chunking.py new file mode 100644 index 000000000000..24102e560fb7 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/chunking.py @@ -0,0 +1,67 @@ +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class JinaChunkingBlock(Block): + class Input(BlockSchema): + texts: list = SchemaField(description="List of texts to chunk") + + credentials: JinaCredentialsInput = JinaCredentialsField() + max_chunk_length: int = SchemaField( + description="Maximum length of each chunk", default=1000 + ) + return_tokens: bool = SchemaField( + description="Whether to return token information", default=False + ) + + class Output(BlockSchema): + chunks: list = SchemaField(description="List of chunked texts") + tokens: list = SchemaField( + description="List of token information for each chunk", optional=True + ) + + def __init__(self): + super().__init__( + id="806fb15e-830f-4796-8692-557d300ff43c", + description="Chunks texts using Jina AI's segmentation service", + categories={BlockCategory.AI, BlockCategory.TEXT}, + input_schema=JinaChunkingBlock.Input, + output_schema=JinaChunkingBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + url = "https://segment.jina.ai/" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + all_chunks = [] + all_tokens = [] + + for text in input_data.texts: + data = { + "content": text, + "return_tokens": str(input_data.return_tokens).lower(), + "return_chunks": "true", + "max_chunk_length": str(input_data.max_chunk_length), + } + + response = requests.post(url, headers=headers, json=data) + result = response.json() + + all_chunks.extend(result.get("chunks", [])) + if input_data.return_tokens: + all_tokens.extend(result.get("tokens", [])) + + yield "chunks", all_chunks + if input_data.return_tokens: + yield "tokens", all_tokens diff --git a/autogpt_platform/backend/backend/blocks/jina/embeddings.py b/autogpt_platform/backend/backend/blocks/jina/embeddings.py new file mode 100644 index 000000000000..67a17bf2c34f --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/embeddings.py @@ -0,0 +1,43 @@ +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class JinaEmbeddingBlock(Block): + class Input(BlockSchema): + texts: list = SchemaField(description="List of texts to embed") + credentials: JinaCredentialsInput = JinaCredentialsField() + model: str = SchemaField( + description="Jina 
embedding model to use", + default="jina-embeddings-v2-base-en", + ) + + class Output(BlockSchema): + embeddings: list = SchemaField(description="List of embeddings") + + def __init__(self): + super().__init__( + id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6", + description="Generates embeddings using Jina AI", + categories={BlockCategory.AI}, + input_schema=JinaEmbeddingBlock.Input, + output_schema=JinaEmbeddingBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + url = "https://api.jina.ai/v1/embeddings" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + data = {"input": input_data.texts, "model": input_data.model} + response = requests.post(url, headers=headers, json=data) + embeddings = [e["embedding"] for e in response.json()["data"]] + yield "embeddings", embeddings diff --git a/autogpt_platform/backend/backend/blocks/jina/fact_checker.py b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py new file mode 100644 index 000000000000..c9b8c08d1db8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/fact_checker.py @@ -0,0 +1,59 @@ +from urllib.parse import quote + +import requests + +from backend.blocks.jina._auth import ( + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class FactCheckerBlock(Block): + class Input(BlockSchema): + statement: str = SchemaField( + description="The statement to check for factuality" + ) + credentials: JinaCredentialsInput = JinaCredentialsField() + + class Output(BlockSchema): + factuality: float = SchemaField( + description="The factuality score of the statement" + ) + result: bool = SchemaField(description="The result of the factuality check") + reason: str = SchemaField(description="The reason for the factuality result") + error: str = SchemaField(description="Error message if the check fails") + + def __init__(self): + super().__init__( + id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6", + description="This block checks the factuality of a given statement using Jina AI's Grounding API.", + categories={BlockCategory.SEARCH}, + input_schema=FactCheckerBlock.Input, + output_schema=FactCheckerBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + encoded_statement = quote(input_data.statement) + url = f"https://g.jina.ai/{encoded_statement}" + + headers = { + "Accept": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + response = requests.get(url, headers=headers) + response.raise_for_status() + data = response.json() + + if "data" in data: + data = data["data"] + yield "factuality", data["factuality"] + yield "result", data["result"] + yield "reason", data["reason"] + else: + raise RuntimeError(f"Expected 'data' key not found in response: {data}") diff --git a/autogpt_platform/backend/backend/blocks/jina/search.py b/autogpt_platform/backend/backend/blocks/jina/search.py new file mode 100644 index 000000000000..c03ca3ce0134 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/jina/search.py @@ -0,0 +1,107 @@ +from groq._utils._utils import quote + +from backend.blocks.jina._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + JinaCredentials, + JinaCredentialsField, + JinaCredentialsInput, +) +from backend.blocks.search import 
GetRequest +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class SearchTheWebBlock(Block, GetRequest): + class Input(BlockSchema): + credentials: JinaCredentialsInput = JinaCredentialsField() + query: str = SchemaField(description="The search query to search the web for") + + class Output(BlockSchema): + results: str = SchemaField( + description="The search results including content from top 5 URLs" + ) + error: str = SchemaField(description="Error message if the search fails") + + def __init__(self): + super().__init__( + id="87840993-2053-44b7-8da4-187ad4ee518c", + description="This block searches the internet for the given search query.", + categories={BlockCategory.SEARCH}, + input_schema=SearchTheWebBlock.Input, + output_schema=SearchTheWebBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "query": "Artificial Intelligence", + }, + test_credentials=TEST_CREDENTIALS, + test_output=("results", "search content"), + test_mock={"get_request": lambda *args, **kwargs: "search content"}, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + # Encode the search query + encoded_query = quote(input_data.query) + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + # Prepend the Jina Search URL to the encoded query + jina_search_url = f"https://s.jina.ai/{encoded_query}" + results = self.get_request(jina_search_url, headers=headers, json=False) + + # Output the search results + yield "results", results + + +class ExtractWebsiteContentBlock(Block, GetRequest): + class Input(BlockSchema): + credentials: JinaCredentialsInput = JinaCredentialsField() + url: str = SchemaField(description="The URL to scrape the content from") + raw_content: bool = SchemaField( + default=False, + title="Raw Content", + description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content", + advanced=True, + ) + + class Output(BlockSchema): + content: str = SchemaField(description="The scraped content from the given URL") + error: str = SchemaField( + description="Error message if the content cannot be retrieved" + ) + + def __init__(self): + super().__init__( + id="436c3984-57fd-4b85-8e9a-459b356883bd", + description="This block scrapes the content from the given web URL.", + categories={BlockCategory.SEARCH}, + input_schema=ExtractWebsiteContentBlock.Input, + output_schema=ExtractWebsiteContentBlock.Output, + test_input={ + "url": "https://en.wikipedia.org/wiki/Artificial_intelligence", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=("content", "scraped content"), + test_mock={"get_request": lambda *args, **kwargs: "scraped content"}, + ) + + def run( + self, input_data: Input, *, credentials: JinaCredentials, **kwargs + ) -> BlockOutput: + if input_data.raw_content: + url = input_data.url + headers = {} + else: + url = f"https://r.jina.ai/{input_data.url}" + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + content = self.get_request(url, json=False, headers=headers) + yield "content", content diff --git a/autogpt_platform/backend/backend/blocks/linear/_api.py b/autogpt_platform/backend/backend/blocks/linear/_api.py new file mode 100644 index 000000000000..c43f46fa7077 --- /dev/null +++ 
b/autogpt_platform/backend/backend/blocks/linear/_api.py @@ -0,0 +1,272 @@ +from __future__ import annotations + +import json +from typing import Any, Dict, Optional + +from backend.blocks.linear._auth import LinearCredentials +from backend.blocks.linear.models import ( + CreateCommentResponse, + CreateIssueResponse, + Issue, + Project, +) +from backend.util.request import Requests + + +class LinearAPIException(Exception): + def __init__(self, message: str, status_code: int): + super().__init__(message) + self.status_code = status_code + + +class LinearClient: + """Client for the Linear API + + If you're looking for the schema: https://studio.apollographql.com/public/Linear-API/variant/current/schema + """ + + API_URL = "https://api.linear.app/graphql" + + def __init__( + self, + credentials: LinearCredentials | None = None, + custom_requests: Optional[Requests] = None, + ): + if custom_requests: + self._requests = custom_requests + else: + + headers: Dict[str, str] = { + "Content-Type": "application/json", + } + if credentials: + headers["Authorization"] = credentials.auth_header() + + self._requests = Requests( + extra_headers=headers, + trusted_origins=["https://api.linear.app"], + raise_for_status=False, + ) + + def _execute_graphql_request( + self, query: str, variables: dict | None = None + ) -> Any: + """ + Executes a GraphQL request against the Linear API and returns the response data. + + Args: + query: The GraphQL query string. + variables (optional): Any GraphQL query variables + + Returns: + The parsed JSON response data, or raises a LinearAPIException on error. + """ + payload: Dict[str, Any] = {"query": query} + if variables: + payload["variables"] = variables + + response = self._requests.post(self.API_URL, json=payload) + + if not response.ok: + + try: + error_data = response.json() + error_message = error_data.get("errors", [{}])[0].get("message", "") + except json.JSONDecodeError: + error_message = response.text + + raise LinearAPIException( + f"Linear API request failed ({response.status_code}): {error_message}", + response.status_code, + ) + + response_data = response.json() + if "errors" in response_data: + + error_messages = [ + error.get("message", "") for error in response_data["errors"] + ] + raise LinearAPIException( + f"Linear API returned errors: {', '.join(error_messages)}", + response.status_code, + ) + + return response_data["data"] + + def query(self, query: str, variables: Optional[dict] = None) -> dict: + """Executes a GraphQL query. + + Args: + query: The GraphQL query string. + variables: Query variables, if any. + + Returns: + The response data. + """ + return self._execute_graphql_request(query, variables) + + def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict: + """Executes a GraphQL mutation. + + Args: + mutation: The GraphQL mutation string. + variables: Query variables, if any. + + Returns: + The response data. + """ + return self._execute_graphql_request(mutation, variables) + + def try_create_comment(self, issue_id: str, comment: str) -> CreateCommentResponse: + try: + mutation = """ + mutation CommentCreate($input: CommentCreateInput!) 
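+                # Only the fields needed by CreateCommentResponse are selected:
+                # the success flag plus the new comment's id and body.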
{ + commentCreate(input: $input) { + success + comment { + id + body + } + } + } + """ + + variables = { + "input": { + "body": comment, + "issueId": issue_id, + } + } + + added_comment = self.mutate(mutation, variables) + # Select the commentCreate field from the mutation response + return CreateCommentResponse(**added_comment["commentCreate"]) + except LinearAPIException as e: + raise e + + def try_get_team_by_name(self, team_name: str) -> str: + try: + query = """ + query GetTeamId($searchTerm: String!) { + teams(filter: { + or: [ + { name: { eqIgnoreCase: $searchTerm } }, + { key: { eqIgnoreCase: $searchTerm } } + ] + }) { + nodes { + id + name + key + } + } + } + """ + + variables: dict[str, Any] = { + "searchTerm": team_name, + } + + team_id = self.query(query, variables) + return team_id["teams"]["nodes"][0]["id"] + except LinearAPIException as e: + raise e + + def try_create_issue( + self, + team_id: str, + title: str, + description: str | None = None, + priority: int | None = None, + project_id: str | None = None, + ) -> CreateIssueResponse: + try: + mutation = """ + mutation IssueCreate($input: IssueCreateInput!) { + issueCreate(input: $input) { + issue { + title + description + id + identifier + priority + } + } + } + """ + + variables: dict[str, Any] = { + "input": { + "teamId": team_id, + "title": title, + } + } + + if project_id: + variables["input"]["projectId"] = project_id + + if description: + variables["input"]["description"] = description + + if priority: + variables["input"]["priority"] = priority + + added_issue = self.mutate(mutation, variables) + return CreateIssueResponse(**added_issue["issueCreate"]) + except LinearAPIException as e: + raise e + + def try_search_projects(self, term: str) -> list[Project]: + try: + query = """ + query SearchProjects($term: String!, $includeComments: Boolean!) { + searchProjects(term: $term, includeComments: $includeComments) { + nodes { + id + name + description + priority + progress + content + } + } + } + """ + + variables: dict[str, Any] = { + "term": term, + "includeComments": True, + } + + projects = self.query(query, variables) + return [ + Project(**project) for project in projects["searchProjects"]["nodes"] + ] + except LinearAPIException as e: + raise e + + def try_search_issues(self, term: str) -> list[Issue]: + try: + query = """ + query SearchIssues($term: String!, $includeComments: Boolean!) 
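+                # The selected fields mirror the Issue model used to parse each node.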
{ + searchIssues(term: $term, includeComments: $includeComments) { + nodes { + id + identifier + title + description + priority + } + } + } + """ + + variables: dict[str, Any] = { + "term": term, + "includeComments": True, + } + + issues = self.query(query, variables) + return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]] + except LinearAPIException as e: + raise e diff --git a/autogpt_platform/backend/backend/blocks/linear/_auth.py b/autogpt_platform/backend/backend/blocks/linear/_auth.py new file mode 100644 index 000000000000..fb91fbfe7acf --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/_auth.py @@ -0,0 +1,101 @@ +from enum import Enum +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + OAuth2Credentials, +) +from backend.integrations.providers import ProviderName +from backend.util.settings import Secrets + +secrets = Secrets() +LINEAR_OAUTH_IS_CONFIGURED = bool( + secrets.linear_client_id and secrets.linear_client_secret +) + +LinearCredentials = OAuth2Credentials | APIKeyCredentials +# LinearCredentialsInput = CredentialsMetaInput[ +# Literal[ProviderName.LINEAR], +# Literal["oauth2", "api_key"] if LINEAR_OAUTH_IS_CONFIGURED else Literal["oauth2"], +# ] +LinearCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.LINEAR], Literal["oauth2"] +] + + +# (required) Comma separated list of scopes: + +# read - (Default) Read access for the user's account. This scope will always be present. + +# write - Write access for the user's account. If your application only needs to create comments, use a more targeted scope + +# issues:create - Allows creating new issues and their attachments + +# comments:create - Allows creating new issue comments + +# timeSchedule:write - Allows creating and modifying time schedules + + +# admin - Full access to admin level endpoints. You should never ask for this permission unless it's absolutely needed +class LinearScope(str, Enum): + READ = "read" + WRITE = "write" + ISSUES_CREATE = "issues:create" + COMMENTS_CREATE = "comments:create" + TIME_SCHEDULE_WRITE = "timeSchedule:write" + ADMIN = "admin" + + +def LinearCredentialsField(scopes: list[LinearScope]) -> LinearCredentialsInput: + """ + Creates a Linear credentials input on a block. + + Params: + scope: The authorization scope needed for the block to work. 
([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes)) + """ # noqa + return CredentialsField( + required_scopes=set([LinearScope.READ.value]).union( + set([scope.value for scope in scopes]) + ), + description="The Linear integration can be used with OAuth, " + "or any API key with sufficient permissions for the blocks it is used on.", + ) + + +TEST_CREDENTIALS_OAUTH = OAuth2Credentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="linear", + title="Mock Linear API key", + username="mock-linear-username", + access_token=SecretStr("mock-linear-access-token"), + access_token_expires_at=None, + refresh_token=SecretStr("mock-linear-refresh-token"), + refresh_token_expires_at=None, + scopes=["mock-linear-scopes"], +) + +TEST_CREDENTIALS_API_KEY = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="linear", + title="Mock Linear API key", + api_key=SecretStr("mock-linear-api-key"), + expires_at=None, +) + +TEST_CREDENTIALS_INPUT_OAUTH = { + "provider": TEST_CREDENTIALS_OAUTH.provider, + "id": TEST_CREDENTIALS_OAUTH.id, + "type": TEST_CREDENTIALS_OAUTH.type, + "title": TEST_CREDENTIALS_OAUTH.type, +} + +TEST_CREDENTIALS_INPUT_API_KEY = { + "provider": TEST_CREDENTIALS_API_KEY.provider, + "id": TEST_CREDENTIALS_API_KEY.id, + "type": TEST_CREDENTIALS_API_KEY.type, + "title": TEST_CREDENTIALS_API_KEY.type, +} diff --git a/autogpt_platform/backend/backend/blocks/linear/comment.py b/autogpt_platform/backend/backend/blocks/linear/comment.py new file mode 100644 index 000000000000..6789fd12e38f --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/comment.py @@ -0,0 +1,83 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + LINEAR_OAUTH_IS_CONFIGURED, + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import CreateCommentResponse +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearCreateCommentBlock(Block): + """Block for creating comments on Linear issues""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.COMMENTS_CREATE], + ) + issue_id: str = SchemaField(description="ID of the issue to comment on") + comment: str = SchemaField(description="Comment text to add to the issue") + + class Output(BlockSchema): + comment_id: str = SchemaField(description="ID of the created comment") + comment_body: str = SchemaField( + description="Text content of the created comment" + ) + error: str = SchemaField(description="Error message if comment creation failed") + + def __init__(self): + super().__init__( + id="8f7d3a2e-9b5c-4c6a-8f1d-7c8b3e4a5d6c", + description="Creates a new comment on a Linear issue", + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "issue_id": "TEST-123", + "comment": "Test comment", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + disabled=not LINEAR_OAUTH_IS_CONFIGURED, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[("comment_id", "abc123"), ("comment_body", "Test comment")], + test_mock={ + "create_comment": lambda *args, **kwargs: ( + "abc123", + "Test comment", + ) + }, + ) + + @staticmethod + def 
create_comment( + credentials: LinearCredentials, issue_id: str, comment: str + ) -> tuple[str, str]: + client = LinearClient(credentials=credentials) + response: CreateCommentResponse = client.try_create_comment( + issue_id=issue_id, comment=comment + ) + return response.comment.id, response.comment.body + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the comment creation""" + try: + comment_id, comment_body = self.create_comment( + credentials=credentials, + issue_id=input_data.issue_id, + comment=input_data.comment, + ) + + yield "comment_id", comment_id + yield "comment_body", comment_body + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/issues.py b/autogpt_platform/backend/backend/blocks/linear/issues.py new file mode 100644 index 000000000000..ff99c70e67ca --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/issues.py @@ -0,0 +1,189 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + LINEAR_OAUTH_IS_CONFIGURED, + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import CreateIssueResponse, Issue +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearCreateIssueBlock(Block): + """Block for creating issues on Linear""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.ISSUES_CREATE], + ) + title: str = SchemaField(description="Title of the issue") + description: str | None = SchemaField(description="Description of the issue") + team_name: str = SchemaField( + description="Name of the team to create the issue on" + ) + priority: int | None = SchemaField( + description="Priority of the issue", + default=None, + minimum=0, + maximum=4, + ) + project_name: str | None = SchemaField( + description="Name of the project to create the issue on", + default=None, + ) + + class Output(BlockSchema): + issue_id: str = SchemaField(description="ID of the created issue") + issue_title: str = SchemaField(description="Title of the created issue") + error: str = SchemaField(description="Error message if issue creation failed") + + def __init__(self): + super().__init__( + id="f9c68f55-dcca-40a8-8771-abf9601680aa", + description="Creates a new issue on Linear", + disabled=not LINEAR_OAUTH_IS_CONFIGURED, + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "title": "Test issue", + "description": "Test description", + "team_name": "Test team", + "project_name": "Test project", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[("issue_id", "abc123"), ("issue_title", "Test issue")], + test_mock={ + "create_issue": lambda *args, **kwargs: ( + "abc123", + "Test issue", + ) + }, + ) + + @staticmethod + def create_issue( + credentials: LinearCredentials, + team_name: str, + title: str, + description: str | None = None, + priority: int | None = None, + project_name: str | None = None, + ) -> tuple[str, str]: + client = LinearClient(credentials=credentials) + team_id = 
client.try_get_team_by_name(team_name=team_name) + project_id: str | None = None + if project_name: + projects = client.try_search_projects(term=project_name) + if projects: + project_id = projects[0].id + else: + raise LinearAPIException("Project not found", status_code=404) + response: CreateIssueResponse = client.try_create_issue( + team_id=team_id, + title=title, + description=description, + priority=priority, + project_id=project_id, + ) + return response.issue.identifier, response.issue.title + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the issue creation""" + try: + issue_id, issue_title = self.create_issue( + credentials=credentials, + team_name=input_data.team_name, + title=input_data.title, + description=input_data.description, + priority=input_data.priority, + project_name=input_data.project_name, + ) + + yield "issue_id", issue_id + yield "issue_title", issue_title + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" + + +class LinearSearchIssuesBlock(Block): + """Block for searching issues on Linear""" + + class Input(BlockSchema): + term: str = SchemaField(description="Term to search for issues") + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.READ], + ) + + class Output(BlockSchema): + issues: list[Issue] = SchemaField(description="List of issues") + + def __init__(self): + super().__init__( + id="b5a2a0e6-26b4-4c5b-8a42-bc79e9cb65c2", + description="Searches for issues on Linear", + input_schema=self.Input, + output_schema=self.Output, + disabled=not LINEAR_OAUTH_IS_CONFIGURED, + test_input={ + "term": "Test issue", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[ + ( + "issues", + [ + Issue( + id="abc123", + identifier="abc123", + title="Test issue", + description="Test description", + priority=1, + ) + ], + ) + ], + test_mock={ + "search_issues": lambda *args, **kwargs: [ + Issue( + id="abc123", + identifier="abc123", + title="Test issue", + description="Test description", + priority=1, + ) + ] + }, + ) + + @staticmethod + def search_issues( + credentials: LinearCredentials, + term: str, + ) -> list[Issue]: + client = LinearClient(credentials=credentials) + response: list[Issue] = client.try_search_issues(term=term) + return response + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the issue search""" + try: + issues = self.search_issues(credentials=credentials, term=input_data.term) + yield "issues", issues + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/linear/models.py b/autogpt_platform/backend/backend/blocks/linear/models.py new file mode 100644 index 000000000000..a6a2de3cd84a --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/models.py @@ -0,0 +1,41 @@ +from pydantic import BaseModel + + +class Comment(BaseModel): + id: str + body: str + + +class CreateCommentInput(BaseModel): + body: str + issueId: str + + +class CreateCommentResponse(BaseModel): + success: bool + comment: Comment + + +class CreateCommentResponseWrapper(BaseModel): + commentCreate: CreateCommentResponse + + +class Issue(BaseModel): + id: str + identifier: str + title: str + description: str | None + priority: int + + +class 
CreateIssueResponse(BaseModel): + issue: Issue + + +class Project(BaseModel): + id: str + name: str + description: str + priority: int + progress: int + content: str diff --git a/autogpt_platform/backend/backend/blocks/linear/projects.py b/autogpt_platform/backend/backend/blocks/linear/projects.py new file mode 100644 index 000000000000..695064a6a19e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/linear/projects.py @@ -0,0 +1,95 @@ +from backend.blocks.linear._api import LinearAPIException, LinearClient +from backend.blocks.linear._auth import ( + LINEAR_OAUTH_IS_CONFIGURED, + TEST_CREDENTIALS_INPUT_OAUTH, + TEST_CREDENTIALS_OAUTH, + LinearCredentials, + LinearCredentialsField, + LinearCredentialsInput, + LinearScope, +) +from backend.blocks.linear.models import Project +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class LinearSearchProjectsBlock(Block): + """Block for searching projects on Linear""" + + class Input(BlockSchema): + credentials: LinearCredentialsInput = LinearCredentialsField( + scopes=[LinearScope.READ], + ) + term: str = SchemaField(description="Term to search for projects") + + class Output(BlockSchema): + projects: list[Project] = SchemaField(description="List of projects") + error: str = SchemaField(description="Error message if issue creation failed") + + def __init__(self): + super().__init__( + id="446a1d35-9d8f-4ac5-83ea-7684ec50e6af", + description="Searches for projects on Linear", + input_schema=self.Input, + output_schema=self.Output, + categories={BlockCategory.PRODUCTIVITY, BlockCategory.ISSUE_TRACKING}, + test_input={ + "term": "Test project", + "credentials": TEST_CREDENTIALS_INPUT_OAUTH, + }, + disabled=not LINEAR_OAUTH_IS_CONFIGURED, + test_credentials=TEST_CREDENTIALS_OAUTH, + test_output=[ + ( + "projects", + [ + Project( + id="abc123", + name="Test project", + description="Test description", + priority=1, + progress=1, + content="Test content", + ) + ], + ) + ], + test_mock={ + "search_projects": lambda *args, **kwargs: [ + Project( + id="abc123", + name="Test project", + description="Test description", + priority=1, + progress=1, + content="Test content", + ) + ] + }, + ) + + @staticmethod + def search_projects( + credentials: LinearCredentials, + term: str, + ) -> list[Project]: + client = LinearClient(credentials=credentials) + response: list[Project] = client.try_search_projects(term=term) + return response + + def run( + self, input_data: Input, *, credentials: LinearCredentials, **kwargs + ) -> BlockOutput: + """Execute the project search""" + try: + projects = self.search_projects( + credentials=credentials, + term=input_data.term, + ) + + yield "projects", projects + + except LinearAPIException as e: + yield "error", str(e) + except Exception as e: + yield "error", f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py new file mode 100644 index 000000000000..f28de14708bc --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -0,0 +1,1176 @@ +import ast +import logging +from abc import ABC +from enum import Enum, EnumMeta +from json import JSONDecodeError +from types import MappingProxyType +from typing import TYPE_CHECKING, Any, List, Literal, NamedTuple + +from pydantic import SecretStr + +from backend.integrations.providers import ProviderName + +if TYPE_CHECKING: + from enum import _EnumMemberT + +import anthropic +import ollama +import openai +from 
groq import Groq + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.util import json +from backend.util.settings import BehaveAs, Settings +from backend.util.text import TextFormatter + +logger = logging.getLogger(__name__) +fmt = TextFormatter() + +LLMProviderName = Literal[ + ProviderName.ANTHROPIC, + ProviderName.GROQ, + ProviderName.OLLAMA, + ProviderName.OPENAI, + ProviderName.OPEN_ROUTER, +] +AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]] + +TEST_CREDENTIALS = APIKeyCredentials( + id="ed55ac19-356e-4243-a6cb-bc599e9b716f", + provider="openai", + api_key=SecretStr("mock-openai-api-key"), + title="Mock OpenAI API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def AICredentialsField() -> AICredentials: + return CredentialsField( + description="API key for the LLM provider.", + discriminator="model", + discriminator_mapping={ + model.value: model.metadata.provider for model in LlmModel + }, + ) + + +class ModelMetadata(NamedTuple): + provider: str + context_window: int + + +class LlmModelMeta(EnumMeta): + @property + def __members__( + self: type["_EnumMemberT"], + ) -> MappingProxyType[str, "_EnumMemberT"]: + if Settings().config.behave_as == BehaveAs.LOCAL: + members = super().__members__ + return members + else: + removed_providers = ["ollama"] + existing_members = super().__members__ + members = { + name: member + for name, member in existing_members.items() + if LlmModel[name].provider not in removed_providers + } + return MappingProxyType(members) + + +class LlmModel(str, Enum, metaclass=LlmModelMeta): + # OpenAI models + O1_PREVIEW = "o1-preview" + O1_MINI = "o1-mini" + GPT4O_MINI = "gpt-4o-mini" + GPT4O = "gpt-4o" + GPT4_TURBO = "gpt-4-turbo" + GPT3_5_TURBO = "gpt-3.5-turbo" + # Anthropic models + CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" + CLAUDE_3_HAIKU = "claude-3-haiku-20240307" + # Groq models + LLAMA3_8B = "llama3-8b-8192" + LLAMA3_70B = "llama3-70b-8192" + MIXTRAL_8X7B = "mixtral-8x7b-32768" + GEMMA_7B = "gemma-7b-it" + GEMMA2_9B = "gemma2-9b-it" + # New Groq models (Preview) + LLAMA3_1_405B = "llama-3.1-405b-reasoning" + LLAMA3_1_70B = "llama-3.1-70b-versatile" + LLAMA3_1_8B = "llama-3.1-8b-instant" + # Ollama models + OLLAMA_LLAMA3_2 = "llama3.2" + OLLAMA_LLAMA3_8B = "llama3" + OLLAMA_LLAMA3_405B = "llama3.1:405b" + OLLAMA_DOLPHIN = "dolphin-mistral:latest" + # OpenRouter models + GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5" + GROK_BETA = "x-ai/grok-beta" + MISTRAL_NEMO = "mistralai/mistral-nemo" + COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024" + COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024" + EVA_QWEN_2_5_32B = "eva-unit-01/eva-qwen-2.5-32b" + DEEPSEEK_CHAT = "deepseek/deepseek-chat" + PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE = ( + "perplexity/llama-3.1-sonar-large-128k-online" + ) + QWEN_QWQ_32B_PREVIEW = "qwen/qwq-32b-preview" + NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b" + NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b" + AMAZON_NOVA_LITE_V1 = "amazon/nova-lite-v1" + AMAZON_NOVA_MICRO_V1 = "amazon/nova-micro-v1" + AMAZON_NOVA_PRO_V1 = "amazon/nova-pro-v1" + MICROSOFT_WIZARDLM_2_8X22B = "microsoft/wizardlm-2-8x22b" + 
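+    # OpenRouter model IDs are namespaced as "<vendor>/<model>". Every member added
+    # here must also get a MODEL_METADATA entry below, otherwise the consistency
+    # check at module import raises; LlmModelMeta additionally hides the Ollama
+    # members from __members__ when the platform is not running locally.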
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b" + + @property + def metadata(self) -> ModelMetadata: + return MODEL_METADATA[self] + + @property + def provider(self) -> str: + return self.metadata.provider + + @property + def context_window(self) -> int: + return self.metadata.context_window + + +MODEL_METADATA = { + LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000), + LlmModel.O1_MINI: ModelMetadata("openai", 62000), + LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000), + LlmModel.GPT4O: ModelMetadata("openai", 128000), + LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000), + LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385), + LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000), + LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000), + LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192), + LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192), + LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768), + LlmModel.GEMMA_7B: ModelMetadata("groq", 8192), + LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192), + LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192), + # Limited to 16k during preview + LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072), + LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072), + LlmModel.OLLAMA_LLAMA3_2: ModelMetadata("ollama", 8192), + LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192), + LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192), + LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768), + LlmModel.GEMINI_FLASH_1_5_8B: ModelMetadata("open_router", 8192), + LlmModel.GROK_BETA: ModelMetadata("open_router", 8192), + LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 4000), + LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 4000), + LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 4000), + LlmModel.EVA_QWEN_2_5_32B: ModelMetadata("open_router", 4000), + LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 8192), + LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: ModelMetadata( + "open_router", 8192 + ), + LlmModel.QWEN_QWQ_32B_PREVIEW: ModelMetadata("open_router", 4000), + LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata("open_router", 4000), + LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: ModelMetadata("open_router", 4000), + LlmModel.AMAZON_NOVA_LITE_V1: ModelMetadata("open_router", 4000), + LlmModel.AMAZON_NOVA_MICRO_V1: ModelMetadata("open_router", 4000), + LlmModel.AMAZON_NOVA_PRO_V1: ModelMetadata("open_router", 4000), + LlmModel.MICROSOFT_WIZARDLM_2_8X22B: ModelMetadata("open_router", 4000), + LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4000), +} + +for model in LlmModel: + if model not in MODEL_METADATA: + raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}") + + +class MessageRole(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + + +class Message(BlockSchema): + role: MessageRole + content: str + + +class AIBlockBase(Block, ABC): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.prompt = "" + + def merge_llm_stats(self, block: "AIBlockBase"): + self.merge_stats(block.execution_stats) + self.prompt = block.prompt + + +class AIStructuredResponseGeneratorBlock(AIBlockBase): + class Input(BlockSchema): + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) + expected_format: dict[str, str] = SchemaField( + description="Expected format of the response. 
If provided, the response will be validated against this format. " + "The keys should be the expected fields in the response, and the values should be the description of the field.", + ) + model: LlmModel = SchemaField( + title="LLM Model", + default=LlmModel.GPT4_TURBO, + description="The language model to use for answering the prompt.", + advanced=False, + ) + credentials: AICredentials = AICredentialsField() + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + conversation_history: list[Message] = SchemaField( + default=[], + description="The conversation history to provide context for the prompt.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) + prompt_values: dict[str, str] = SchemaField( + advanced=False, + default={}, + description="Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}.", + ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) + + ollama_host: str = SchemaField( + advanced=True, + default="localhost:11434", + description="Ollama host for local models", + ) + + class Output(BlockSchema): + response: dict[str, Any] = SchemaField( + description="The response object generated by the language model." + ) + prompt: str = SchemaField(description="The prompt sent to the language model.") + error: str = SchemaField(description="Error message if the API call failed.") + + def __init__(self): + super().__init__( + id="ed55ac19-356e-4243-a6cb-bc599e9b716f", + description="Call a Large Language Model (LLM) to generate formatted object based on the given prompt.", + categories={BlockCategory.AI}, + input_schema=AIStructuredResponseGeneratorBlock.Input, + output_schema=AIStructuredResponseGeneratorBlock.Output, + test_input={ + "model": LlmModel.GPT4_TURBO, + "credentials": TEST_CREDENTIALS_INPUT, + "expected_format": { + "key1": "value1", + "key2": "value2", + }, + "prompt": "User prompt", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("response", {"key1": "key1Value", "key2": "key2Value"}), + ("prompt", str), + ], + test_mock={ + "llm_call": lambda *args, **kwargs: ( + json.dumps( + { + "key1": "key1Value", + "key2": "key2Value", + } + ), + 0, + 0, + ) + }, + ) + self.prompt = "" + + def llm_call( + self, + credentials: APIKeyCredentials, + llm_model: LlmModel, + prompt: list[dict], + json_format: bool, + max_tokens: int | None = None, + ollama_host: str = "localhost:11434", + ) -> tuple[str, int, int]: + """ + Args: + credentials: The API key credentials to use. + llm_model: The LLM model to use. + prompt: The prompt to send to the LLM. + json_format: Whether the response should be in JSON format. + max_tokens: The maximum number of tokens to generate in the chat completion. + ollama_host: The host for ollama to use + + Returns: + The response from the LLM. + The number of tokens used in the prompt. + The number of tokens used in the completion. 
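+
+        Raises:
+            ValueError: if the provider is unsupported, or if the Anthropic call
+                fails or returns no content. Exceptions from the other provider
+                SDKs propagate unchanged.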
+ """ + provider = llm_model.metadata.provider + + if provider == "openai": + oai_client = openai.OpenAI(api_key=credentials.api_key.get_secret_value()) + response_format = None + + if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]: + sys_messages = [p["content"] for p in prompt if p["role"] == "system"] + usr_messages = [p["content"] for p in prompt if p["role"] != "system"] + prompt = [ + {"role": "user", "content": "\n".join(sys_messages)}, + {"role": "user", "content": "\n".join(usr_messages)}, + ] + elif json_format: + response_format = {"type": "json_object"} + + response = oai_client.chat.completions.create( + model=llm_model.value, + messages=prompt, # type: ignore + response_format=response_format, # type: ignore + max_completion_tokens=max_tokens, + ) + self.prompt = json.dumps(prompt) + + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, + ) + elif provider == "anthropic": + system_messages = [p["content"] for p in prompt if p["role"] == "system"] + sysprompt = " ".join(system_messages) + + messages = [] + last_role = None + for p in prompt: + if p["role"] in ["user", "assistant"]: + if p["role"] != last_role: + messages.append({"role": p["role"], "content": p["content"]}) + last_role = p["role"] + else: + # If the role is the same as the last one, combine the content + messages[-1]["content"] += "\n" + p["content"] + + client = anthropic.Anthropic(api_key=credentials.api_key.get_secret_value()) + try: + resp = client.messages.create( + model=llm_model.value, + system=sysprompt, + messages=messages, + max_tokens=max_tokens or 8192, + ) + self.prompt = json.dumps(prompt) + + if not resp.content: + raise ValueError("No content returned from Anthropic.") + + return ( + ( + resp.content[0].name + if isinstance(resp.content[0], anthropic.types.ToolUseBlock) + else resp.content[0].text + ), + resp.usage.input_tokens, + resp.usage.output_tokens, + ) + except anthropic.APIError as e: + error_message = f"Anthropic API error: {str(e)}" + logger.error(error_message) + raise ValueError(error_message) + elif provider == "groq": + client = Groq(api_key=credentials.api_key.get_secret_value()) + response_format = {"type": "json_object"} if json_format else None + response = client.chat.completions.create( + model=llm_model.value, + messages=prompt, # type: ignore + response_format=response_format, # type: ignore + max_tokens=max_tokens, + ) + self.prompt = json.dumps(prompt) + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, + ) + elif provider == "ollama": + client = ollama.Client(host=ollama_host) + sys_messages = [p["content"] for p in prompt if p["role"] == "system"] + usr_messages = [p["content"] for p in prompt if p["role"] != "system"] + response = client.generate( + model=llm_model.value, + prompt=f"{sys_messages}\n\n{usr_messages}", + stream=False, + ) + self.prompt = json.dumps(prompt) + return ( + response.get("response") or "", + response.get("prompt_eval_count") or 0, + response.get("eval_count") or 0, + ) + elif provider == "open_router": + client = openai.OpenAI( + base_url="https://openrouter.ai/api/v1", + api_key=credentials.api_key.get_secret_value(), + ) + + response = client.chat.completions.create( + extra_headers={ + "HTTP-Referer": "https://agpt.co", + "X-Title": "AutoGPT", + }, + model=llm_model.value, + messages=prompt, # 
type: ignore + max_tokens=max_tokens, + ) + self.prompt = json.dumps(prompt) + + # If there's no response, raise an error + if not response.choices: + if response: + raise ValueError(f"OpenRouter error: {response}") + else: + raise ValueError("No response from OpenRouter.") + + return ( + response.choices[0].message.content or "", + response.usage.prompt_tokens if response.usage else 0, + response.usage.completion_tokens if response.usage else 0, + ) + else: + raise ValueError(f"Unsupported LLM provider: {provider}") + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + logger.debug(f"Calling LLM with input data: {input_data}") + prompt = [p.model_dump() for p in input_data.conversation_history] + + def trim_prompt(s: str) -> str: + lines = s.strip().split("\n") + return "\n".join([line.strip().lstrip("|") for line in lines]) + + values = input_data.prompt_values + if values: + input_data.prompt = fmt.format_string(input_data.prompt, values) + input_data.sys_prompt = fmt.format_string(input_data.sys_prompt, values) + + if input_data.sys_prompt: + prompt.append({"role": "system", "content": input_data.sys_prompt}) + + if input_data.expected_format: + expected_format = [ + f'"{k}": "{v}"' for k, v in input_data.expected_format.items() + ] + format_prompt = ",\n ".join(expected_format) + sys_prompt = trim_prompt( + f""" + |Reply strictly only in the following JSON format: + |{{ + | {format_prompt} + |}} + """ + ) + prompt.append({"role": "system", "content": sys_prompt}) + + if input_data.prompt: + prompt.append({"role": "user", "content": input_data.prompt}) + + def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: + try: + parsed = json.loads(resp) + if not isinstance(parsed, dict): + return {}, f"Expected a dictionary, but got {type(parsed)}" + miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys()) + if miss_keys: + return parsed, f"Missing keys: {miss_keys}" + return parsed, None + except JSONDecodeError as e: + return {}, f"JSON decode error: {e}" + + logger.info(f"LLM request: {prompt}") + retry_prompt = "" + llm_model = input_data.model + + for retry_count in range(input_data.retry): + try: + response_text, input_token, output_token = self.llm_call( + credentials=credentials, + llm_model=llm_model, + prompt=prompt, + json_format=bool(input_data.expected_format), + ollama_host=input_data.ollama_host, + max_tokens=input_data.max_tokens, + ) + self.merge_stats( + { + "input_token_count": input_token, + "output_token_count": output_token, + } + ) + logger.info(f"LLM attempt-{retry_count} response: {response_text}") + + if input_data.expected_format: + parsed_dict, parsed_error = parse_response(response_text) + if not parsed_error: + yield "response", { + k: ( + json.loads(v) + if isinstance(v, str) + and v.startswith("[") + and v.endswith("]") + else (", ".join(v) if isinstance(v, list) else v) + ) + for k, v in parsed_dict.items() + } + yield "prompt", self.prompt + return + else: + yield "response", {"response": response_text} + yield "prompt", self.prompt + return + + retry_prompt = trim_prompt( + f""" + |This is your previous error response: + |-- + |{response_text} + |-- + | + |And this is the error: + |-- + |{parsed_error} + |-- + """ + ) + prompt.append({"role": "user", "content": retry_prompt}) + except Exception as e: + logger.exception(f"Error calling LLM: {e}") + retry_prompt = f"Error calling LLM: {e}" + finally: + self.merge_stats( + { + "llm_call_count": retry_count + 1, + "llm_retry_count": 
retry_count, + } + ) + + raise RuntimeError(retry_prompt) + + +class AITextGeneratorBlock(AIBlockBase): + class Input(BlockSchema): + prompt: str = SchemaField( + description="The prompt to send to the language model. You can use any of the {keys} from Prompt Values to fill in the prompt with values from the prompt values dictionary by putting them in curly braces.", + placeholder="Enter your prompt here...", + ) + model: LlmModel = SchemaField( + title="LLM Model", + default=LlmModel.GPT4_TURBO, + description="The language model to use for answering the prompt.", + advanced=False, + ) + credentials: AICredentials = AICredentialsField() + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) + prompt_values: dict[str, str] = SchemaField( + advanced=False, + default={}, + description="Values used to fill in the prompt. The values can be used in the prompt by putting them in a double curly braces, e.g. {{variable_name}}.", + ) + ollama_host: str = SchemaField( + advanced=True, + default="localhost:11434", + description="Ollama host for local models", + ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) + + class Output(BlockSchema): + response: str = SchemaField( + description="The response generated by the language model." + ) + prompt: str = SchemaField(description="The prompt sent to the language model.") + error: str = SchemaField(description="Error message if the API call failed.") + + def __init__(self): + super().__init__( + id="1f292d4a-41a4-4977-9684-7c8d560b9f91", + description="Call a Large Language Model (LLM) to generate a string based on the given prompt.", + categories={BlockCategory.AI}, + input_schema=AITextGeneratorBlock.Input, + output_schema=AITextGeneratorBlock.Output, + test_input={ + "prompt": "User prompt", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("response", "Response text"), + ("prompt", str), + ], + test_mock={"llm_call": lambda *args, **kwargs: "Response text"}, + ) + + def llm_call( + self, + input_data: AIStructuredResponseGeneratorBlock.Input, + credentials: APIKeyCredentials, + ) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response", credentials=credentials) + self.merge_llm_stats(block) + return response["response"] + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + object_input_data = AIStructuredResponseGeneratorBlock.Input( + **{attr: getattr(input_data, attr) for attr in input_data.model_fields}, + expected_format={}, + ) + yield "response", self.llm_call(object_input_data, credentials) + yield "prompt", self.prompt + + +class SummaryStyle(Enum): + CONCISE = "concise" + DETAILED = "detailed" + BULLET_POINTS = "bullet points" + NUMBERED_LIST = "numbered list" + + +class AITextSummarizerBlock(AIBlockBase): + class Input(BlockSchema): + text: str = SchemaField( + description="The text to summarize.", + placeholder="Enter the text to summarize here...", + ) + model: LlmModel = SchemaField( + title="LLM Model", + default=LlmModel.GPT4_TURBO, + description="The language model to use for summarizing the text.", + ) + 
focus: str = SchemaField( + title="Focus", + default="general information", + description="The topic to focus on in the summary", + ) + style: SummaryStyle = SchemaField( + title="Summary Style", + default=SummaryStyle.CONCISE, + description="The style of the summary to generate.", + ) + credentials: AICredentials = AICredentialsField() + # TODO: Make this dynamic + max_tokens: int = SchemaField( + title="Max Tokens", + default=4096, + description="The maximum number of tokens to generate in the chat completion.", + ge=1, + ) + chunk_overlap: int = SchemaField( + title="Chunk Overlap", + default=100, + description="The number of overlapping tokens between chunks to maintain context.", + ge=0, + ) + ollama_host: str = SchemaField( + advanced=True, + default="localhost:11434", + description="Ollama host for local models", + ) + + class Output(BlockSchema): + summary: str = SchemaField(description="The final summary of the text.") + prompt: str = SchemaField(description="The prompt sent to the language model.") + error: str = SchemaField(description="Error message if the API call failed.") + + def __init__(self): + super().__init__( + id="a0a69be1-4528-491c-a85a-a4ab6873e3f0", + description="Utilize a Large Language Model (LLM) to summarize a long text.", + categories={BlockCategory.AI, BlockCategory.TEXT}, + input_schema=AITextSummarizerBlock.Input, + output_schema=AITextSummarizerBlock.Output, + test_input={ + "text": "Lorem ipsum..." * 100, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("summary", "Final summary of a long text"), + ("prompt", str), + ], + test_mock={ + "llm_call": lambda input_data, credentials: ( + {"final_summary": "Final summary of a long text"} + if "final_summary" in input_data.expected_format + else {"summary": "Summary of a chunk of text"} + ) + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + for output in self._run(input_data, credentials): + yield output + + def _run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput: + chunks = self._split_text( + input_data.text, input_data.max_tokens, input_data.chunk_overlap + ) + summaries = [] + + for chunk in chunks: + chunk_summary = self._summarize_chunk(chunk, input_data, credentials) + summaries.append(chunk_summary) + + final_summary = self._combine_summaries(summaries, input_data, credentials) + yield "summary", final_summary + yield "prompt", self.prompt + + @staticmethod + def _split_text(text: str, max_tokens: int, overlap: int) -> list[str]: + words = text.split() + chunks = [] + chunk_size = max_tokens - overlap + + for i in range(0, len(words), chunk_size): + chunk = " ".join(words[i : i + max_tokens]) + chunks.append(chunk) + + return chunks + + def llm_call( + self, + input_data: AIStructuredResponseGeneratorBlock.Input, + credentials: APIKeyCredentials, + ) -> dict: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response", credentials=credentials) + self.merge_llm_stats(block) + return response + + def _summarize_chunk( + self, chunk: str, input_data: Input, credentials: APIKeyCredentials + ) -> str: + prompt = f"Summarize the following text in a {input_data.style} form. 
Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```" + + llm_response = self.llm_call( + AIStructuredResponseGeneratorBlock.Input( + prompt=prompt, + credentials=input_data.credentials, + model=input_data.model, + expected_format={"summary": "The summary of the given text."}, + ), + credentials=credentials, + ) + + return llm_response["summary"] + + def _combine_summaries( + self, summaries: list[str], input_data: Input, credentials: APIKeyCredentials + ) -> str: + combined_text = "\n\n".join(summaries) + + if len(combined_text.split()) <= input_data.max_tokens: + prompt = f"Provide a final summary of the following section summaries in a {input_data.style} form, focus your summary on the topic of `{input_data.focus}` if present:\n\n ```{combined_text}```\n\n Just respond with the final_summary in the format specified." + + llm_response = self.llm_call( + AIStructuredResponseGeneratorBlock.Input( + prompt=prompt, + credentials=input_data.credentials, + model=input_data.model, + expected_format={ + "final_summary": "The final summary of all provided summaries." + }, + ), + credentials=credentials, + ) + + return llm_response["final_summary"] + else: + # If combined summaries are still too long, recursively summarize + return self._run( + AITextSummarizerBlock.Input( + text=combined_text, + credentials=input_data.credentials, + model=input_data.model, + max_tokens=input_data.max_tokens, + chunk_overlap=input_data.chunk_overlap, + ), + credentials=credentials, + ).send(None)[ + 1 + ] # Get the first yielded value + + +class AIConversationBlock(AIBlockBase): + class Input(BlockSchema): + messages: List[Message] = SchemaField( + description="List of messages in the conversation.", min_length=1 + ) + model: LlmModel = SchemaField( + title="LLM Model", + default=LlmModel.GPT4_TURBO, + description="The language model to use for the conversation.", + ) + credentials: AICredentials = AICredentialsField() + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) + ollama_host: str = SchemaField( + advanced=True, + default="localhost:11434", + description="Ollama host for local models", + ) + + class Output(BlockSchema): + response: str = SchemaField( + description="The model's response to the conversation." 
+ ) + prompt: str = SchemaField(description="The prompt sent to the language model.") + error: str = SchemaField(description="Error message if the API call failed.") + + def __init__(self): + super().__init__( + id="32a87eab-381e-4dd4-bdb8-4c47151be35a", + description="Advanced LLM call that takes a list of messages and sends them to the language model.", + categories={BlockCategory.AI}, + input_schema=AIConversationBlock.Input, + output_schema=AIConversationBlock.Output, + test_input={ + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + { + "role": "assistant", + "content": "The Los Angeles Dodgers won the World Series in 2020.", + }, + {"role": "user", "content": "Where was it played?"}, + ], + "model": LlmModel.GPT4_TURBO, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "response", + "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", + ), + ("prompt", str), + ], + test_mock={ + "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + }, + ) + + def llm_call( + self, + input_data: AIStructuredResponseGeneratorBlock.Input, + credentials: APIKeyCredentials, + ) -> str: + block = AIStructuredResponseGeneratorBlock() + response = block.run_once(input_data, "response", credentials=credentials) + self.merge_llm_stats(block) + return response["response"] + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + response = self.llm_call( + AIStructuredResponseGeneratorBlock.Input( + prompt="", + credentials=input_data.credentials, + model=input_data.model, + conversation_history=input_data.messages, + max_tokens=input_data.max_tokens, + expected_format={}, + ollama_host=input_data.ollama_host, + ), + credentials=credentials, + ) + + yield "response", response + yield "prompt", self.prompt + + +class AIListGeneratorBlock(AIBlockBase): + class Input(BlockSchema): + focus: str | None = SchemaField( + description="The focus of the list to generate.", + placeholder="The top 5 most interesting news stories in the data.", + default=None, + advanced=False, + ) + source_data: str | None = SchemaField( + description="The data to generate the list from.", + placeholder="News Today: Humans land on Mars: Today humans landed on mars. -- AI wins Nobel Prize: AI wins Nobel Prize for solving world hunger. 
-- New AI Model: A new AI model has been released.", + default=None, + advanced=False, + ) + model: LlmModel = SchemaField( + title="LLM Model", + default=LlmModel.GPT4_TURBO, + description="The language model to use for generating the list.", + advanced=True, + ) + credentials: AICredentials = AICredentialsField() + max_retries: int = SchemaField( + default=3, + description="Maximum number of retries for generating a valid list.", + ge=1, + le=5, + ) + max_tokens: int | None = SchemaField( + advanced=True, + default=None, + description="The maximum number of tokens to generate in the chat completion.", + ) + ollama_host: str = SchemaField( + advanced=True, + default="localhost:11434", + description="Ollama host for local models", + ) + + class Output(BlockSchema): + generated_list: List[str] = SchemaField(description="The generated list.") + list_item: str = SchemaField( + description="Each individual item in the list.", + ) + prompt: str = SchemaField(description="The prompt sent to the language model.") + error: str = SchemaField( + description="Error message if the list generation failed." + ) + + def __init__(self): + super().__init__( + id="9c0b0450-d199-458b-a731-072189dd6593", + description="Generate a Python list based on the given prompt using a Large Language Model (LLM).", + categories={BlockCategory.AI, BlockCategory.TEXT}, + input_schema=AIListGeneratorBlock.Input, + output_schema=AIListGeneratorBlock.Output, + test_input={ + "focus": "planets", + "source_data": ( + "Zylora Prime is a glowing jungle world with bioluminescent plants, " + "while Kharon-9 is a harsh desert planet with underground cities. " + "Vortexia's constant storms power floating cities, and Oceara is a water-covered world home to " + "intelligent marine life. On icy Draknos, ancient ruins lie buried beneath its frozen landscape, " + "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of " + "fictional worlds." + ), + "model": LlmModel.GPT4_TURBO, + "credentials": TEST_CREDENTIALS_INPUT, + "max_retries": 3, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "generated_list", + ["Zylora Prime", "Kharon-9", "Vortexia", "Oceara", "Draknos"], + ), + ("prompt", str), + ("list_item", "Zylora Prime"), + ("list_item", "Kharon-9"), + ("list_item", "Vortexia"), + ("list_item", "Oceara"), + ("list_item", "Draknos"), + ], + test_mock={ + "llm_call": lambda input_data, credentials: { + "response": "['Zylora Prime', 'Kharon-9', 'Vortexia', 'Oceara', 'Draknos']" + }, + }, + ) + + def llm_call( + self, + input_data: AIStructuredResponseGeneratorBlock.Input, + credentials: APIKeyCredentials, + ) -> dict[str, str]: + llm_block = AIStructuredResponseGeneratorBlock() + response = llm_block.run_once(input_data, "response", credentials=credentials) + self.merge_llm_stats(llm_block) + return response + + @staticmethod + def string_to_list(string): + """ + Converts a string representation of a list into an actual Python list object. + """ + logger.debug(f"Converting string to list. 
Input string: {string}") + try: + # Use ast.literal_eval to safely evaluate the string + python_list = ast.literal_eval(string) + if isinstance(python_list, list): + logger.debug(f"Successfully converted string to list: {python_list}") + return python_list + else: + logger.error(f"The provided string '{string}' is not a valid list") + raise ValueError(f"The provided string '{string}' is not a valid list.") + except (SyntaxError, ValueError) as e: + logger.error(f"Failed to convert string to list: {e}") + raise ValueError("Invalid list format. Could not convert to list.") + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + logger.debug(f"Starting AIListGeneratorBlock.run with input data: {input_data}") + + # Check for API key + api_key_check = credentials.api_key.get_secret_value() + if not api_key_check: + raise ValueError("No LLM API key provided.") + + # Prepare the system prompt + sys_prompt = """You are a Python list generator. Your task is to generate a Python list based on the user's prompt. + |Respond ONLY with a valid python list. + |The list can contain strings, numbers, or nested lists as appropriate. + |Do not include any explanations or additional text. + + |Valid Example string formats: + + |Example 1: + |``` + |['1', '2', '3', '4'] + |``` + + |Example 2: + |``` + |[['1', '2'], ['3', '4'], ['5', '6']] + |``` + + |Example 3: + |``` + |['1', ['2', '3'], ['4', ['5', '6']]] + |``` + + |Example 4: + |``` + |['a', 'b', 'c'] + |``` + + |Example 5: + |``` + |['1', '2.5', 'string', 'True', ['False', 'None']] + |``` + + |Do not include any explanations or additional text, just respond with the list in the format specified above. + """ + # If a focus is provided, add it to the prompt + if input_data.focus: + prompt = f"Generate a list with the following focus:\n\n\n{input_data.focus}" + else: + # If there's source data + if input_data.source_data: + prompt = "Extract the main focus of the source data to a list.\ni.e if the source data is a news website, the focus would be the news stories rather than the social links in the footer." + else: + # No focus or source data provided, generat a random list + prompt = "Generate a random list." + + # If the source data is provided, add it to the prompt + if input_data.source_data: + prompt += f"\n\nUse the following source data to generate the list from:\n\n\n\n{input_data.source_data}\n\nDo not invent fictional data that is not present in the source data." + # Else, tell the LLM to synthesize the data + else: + prompt += "\n\nInvent the data to generate the list from." 
+ + for attempt in range(input_data.max_retries): + try: + logger.debug("Calling LLM") + llm_response = self.llm_call( + AIStructuredResponseGeneratorBlock.Input( + sys_prompt=sys_prompt, + prompt=prompt, + credentials=input_data.credentials, + model=input_data.model, + expected_format={}, # Do not use structured response + ollama_host=input_data.ollama_host, + ), + credentials=credentials, + ) + + logger.debug(f"LLM response: {llm_response}") + + # Extract Response string + response_string = llm_response["response"] + logger.debug(f"Response string: {response_string}") + + # Convert the string to a Python list + logger.debug("Converting string to Python list") + parsed_list = self.string_to_list(response_string) + logger.debug(f"Parsed list: {parsed_list}") + + # If we reach here, we have a valid Python list + logger.debug("Successfully generated a valid Python list") + yield "generated_list", parsed_list + yield "prompt", self.prompt + + # Yield each item in the list + for item in parsed_list: + yield "list_item", item + return + + except Exception as e: + logger.error(f"Error in attempt {attempt + 1}: {str(e)}") + if attempt == input_data.max_retries - 1: + logger.error( + f"Failed to generate a valid Python list after {input_data.max_retries} attempts" + ) + raise RuntimeError( + f"Failed to generate a valid Python list after {input_data.max_retries} attempts. Last error: {str(e)}" + ) + else: + # Add a retry prompt + logger.debug("Preparing retry prompt") + prompt = f""" + The previous attempt failed due to `{e}` + Generate a valid Python list based on the original prompt. + Remember to respond ONLY with a valid Python list as per the format specified earlier. + Original prompt: + ```{prompt}``` + + Respond only with the list in the format specified with no commentary or apologies. 
+ """ + logger.debug(f"Retry prompt: {prompt}") + + logger.debug("AIListGeneratorBlock.run completed") diff --git a/autogpt_platform/backend/backend/blocks/maths.py b/autogpt_platform/backend/backend/blocks/maths.py new file mode 100644 index 000000000000..cb65de1c0965 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/maths.py @@ -0,0 +1,124 @@ +import operator +from enum import Enum +from typing import Any + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class Operation(Enum): + ADD = "Add" + SUBTRACT = "Subtract" + MULTIPLY = "Multiply" + DIVIDE = "Divide" + POWER = "Power" + + +class CalculatorBlock(Block): + class Input(BlockSchema): + operation: Operation = SchemaField( + description="Choose the math operation you want to perform", + placeholder="Select an operation", + ) + a: float = SchemaField( + description="Enter the first number (A)", placeholder="For example: 10" + ) + b: float = SchemaField( + description="Enter the second number (B)", placeholder="For example: 5" + ) + round_result: bool = SchemaField( + description="Do you want to round the result to a whole number?", + default=False, + ) + + class Output(BlockSchema): + result: float = SchemaField(description="The result of your calculation") + + def __init__(self): + super().__init__( + id="b1ab9b19-67a6-406d-abf5-2dba76d00c79", + input_schema=CalculatorBlock.Input, + output_schema=CalculatorBlock.Output, + description="Performs a mathematical operation on two numbers.", + categories={BlockCategory.LOGIC}, + test_input={ + "operation": Operation.ADD.value, + "a": 10.0, + "b": 5.0, + "round_result": False, + }, + test_output=[ + ("result", 15.0), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + operation = input_data.operation + a = input_data.a + b = input_data.b + + operations = { + Operation.ADD: operator.add, + Operation.SUBTRACT: operator.sub, + Operation.MULTIPLY: operator.mul, + Operation.DIVIDE: operator.truediv, + Operation.POWER: operator.pow, + } + + op_func = operations[operation] + + try: + if operation == Operation.DIVIDE and b == 0: + raise ZeroDivisionError("Cannot divide by zero") + + result = op_func(a, b) + + if input_data.round_result: + result = round(result) + + yield "result", result + + except ZeroDivisionError: + yield "result", float("inf") # Return infinity for division by zero + except Exception: + yield "result", float("nan") # Return NaN for other errors + + +class CountItemsBlock(Block): + class Input(BlockSchema): + collection: Any = SchemaField( + description="Enter the collection you want to count. 
This can be a list, dictionary, string, or any other iterable.", + placeholder="For example: [1, 2, 3] or {'a': 1, 'b': 2} or 'hello'", + ) + + class Output(BlockSchema): + count: int = SchemaField(description="The number of items in the collection") + + def __init__(self): + super().__init__( + id="3c9c2f42-b0c3-435f-ba35-05f7a25c772a", + input_schema=CountItemsBlock.Input, + output_schema=CountItemsBlock.Output, + description="Counts the number of items in a collection.", + categories={BlockCategory.LOGIC}, + test_input={"collection": [1, 2, 3, 4, 5]}, + test_output=[ + ("count", 5), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + collection = input_data.collection + + try: + if isinstance(collection, (str, list, tuple, set, dict)): + count = len(collection) + elif hasattr(collection, "__iter__"): + count = sum(1 for _ in collection) + else: + raise ValueError("Input is not a countable collection") + + yield "count", count + + except Exception: + yield "count", -1 # Return -1 to indicate an error diff --git a/autogpt_platform/backend/backend/blocks/media.py b/autogpt_platform/backend/backend/blocks/media.py new file mode 100644 index 000000000000..1714dca2abc7 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/media.py @@ -0,0 +1,245 @@ +import os +import tempfile +from typing import Literal, Optional + +from moviepy.audio.io.AudioFileClip import AudioFileClip +from moviepy.video.fx.Loop import Loop +from moviepy.video.io.VideoFileClip import VideoFileClip + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.file import MediaFile, get_exec_file_path, store_media_file + + +class MediaDurationBlock(Block): + + class Input(BlockSchema): + media_in: MediaFile = SchemaField( + description="Media input (URL, data URI, or local path)." + ) + is_video: bool = SchemaField( + description="Whether the media is a video (True) or audio (False).", + default=True, + ) + + class Output(BlockSchema): + duration: float = SchemaField( + description="Duration of the media file (in seconds)." + ) + error: str = SchemaField( + description="Error message if something fails.", default="" + ) + + def __init__(self): + super().__init__( + id="d8b91fd4-da26-42d4-8ecb-8b196c6d84b6", + description="Block to get the duration of a media file.", + categories={BlockCategory.MULTIMEDIA}, + input_schema=MediaDurationBlock.Input, + output_schema=MediaDurationBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + graph_exec_id: str, + **kwargs, + ) -> BlockOutput: + # 1) Store the input media locally + local_media_path = store_media_file( + graph_exec_id=graph_exec_id, + file=input_data.media_in, + return_content=False, + ) + media_abspath = get_exec_file_path(graph_exec_id, local_media_path) + + # 2) Load the clip + if input_data.is_video: + clip = VideoFileClip(media_abspath) + else: + clip = AudioFileClip(media_abspath) + + yield "duration", clip.duration + + +class LoopVideoBlock(Block): + """ + Block for looping (repeating) a video clip until a given duration or number of loops. + """ + + class Input(BlockSchema): + video_in: MediaFile = SchemaField( + description="The input video (can be a URL, data URI, or local path)." + ) + # Provide EITHER a `duration` or `n_loops` or both. We'll demonstrate `duration`. + duration: Optional[float] = SchemaField( + description="Target duration (in seconds) to loop the video to. 
If omitted, defaults to no looping.", + default=None, + ge=0.0, + ) + n_loops: Optional[int] = SchemaField( + description="Number of times to repeat the video. If omitted, defaults to 1 (no repeat).", + default=None, + ge=1, + ) + output_return_type: Literal["file_path", "data_uri"] = SchemaField( + description="How to return the output video. Either a relative path or base64 data URI.", + default="file_path", + ) + + class Output(BlockSchema): + video_out: str = SchemaField( + description="Looped video returned either as a relative path or a data URI." + ) + error: str = SchemaField( + description="Error message if something fails.", default="" + ) + + def __init__(self): + super().__init__( + id="8bf9eef6-5451-4213-b265-25306446e94b", + description="Block to loop a video to a given duration or number of repeats.", + categories={BlockCategory.MULTIMEDIA}, + input_schema=LoopVideoBlock.Input, + output_schema=LoopVideoBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + node_exec_id: str, + graph_exec_id: str, + **kwargs, + ) -> BlockOutput: + # 1) Store the input video locally + local_video_path = store_media_file( + graph_exec_id=graph_exec_id, + file=input_data.video_in, + return_content=False, + ) + input_abspath = get_exec_file_path(graph_exec_id, local_video_path) + + # 2) Load the clip + clip = VideoFileClip(input_abspath) + + # 3) Apply the loop effect + looped_clip = clip + if input_data.duration: + # Loop until we reach the specified duration + looped_clip = looped_clip.with_effects([Loop(duration=input_data.duration)]) + elif input_data.n_loops: + looped_clip = looped_clip.with_effects([Loop(n=input_data.n_loops)]) + else: + raise ValueError("Either 'duration' or 'n_loops' must be provided.") + + assert isinstance(looped_clip, VideoFileClip) + + # 4) Save the looped output + output_filename = MediaFile( + f"{node_exec_id}_looped_{os.path.basename(local_video_path)}" + ) + output_abspath = get_exec_file_path(graph_exec_id, output_filename) + + looped_clip = looped_clip.with_audio(clip.audio) + looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac") + + # Return as data URI + video_out = store_media_file( + graph_exec_id=graph_exec_id, + file=output_filename, + return_content=input_data.output_return_type == "data_uri", + ) + + yield "video_out", video_out + + +class AddAudioToVideoBlock(Block): + """ + Block that adds (attaches) an audio track to an existing video. + Optionally scale the volume of the new track. + """ + + class Input(BlockSchema): + video_in: MediaFile = SchemaField( + description="Video input (URL, data URI, or local path)." + ) + audio_in: MediaFile = SchemaField( + description="Audio input (URL, data URI, or local path)." + ) + volume: float = SchemaField( + description="Volume scale for the newly attached audio track (1.0 = original).", + default=1.0, + ) + output_return_type: Literal["file_path", "data_uri"] = SchemaField( + description="Return the final output as a relative path or base64 data URI.", + default="file_path", + ) + + class Output(BlockSchema): + video_out: MediaFile = SchemaField( + description="Final video (with attached audio), as a path or data URI." 
+ ) + error: str = SchemaField( + description="Error message if something fails.", default="" + ) + + def __init__(self): + super().__init__( + id="3503748d-62b6-4425-91d6-725b064af509", + description="Block to attach an audio file to a video file using moviepy.", + categories={BlockCategory.MULTIMEDIA}, + input_schema=AddAudioToVideoBlock.Input, + output_schema=AddAudioToVideoBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + node_exec_id: str, + graph_exec_id: str, + **kwargs, + ) -> BlockOutput: + # 1) Store the inputs locally + local_video_path = store_media_file( + graph_exec_id=graph_exec_id, + file=input_data.video_in, + return_content=False, + ) + local_audio_path = store_media_file( + graph_exec_id=graph_exec_id, + file=input_data.audio_in, + return_content=False, + ) + + abs_temp_dir = os.path.join(tempfile.gettempdir(), "exec_file", graph_exec_id) + video_abspath = os.path.join(abs_temp_dir, local_video_path) + audio_abspath = os.path.join(abs_temp_dir, local_audio_path) + + # 2) Load video + audio with moviepy + video_clip = VideoFileClip(video_abspath) + audio_clip = AudioFileClip(audio_abspath) + # Optionally scale volume + if input_data.volume != 1.0: + audio_clip = audio_clip.with_volume_scaled(input_data.volume) + + # 3) Attach the new audio track + final_clip = video_clip.with_audio(audio_clip) + + # 4) Write to output file + output_filename = MediaFile( + f"{node_exec_id}_audio_attached_{os.path.basename(local_video_path)}" + ) + output_abspath = os.path.join(abs_temp_dir, output_filename) + final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac") + + # 5) Return either path or data URI + video_out = store_media_file( + graph_exec_id=graph_exec_id, + file=output_filename, + return_content=input_data.output_return_type == "data_uri", + ) + + yield "video_out", video_out diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py new file mode 100644 index 000000000000..6d871b4caac5 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/medium.py @@ -0,0 +1,195 @@ +from enum import Enum +from typing import List, Literal + +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + BlockSecret, + CredentialsField, + CredentialsMetaInput, + SchemaField, + SecretField, +) +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="medium", + api_key=SecretStr("mock-medium-api-key"), + title="Mock Medium API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class PublishToMediumStatus(str, Enum): + PUBLIC = "public" + DRAFT = "draft" + UNLISTED = "unlisted" + + +class PublishToMediumBlock(Block): + class Input(BlockSchema): + author_id: BlockSecret = SecretField( + key="medium_author_id", + description="""The Medium AuthorID of the user. 
You can get this by calling the /me endpoint of the Medium API.\n\ncurl -H "Authorization: Bearer YOUR_ACCESS_TOKEN" https://api.medium.com/v1/me" the response will contain the authorId field.""", + placeholder="Enter the author's Medium AuthorID", + ) + title: str = SchemaField( + description="The title of your Medium post", + placeholder="Enter your post title", + ) + content: str = SchemaField( + description="The main content of your Medium post", + placeholder="Enter your post content", + ) + content_format: str = SchemaField( + description="The format of the content: 'html' or 'markdown'", + placeholder="html", + ) + tags: List[str] = SchemaField( + description="List of tags for your Medium post (up to 5)", + placeholder="['technology', 'AI', 'blogging']", + ) + canonical_url: str | None = SchemaField( + default=None, + description="The original home of this content, if it was originally published elsewhere", + placeholder="https://yourblog.com/original-post", + ) + publish_status: PublishToMediumStatus = SchemaField( + description="The publish status", + placeholder=PublishToMediumStatus.DRAFT, + ) + license: str = SchemaField( + default="all-rights-reserved", + description="The license of the post: 'all-rights-reserved', 'cc-40-by', 'cc-40-by-sa', 'cc-40-by-nd', 'cc-40-by-nc', 'cc-40-by-nc-nd', 'cc-40-by-nc-sa', 'cc-40-zero', 'public-domain'", + placeholder="all-rights-reserved", + ) + notify_followers: bool = SchemaField( + default=False, + description="Whether to notify followers that the user has published", + placeholder="False", + ) + credentials: CredentialsMetaInput[ + Literal[ProviderName.MEDIUM], Literal["api_key"] + ] = CredentialsField( + description="The Medium integration can be used with any API key with sufficient permissions for the blocks it is used on.", + ) + + class Output(BlockSchema): + post_id: str = SchemaField(description="The ID of the created Medium post") + post_url: str = SchemaField(description="The URL of the created Medium post") + published_at: int = SchemaField( + description="The timestamp when the post was published" + ) + error: str = SchemaField( + description="Error message if the post creation failed" + ) + + def __init__(self): + super().__init__( + id="3f7b2dcb-4a78-4e3f-b0f1-88132e1b89df", + input_schema=PublishToMediumBlock.Input, + output_schema=PublishToMediumBlock.Output, + description="Publishes a post to Medium.", + categories={BlockCategory.SOCIAL}, + test_input={ + "author_id": "1234567890abcdef", + "title": "Test Post", + "content": "

<h1>Test Content</h1><p>This is a test post.</p>
", + "content_format": "html", + "tags": ["test", "automation"], + "license": "all-rights-reserved", + "notify_followers": False, + "publish_status": PublishToMediumStatus.DRAFT.value, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ("post_id", "e6f36a"), + ("post_url", "https://medium.com/@username/test-post-e6f36a"), + ("published_at", 1626282600), + ], + test_mock={ + "create_post": lambda *args, **kwargs: { + "data": { + "id": "e6f36a", + "url": "https://medium.com/@username/test-post-e6f36a", + "authorId": "1234567890abcdef", + "publishedAt": 1626282600, + } + } + }, + test_credentials=TEST_CREDENTIALS, + ) + + def create_post( + self, + api_key: SecretStr, + author_id, + title, + content, + content_format, + tags, + canonical_url, + publish_status, + license, + notify_followers, + ): + headers = { + "Authorization": f"Bearer {api_key.get_secret_value()}", + "Content-Type": "application/json", + "Accept": "application/json", + } + + data = { + "title": title, + "content": content, + "contentFormat": content_format, + "tags": tags, + "canonicalUrl": canonical_url, + "publishStatus": publish_status, + "license": license, + "notifyFollowers": notify_followers, + } + + response = requests.post( + f"https://api.medium.com/v1/users/{author_id}/posts", + headers=headers, + json=data, + ) + + return response.json() + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + response = self.create_post( + credentials.api_key, + input_data.author_id.get_secret_value(), + input_data.title, + input_data.content, + input_data.content_format, + input_data.tags, + input_data.canonical_url, + input_data.publish_status, + input_data.license, + input_data.notify_followers, + ) + + if "data" in response: + yield "post_id", response["data"]["id"] + yield "post_url", response["data"]["url"] + yield "published_at", response["data"]["publishedAt"] + else: + error_message = response.get("errors", [{}])[0].get( + "message", "Unknown error occurred" + ) + raise RuntimeError(f"Failed to create Medium post: {error_message}") diff --git a/autogpt_platform/backend/backend/blocks/mem0.py b/autogpt_platform/backend/backend/blocks/mem0.py new file mode 100644 index 000000000000..90245e0e0a4e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/mem0.py @@ -0,0 +1,338 @@ +from typing import Any, Literal, Optional, Union + +from mem0 import MemoryClient +from pydantic import BaseModel, SecretStr + +from backend.data.block import Block, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +TEST_CREDENTIALS = APIKeyCredentials( + id="ed55ac19-356e-4243-a6cb-bc599e9b716f", + provider="mem0", + api_key=SecretStr("mock-mem0-api-key"), + title="Mock Mem0 API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +class Mem0Base: + """Base class with shared utilities for Mem0 blocks""" + + @staticmethod + def _get_client(credentials: APIKeyCredentials) -> MemoryClient: + """Get initialized Mem0 client""" + return MemoryClient(api_key=credentials.api_key.get_secret_value()) + + +Filter = dict[str, list[dict[str, str | dict[str, list[str]]]]] + + +class Conversation(BaseModel): + discriminator: Literal["conversation"] + messages: list[dict[str, str]] + + +class 
Content(BaseModel): + discriminator: Literal["content"] + content: str + + +class AddMemoryBlock(Block, Mem0Base): + """Block for adding memories to Mem0 + + Always limited by user_id and optional graph_id and graph_exec_id""" + + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.MEM0], Literal["api_key"] + ] = CredentialsField(description="Mem0 API key credentials") + content: Union[Content, Conversation] = SchemaField( + discriminator="discriminator", + description="Content to add - either a string or list of message objects as output from an AI block", + default=Content(discriminator="content", content="I'm a vegetarian"), + ) + metadata: dict[str, Any] = SchemaField( + description="Optional metadata for the memory", default={} + ) + + limit_memory_to_run: bool = SchemaField( + description="Limit the memory to the run", default=False + ) + limit_memory_to_agent: bool = SchemaField( + description="Limit the memory to the agent", default=False + ) + + class Output(BlockSchema): + action: str = SchemaField(description="Action of the operation") + memory: str = SchemaField(description="Memory created") + error: str = SchemaField(description="Error message if operation fails") + + def __init__(self): + super().__init__( + id="dce97578-86be-45a4-ae50-f6de33fc935a", + description="Add new memories to Mem0 with user segmentation", + input_schema=AddMemoryBlock.Input, + output_schema=AddMemoryBlock.Output, + test_input=[ + { + "content": { + "discriminator": "conversation", + "messages": [{"role": "user", "content": "I'm a vegetarian"}], + }, + "metadata": {"food": "vegetarian"}, + "credentials": TEST_CREDENTIALS_INPUT, + }, + { + "content": { + "discriminator": "content", + "content": "I am a vegetarian", + }, + "metadata": {"food": "vegetarian"}, + "credentials": TEST_CREDENTIALS_INPUT, + }, + ], + test_output=[("action", "NO_CHANGE"), ("action", "NO_CHANGE")], + test_credentials=TEST_CREDENTIALS, + test_mock={"_get_client": lambda credentials: MockMemoryClient()}, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + user_id: str, + graph_id: str, + graph_exec_id: str, + **kwargs + ) -> BlockOutput: + try: + client = self._get_client(credentials) + + if isinstance(input_data.content, Conversation): + messages = input_data.content.messages + else: + messages = [{"role": "user", "content": input_data.content}] + + params = { + "user_id": user_id, + "output_format": "v1.1", + "metadata": input_data.metadata, + } + + if input_data.limit_memory_to_run: + params["run_id"] = graph_exec_id + if input_data.limit_memory_to_agent: + params["agent_id"] = graph_id + + # Use the client to add memory + result = client.add( + messages, + **params, + ) + + if len(result.get("results", [])) > 0: + for result in result.get("results", []): + yield "action", result["event"] + yield "memory", result["memory"] + else: + yield "action", "NO_CHANGE" + + except Exception as e: + yield "error", str(object=e) + + +class SearchMemoryBlock(Block, Mem0Base): + """Block for searching memories in Mem0""" + + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.MEM0], Literal["api_key"] + ] = CredentialsField(description="Mem0 API key credentials") + query: str = SchemaField( + description="Search query", + advanced=False, + ) + trigger: bool = SchemaField( + description="An unused field that is used to (re-)trigger the block when you have no other inputs", + default=False, + advanced=False, + ) + categories_filter: list[str] 
= SchemaField( + description="Categories to filter by", + default=[], + advanced=True, + ) + limit_memory_to_run: bool = SchemaField( + description="Limit the memory to the run", default=False + ) + limit_memory_to_agent: bool = SchemaField( + description="Limit the memory to the agent", default=True + ) + + class Output(BlockSchema): + memories: Any = SchemaField(description="List of matching memories") + error: str = SchemaField(description="Error message if operation fails") + + def __init__(self): + super().__init__( + id="bd7c84e3-e073-4b75-810c-600886ec8a5b", + description="Search memories in Mem0 by user", + input_schema=SearchMemoryBlock.Input, + output_schema=SearchMemoryBlock.Output, + test_input={ + "query": "vegetarian preferences", + "credentials": TEST_CREDENTIALS_INPUT, + "top_k": 10, + "rerank": True, + }, + test_output=[ + ("memories", [{"id": "test-memory", "content": "test content"}]) + ], + test_credentials=TEST_CREDENTIALS, + test_mock={"_get_client": lambda credentials: MockMemoryClient()}, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + user_id: str, + graph_id: str, + graph_exec_id: str, + **kwargs + ) -> BlockOutput: + try: + client = self._get_client(credentials) + + filters: Filter = { + # This works with only one filter, so we can allow others to add on later + "AND": [ + {"user_id": user_id}, + ] + } + if input_data.categories_filter: + filters["AND"].append( + {"categories": {"contains": input_data.categories_filter}} + ) + if input_data.limit_memory_to_run: + filters["AND"].append({"run_id": graph_exec_id}) + if input_data.limit_memory_to_agent: + filters["AND"].append({"agent_id": graph_id}) + + result: list[dict[str, Any]] = client.search( + input_data.query, version="v2", filters=filters + ) + yield "memories", result + + except Exception as e: + yield "error", str(e) + + +class GetAllMemoriesBlock(Block, Mem0Base): + """Block for retrieving all memories from Mem0""" + + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.MEM0], Literal["api_key"] + ] = CredentialsField(description="Mem0 API key credentials") + trigger: bool = SchemaField( + description="An unused field that is used to trigger the block when you have no other inputs", + default=False, + advanced=False, + ) + categories: Optional[list[str]] = SchemaField( + description="Filter by categories", default=None + ) + limit_memory_to_run: bool = SchemaField( + description="Limit the memory to the run", default=False + ) + limit_memory_to_agent: bool = SchemaField( + description="Limit the memory to the agent", default=False + ) + + class Output(BlockSchema): + memories: Any = SchemaField(description="List of memories") + error: str = SchemaField(description="Error message if operation fails") + + def __init__(self): + super().__init__( + id="45aee5bf-4767-45d1-a28b-e01c5aae9fc1", + description="Retrieve all memories from Mem0 with pagination", + input_schema=GetAllMemoriesBlock.Input, + output_schema=GetAllMemoriesBlock.Output, + test_input={ + "user_id": "test_user", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ("memories", [{"id": "test-memory", "content": "test content"}]), + ], + test_credentials=TEST_CREDENTIALS, + test_mock={"_get_client": lambda credentials: MockMemoryClient()}, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + user_id: str, + graph_id: str, + graph_exec_id: str, + **kwargs + ) -> BlockOutput: + try: + client = self._get_client(credentials) + + 
filters: Filter = { + "AND": [ + {"user_id": user_id}, + ] + } + if input_data.limit_memory_to_run: + filters["AND"].append({"run_id": graph_exec_id}) + if input_data.limit_memory_to_agent: + filters["AND"].append({"agent_id": graph_id}) + if input_data.categories: + filters["AND"].append( + {"categories": {"contains": input_data.categories}} + ) + + memories: list[dict[str, Any]] = client.get_all( + filters=filters, + version="v2", + ) + + yield "memories", memories + + except Exception as e: + yield "error", str(e) + + +# Mock client for testing +class MockMemoryClient: + """Mock Mem0 client for testing""" + + def add(self, *args, **kwargs): + return {"memory_id": "test-memory-id", "status": "success"} + + def search(self, *args, **kwargs) -> list[dict[str, str]]: + return [{"id": "test-memory", "content": "test content"}] + + def get_all(self, *args, **kwargs) -> list[dict[str, str]]: + return [{"id": "test-memory", "content": "test content"}] diff --git a/autogpt_platform/backend/backend/blocks/nvidia/_auth.py b/autogpt_platform/backend/backend/blocks/nvidia/_auth.py new file mode 100644 index 000000000000..46f28f009e0d --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/nvidia/_auth.py @@ -0,0 +1,32 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +NvidiaCredentials = APIKeyCredentials +NvidiaCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.NVIDIA], + Literal["api_key"], +] + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="nvidia", + api_key=SecretStr("mock-nvidia-api-key"), + title="Mock Nvidia API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +def NvidiaCredentialsField() -> NvidiaCredentialsInput: + """Creates an Nvidia credentials input on a block.""" + return CredentialsField(description="The Nvidia integration requires an API Key.") diff --git a/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py b/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py new file mode 100644 index 000000000000..a90bb0282679 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/nvidia/deepfake.py @@ -0,0 +1,90 @@ +from backend.blocks.nvidia._auth import ( + NvidiaCredentials, + NvidiaCredentialsField, + NvidiaCredentialsInput, +) +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util.request import requests + + +class NvidiaDeepfakeDetectBlock(Block): + class Input(BlockSchema): + credentials: NvidiaCredentialsInput = NvidiaCredentialsField() + image_base64: str = SchemaField( + description="Image to analyze for deepfakes", image_upload=True + ) + return_image: bool = SchemaField( + description="Whether to return the processed image with markings", + default=False, + ) + + class Output(BlockSchema): + status: str = SchemaField( + description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)", + default="", + ) + image: str = SchemaField( + description="Processed image with detection markings (if return_image=True)", + default="", + image_output=True, + ) + is_deepfake: float = SchemaField( + description="Probability that the image is a deepfake (0-1)", + default=0.0, + ) + + def __init__(self): + 
super().__init__( + id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f", + description="Detects potential deepfakes in images using Nvidia's AI API", + categories={BlockCategory.SAFETY}, + input_schema=NvidiaDeepfakeDetectBlock.Input, + output_schema=NvidiaDeepfakeDetectBlock.Output, + ) + + def run( + self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs + ) -> BlockOutput: + url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection" + + headers = { + "accept": "application/json", + "content-type": "application/json", + "Authorization": f"Bearer {credentials.api_key.get_secret_value()}", + } + + image_data = f"data:image/jpeg;base64,{input_data.image_base64}" + + payload = { + "input": [image_data], + "return_image": input_data.return_image, + } + + try: + response = requests.post(url, headers=headers, json=payload) + response.raise_for_status() + data = response.json() + + result = data.get("data", [{}])[0] + + # Get deepfake probability from first bounding box if any + deepfake_prob = 0.0 + if result.get("bounding_boxes"): + deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0) + + yield "status", result.get("status", "ERROR") + yield "is_deepfake", deepfake_prob + + if input_data.return_image: + image_data = result.get("image", "") + output_data = f"data:image/jpeg;base64,{image_data}" + yield "image", output_data + else: + yield "image", "" + + except Exception as e: + yield "error", str(e) + yield "status", "ERROR" + yield "is_deepfake", 0.0 + yield "image", "" diff --git a/autogpt_platform/backend/backend/blocks/pinecone.py b/autogpt_platform/backend/backend/blocks/pinecone.py new file mode 100644 index 000000000000..adfbc2e33fcd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/pinecone.py @@ -0,0 +1,228 @@ +import uuid +from typing import Any, Literal + +from pinecone import Pinecone, ServerlessSpec + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +PineconeCredentials = APIKeyCredentials +PineconeCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.PINECONE], + Literal["api_key"], +] + + +def PineconeCredentialsField() -> PineconeCredentialsInput: + """Creates a Pinecone credentials input on a block.""" + return CredentialsField( + description="The Pinecone integration can be used with an API Key.", + ) + + +class PineconeInitBlock(Block): + class Input(BlockSchema): + credentials: PineconeCredentialsInput = PineconeCredentialsField() + index_name: str = SchemaField(description="Name of the Pinecone index") + dimension: int = SchemaField( + description="Dimension of the vectors", default=768 + ) + metric: str = SchemaField( + description="Distance metric for the index", default="cosine" + ) + cloud: str = SchemaField( + description="Cloud provider for serverless", default="aws" + ) + region: str = SchemaField( + description="Region for serverless", default="us-east-1" + ) + + class Output(BlockSchema): + index: str = SchemaField(description="Name of the initialized Pinecone index") + message: str = SchemaField(description="Status message") + + def __init__(self): + super().__init__( + id="48d8fdab-8f03-41f3-8407-8107ba11ec9b", + description="Initializes a Pinecone index", + categories={BlockCategory.LOGIC}, + input_schema=PineconeInitBlock.Input, + output_schema=PineconeInitBlock.Output, + ) + + def run( + self, input_data: 
Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + pc = Pinecone(api_key=credentials.api_key.get_secret_value()) + + try: + existing_indexes = pc.list_indexes() + if input_data.index_name not in [index.name for index in existing_indexes]: + pc.create_index( + name=input_data.index_name, + dimension=input_data.dimension, + metric=input_data.metric, + spec=ServerlessSpec( + cloud=input_data.cloud, region=input_data.region + ), + ) + message = f"Created new index: {input_data.index_name}" + else: + message = f"Using existing index: {input_data.index_name}" + + yield "index", input_data.index_name + yield "message", message + except Exception as e: + yield "message", f"Error initializing Pinecone index: {str(e)}" + + +class PineconeQueryBlock(Block): + class Input(BlockSchema): + credentials: PineconeCredentialsInput = PineconeCredentialsField() + query_vector: list = SchemaField(description="Query vector") + namespace: str = SchemaField( + description="Namespace to query in Pinecone", default="" + ) + top_k: int = SchemaField( + description="Number of top results to return", default=3 + ) + include_values: bool = SchemaField( + description="Whether to include vector values in the response", + default=False, + ) + include_metadata: bool = SchemaField( + description="Whether to include metadata in the response", default=True + ) + host: str = SchemaField(description="Host for pinecone", default="") + idx_name: str = SchemaField(description="Index name for pinecone") + + class Output(BlockSchema): + results: Any = SchemaField(description="Query results from Pinecone") + combined_results: Any = SchemaField( + description="Combined results from Pinecone" + ) + + def __init__(self): + super().__init__( + id="9ad93d0f-91b4-4c9c-8eb1-82e26b4a01c5", + description="Queries a Pinecone index", + categories={BlockCategory.LOGIC}, + input_schema=PineconeQueryBlock.Input, + output_schema=PineconeQueryBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + **kwargs, + ) -> BlockOutput: + try: + # Create a new client instance + pc = Pinecone(api_key=credentials.api_key.get_secret_value()) + + # Get the index + idx = pc.Index(input_data.idx_name) + + # Ensure query_vector is in correct format + query_vector = input_data.query_vector + if isinstance(query_vector, list) and len(query_vector) > 0: + if isinstance(query_vector[0], list): + query_vector = query_vector[0] + + results = idx.query( + namespace=input_data.namespace, + vector=query_vector, + top_k=input_data.top_k, + include_values=input_data.include_values, + include_metadata=input_data.include_metadata, + ).to_dict() # type: ignore + combined_text = "" + if results["matches"]: + texts = [ + match["metadata"]["text"] + for match in results["matches"] + if match.get("metadata", {}).get("text") + ] + combined_text = "\n\n".join(texts) + + # Return both the raw matches and combined text + yield "results", { + "matches": results["matches"], + "combined_text": combined_text, + } + yield "combined_results", combined_text + + except Exception as e: + error_msg = f"Error querying Pinecone: {str(e)}" + raise RuntimeError(error_msg) from e + + +class PineconeInsertBlock(Block): + class Input(BlockSchema): + credentials: PineconeCredentialsInput = PineconeCredentialsField() + index: str = SchemaField(description="Initialized Pinecone index") + chunks: list = SchemaField(description="List of text chunks to ingest") + embeddings: list = SchemaField( + description="List of embeddings corresponding to 
the chunks" + ) + namespace: str = SchemaField( + description="Namespace to use in Pinecone", default="" + ) + metadata: dict = SchemaField( + description="Additional metadata to store with each vector", default={} + ) + + class Output(BlockSchema): + upsert_response: str = SchemaField( + description="Response from Pinecone upsert operation" + ) + + def __init__(self): + super().__init__( + id="477f2168-cd91-475a-8146-9499a5982434", + description="Upload data to a Pinecone index", + categories={BlockCategory.LOGIC}, + input_schema=PineconeInsertBlock.Input, + output_schema=PineconeInsertBlock.Output, + ) + + def run( + self, + input_data: Input, + *, + credentials: APIKeyCredentials, + **kwargs, + ) -> BlockOutput: + try: + # Create a new client instance + pc = Pinecone(api_key=credentials.api_key.get_secret_value()) + + # Get the index + idx = pc.Index(input_data.index) + + vectors = [] + for chunk, embedding in zip(input_data.chunks, input_data.embeddings): + vector_metadata = input_data.metadata.copy() + vector_metadata["text"] = chunk + vectors.append( + { + "id": str(uuid.uuid4()), + "values": embedding, + "metadata": vector_metadata, + } + ) + idx.upsert(vectors=vectors, namespace=input_data.namespace) + + yield "upsert_response", "successfully upserted" + + except Exception as e: + error_msg = f"Error uploading to Pinecone: {str(e)}" + raise RuntimeError(error_msg) from e diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py new file mode 100644 index 000000000000..b3dca4ca7477 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -0,0 +1,213 @@ +from datetime import datetime, timezone +from typing import Iterator, Literal + +import praw +from pydantic import BaseModel, SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + CredentialsField, + CredentialsMetaInput, + SchemaField, + UserPasswordCredentials, +) +from backend.integrations.providers import ProviderName +from backend.util.mock import MockObject +from backend.util.settings import Settings + +RedditCredentials = UserPasswordCredentials +RedditCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.REDDIT], + Literal["user_password"], +] + + +def RedditCredentialsField() -> RedditCredentialsInput: + """Creates a Reddit credentials input on a block.""" + return CredentialsField( + description="The Reddit integration requires a username and password.", + ) + + +TEST_CREDENTIALS = UserPasswordCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="reddit", + username=SecretStr("mock-reddit-username"), + password=SecretStr("mock-reddit-password"), + title="Mock Reddit credentials", +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +class RedditPost(BaseModel): + id: str + subreddit: str + title: str + body: str + + +class RedditComment(BaseModel): + post_id: str + comment: str + + +settings = Settings() + + +def get_praw(creds: RedditCredentials) -> praw.Reddit: + client = praw.Reddit( + client_id=settings.secrets.reddit_client_id, + client_secret=settings.secrets.reddit_client_secret, + username=creds.username.get_secret_value(), + password=creds.password.get_secret_value(), + user_agent=settings.config.reddit_user_agent, + ) + me = client.user.me() + if not me: + raise ValueError("Invalid Reddit credentials.") + print(f"Logged in 
as Reddit user: {me.name}") + return client + + +class GetRedditPostsBlock(Block): + class Input(BlockSchema): + subreddit: str = SchemaField( + description="Subreddit name, excluding the /r/ prefix", + default="writingprompts", + ) + credentials: RedditCredentialsInput = RedditCredentialsField() + last_minutes: int | None = SchemaField( + description="Post time to stop minutes ago while fetching posts", + default=None, + ) + last_post: str | None = SchemaField( + description="Post ID to stop when reached while fetching posts", + default=None, + ) + post_limit: int | None = SchemaField( + description="Number of posts to fetch", default=10 + ) + + class Output(BlockSchema): + post: RedditPost = SchemaField(description="Reddit post") + + def __init__(self): + super().__init__( + id="c6731acb-4285-4ee1-bc9b-03d0766c370f", + description="This block fetches Reddit posts from a defined subreddit name.", + categories={BlockCategory.SOCIAL}, + disabled=( + not settings.secrets.reddit_client_id + or not settings.secrets.reddit_client_secret + ), + input_schema=GetRedditPostsBlock.Input, + output_schema=GetRedditPostsBlock.Output, + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "subreddit": "subreddit", + "last_post": "id3", + "post_limit": 2, + }, + test_output=[ + ( + "post", + RedditPost( + id="id1", subreddit="subreddit", title="title1", body="body1" + ), + ), + ( + "post", + RedditPost( + id="id2", subreddit="subreddit", title="title2", body="body2" + ), + ), + ], + test_mock={ + "get_posts": lambda input_data, credentials: [ + MockObject(id="id1", title="title1", selftext="body1"), + MockObject(id="id2", title="title2", selftext="body2"), + MockObject(id="id3", title="title2", selftext="body2"), + ] + }, + ) + + @staticmethod + def get_posts( + input_data: Input, *, credentials: RedditCredentials + ) -> Iterator[praw.reddit.Submission]: + client = get_praw(credentials) + subreddit = client.subreddit(input_data.subreddit) + return subreddit.new(limit=input_data.post_limit or 10) + + def run( + self, input_data: Input, *, credentials: RedditCredentials, **kwargs + ) -> BlockOutput: + current_time = datetime.now(tz=timezone.utc) + for post in self.get_posts(input_data=input_data, credentials=credentials): + if input_data.last_minutes: + post_datetime = datetime.fromtimestamp( + post.created_utc, tz=timezone.utc + ) + time_difference = current_time - post_datetime + if time_difference.total_seconds() / 60 > input_data.last_minutes: + continue + + if input_data.last_post and post.id == input_data.last_post: + break + + yield "post", RedditPost( + id=post.id, + subreddit=input_data.subreddit, + title=post.title, + body=post.selftext, + ) + + +class PostRedditCommentBlock(Block): + class Input(BlockSchema): + credentials: RedditCredentialsInput = RedditCredentialsField() + data: RedditComment = SchemaField(description="Reddit comment") + + class Output(BlockSchema): + comment_id: str = SchemaField(description="Posted comment ID") + + def __init__(self): + super().__init__( + id="4a92261b-701e-4ffb-8970-675fd28e261f", + description="This block posts a Reddit comment on a specified Reddit post.", + categories={BlockCategory.SOCIAL}, + input_schema=PostRedditCommentBlock.Input, + output_schema=PostRedditCommentBlock.Output, + disabled=( + not settings.secrets.reddit_client_id + or not settings.secrets.reddit_client_secret + ), + test_credentials=TEST_CREDENTIALS, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "data": {"post_id": "id", "comment": 
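# Editor's sketch (not part of the patch): the time-window check used in
# GetRedditPostsBlock.run above skips a post once it is older than
# `last_minutes`. A minimal standalone version, standard library only;
# the sample `created_utc` value is illustrative.
from datetime import datetime, timezone

def is_within_window(created_utc: float, last_minutes: int) -> bool:
    """Return True if the post is newer than `last_minutes` minutes."""
    now = datetime.now(tz=timezone.utc)
    post_time = datetime.fromtimestamp(created_utc, tz=timezone.utc)
    age_minutes = (now - post_time).total_seconds() / 60
    return age_minutes <= last_minutes

# Example: a post created 30 minutes ago passes a 60-minute window.
thirty_minutes_ago = datetime.now(tz=timezone.utc).timestamp() - 30 * 60
assert is_within_window(thirty_minutes_ago, last_minutes=60)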
"comment"}, + }, + test_output=[("comment_id", "dummy_comment_id")], + test_mock={"reply_post": lambda creds, comment: "dummy_comment_id"}, + ) + + @staticmethod + def reply_post(creds: RedditCredentials, comment: RedditComment) -> str: + client = get_praw(creds) + submission = client.submission(id=comment.post_id) + new_comment = submission.reply(comment.comment) + if not new_comment: + raise ValueError("Failed to post comment.") + return new_comment.id + + def run( + self, input_data: Input, *, credentials: RedditCredentials, **kwargs + ) -> BlockOutput: + yield "comment_id", self.reply_post(credentials, input_data.data) diff --git a/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py b/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py new file mode 100644 index 000000000000..88094e9a1dec --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py @@ -0,0 +1,237 @@ +import os +from enum import Enum +from typing import Literal + +import replicate +from pydantic import SecretStr +from replicate.helpers import FileOutput + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="replicate", + api_key=SecretStr("mock-replicate-api-key"), + title="Mock Replicate API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +# Model name enum +class ReplicateFluxModelName(str, Enum): + FLUX_SCHNELL = ("Flux Schnell",) + FLUX_PRO = ("Flux Pro",) + FLUX_PRO1_1 = ("Flux Pro 1.1",) + + @property + def api_name(self): + api_names = { + ReplicateFluxModelName.FLUX_SCHNELL: "black-forest-labs/flux-schnell", + ReplicateFluxModelName.FLUX_PRO: "black-forest-labs/flux-pro", + ReplicateFluxModelName.FLUX_PRO1_1: "black-forest-labs/flux-1.1-pro", + } + return api_names[self] + + +# Image type Enum +class ImageType(str, Enum): + WEBP = "webp" + JPG = "jpg" + PNG = "png" + + +class ReplicateFluxAdvancedModelBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.REPLICATE], Literal["api_key"] + ] = CredentialsField( + description="The Replicate integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + prompt: str = SchemaField( + description="Text prompt for image generation", + placeholder="e.g., 'A futuristic cityscape at sunset'", + title="Prompt", + ) + replicate_model_name: ReplicateFluxModelName = SchemaField( + description="The name of the Image Generation Model, i.e Flux Schnell", + default=ReplicateFluxModelName.FLUX_SCHNELL, + title="Image Generation Model", + advanced=False, + ) + seed: int | None = SchemaField( + description="Random seed. Set for reproducible generation", + default=None, + title="Seed", + ) + steps: int = SchemaField( + description="Number of diffusion steps", + default=25, + title="Steps", + ) + guidance: float = SchemaField( + description=( + "Controls the balance between adherence to the text prompt and image quality/diversity. " + "Higher values make the output more closely match the prompt but may reduce overall image quality." 
+ ), + default=3, + title="Guidance", + ) + interval: float = SchemaField( + description=( + "Interval is a setting that increases the variance in possible outputs. " + "Setting this value low will ensure strong prompt following with more consistent outputs." + ), + default=2, + title="Interval", + ) + aspect_ratio: str = SchemaField( + description="Aspect ratio for the generated image", + default="1:1", + title="Aspect Ratio", + placeholder="Choose from: 1:1, 16:9, 2:3, 3:2, 4:5, 5:4, 9:16", + ) + output_format: ImageType = SchemaField( + description="File format of the output image", + default=ImageType.WEBP, + title="Output Format", + ) + output_quality: int = SchemaField( + description=( + "Quality when saving the output images, from 0 to 100. " + "Not relevant for .png outputs" + ), + default=80, + title="Output Quality", + ) + safety_tolerance: int = SchemaField( + description="Safety tolerance, 1 is most strict and 5 is most permissive", + default=2, + title="Safety Tolerance", + ) + + class Output(BlockSchema): + result: str = SchemaField(description="Generated output") + error: str = SchemaField(description="Error message if the model run failed") + + def __init__(self): + super().__init__( + id="90f8c45e-e983-4644-aa0b-b4ebe2f531bc", + description="This block runs Flux models on Replicate with advanced settings.", + categories={BlockCategory.AI, BlockCategory.MULTIMEDIA}, + input_schema=ReplicateFluxAdvancedModelBlock.Input, + output_schema=ReplicateFluxAdvancedModelBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "replicate_model_name": ReplicateFluxModelName.FLUX_SCHNELL, + "prompt": "A beautiful landscape painting of a serene lake at sunrise", + "seed": None, + "steps": 25, + "guidance": 3.0, + "interval": 2.0, + "aspect_ratio": "1:1", + "output_format": ImageType.PNG, + "output_quality": 80, + "safety_tolerance": 2, + }, + test_output=[ + ( + "result", + "https://replicate.com/output/generated-image-url.jpg", + ), + ], + test_mock={ + "run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: "https://replicate.com/output/generated-image-url.jpg", + }, + test_credentials=TEST_CREDENTIALS, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + # If the seed is not provided, generate a random seed + seed = input_data.seed + if seed is None: + seed = int.from_bytes(os.urandom(4), "big") + + # Run the model using the provided inputs + result = self.run_model( + api_key=credentials.api_key, + model_name=input_data.replicate_model_name.api_name, + prompt=input_data.prompt, + seed=seed, + steps=input_data.steps, + guidance=input_data.guidance, + interval=input_data.interval, + aspect_ratio=input_data.aspect_ratio, + output_format=input_data.output_format, + output_quality=input_data.output_quality, + safety_tolerance=input_data.safety_tolerance, + ) + yield "result", result + + def run_model( + self, + api_key: SecretStr, + model_name, + prompt, + seed, + steps, + guidance, + interval, + aspect_ratio, + output_format, + output_quality, + safety_tolerance, + ): + # Initialize Replicate client with the API key + client = replicate.Client(api_token=api_key.get_secret_value()) + + # Run the model with additional parameters + output: FileOutput | list[FileOutput] = client.run( # type: ignore This is because they changed the return type, and didn't update the type hint! 
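# Editor's sketch (not part of the patch): when no seed is supplied, the
# Replicate block draws a random 32-bit seed from os.urandom. Small
# standalone version of that fallback.
import os

def pick_seed(seed: int | None) -> int:
    # 4 random bytes interpreted big-endian -> an int in [0, 2**32 - 1]
    return seed if seed is not None else int.from_bytes(os.urandom(4), "big")

assert 0 <= pick_seed(None) < 2**32
assert pick_seed(42) == 42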
It should be overloaded depending on the value of `use_file_output` to `FileOutput | list[FileOutput]` but it's `Any | Iterator[Any]` + f"{model_name}", + input={ + "prompt": prompt, + "seed": seed, + "steps": steps, + "guidance": guidance, + "interval": interval, + "aspect_ratio": aspect_ratio, + "output_format": output_format, + "output_quality": output_quality, + "safety_tolerance": safety_tolerance, + }, + wait=False, # don't arbitrarily return data:octect/stream or sometimes url depending on the model???? what is this api + ) + + # Check if output is a list or a string and extract accordingly; otherwise, assign a default message + if isinstance(output, list) and len(output) > 0: + if isinstance(output[0], FileOutput): + result_url = output[0].url # If output is a list, get the first element + else: + result_url = output[ + 0 + ] # If output is a list and not a FileOutput, get the first element. Should never happen, but just in case. + elif isinstance(output, FileOutput): + result_url = output.url # If output is a FileOutput, use the url + elif isinstance(output, str): + result_url = output # If output is a string (for some reason due to their janky type hinting), use it directly + else: + result_url = ( + "No output received" # Fallback message if output is not as expected + ) + + return result_url diff --git a/autogpt_platform/backend/backend/blocks/rss.py b/autogpt_platform/backend/backend/blocks/rss.py new file mode 100644 index 000000000000..9a5a17ebeeda --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/rss.py @@ -0,0 +1,116 @@ +import time +from datetime import datetime, timedelta, timezone +from typing import Any + +import feedparser +import pydantic + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class RSSEntry(pydantic.BaseModel): + title: str + link: str + description: str + pub_date: datetime + author: str + categories: list[str] + + +class ReadRSSFeedBlock(Block): + class Input(BlockSchema): + rss_url: str = SchemaField( + description="The URL of the RSS feed to read", + placeholder="https://example.com/rss", + ) + time_period: int = SchemaField( + description="The time period to check in minutes relative to the run block runtime, e.g. 
60 would check for new entries in the last hour.", + placeholder="1440", + default=1440, + ) + polling_rate: int = SchemaField( + description="The number of seconds to wait between polling attempts.", + placeholder="300", + ) + run_continuously: bool = SchemaField( + description="Whether to run the block continuously or just once.", + default=True, + ) + + class Output(BlockSchema): + entry: RSSEntry = SchemaField(description="The RSS item") + + def __init__(self): + super().__init__( + id="5ebe6768-8e5d-41e3-9134-1c7bd89a8d52", + input_schema=ReadRSSFeedBlock.Input, + output_schema=ReadRSSFeedBlock.Output, + description="Reads RSS feed entries from a given URL.", + categories={BlockCategory.INPUT}, + test_input={ + "rss_url": "https://example.com/rss", + "time_period": 10_000_000, + "polling_rate": 1, + "run_continuously": False, + }, + test_output=[ + ( + "entry", + RSSEntry( + title="Example RSS Item", + link="https://example.com/article", + description="This is an example RSS item description.", + pub_date=datetime(2023, 6, 23, 12, 30, 0, tzinfo=timezone.utc), + author="John Doe", + categories=["Technology", "News"], + ), + ), + ], + test_mock={ + "parse_feed": lambda *args, **kwargs: { + "entries": [ + { + "title": "Example RSS Item", + "link": "https://example.com/article", + "summary": "This is an example RSS item description.", + "published_parsed": (2023, 6, 23, 12, 30, 0, 4, 174, 0), + "author": "John Doe", + "tags": [{"term": "Technology"}, {"term": "News"}], + } + ] + } + }, + ) + + @staticmethod + def parse_feed(url: str) -> dict[str, Any]: + return feedparser.parse(url) # type: ignore + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + keep_going = True + start_time = datetime.now(timezone.utc) - timedelta( + minutes=input_data.time_period + ) + while keep_going: + keep_going = input_data.run_continuously + + feed = self.parse_feed(input_data.rss_url) + + for entry in feed["entries"]: + pub_date = datetime(*entry["published_parsed"][:6], tzinfo=timezone.utc) + + if pub_date > start_time: + yield ( + "entry", + RSSEntry( + title=entry["title"], + link=entry["link"], + description=entry.get("summary", ""), + pub_date=pub_date, + author=entry.get("author", ""), + categories=[tag["term"] for tag in entry.get("tags", [])], + ), + ) + + time.sleep(input_data.polling_rate) diff --git a/autogpt_platform/backend/backend/blocks/sampling.py b/autogpt_platform/backend/backend/blocks/sampling.py new file mode 100644 index 000000000000..d2257db06f68 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/sampling.py @@ -0,0 +1,264 @@ +import random +from collections import defaultdict +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class SamplingMethod(str, Enum): + RANDOM = "random" + SYSTEMATIC = "systematic" + TOP = "top" + BOTTOM = "bottom" + STRATIFIED = "stratified" + WEIGHTED = "weighted" + RESERVOIR = "reservoir" + CLUSTER = "cluster" + + +class DataSamplingBlock(Block): + class Input(BlockSchema): + data: Union[Dict[str, Any], List[Union[dict, List[Any]]]] = SchemaField( + description="The dataset to sample from. 
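# Editor's sketch (not part of the patch): feedparser exposes publish times as
# a 9-field struct_time-like tuple, and ReadRSSFeedBlock rebuilds an aware
# datetime from its first six fields. Shown here with the same sample tuple
# used in the block's test_mock.
from datetime import datetime, timezone

published_parsed = (2023, 6, 23, 12, 30, 0, 4, 174, 0)
pub_date = datetime(*published_parsed[:6], tzinfo=timezone.utc)
assert pub_date == datetime(2023, 6, 23, 12, 30, 0, tzinfo=timezone.utc)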
Can be a single dictionary, a list of dictionaries, or a list of lists.", + placeholder="{'id': 1, 'value': 'a'} or [{'id': 1, 'value': 'a'}, {'id': 2, 'value': 'b'}, ...]", + ) + sample_size: int = SchemaField( + description="The number of samples to take from the dataset.", + placeholder="10", + default=10, + ) + sampling_method: SamplingMethod = SchemaField( + description="The method to use for sampling.", + default=SamplingMethod.RANDOM, + ) + accumulate: bool = SchemaField( + description="Whether to accumulate data before sampling.", + default=False, + ) + random_seed: Optional[int] = SchemaField( + description="Seed for random number generator (optional).", + default=None, + ) + stratify_key: Optional[str] = SchemaField( + description="Key to use for stratified sampling (required for stratified sampling).", + default=None, + ) + weight_key: Optional[str] = SchemaField( + description="Key to use for weighted sampling (required for weighted sampling).", + default=None, + ) + cluster_key: Optional[str] = SchemaField( + description="Key to use for cluster sampling (required for cluster sampling).", + default=None, + ) + + class Output(BlockSchema): + sampled_data: List[Union[dict, List[Any]]] = SchemaField( + description="The sampled subset of the input data." + ) + sample_indices: List[int] = SchemaField( + description="The indices of the sampled data in the original dataset." + ) + + def __init__(self): + super().__init__( + id="4a448883-71fa-49cf-91cf-70d793bd7d87", + description="This block samples data from a given dataset using various sampling methods.", + categories={BlockCategory.LOGIC}, + input_schema=DataSamplingBlock.Input, + output_schema=DataSamplingBlock.Output, + test_input={ + "data": [ + {"id": i, "value": chr(97 + i), "group": i % 3} for i in range(10) + ], + "sample_size": 3, + "sampling_method": SamplingMethod.STRATIFIED, + "accumulate": False, + "random_seed": 42, + "stratify_key": "group", + }, + test_output=[ + ( + "sampled_data", + [ + {"id": 0, "value": "a", "group": 0}, + {"id": 1, "value": "b", "group": 1}, + {"id": 8, "value": "i", "group": 2}, + ], + ), + ("sample_indices", [0, 1, 8]), + ], + ) + self.accumulated_data = [] + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + if input_data.accumulate: + if isinstance(input_data.data, dict): + self.accumulated_data.append(input_data.data) + elif isinstance(input_data.data, list): + self.accumulated_data.extend(input_data.data) + else: + raise ValueError(f"Unsupported data type: {type(input_data.data)}") + + # If we don't have enough data yet, return without sampling + if len(self.accumulated_data) < input_data.sample_size: + return + + data_to_sample = self.accumulated_data + else: + # If not accumulating, use the input data directly + data_to_sample = ( + input_data.data + if isinstance(input_data.data, list) + else [input_data.data] + ) + + if input_data.random_seed is not None: + random.seed(input_data.random_seed) + + data_size = len(data_to_sample) + + if input_data.sample_size > data_size: + raise ValueError( + f"Sample size ({input_data.sample_size}) cannot be larger than the dataset size ({data_size})." 
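# Editor's sketch (not part of the patch): the SYSTEMATIC strategy implemented
# below picks every `step`-th index starting from a random offset, then
# truncates to the requested sample size. Standalone version for clarity;
# assumes sample_size <= data_size, as the block enforces above.
import random

def systematic_indices(data_size: int, sample_size: int, seed: int | None = None) -> list[int]:
    if seed is not None:
        random.seed(seed)
    step = data_size // sample_size
    start = random.randint(0, step - 1)
    return list(range(start, data_size, step))[:sample_size]

assert len(systematic_indices(100, 10, seed=42)) == 10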
+ ) + + indices = [] + + if input_data.sampling_method == SamplingMethod.RANDOM: + indices = random.sample(range(data_size), input_data.sample_size) + elif input_data.sampling_method == SamplingMethod.SYSTEMATIC: + step = data_size // input_data.sample_size + start = random.randint(0, step - 1) + indices = list(range(start, data_size, step))[: input_data.sample_size] + elif input_data.sampling_method == SamplingMethod.TOP: + indices = list(range(input_data.sample_size)) + elif input_data.sampling_method == SamplingMethod.BOTTOM: + indices = list(range(data_size - input_data.sample_size, data_size)) + elif input_data.sampling_method == SamplingMethod.STRATIFIED: + if not input_data.stratify_key: + raise ValueError( + "Stratify key must be provided for stratified sampling." + ) + strata = defaultdict(list) + for i, item in enumerate(data_to_sample): + if isinstance(item, dict): + strata_value = item.get(input_data.stratify_key) + elif hasattr(item, input_data.stratify_key): + strata_value = getattr(item, input_data.stratify_key) + else: + raise ValueError( + f"Stratify key '{input_data.stratify_key}' not found in item {item}" + ) + + if strata_value is None: + raise ValueError( + f"Stratify value for key '{input_data.stratify_key}' is None" + ) + + strata[str(strata_value)].append(i) + + # Calculate the number of samples to take from each stratum + stratum_sizes = { + k: max(1, int(len(v) / data_size * input_data.sample_size)) + for k, v in strata.items() + } + + # Adjust sizes to ensure we get exactly sample_size samples + while sum(stratum_sizes.values()) != input_data.sample_size: + if sum(stratum_sizes.values()) < input_data.sample_size: + stratum_sizes[ + max(stratum_sizes, key=lambda k: stratum_sizes[k]) + ] += 1 + else: + stratum_sizes[ + max(stratum_sizes, key=lambda k: stratum_sizes[k]) + ] -= 1 + + for stratum, size in stratum_sizes.items(): + indices.extend(random.sample(strata[stratum], size)) + elif input_data.sampling_method == SamplingMethod.WEIGHTED: + if not input_data.weight_key: + raise ValueError("Weight key must be provided for weighted sampling.") + weights = [] + for item in data_to_sample: + if isinstance(item, dict): + weight = item.get(input_data.weight_key) + elif hasattr(item, input_data.weight_key): + weight = getattr(item, input_data.weight_key) + else: + raise ValueError( + f"Weight key '{input_data.weight_key}' not found in item {item}" + ) + + if weight is None: + raise ValueError( + f"Weight value for key '{input_data.weight_key}' is None" + ) + try: + weights.append(float(weight)) + except ValueError: + raise ValueError( + f"Weight value '{weight}' cannot be converted to a number" + ) + + if not weights: + raise ValueError( + f"No valid weights found using key '{input_data.weight_key}'" + ) + + indices = random.choices( + range(data_size), weights=weights, k=input_data.sample_size + ) + elif input_data.sampling_method == SamplingMethod.RESERVOIR: + indices = list(range(input_data.sample_size)) + for i in range(input_data.sample_size, data_size): + j = random.randint(0, i) + if j < input_data.sample_size: + indices[j] = i + elif input_data.sampling_method == SamplingMethod.CLUSTER: + if not input_data.cluster_key: + raise ValueError("Cluster key must be provided for cluster sampling.") + clusters = defaultdict(list) + for i, item in enumerate(data_to_sample): + if isinstance(item, dict): + cluster_value = item.get(input_data.cluster_key) + elif hasattr(item, input_data.cluster_key): + cluster_value = getattr(item, input_data.cluster_key) + else: + raise 
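# Editor's sketch (not part of the patch): the RESERVOIR branch above is
# Algorithm R — keep the first k indices, then overwrite a random slot with
# decreasing probability as later items arrive. Standalone version.
import random

def reservoir_indices(data_size: int, k: int, seed: int | None = None) -> list[int]:
    if seed is not None:
        random.seed(seed)
    indices = list(range(k))
    for i in range(k, data_size):
        j = random.randint(0, i)
        if j < k:
            indices[j] = i
    return indices

sample = reservoir_indices(1000, 5, seed=0)
assert len(sample) == 5 and len(set(sample)) == 5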
TypeError( + f"Item {item} does not have the cluster key '{input_data.cluster_key}'" + ) + + clusters[str(cluster_value)].append(i) + + # Randomly select clusters until we have enough samples + selected_clusters = [] + while ( + sum(len(clusters[c]) for c in selected_clusters) + < input_data.sample_size + ): + available_clusters = [c for c in clusters if c not in selected_clusters] + if not available_clusters: + break + selected_clusters.append(random.choice(available_clusters)) + + for cluster in selected_clusters: + indices.extend(clusters[cluster]) + + # If we have more samples than needed, randomly remove some + if len(indices) > input_data.sample_size: + indices = random.sample(indices, input_data.sample_size) + else: + raise ValueError(f"Unknown sampling method: {input_data.sampling_method}") + + sampled_data = [data_to_sample[i] for i in indices] + + # Clear accumulated data after sampling if accumulation is enabled + if input_data.accumulate: + self.accumulated_data = [] + + yield "sampled_data", sampled_data + yield "sample_indices", indices diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py new file mode 100644 index 000000000000..633ad3109113 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/search.py @@ -0,0 +1,130 @@ +from typing import Literal +from urllib.parse import quote + +from pydantic import SecretStr + +from backend.blocks.helpers.http import GetRequest +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName + + +class GetWikipediaSummaryBlock(Block, GetRequest): + class Input(BlockSchema): + topic: str = SchemaField(description="The topic to fetch the summary for") + + class Output(BlockSchema): + summary: str = SchemaField(description="The summary of the given topic") + error: str = SchemaField( + description="Error message if the summary cannot be retrieved" + ) + + def __init__(self): + super().__init__( + id="f5b0f5d0-1862-4d61-94be-3ad0fa772760", + description="This block fetches the summary of a given topic from Wikipedia.", + categories={BlockCategory.SEARCH}, + input_schema=GetWikipediaSummaryBlock.Input, + output_schema=GetWikipediaSummaryBlock.Output, + test_input={"topic": "Artificial Intelligence"}, + test_output=("summary", "summary content"), + test_mock={"get_request": lambda url, json: {"extract": "summary content"}}, + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + topic = input_data.topic + url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}" + response = self.get_request(url, json=True) + if "extract" not in response: + raise RuntimeError(f"Unable to parse Wikipedia response: {response}") + yield "summary", response["extract"] + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="openweathermap", + api_key=SecretStr("mock-openweathermap-api-key"), + title="Mock OpenWeatherMap API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class GetWeatherInformationBlock(Block, GetRequest): + class Input(BlockSchema): + location: str = SchemaField( + description="Location to get weather information for" + ) + credentials: CredentialsMetaInput[ + 
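# Editor's sketch (not part of the patch): GetWikipediaSummaryBlock calls the
# Wikipedia REST "page/summary" endpoint and reads the "extract" field. A
# rough standard-library equivalent, with minimal error handling.
import json
import urllib.parse
import urllib.request

def wikipedia_summary(topic: str) -> str:
    url = (
        "https://en.wikipedia.org/api/rest_v1/page/summary/"
        + urllib.parse.quote(topic)
    )
    with urllib.request.urlopen(url) as resp:
        data = json.load(resp)
    if "extract" not in data:
        raise RuntimeError(f"Unable to parse Wikipedia response: {data}")
    return data["extract"]

# Example (network call): print(wikipedia_summary("Artificial Intelligence"))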
Literal[ProviderName.OPENWEATHERMAP], Literal["api_key"] + ] = CredentialsField( + description="The OpenWeatherMap integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + use_celsius: bool = SchemaField( + default=True, + description="Whether to use Celsius or Fahrenheit for temperature", + ) + + class Output(BlockSchema): + temperature: str = SchemaField( + description="Temperature in the specified location" + ) + humidity: str = SchemaField(description="Humidity in the specified location") + condition: str = SchemaField( + description="Weather condition in the specified location" + ) + error: str = SchemaField( + description="Error message if the weather information cannot be retrieved" + ) + + def __init__(self): + super().__init__( + id="f7a8b2c3-6d4e-5f8b-9e7f-6d4e5f8b9e7f", + input_schema=GetWeatherInformationBlock.Input, + output_schema=GetWeatherInformationBlock.Output, + description="Retrieves weather information for a specified location using OpenWeatherMap API.", + test_input={ + "location": "New York", + "use_celsius": True, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[ + ("temperature", "21.66"), + ("humidity", "32"), + ("condition", "overcast clouds"), + ], + test_mock={ + "get_request": lambda url, json: { + "main": {"temp": 21.66, "humidity": 32}, + "weather": [{"description": "overcast clouds"}], + } + }, + test_credentials=TEST_CREDENTIALS, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + units = "metric" if input_data.use_celsius else "imperial" + api_key = credentials.api_key + location = input_data.location + url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}" + weather_data = self.get_request(url, json=True) + + if "main" in weather_data and "weather" in weather_data: + yield "temperature", str(weather_data["main"]["temp"]) + yield "humidity", str(weather_data["main"]["humidity"]) + yield "condition", weather_data["weather"][0]["description"] + else: + raise RuntimeError(f"Expected keys not found in response: {weather_data}") diff --git a/autogpt_platform/backend/backend/blocks/slant3d/_api.py b/autogpt_platform/backend/backend/blocks/slant3d/_api.py new file mode 100644 index 000000000000..d952662e781d --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/_api.py @@ -0,0 +1,70 @@ +from enum import Enum +from typing import Literal + +from pydantic import BaseModel, SecretStr + +from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput +from backend.integrations.providers import ProviderName + +Slant3DCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.SLANT3D], Literal["api_key"] +] + + +def Slant3DCredentialsField() -> Slant3DCredentialsInput: + return CredentialsField(description="Slant3D API key for authentication") + + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="slant3d", + api_key=SecretStr("mock-slant3d-api-key"), + title="Mock Slant3D API key", + expires_at=None, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} + + +class CustomerDetails(BaseModel): + name: str + email: str + phone: str + address: str + city: str + state: str + zip: str + country_iso: str = "US" + is_residential: bool = True + + +class Color(Enum): + WHITE = "white" + BLACK = 
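# Editor's sketch (not part of the patch): how the weather block assembles its
# OpenWeatherMap request URL — the location is percent-encoded and the `units`
# parameter switches between Celsius and Fahrenheit. The api_key value below
# is a placeholder.
from urllib.parse import quote

def weather_url(location: str, api_key: str, use_celsius: bool) -> str:
    units = "metric" if use_celsius else "imperial"
    return (
        "http://api.openweathermap.org/data/2.5/weather"
        f"?q={quote(location)}&appid={api_key}&units={units}"
    )

assert "q=New%20York" in weather_url("New York", "PLACEHOLDER_KEY", True)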
"black" + + +class Profile(Enum): + PLA = "PLA" + PETG = "PETG" + + +class OrderItem(BaseModel): + # filename: str + file_url: str + quantity: str # String as per API spec + color: Color = Color.WHITE + profile: Profile = Profile.PLA + # image_url: str = "" + # sku: str = "" + + +class Filament(BaseModel): + filament: str + hexColor: str + colorTag: str + profile: str diff --git a/autogpt_platform/backend/backend/blocks/slant3d/base.py b/autogpt_platform/backend/backend/blocks/slant3d/base.py new file mode 100644 index 000000000000..d5d1681e1d43 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/base.py @@ -0,0 +1,94 @@ +from typing import Any, Dict + +from backend.data.block import Block +from backend.util.request import requests + +from ._api import Color, CustomerDetails, OrderItem, Profile + + +class Slant3DBlockBase(Block): + """Base block class for Slant3D API interactions""" + + BASE_URL = "https://www.slant3dapi.com/api" + + def _get_headers(self, api_key: str) -> Dict[str, str]: + return {"api-key": api_key, "Content-Type": "application/json"} + + def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict: + url = f"{self.BASE_URL}/{endpoint}" + response = requests.request( + method=method, url=url, headers=self._get_headers(api_key), **kwargs + ) + + if not response.ok: + error_msg = response.json().get("error", "Unknown error") + raise RuntimeError(f"API request failed: {error_msg}") + + return response.json() + + def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str: + response = self._make_request( + "GET", + "filament", + api_key, + params={"profile": profile.value, "color": color.value}, + ) + if profile == Profile.PLA: + color_tag = color.value + else: + color_tag = f"{profile.value.lower()}{color.value.capitalize()}" + valid_tags = [filament["colorTag"] for filament in response["filaments"]] + + if color_tag not in valid_tags: + raise ValueError( + f"""Invalid color profile combination {color_tag}. 
+Valid colors for {profile.value} are: +{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])} +""" + ) + return color_tag + + def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str: + return self._check_valid_color(profile, color, api_key) + + def _format_order_data( + self, + customer: CustomerDetails, + order_number: str, + items: list[OrderItem], + api_key: str, + ) -> list[dict[str, Any]]: + """Helper function to format order data for API requests""" + orders = [] + for item in items: + order_data = { + "email": customer.email, + "phone": customer.phone, + "name": customer.name, + "orderNumber": order_number, + "filename": item.file_url, + "fileURL": item.file_url, + "bill_to_street_1": customer.address, + "bill_to_city": customer.city, + "bill_to_state": customer.state, + "bill_to_zip": customer.zip, + "bill_to_country_as_iso": customer.country_iso, + "bill_to_is_US_residential": str(customer.is_residential).lower(), + "ship_to_name": customer.name, + "ship_to_street_1": customer.address, + "ship_to_city": customer.city, + "ship_to_state": customer.state, + "ship_to_zip": customer.zip, + "ship_to_country_as_iso": customer.country_iso, + "ship_to_is_US_residential": str(customer.is_residential).lower(), + "order_item_name": item.file_url, + "order_quantity": item.quantity, + "order_image_url": "", + "order_sku": "NOT_USED", + "order_item_color": self._convert_to_color( + item.profile, item.color, api_key + ), + "profile": item.profile.value, + } + orders.append(order_data) + return orders diff --git a/autogpt_platform/backend/backend/blocks/slant3d/filament.py b/autogpt_platform/backend/backend/blocks/slant3d/filament.py new file mode 100644 index 000000000000..c232c2ba8da1 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/filament.py @@ -0,0 +1,85 @@ +from typing import List + +from backend.data.block import BlockOutput, BlockSchema +from backend.data.model import APIKeyCredentials, SchemaField + +from ._api import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + Filament, + Slant3DCredentialsField, + Slant3DCredentialsInput, +) +from .base import Slant3DBlockBase + + +class Slant3DFilamentBlock(Slant3DBlockBase): + """Block for retrieving available filaments""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + + class Output(BlockSchema): + filaments: List[Filament] = SchemaField( + description="List of available filaments" + ) + error: str = SchemaField(description="Error message if request failed") + + def __init__(self): + super().__init__( + id="7cc416f4-f305-4606-9b3b-452b8a81031c", + description="Get list of available filaments", + input_schema=self.Input, + output_schema=self.Output, + test_input={"credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "filaments", + [ + { + "filament": "PLA BLACK", + "hexColor": "000000", + "colorTag": "black", + "profile": "PLA", + }, + { + "filament": "PLA WHITE", + "hexColor": "ffffff", + "colorTag": "white", + "profile": "PLA", + }, + ], + ) + ], + test_mock={ + "_make_request": lambda *args, **kwargs: { + "filaments": [ + { + "filament": "PLA BLACK", + "hexColor": "000000", + "colorTag": "black", + "profile": "PLA", + }, + { + "filament": "PLA WHITE", + "hexColor": "ffffff", + "colorTag": "white", + "profile": "PLA", + }, + ] + } + }, + ) + + def run( + self, input_data: Input, *, credentials: 
APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + result = self._make_request( + "GET", "filament", credentials.api_key.get_secret_value() + ) + yield "filaments", result["filaments"] + except Exception as e: + yield "error", str(e) + raise diff --git a/autogpt_platform/backend/backend/blocks/slant3d/order.py b/autogpt_platform/backend/backend/blocks/slant3d/order.py new file mode 100644 index 000000000000..a1a342a98e0b --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/order.py @@ -0,0 +1,418 @@ +import uuid +from typing import List + +import requests as baserequests + +from backend.data.block import BlockOutput, BlockSchema +from backend.data.model import APIKeyCredentials, SchemaField +from backend.util import settings +from backend.util.settings import BehaveAs + +from ._api import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + CustomerDetails, + OrderItem, + Slant3DCredentialsField, + Slant3DCredentialsInput, +) +from .base import Slant3DBlockBase + + +class Slant3DCreateOrderBlock(Slant3DBlockBase): + """Block for creating new orders""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + order_number: str = SchemaField( + description="Your custom order number (or leave blank for a random one)", + default_factory=lambda: str(uuid.uuid4()), + ) + customer: CustomerDetails = SchemaField( + description="Customer details for where to ship the item", + advanced=False, + ) + items: List[OrderItem] = SchemaField( + description="List of items to print", + advanced=False, + ) + + class Output(BlockSchema): + order_id: str = SchemaField(description="Slant3D order ID") + error: str = SchemaField(description="Error message if order failed") + + def __init__(self): + super().__init__( + id="f73007d6-f48f-4aaf-9e6b-6883998a09b4", + description="Create a new print order", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "order_number": "TEST-001", + "customer": { + "name": "John Doe", + "email": "john@example.com", + "phone": "123-456-7890", + "address": "123 Test St", + "city": "Test City", + "state": "TS", + "zip": "12345", + }, + "items": [ + { + "file_url": "https://example.com/model.stl", + "quantity": "1", + "color": "black", + "profile": "PLA", + } + ], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("order_id", "314144241")], + test_mock={ + "_make_request": lambda *args, **kwargs: {"orderId": "314144241"}, + "_convert_to_color": lambda *args, **kwargs: "black", + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + order_data = self._format_order_data( + input_data.customer, + input_data.order_number, + input_data.items, + credentials.api_key.get_secret_value(), + ) + result = self._make_request( + "POST", "order", credentials.api_key.get_secret_value(), json=order_data + ) + yield "order_id", result["orderId"] + except Exception as e: + yield "error", str(e) + raise + + +class Slant3DEstimateOrderBlock(Slant3DBlockBase): + """Block for getting order cost estimates""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + order_number: str = SchemaField( + description="Your custom order number (or leave blank for a random one)", + default_factory=lambda: str(uuid.uuid4()), + ) + customer: CustomerDetails = SchemaField( + description="Customer details for where to ship the item", + advanced=False, + ) + items: List[OrderItem] = 
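# Editor's sketch (not part of the patch): the HTTP call that
# Slant3DCreateOrderBlock ultimately performs — a POST to the /order endpoint
# with the "api-key" header and the formatted order list as JSON. Written here
# with the plain `requests` package as a stand-in for the project's request
# wrapper; `order_data` is an abbreviated stand-in for _format_order_data output.
import requests

def create_order(api_key: str, order_data: list[dict]) -> str:
    response = requests.post(
        "https://www.slant3dapi.com/api/order",
        headers={"api-key": api_key, "Content-Type": "application/json"},
        json=order_data,
        timeout=30,
    )
    if not response.ok:
        raise RuntimeError(response.json().get("error", "Unknown error"))
    return response.json()["orderId"]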
SchemaField( + description="List of items to print", + advanced=False, + ) + + class Output(BlockSchema): + total_price: float = SchemaField(description="Total price in USD") + shipping_cost: float = SchemaField(description="Shipping cost") + printing_cost: float = SchemaField(description="Printing cost") + error: str = SchemaField(description="Error message if estimation failed") + + def __init__(self): + super().__init__( + id="bf8823d6-b42a-48c7-b558-d7c117f2ae85", + description="Get order cost estimate", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "order_number": "TEST-001", + "customer": { + "name": "John Doe", + "email": "john@example.com", + "phone": "123-456-7890", + "address": "123 Test St", + "city": "Test City", + "state": "TS", + "zip": "12345", + }, + "items": [ + { + "file_url": "https://example.com/model.stl", + "quantity": "1", + "color": "black", + "profile": "PLA", + } + ], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("total_price", 9.31), + ("shipping_cost", 5.56), + ("printing_cost", 3.75), + ], + test_mock={ + "_make_request": lambda *args, **kwargs: { + "totalPrice": 9.31, + "shippingCost": 5.56, + "printingCost": 3.75, + }, + "_convert_to_color": lambda *args, **kwargs: "black", + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + order_data = self._format_order_data( + input_data.customer, + input_data.order_number, + input_data.items, + credentials.api_key.get_secret_value(), + ) + try: + result = self._make_request( + "POST", + "order/estimate", + credentials.api_key.get_secret_value(), + json=order_data, + ) + yield "total_price", result["totalPrice"] + yield "shipping_cost", result["shippingCost"] + yield "printing_cost", result["printingCost"] + except baserequests.HTTPError as e: + yield "error", str(f"Error estimating order: {e} {e.response.text}") + raise + + +class Slant3DEstimateShippingBlock(Slant3DBlockBase): + """Block for getting shipping cost estimates""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + order_number: str = SchemaField( + description="Your custom order number (or leave blank for a random one)", + default_factory=lambda: str(uuid.uuid4()), + ) + customer: CustomerDetails = SchemaField( + description="Customer details for where to ship the item" + ) + items: List[OrderItem] = SchemaField( + description="List of items to print", + advanced=False, + ) + + class Output(BlockSchema): + shipping_cost: float = SchemaField(description="Estimated shipping cost") + currency_code: str = SchemaField(description="Currency code (e.g., 'usd')") + error: str = SchemaField(description="Error message if estimation failed") + + def __init__(self): + super().__init__( + id="00aae2a1-caf6-4a74-8175-39a0615d44e1", + description="Get shipping cost estimate", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "order_number": "TEST-001", + "customer": { + "name": "John Doe", + "email": "john@example.com", + "phone": "123-456-7890", + "address": "123 Test St", + "city": "Test City", + "state": "TS", + "zip": "12345", + }, + "items": [ + { + "file_url": "https://example.com/model.stl", + "quantity": "1", + "color": "black", + "profile": "PLA", + } + ], + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("shipping_cost", 4.81), ("currency_code", "usd")], + test_mock={ + "_make_request": lambda *args, 
**kwargs: { + "shippingCost": 4.81, + "currencyCode": "usd", + }, + "_convert_to_color": lambda *args, **kwargs: "black", + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + order_data = self._format_order_data( + input_data.customer, + input_data.order_number, + input_data.items, + credentials.api_key.get_secret_value(), + ) + result = self._make_request( + "POST", + "order/estimateShipping", + credentials.api_key.get_secret_value(), + json=order_data, + ) + yield "shipping_cost", result["shippingCost"] + yield "currency_code", result["currencyCode"] + except Exception as e: + yield "error", str(e) + raise + + +class Slant3DGetOrdersBlock(Slant3DBlockBase): + """Block for retrieving all orders""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + + class Output(BlockSchema): + orders: List[str] = SchemaField(description="List of orders with their details") + error: str = SchemaField(description="Error message if request failed") + + def __init__(self): + super().__init__( + id="42283bf5-8a32-4fb4-92a2-60a9ea48e105", + description="Get all orders for the account", + input_schema=self.Input, + output_schema=self.Output, + # This block is disabled for cloud hosted because it allows access to all orders for the account + disabled=settings.Settings().config.behave_as == BehaveAs.CLOUD, + test_input={"credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "orders", + [ + "1234567890", + ], + ) + ], + test_mock={ + "_make_request": lambda *args, **kwargs: { + "ordersData": [ + { + "orderId": 1234567890, + "orderTimestamp": { + "_seconds": 1719510986, + "_nanoseconds": 710000000, + }, + } + ] + } + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + result = self._make_request( + "GET", "order", credentials.api_key.get_secret_value() + ) + yield "orders", [str(order["orderId"]) for order in result["ordersData"]] + except Exception as e: + yield "error", str(e) + raise + + +class Slant3DTrackingBlock(Slant3DBlockBase): + """Block for tracking order status and shipping""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + order_id: str = SchemaField(description="Slant3D order ID to track") + + class Output(BlockSchema): + status: str = SchemaField(description="Order status") + tracking_numbers: List[str] = SchemaField( + description="List of tracking numbers" + ) + error: str = SchemaField(description="Error message if tracking failed") + + def __init__(self): + super().__init__( + id="dd7c0293-c5af-4551-ba3e-fc162fb1fb89", + description="Track order status and shipping", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "order_id": "314144241", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "awaiting_shipment"), ("tracking_numbers", [])], + test_mock={ + "_make_request": lambda *args, **kwargs: { + "status": "awaiting_shipment", + "trackingNumbers": [], + } + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + result = self._make_request( + "GET", + f"order/{input_data.order_id}/get-tracking", + credentials.api_key.get_secret_value(), + ) + yield "status", result["status"] + yield "tracking_numbers", result["trackingNumbers"] + except Exception as e: + yield "error", str(e) + 
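# Editor's sketch (not part of the patch): the orders endpoint returns
# timestamps as {"_seconds": ..., "_nanoseconds": ...} objects (see the
# test_mock above). Assuming the nanoseconds are the fractional part, one way
# to turn such a value into an aware datetime:
from datetime import datetime, timezone

def order_timestamp(ts: dict) -> datetime:
    return datetime.fromtimestamp(
        ts["_seconds"] + ts["_nanoseconds"] / 1e9, tz=timezone.utc
    )

ts = order_timestamp({"_seconds": 1719510986, "_nanoseconds": 710000000})
assert ts.year == 2024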
raise + + +class Slant3DCancelOrderBlock(Slant3DBlockBase): + """Block for canceling orders""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + order_id: str = SchemaField(description="Slant3D order ID to cancel") + + class Output(BlockSchema): + status: str = SchemaField(description="Cancellation status message") + error: str = SchemaField(description="Error message if cancellation failed") + + def __init__(self): + super().__init__( + id="54de35e1-407f-450b-b5fa-3b5e2eba8185", + description="Cancel an existing order", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "order_id": "314144241", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("status", "Order cancelled")], + test_mock={ + "_make_request": lambda *args, **kwargs: {"status": "Order cancelled"} + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + result = self._make_request( + "DELETE", + f"order/{input_data.order_id}", + credentials.api_key.get_secret_value(), + ) + yield "status", result["status"] + except Exception as e: + yield "error", str(e) + raise diff --git a/autogpt_platform/backend/backend/blocks/slant3d/slicing.py b/autogpt_platform/backend/backend/blocks/slant3d/slicing.py new file mode 100644 index 000000000000..1b868efc9edd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/slicing.py @@ -0,0 +1,61 @@ +from backend.data.block import BlockOutput, BlockSchema +from backend.data.model import APIKeyCredentials, SchemaField + +from ._api import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + Slant3DCredentialsField, + Slant3DCredentialsInput, +) +from .base import Slant3DBlockBase + + +class Slant3DSlicerBlock(Slant3DBlockBase): + """Block for slicing 3D model files""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + file_url: str = SchemaField( + description="URL of the 3D model file to slice (STL)" + ) + + class Output(BlockSchema): + message: str = SchemaField(description="Response message") + price: float = SchemaField(description="Calculated price for printing") + error: str = SchemaField(description="Error message if slicing failed") + + def __init__(self): + super().__init__( + id="f8a12c8d-3e4b-4d5f-b6a7-8c9d0e1f2g3h", + description="Slice a 3D model file and get pricing information", + input_schema=self.Input, + output_schema=self.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "file_url": "https://example.com/model.stl", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("message", "Slicing successful"), ("price", 8.23)], + test_mock={ + "_make_request": lambda *args, **kwargs: { + "message": "Slicing successful", + "data": {"price": 8.23}, + } + }, + ) + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + try: + result = self._make_request( + "POST", + "slicer", + credentials.api_key.get_secret_value(), + json={"fileURL": input_data.file_url}, + ) + yield "message", result["message"] + yield "price", result["data"]["price"] + except Exception as e: + yield "error", str(e) + raise diff --git a/autogpt_platform/backend/backend/blocks/slant3d/webhook.py b/autogpt_platform/backend/backend/blocks/slant3d/webhook.py new file mode 100644 index 000000000000..5726790f99f2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/slant3d/webhook.py @@ -0,0 +1,125 @@ +from pydantic 
import BaseModel + +from backend.data.block import ( + Block, + BlockCategory, + BlockOutput, + BlockSchema, + BlockWebhookConfig, +) +from backend.data.model import SchemaField +from backend.util import settings +from backend.util.settings import AppEnvironment, BehaveAs + +from ._api import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + Slant3DCredentialsField, + Slant3DCredentialsInput, +) + + +class Slant3DTriggerBase: + """Base class for Slant3D webhook triggers""" + + class Input(BlockSchema): + credentials: Slant3DCredentialsInput = Slant3DCredentialsField() + # Webhook URL is handled by the webhook system + payload: dict = SchemaField(hidden=True, default={}) + + class Output(BlockSchema): + payload: dict = SchemaField( + description="The complete webhook payload received from Slant3D" + ) + order_id: str = SchemaField(description="The ID of the affected order") + error: str = SchemaField( + description="Error message if payload processing failed" + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "payload", input_data.payload + yield "order_id", input_data.payload["orderId"] + + +class Slant3DOrderWebhookBlock(Slant3DTriggerBase, Block): + """Block for handling Slant3D order webhooks""" + + class Input(Slant3DTriggerBase.Input): + class EventsFilter(BaseModel): + """ + Currently Slant3D only supports 'SHIPPED' status updates + Could be expanded in the future with more status types + """ + + shipped: bool = True + + events: EventsFilter = SchemaField( + title="Events", + description="Order status events to subscribe to", + default=EventsFilter(shipped=True), + ) + + class Output(Slant3DTriggerBase.Output): + status: str = SchemaField(description="The new status of the order") + tracking_number: str = SchemaField( + description="The tracking number for the shipment" + ) + carrier_code: str = SchemaField(description="The carrier code (e.g., 'usps')") + + def __init__(self): + super().__init__( + id="8a74c2ad-0104-4640-962f-26c6b69e58cd", + description=( + "This block triggers on Slant3D order status updates and outputs " + "the event details, including tracking information when orders are shipped." + ), + # All webhooks are currently subscribed to for all orders. 
This works for self hosted, but not for cloud hosted prod + disabled=( + settings.Settings().config.behave_as == BehaveAs.CLOUD + and settings.Settings().config.app_env != AppEnvironment.LOCAL + ), + categories={BlockCategory.DEVELOPER_TOOLS}, + input_schema=self.Input, + output_schema=self.Output, + webhook_config=BlockWebhookConfig( + provider="slant3d", + webhook_type="orders", # Only one type for now + resource_format="", # No resource format needed + event_filter_input="events", + event_format="order.{event}", + ), + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "events": {"shipped": True}, + "payload": { + "orderId": "1234567890", + "status": "SHIPPED", + "trackingNumber": "ABCDEF123456", + "carrierCode": "usps", + }, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ( + "payload", + { + "orderId": "1234567890", + "status": "SHIPPED", + "trackingNumber": "ABCDEF123456", + "carrierCode": "usps", + }, + ), + ("order_id", "1234567890"), + ("status", "SHIPPED"), + ("tracking_number", "ABCDEF123456"), + ("carrier_code", "usps"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore + yield from super().run(input_data, **kwargs) + + # Extract and normalize values from the payload + yield "status", input_data.payload["status"] + yield "tracking_number", input_data.payload["trackingNumber"] + yield "carrier_code", input_data.payload["carrierCode"] diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py new file mode 100644 index 000000000000..b57a9b0da6dd --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -0,0 +1,172 @@ +import time +from typing import Literal + +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="d_id", + api_key=SecretStr("mock-d-id-api-key"), + title="Mock D-ID API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class CreateTalkingAvatarVideoBlock(Block): + class Input(BlockSchema): + credentials: CredentialsMetaInput[ + Literal[ProviderName.D_ID], Literal["api_key"] + ] = CredentialsField( + description="The D-ID integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + script_input: str = SchemaField( + description="The text input for the script", + placeholder="Welcome to AutoGPT", + ) + provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField( + description="The voice provider to use", default="microsoft" + ) + voice_id: str = SchemaField( + description="The voice ID to use, get list of voices [here](https://docs.agpt.co/server/d_id)", + default="en-US-JennyNeural", + ) + presenter_id: str = SchemaField( + description="The presenter ID to use", default="amy-Aq6OmGZnMt" + ) + driver_id: str = SchemaField( + description="The driver ID to use", default="Vcq0R4a8F0" + ) + result_format: Literal["mp4", "gif", "wav"] = SchemaField( + description="The desired result format", default="mp4" + ) + crop_type: Literal["wide", "square", "vertical"] 
= SchemaField( + description="The crop type for the presenter", default="wide" + ) + subtitles: bool = SchemaField( + description="Whether to include subtitles", default=False + ) + ssml: bool = SchemaField(description="Whether the input is SSML", default=False) + max_polling_attempts: int = SchemaField( + description="Maximum number of polling attempts", default=30, ge=5 + ) + polling_interval: int = SchemaField( + description="Interval between polling attempts in seconds", default=10, ge=5 + ) + + class Output(BlockSchema): + video_url: str = SchemaField(description="The URL of the created video") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="98c6f503-8c47-4b1c-a96d-351fc7c87dab", + description="This block integrates with D-ID to create video clips and retrieve their URLs.", + categories={BlockCategory.AI, BlockCategory.MULTIMEDIA}, + input_schema=CreateTalkingAvatarVideoBlock.Input, + output_schema=CreateTalkingAvatarVideoBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "script_input": "Welcome to AutoGPT", + "voice_id": "en-US-JennyNeural", + "presenter_id": "amy-Aq6OmGZnMt", + "driver_id": "Vcq0R4a8F0", + "result_format": "mp4", + "crop_type": "wide", + "subtitles": False, + "ssml": False, + "max_polling_attempts": 5, + "polling_interval": 5, + }, + test_output=[ + ( + "video_url", + "https://d-id.com/api/clips/abcd1234-5678-efgh-ijkl-mnopqrstuvwx/video", + ), + ], + test_mock={ + "create_clip": lambda *args, **kwargs: { + "id": "abcd1234-5678-efgh-ijkl-mnopqrstuvwx", + "status": "created", + }, + "get_clip_status": lambda *args, **kwargs: { + "status": "done", + "result_url": "https://d-id.com/api/clips/abcd1234-5678-efgh-ijkl-mnopqrstuvwx/video", + }, + }, + test_credentials=TEST_CREDENTIALS, + ) + + def create_clip(self, api_key: SecretStr, payload: dict) -> dict: + url = "https://api.d-id.com/clips" + headers = { + "accept": "application/json", + "content-type": "application/json", + "authorization": f"Basic {api_key.get_secret_value()}", + } + response = requests.post(url, json=payload, headers=headers) + return response.json() + + def get_clip_status(self, api_key: SecretStr, clip_id: str) -> dict: + url = f"https://api.d-id.com/clips/{clip_id}" + headers = { + "accept": "application/json", + "authorization": f"Basic {api_key.get_secret_value()}", + } + response = requests.get(url, headers=headers) + return response.json() + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + # Create the clip + payload = { + "script": { + "type": "text", + "subtitles": str(input_data.subtitles).lower(), + "provider": { + "type": input_data.provider, + "voice_id": input_data.voice_id, + }, + "ssml": str(input_data.ssml).lower(), + "input": input_data.script_input, + }, + "config": {"result_format": input_data.result_format}, + "presenter_config": {"crop": {"type": input_data.crop_type}}, + "presenter_id": input_data.presenter_id, + "driver_id": input_data.driver_id, + } + + response = self.create_clip(credentials.api_key, payload) + clip_id = response["id"] + + # Poll for clip status + for _ in range(input_data.max_polling_attempts): + status_response = self.get_clip_status(credentials.api_key, clip_id) + if status_response["status"] == "done": + yield "video_url", status_response["result_url"] + return + elif status_response["status"] == "error": + raise RuntimeError( + f"Clip creation failed: {status_response.get('error', 'Unknown 
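# Editor's sketch (not part of the patch): the D-ID block polls the clip
# status until it is "done" or "error", sleeping `polling_interval` seconds
# between attempts and giving up after `max_polling_attempts`. Generic version
# of that pattern; `fetch_status` is a hypothetical callable standing in for
# get_clip_status.
import time
from typing import Callable

def poll_until_done(
    fetch_status: Callable[[], dict],
    max_attempts: int = 30,
    interval: float = 10,
) -> str:
    for _ in range(max_attempts):
        status = fetch_status()
        if status["status"] == "done":
            return status["result_url"]
        if status["status"] == "error":
            raise RuntimeError(status.get("error", "Unknown error"))
        time.sleep(interval)
    raise TimeoutError("Clip creation timed out")

assert poll_until_done(lambda: {"status": "done", "result_url": "x"}) == "x"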
error')}" + ) + + time.sleep(input_data.polling_interval) + + raise TimeoutError("Clip creation timed out") diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py new file mode 100644 index 000000000000..be0917abeab4 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -0,0 +1,254 @@ +import re +from typing import Any + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField +from backend.util import json, text + +formatter = text.TextFormatter() + + +class MatchTextPatternBlock(Block): + class Input(BlockSchema): + text: Any = SchemaField(description="Text to match") + match: str = SchemaField(description="Pattern (Regex) to match") + data: Any = SchemaField(description="Data to be forwarded to output") + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) + + class Output(BlockSchema): + positive: Any = SchemaField(description="Output data if match is found") + negative: Any = SchemaField(description="Output data if match is not found") + + def __init__(self): + super().__init__( + id="3060088f-6ed9-4928-9ba7-9c92823a7ccd", + description="Matches text against a regex pattern and forwards data to positive or negative output based on the match.", + categories={BlockCategory.TEXT}, + input_schema=MatchTextPatternBlock.Input, + output_schema=MatchTextPatternBlock.Output, + test_input=[ + {"text": "ABC", "match": "ab", "data": "X", "case_sensitive": False}, + {"text": "ABC", "match": "ab", "data": "Y", "case_sensitive": True}, + {"text": "Hello World!", "match": ".orld.+", "data": "Z"}, + {"text": "Hello World!", "match": "World![a-z]+", "data": "Z"}, + ], + test_output=[ + ("positive", "X"), + ("negative", "Y"), + ("positive", "Z"), + ("negative", "Z"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + output = input_data.data or input_data.text + flags = 0 + if not input_data.case_sensitive: + flags = flags | re.IGNORECASE + if input_data.dot_all: + flags = flags | re.DOTALL + + if isinstance(input_data.text, str): + text = input_data.text + else: + text = json.dumps(input_data.text) + + if re.search(input_data.match, text, flags=flags): + yield "positive", output + else: + yield "negative", output + + +class ExtractTextInformationBlock(Block): + class Input(BlockSchema): + text: Any = SchemaField(description="Text to parse") + pattern: str = SchemaField(description="Pattern (Regex) to parse") + group: int = SchemaField(description="Group number to extract", default=0) + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) + find_all: bool = SchemaField(description="Find all matches", default=False) + + class Output(BlockSchema): + positive: str = SchemaField(description="Extracted text") + negative: str = SchemaField(description="Original text") + + def __init__(self): + super().__init__( + id="3146e4fe-2cdd-4f29-bd12-0c9d5bb4deb0", + description="This block extracts the text from the given text using the pattern (regex).", + categories={BlockCategory.TEXT}, + input_schema=ExtractTextInformationBlock.Input, + output_schema=ExtractTextInformationBlock.Output, + test_input=[ + {"text": "Hello, World!", "pattern": "Hello, (.+)", "group": 1}, + {"text": "Hello, World!", "pattern": "Hello, (.+)", "group": 0}, 
+ {"text": "Hello, World!", "pattern": "Hello, (.+)", "group": 2}, + {"text": "Hello, World!", "pattern": "hello,", "case_sensitive": False}, + { + "text": "Hello, World!! Hello, Earth!!", + "pattern": "Hello, (\\S+)", + "group": 1, + "find_all": False, + }, + { + "text": "Hello, World!! Hello, Earth!!", + "pattern": "Hello, (\\S+)", + "group": 1, + "find_all": True, + }, + ], + test_output=[ + ("positive", "World!"), + ("positive", "Hello, World!"), + ("negative", "Hello, World!"), + ("positive", "Hello,"), + ("positive", "World!!"), + ("positive", "World!!"), + ("positive", "Earth!!"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + flags = 0 + if not input_data.case_sensitive: + flags = flags | re.IGNORECASE + if input_data.dot_all: + flags = flags | re.DOTALL + + if isinstance(input_data.text, str): + txt = input_data.text + else: + txt = json.dumps(input_data.text) + + matches = [ + match.group(input_data.group) + for match in re.finditer(input_data.pattern, txt, flags) + if input_data.group <= len(match.groups()) + ] + for match in matches: + yield "positive", match + if not input_data.find_all: + return + if not matches: + yield "negative", input_data.text + + +class FillTextTemplateBlock(Block): + class Input(BlockSchema): + values: dict[str, Any] = SchemaField( + description="Values (dict) to be used in format. These values can be used by putting them in double curly braces in the format template. e.g. {{value_name}}.", + ) + format: str = SchemaField( + description="Template to format the text using `values`. Use Jinja2 syntax." + ) + + class Output(BlockSchema): + output: str = SchemaField(description="Formatted text") + + def __init__(self): + super().__init__( + id="db7d8f02-2f44-4c55-ab7a-eae0941f0c30", + description="This block formats the given texts using the format template.", + categories={BlockCategory.TEXT}, + input_schema=FillTextTemplateBlock.Input, + output_schema=FillTextTemplateBlock.Output, + test_input=[ + { + "values": {"name": "Alice", "hello": "Hello", "world": "World!"}, + "format": "{{hello}}, {{ world }} {{name}}", + }, + { + "values": {"list": ["Hello", " World!"]}, + "format": "{% for item in list %}{{ item }}{% endfor %}", + }, + { + "values": {}, + "format": "{% set name = 'Alice' %}Hello, World! {{ name }}", + }, + ], + test_output=[ + ("output", "Hello, World! Alice"), + ("output", "Hello World!"), + ("output", "Hello, World! Alice"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + yield "output", formatter.format_string(input_data.format, input_data.values) + + +class CombineTextsBlock(Block): + class Input(BlockSchema): + input: list[str] = SchemaField(description="text input to combine") + delimiter: str = SchemaField( + description="Delimiter to combine texts", default="" + ) + + class Output(BlockSchema): + output: str = SchemaField(description="Combined text") + + def __init__(self): + super().__init__( + id="e30a4d42-7b7d-4e6a-b36e-1f9b8e3b7d85", + description="This block combines multiple input texts into a single output text.", + categories={BlockCategory.TEXT}, + input_schema=CombineTextsBlock.Input, + output_schema=CombineTextsBlock.Output, + test_input=[ + {"input": ["Hello world I like ", "cake and to go for walks"]}, + {"input": ["This is a test", "Hi!"], "delimiter": "! "}, + ], + test_output=[ + ("output", "Hello world I like cake and to go for walks"), + ("output", "This is a test! 
Hi!"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + combined_text = input_data.delimiter.join(input_data.input) + yield "output", combined_text + + +class TextSplitBlock(Block): + class Input(BlockSchema): + text: str = SchemaField(description="The text to split.") + delimiter: str = SchemaField(description="The delimiter to split the text by.") + strip: bool = SchemaField( + description="Whether to strip the text.", default=True + ) + + class Output(BlockSchema): + texts: list[str] = SchemaField( + description="The text split into a list of strings." + ) + + def __init__(self): + super().__init__( + id="d5ea33c8-a575-477a-b42f-2fe3be5055ec", + description="This block is used to split a text into a list of strings.", + categories={BlockCategory.TEXT}, + input_schema=TextSplitBlock.Input, + output_schema=TextSplitBlock.Output, + test_input=[ + {"text": "Hello, World!", "delimiter": ","}, + {"text": "Hello, World!", "delimiter": ",", "strip": False}, + ], + test_output=[ + ("texts", ["Hello", "World!"]), + ("texts", ["Hello", " World!"]), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + if len(input_data.text) == 0: + yield "texts", [] + else: + texts = input_data.text.split(input_data.delimiter) + if input_data.strip: + texts = [text.strip() for text in texts] + yield "texts", texts diff --git a/autogpt_platform/backend/backend/blocks/text_to_speech_block.py b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py new file mode 100644 index 000000000000..989dc54e128e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py @@ -0,0 +1,102 @@ +from typing import Any, Literal + +from pydantic import SecretStr + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import ( + APIKeyCredentials, + CredentialsField, + CredentialsMetaInput, + SchemaField, +) +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +TEST_CREDENTIALS = APIKeyCredentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="unreal_speech", + api_key=SecretStr("mock-unreal-speech-api-key"), + title="Mock Unreal Speech API key", + expires_at=None, +) +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.type, +} + + +class UnrealTextToSpeechBlock(Block): + class Input(BlockSchema): + text: str = SchemaField( + description="The text to be converted to speech", + placeholder="Enter the text you want to convert to speech", + ) + voice_id: str = SchemaField( + description="The voice ID to use for text-to-speech conversion", + placeholder="Scarlett", + default="Scarlett", + ) + credentials: CredentialsMetaInput[ + Literal[ProviderName.UNREAL_SPEECH], Literal["api_key"] + ] = CredentialsField( + description="The Unreal Speech integration can be used with " + "any API key with sufficient permissions for the blocks it is used on.", + ) + + class Output(BlockSchema): + mp3_url: str = SchemaField(description="The URL of the generated MP3 file") + error: str = SchemaField(description="Error message if the API call failed") + + def __init__(self): + super().__init__( + id="4ff1ff6d-cc40-4caa-ae69-011daa20c378", + description="Converts text to speech using the Unreal Speech API", + categories={BlockCategory.AI, BlockCategory.TEXT, BlockCategory.MULTIMEDIA}, + input_schema=UnrealTextToSpeechBlock.Input, + 
output_schema=UnrealTextToSpeechBlock.Output, + test_input={ + "text": "This is a test of the text to speech API.", + "voice_id": "Scarlett", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_output=[("mp3_url", "https://example.com/test.mp3")], + test_mock={ + "call_unreal_speech_api": lambda *args, **kwargs: { + "OutputUri": "https://example.com/test.mp3" + } + }, + test_credentials=TEST_CREDENTIALS, + ) + + @staticmethod + def call_unreal_speech_api( + api_key: SecretStr, text: str, voice_id: str + ) -> dict[str, Any]: + url = "https://api.v7.unrealspeech.com/speech" + headers = { + "Authorization": f"Bearer {api_key.get_secret_value()}", + "Content-Type": "application/json", + } + data = { + "Text": text, + "VoiceId": voice_id, + "Bitrate": "192k", + "Speed": "0", + "Pitch": "1", + "TimestampType": "sentence", + } + + response = requests.post(url, headers=headers, json=data) + return response.json() + + def run( + self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs + ) -> BlockOutput: + api_response = self.call_unreal_speech_api( + credentials.api_key, + input_data.text, + input_data.voice_id, + ) + yield "mp3_url", api_response["OutputUri"] diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py new file mode 100644 index 000000000000..4b060aea5cfc --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -0,0 +1,191 @@ +import time +from datetime import datetime, timedelta +from typing import Any, Union + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class GetCurrentTimeBlock(Block): + class Input(BlockSchema): + trigger: str = SchemaField( + description="Trigger any data to output the current time" + ) + format: str = SchemaField( + description="Format of the time to output", default="%H:%M:%S" + ) + + class Output(BlockSchema): + time: str = SchemaField( + description="Current time in the specified format (default: %H:%M:%S)" + ) + + def __init__(self): + super().__init__( + id="a892b8d9-3e4e-4e9c-9c1e-75f8efcf1bfa", + description="This block outputs the current time.", + categories={BlockCategory.TEXT}, + input_schema=GetCurrentTimeBlock.Input, + output_schema=GetCurrentTimeBlock.Output, + test_input=[ + {"trigger": "Hello"}, + {"trigger": "Hello", "format": "%H:%M"}, + ], + test_output=[ + ("time", lambda _: time.strftime("%H:%M:%S")), + ("time", lambda _: time.strftime("%H:%M")), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + current_time = time.strftime(input_data.format) + yield "time", current_time + + +class GetCurrentDateBlock(Block): + class Input(BlockSchema): + trigger: str = SchemaField( + description="Trigger any data to output the current date" + ) + offset: Union[int, str] = SchemaField( + title="Days Offset", + description="Offset in days from the current date", + default=0, + ) + format: str = SchemaField( + description="Format of the date to output", default="%Y-%m-%d" + ) + + class Output(BlockSchema): + date: str = SchemaField( + description="Current date in the specified format (default: YYYY-MM-DD)" + ) + + def __init__(self): + super().__init__( + id="b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0b1", + description="This block outputs the current date with an optional offset.", + categories={BlockCategory.TEXT}, + input_schema=GetCurrentDateBlock.Input, + output_schema=GetCurrentDateBlock.Output, + test_input=[ + {"trigger": "Hello", "offset": "7"}, + 
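# --- Illustrative sketch (not part of the diff): GetCurrentDateBlock accepts `offset` as an
# int or a numeric string and subtracts it in days, as its run() below shows; non-numeric
# values fall back to 0. Stand-alone version of that coercion (names local to this sketch):
from datetime import datetime, timedelta

def offset_date(offset, fmt: str = "%Y-%m-%d") -> str:
    try:
        days = int(offset)
    except ValueError:
        days = 0  # non-numeric offset -> today's date
    return (datetime.now() - timedelta(days=days)).strftime(fmt)

# offset_date("7") -> the date one week ago, matching the "offset": "7" test input above.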
{"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"}, + ], + test_output=[ + ( + "date", + lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d")) + < timedelta(days=8), # 7 days difference + 1 day error margin. + ), + ( + "date", + lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y")) + < timedelta(days=8), + # 7 days difference + 1 day error margin. + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + try: + offset = int(input_data.offset) + except ValueError: + offset = 0 + current_date = datetime.now() - timedelta(days=offset) + yield "date", current_date.strftime(input_data.format) + + +class GetCurrentDateAndTimeBlock(Block): + class Input(BlockSchema): + trigger: str = SchemaField( + description="Trigger any data to output the current date and time" + ) + format: str = SchemaField( + description="Format of the date and time to output", + default="%Y-%m-%d %H:%M:%S", + ) + + class Output(BlockSchema): + date_time: str = SchemaField( + description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)" + ) + + def __init__(self): + super().__init__( + id="716a67b3-6760-42e7-86dc-18645c6e00fc", + description="This block outputs the current date and time.", + categories={BlockCategory.TEXT}, + input_schema=GetCurrentDateAndTimeBlock.Input, + output_schema=GetCurrentDateAndTimeBlock.Output, + test_input=[ + {"trigger": "Hello"}, + ], + test_output=[ + ( + "date_time", + lambda t: abs( + datetime.now() - datetime.strptime(t, "%Y-%m-%d %H:%M:%S") + ) + < timedelta(seconds=10), # 10 seconds error margin. + ), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + current_date_time = time.strftime(input_data.format) + yield "date_time", current_date_time + + +class CountdownTimerBlock(Block): + class Input(BlockSchema): + input_message: Any = SchemaField( + advanced=False, + description="Message to output after the timer finishes", + default="timer finished", + ) + seconds: Union[int, str] = SchemaField( + advanced=False, description="Duration in seconds", default=0 + ) + minutes: Union[int, str] = SchemaField( + advanced=False, description="Duration in minutes", default=0 + ) + hours: Union[int, str] = SchemaField( + advanced=False, description="Duration in hours", default=0 + ) + days: Union[int, str] = SchemaField( + advanced=False, description="Duration in days", default=0 + ) + + class Output(BlockSchema): + output_message: Any = SchemaField( + description="Message after the timer finishes" + ) + + def __init__(self): + super().__init__( + id="d67a9c52-5e4e-11e2-bcfd-0800200c9a71", + description="This block triggers after a specified duration.", + categories={BlockCategory.TEXT}, + input_schema=CountdownTimerBlock.Input, + output_schema=CountdownTimerBlock.Output, + test_input=[ + {"seconds": 1}, + {"input_message": "Custom message"}, + ], + test_output=[ + ("output_message", "timer finished"), + ("output_message", "Custom message"), + ], + ) + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + seconds = int(input_data.seconds) + minutes = int(input_data.minutes) + hours = int(input_data.hours) + days = int(input_data.days) + + total_seconds = seconds + minutes * 60 + hours * 3600 + days * 86400 + + time.sleep(total_seconds) + yield "output_message", input_data.input_message diff --git a/autogpt_platform/backend/backend/blocks/twitter/_auth.py b/autogpt_platform/backend/backend/blocks/twitter/_auth.py new file mode 100644 index 000000000000..0bff03fa370a --- /dev/null +++ 
b/autogpt_platform/backend/backend/blocks/twitter/_auth.py @@ -0,0 +1,60 @@ +from typing import Literal + +from pydantic import SecretStr + +from backend.data.model import ( + CredentialsField, + CredentialsMetaInput, + OAuth2Credentials, + ProviderName, +) +from backend.integrations.oauth.twitter import TwitterOAuthHandler +from backend.util.settings import Secrets + +# --8<-- [start:TwitterOAuthIsConfigured] +secrets = Secrets() +TWITTER_OAUTH_IS_CONFIGURED = bool( + secrets.twitter_client_id and secrets.twitter_client_secret +) +# --8<-- [end:TwitterOAuthIsConfigured] + +TwitterCredentials = OAuth2Credentials +TwitterCredentialsInput = CredentialsMetaInput[ + Literal[ProviderName.TWITTER], Literal["oauth2"] +] + + +# Currently, We are getting all the permission from the Twitter API initally +# In future, If we need to add incremental permission, we can use these requested_scopes +def TwitterCredentialsField(scopes: list[str]) -> TwitterCredentialsInput: + """ + Creates a Twitter credentials input on a block. + + Params: + scopes: The authorization scopes needed for the block to work. + """ + return CredentialsField( + # required_scopes=set(scopes), + required_scopes=set(TwitterOAuthHandler.DEFAULT_SCOPES + scopes), + description="The Twitter integration requires OAuth2 authentication.", + ) + + +TEST_CREDENTIALS = OAuth2Credentials( + id="01234567-89ab-cdef-0123-456789abcdef", + provider="twitter", + access_token=SecretStr("mock-twitter-access-token"), + refresh_token=SecretStr("mock-twitter-refresh-token"), + access_token_expires_at=1234567890, + scopes=["tweet.read", "tweet.write", "users.read", "offline.access"], + title="Mock Twitter OAuth2 Credentials", + username="mock-twitter-username", + refresh_token_expires_at=1234567890, +) + +TEST_CREDENTIALS_INPUT = { + "provider": TEST_CREDENTIALS.provider, + "id": TEST_CREDENTIALS.id, + "type": TEST_CREDENTIALS.type, + "title": TEST_CREDENTIALS.title, +} diff --git a/autogpt_platform/backend/backend/blocks/twitter/_builders.py b/autogpt_platform/backend/backend/blocks/twitter/_builders.py new file mode 100644 index 000000000000..6dc450c2474f --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/_builders.py @@ -0,0 +1,418 @@ +from datetime import datetime +from typing import Any, Dict + +from backend.blocks.twitter._mappers import ( + get_backend_expansion, + get_backend_field, + get_backend_list_expansion, + get_backend_list_field, + get_backend_media_field, + get_backend_place_field, + get_backend_poll_field, + get_backend_space_expansion, + get_backend_space_field, + get_backend_user_field, +) +from backend.blocks.twitter._types import ( # DMEventFieldFilter, + DMEventExpansionFilter, + DMEventTypeFilter, + DMMediaFieldFilter, + DMTweetFieldFilter, + ExpansionFilter, + ListExpansionsFilter, + ListFieldsFilter, + SpaceExpansionsFilter, + SpaceFieldsFilter, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetReplySettingsFilter, + TweetUserFieldsFilter, + UserExpansionsFilter, +) + + +# Common Builder +class TweetExpansionsBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_expansions(self, expansions: ExpansionFilter | None): + if expansions: + filtered_expansions = [ + name for name, value in expansions.dict().items() if value is True + ] + + if filtered_expansions: + self.params["expansions"] = ",".join( + [get_backend_expansion(exp) for exp in filtered_expansions] + ) + + return self + + def add_media_fields(self, 
media_fields: TweetMediaFieldsFilter | None): + if media_fields: + filtered_fields = [ + name for name, value in media_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["media.fields"] = ",".join( + [get_backend_media_field(field) for field in filtered_fields] + ) + return self + + def add_place_fields(self, place_fields: TweetPlaceFieldsFilter | None): + if place_fields: + filtered_fields = [ + name for name, value in place_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["place.fields"] = ",".join( + [get_backend_place_field(field) for field in filtered_fields] + ) + return self + + def add_poll_fields(self, poll_fields: TweetPollFieldsFilter | None): + if poll_fields: + filtered_fields = [ + name for name, value in poll_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["poll.fields"] = ",".join( + [get_backend_poll_field(field) for field in filtered_fields] + ) + return self + + def add_tweet_fields(self, tweet_fields: TweetFieldsFilter | None): + if tweet_fields: + filtered_fields = [ + name for name, value in tweet_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["tweet.fields"] = ",".join( + [get_backend_field(field) for field in filtered_fields] + ) + return self + + def add_user_fields(self, user_fields: TweetUserFieldsFilter | None): + if user_fields: + filtered_fields = [ + name for name, value in user_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["user.fields"] = ",".join( + [get_backend_user_field(field) for field in filtered_fields] + ) + return self + + def build(self): + return self.params + + +class UserExpansionsBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_expansions(self, expansions: UserExpansionsFilter | None): + if expansions: + filtered_expansions = [ + name for name, value in expansions.dict().items() if value is True + ] + if filtered_expansions: + self.params["expansions"] = ",".join(filtered_expansions) + return self + + def add_tweet_fields(self, tweet_fields: TweetFieldsFilter | None): + if tweet_fields: + filtered_fields = [ + name for name, value in tweet_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["tweet.fields"] = ",".join( + [get_backend_field(field) for field in filtered_fields] + ) + return self + + def add_user_fields(self, user_fields: TweetUserFieldsFilter | None): + if user_fields: + filtered_fields = [ + name for name, value in user_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["user.fields"] = ",".join( + [get_backend_user_field(field) for field in filtered_fields] + ) + return self + + def build(self): + return self.params + + +class ListExpansionsBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_expansions(self, expansions: ListExpansionsFilter | None): + if expansions: + filtered_expansions = [ + name for name, value in expansions.dict().items() if value is True + ] + if filtered_expansions: + self.params["expansions"] = ",".join( + [get_backend_list_expansion(exp) for exp in filtered_expansions] + ) + return self + + def add_list_fields(self, list_fields: ListFieldsFilter | None): + if list_fields: + filtered_fields = [ + name for name, value in list_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["list.fields"] = ",".join( + [get_backend_list_field(field) for field in filtered_fields] + ) 
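# --- Illustrative sketch (not part of the diff): every add_* method in these builders repeats
# the same pattern -- keep the filter fields set to True, map them to their backend names, and
# join them into one comma-separated query parameter. Stand-alone version with a plain dict in
# place of the pydantic filter models (names local to this sketch):
def to_param(selected_fields: dict[str, bool], mapping: dict[str, str]) -> str | None:
    chosen = [name for name, value in selected_fields.items() if value is True]
    return ",".join(mapping[name] for name in chosen) if chosen else None

# to_param({"List_Owner_ID": True}, {"List_Owner_ID": "owner_id"}) == "owner_id", which is what
# ListExpansionsBuilder.add_expansions() writes into params["expansions"].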
+ return self + + def add_user_fields(self, user_fields: TweetUserFieldsFilter | None): + if user_fields: + filtered_fields = [ + name for name, value in user_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["user.fields"] = ",".join( + [get_backend_user_field(field) for field in filtered_fields] + ) + return self + + def build(self): + return self.params + + +class SpaceExpansionsBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_expansions(self, expansions: SpaceExpansionsFilter | None): + if expansions: + filtered_expansions = [ + name for name, value in expansions.dict().items() if value is True + ] + if filtered_expansions: + self.params["expansions"] = ",".join( + [get_backend_space_expansion(exp) for exp in filtered_expansions] + ) + return self + + def add_space_fields(self, space_fields: SpaceFieldsFilter | None): + if space_fields: + filtered_fields = [ + name for name, value in space_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["space.fields"] = ",".join( + [get_backend_space_field(field) for field in filtered_fields] + ) + return self + + def add_user_fields(self, user_fields: TweetUserFieldsFilter | None): + if user_fields: + filtered_fields = [ + name for name, value in user_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["user.fields"] = ",".join( + [get_backend_user_field(field) for field in filtered_fields] + ) + return self + + def build(self): + return self.params + + +class TweetDurationBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_start_time(self, start_time: datetime | None): + if start_time: + self.params["start_time"] = start_time + return self + + def add_end_time(self, end_time: datetime | None): + if end_time: + self.params["end_time"] = end_time + return self + + def add_since_id(self, since_id: str | None): + if since_id: + self.params["since_id"] = since_id + return self + + def add_until_id(self, until_id: str | None): + if until_id: + self.params["until_id"] = until_id + return self + + def add_sort_order(self, sort_order: str | None): + if sort_order: + self.params["sort_order"] = sort_order + return self + + def build(self): + return self.params + + +class DMExpansionsBuilder: + def __init__(self, param: Dict[str, Any]): + self.params: Dict[str, Any] = param + + def add_expansions(self, expansions: DMEventExpansionFilter): + if expansions: + filtered_expansions = [ + name for name, value in expansions.dict().items() if value is True + ] + if filtered_expansions: + self.params["expansions"] = ",".join(filtered_expansions) + return self + + def add_event_types(self, event_types: DMEventTypeFilter): + if event_types: + filtered_types = [ + name for name, value in event_types.dict().items() if value is True + ] + if filtered_types: + self.params["event_types"] = ",".join(filtered_types) + return self + + def add_media_fields(self, media_fields: DMMediaFieldFilter): + if media_fields: + filtered_fields = [ + name for name, value in media_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["media.fields"] = ",".join(filtered_fields) + return self + + def add_tweet_fields(self, tweet_fields: DMTweetFieldFilter): + if tweet_fields: + filtered_fields = [ + name for name, value in tweet_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["tweet.fields"] = ",".join(filtered_fields) + return self + + def 
add_user_fields(self, user_fields: TweetUserFieldsFilter): + if user_fields: + filtered_fields = [ + name for name, value in user_fields.dict().items() if value is True + ] + if filtered_fields: + self.params["user.fields"] = ",".join(filtered_fields) + return self + + def build(self): + return self.params + + +# Specific Builders +class TweetSearchBuilder: + def __init__(self): + self.params: Dict[str, Any] = {"user_auth": False} + + def add_query(self, query: str): + if query: + self.params["query"] = query + return self + + def add_pagination(self, max_results: int, pagination: str | None): + if max_results: + self.params["max_results"] = max_results + if pagination: + self.params["pagination_token"] = pagination + return self + + def build(self): + return self.params + + +class TweetPostBuilder: + def __init__(self): + self.params: Dict[str, Any] = {"user_auth": False} + + def add_text(self, text: str | None): + if text: + self.params["text"] = text + return self + + def add_media(self, media_ids: list, tagged_user_ids: list): + if media_ids: + self.params["media_ids"] = media_ids + if tagged_user_ids: + self.params["media_tagged_user_ids"] = tagged_user_ids + return self + + def add_deep_link(self, link: str): + if link: + self.params["direct_message_deep_link"] = link + return self + + def add_super_followers(self, for_super_followers: bool): + if for_super_followers: + self.params["for_super_followers_only"] = for_super_followers + return self + + def add_place(self, place_id: str): + if place_id: + self.params["place_id"] = place_id + return self + + def add_poll_options(self, poll_options: list): + if poll_options: + self.params["poll_options"] = poll_options + return self + + def add_poll_duration(self, poll_duration_minutes: int): + if poll_duration_minutes: + self.params["poll_duration_minutes"] = poll_duration_minutes + return self + + def add_quote(self, quote_id: str): + if quote_id: + self.params["quote_tweet_id"] = quote_id + return self + + def add_reply_settings( + self, + exclude_user_ids: list, + reply_to_id: str, + settings: TweetReplySettingsFilter, + ): + if exclude_user_ids: + self.params["exclude_reply_user_ids"] = exclude_user_ids + if reply_to_id: + self.params["in_reply_to_tweet_id"] = reply_to_id + if settings.All_Users: + self.params["reply_settings"] = None + elif settings.Following_Users_Only: + self.params["reply_settings"] = "following" + elif settings.Mentioned_Users_Only: + self.params["reply_settings"] = "mentionedUsers" + return self + + def build(self): + return self.params + + +class TweetGetsBuilder: + def __init__(self): + self.params: Dict[str, Any] = {"user_auth": False} + + def add_id(self, tweet_id: list[str]): + self.params["id"] = tweet_id + return self + + def build(self): + return self.params diff --git a/autogpt_platform/backend/backend/blocks/twitter/_mappers.py b/autogpt_platform/backend/backend/blocks/twitter/_mappers.py new file mode 100644 index 000000000000..a564174ed0f2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/_mappers.py @@ -0,0 +1,234 @@ +# -------------- Tweets ----------------- + +# Tweet Expansions +EXPANSION_FRONTEND_TO_BACKEND_MAPPING = { + "Poll_IDs": "attachments.poll_ids", + "Media_Keys": "attachments.media_keys", + "Author_User_ID": "author_id", + "Edit_History_Tweet_IDs": "edit_history_tweet_ids", + "Mentioned_Usernames": "entities.mentions.username", + "Place_ID": "geo.place_id", + "Reply_To_User_ID": "in_reply_to_user_id", + "Referenced_Tweet_ID": "referenced_tweets.id", + 
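# The frontend-facing keys in these mapping tables mirror the field names of the filter models
# in _types.py; the get_backend_*() helpers below are strict lookups and raise KeyError for an
# unknown key rather than passing it through to the Twitter API.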
"Referenced_Tweet_Author_ID": "referenced_tweets.id.author_id", +} + + +def get_backend_expansion(frontend_key: str) -> str: + result = EXPANSION_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid expansion key: {frontend_key}") + return result + + +# TweetReplySettings +REPLY_SETTINGS_FRONTEND_TO_BACKEND_MAPPING = { + "Mentioned_Users_Only": "mentionedUsers", + "Following_Users_Only": "following", + "All_Users": "all", +} + + +# TweetUserFields +def get_backend_reply_setting(frontend_key: str) -> str: + result = REPLY_SETTINGS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid reply setting key: {frontend_key}") + return result + + +USER_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Account_Creation_Date": "created_at", + "User_Bio": "description", + "User_Entities": "entities", + "User_ID": "id", + "User_Location": "location", + "Latest_Tweet_ID": "most_recent_tweet_id", + "Display_Name": "name", + "Pinned_Tweet_ID": "pinned_tweet_id", + "Profile_Picture_URL": "profile_image_url", + "Is_Protected_Account": "protected", + "Account_Statistics": "public_metrics", + "Profile_URL": "url", + "Username": "username", + "Is_Verified": "verified", + "Verification_Type": "verified_type", + "Content_Withholding_Info": "withheld", +} + + +def get_backend_user_field(frontend_key: str) -> str: + result = USER_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid user field key: {frontend_key}") + return result + + +# TweetFields +FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Tweet_Attachments": "attachments", + "Author_ID": "author_id", + "Context_Annotations": "context_annotations", + "Conversation_ID": "conversation_id", + "Creation_Time": "created_at", + "Edit_Controls": "edit_controls", + "Tweet_Entities": "entities", + "Geographic_Location": "geo", + "Tweet_ID": "id", + "Reply_To_User_ID": "in_reply_to_user_id", + "Language": "lang", + "Public_Metrics": "public_metrics", + "Sensitive_Content_Flag": "possibly_sensitive", + "Referenced_Tweets": "referenced_tweets", + "Reply_Settings": "reply_settings", + "Tweet_Source": "source", + "Tweet_Text": "text", + "Withheld_Content": "withheld", +} + + +def get_backend_field(frontend_key: str) -> str: + result = FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid field key: {frontend_key}") + return result + + +# TweetPollFields +POLL_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Duration_Minutes": "duration_minutes", + "End_DateTime": "end_datetime", + "Poll_ID": "id", + "Poll_Options": "options", + "Voting_Status": "voting_status", +} + + +def get_backend_poll_field(frontend_key: str) -> str: + result = POLL_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid poll field key: {frontend_key}") + return result + + +PLACE_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Contained_Within_Places": "contained_within", + "Country": "country", + "Country_Code": "country_code", + "Full_Location_Name": "full_name", + "Geographic_Coordinates": "geo", + "Place_ID": "id", + "Place_Name": "name", + "Place_Type": "place_type", +} + + +def get_backend_place_field(frontend_key: str) -> str: + result = PLACE_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid place field key: {frontend_key}") + return result + + +# TweetMediaFields +MEDIA_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Duration_in_Milliseconds": 
"duration_ms", + "Height": "height", + "Media_Key": "media_key", + "Preview_Image_URL": "preview_image_url", + "Media_Type": "type", + "Media_URL": "url", + "Width": "width", + "Public_Metrics": "public_metrics", + "Non_Public_Metrics": "non_public_metrics", + "Organic_Metrics": "organic_metrics", + "Promoted_Metrics": "promoted_metrics", + "Alternative_Text": "alt_text", + "Media_Variants": "variants", +} + + +def get_backend_media_field(frontend_key: str) -> str: + result = MEDIA_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid media field key: {frontend_key}") + return result + + +# -------------- Spaces ----------------- + +# SpaceExpansions +EXPANSION_FRONTEND_TO_BACKEND_MAPPING_SPACE = { + "Invited_Users": "invited_user_ids", + "Speakers": "speaker_ids", + "Creator": "creator_id", + "Hosts": "host_ids", + "Topics": "topic_ids", +} + + +def get_backend_space_expansion(frontend_key: str) -> str: + result = EXPANSION_FRONTEND_TO_BACKEND_MAPPING_SPACE.get(frontend_key) + if result is None: + raise KeyError(f"Invalid expansion key: {frontend_key}") + return result + + +# SpaceFields +SPACE_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "Space_ID": "id", + "Space_State": "state", + "Creation_Time": "created_at", + "End_Time": "ended_at", + "Host_User_IDs": "host_ids", + "Language": "lang", + "Is_Ticketed": "is_ticketed", + "Invited_User_IDs": "invited_user_ids", + "Participant_Count": "participant_count", + "Subscriber_Count": "subscriber_count", + "Scheduled_Start_Time": "scheduled_start", + "Speaker_User_IDs": "speaker_ids", + "Start_Time": "started_at", + "Space_Title": "title", + "Topic_IDs": "topic_ids", + "Last_Updated_Time": "updated_at", +} + + +def get_backend_space_field(frontend_key: str) -> str: + result = SPACE_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid space field key: {frontend_key}") + return result + + +# -------------- List Expansions ----------------- + +# ListExpansions +LIST_EXPANSION_FRONTEND_TO_BACKEND_MAPPING = {"List_Owner_ID": "owner_id"} + + +def get_backend_list_expansion(frontend_key: str) -> str: + result = LIST_EXPANSION_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid list expansion key: {frontend_key}") + return result + + +LIST_FIELDS_FRONTEND_TO_BACKEND_MAPPING = { + "List_ID": "id", + "List_Name": "name", + "Creation_Date": "created_at", + "Description": "description", + "Follower_Count": "follower_count", + "Member_Count": "member_count", + "Is_Private": "private", + "Owner_ID": "owner_id", +} + + +def get_backend_list_field(frontend_key: str) -> str: + result = LIST_FIELDS_FRONTEND_TO_BACKEND_MAPPING.get(frontend_key) + if result is None: + raise KeyError(f"Invalid list field key: {frontend_key}") + return result diff --git a/autogpt_platform/backend/backend/blocks/twitter/_serializer.py b/autogpt_platform/backend/backend/blocks/twitter/_serializer.py new file mode 100644 index 000000000000..906c52445686 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/_serializer.py @@ -0,0 +1,76 @@ +from typing import Any, Dict, List + + +class BaseSerializer: + @staticmethod + def _serialize_value(value: Any) -> Any: + """Helper method to serialize individual values""" + if hasattr(value, "data"): + return value.data + return value + + +class IncludesSerializer(BaseSerializer): + @classmethod + def serialize(cls, includes: Dict[str, Any]) -> Dict[str, Any]: + """Serializes the includes dictionary""" + 
if not includes: + return {} + + serialized_includes = {} + for key, value in includes.items(): + if isinstance(value, list): + serialized_includes[key] = [ + cls._serialize_value(item) for item in value + ] + else: + serialized_includes[key] = cls._serialize_value(value) + + return serialized_includes + + +class ResponseDataSerializer(BaseSerializer): + @classmethod + def serialize_dict(cls, item: Dict[str, Any]) -> Dict[str, Any]: + """Serializes a single dictionary item""" + serialized_item = {} + + if hasattr(item, "__dict__"): + items = item.__dict__.items() + else: + items = item.items() + + for key, value in items: + if isinstance(value, list): + serialized_item[key] = [ + cls._serialize_value(sub_item) for sub_item in value + ] + else: + serialized_item[key] = cls._serialize_value(value) + + return serialized_item + + @classmethod + def serialize_list(cls, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Serializes a list of dictionary items""" + return [cls.serialize_dict(item) for item in data] + + +class ResponseSerializer: + @classmethod + def serialize(cls, response) -> Dict[str, Any]: + """Main serializer that handles both data and includes""" + result = {"data": None, "included": {}} + + # Handle response.data + if response.data: + if isinstance(response.data, list): + result["data"] = ResponseDataSerializer.serialize_list(response.data) + else: + result["data"] = ResponseDataSerializer.serialize_dict(response.data) + + # Handle includes + if hasattr(response, "includes") and response.includes: + result["included"] = IncludesSerializer.serialize(response.includes) + + return result diff --git a/autogpt_platform/backend/backend/blocks/twitter/_types.py b/autogpt_platform/backend/backend/blocks/twitter/_types.py new file mode 100644 index 000000000000..2b404e4f560e --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/_types.py @@ -0,0 +1,443 @@ +from datetime import datetime +from enum import Enum + +from pydantic import BaseModel + +from backend.data.block import BlockSchema +from backend.data.model import SchemaField + +# -------------- Tweets ----------------- + + +class TweetReplySettingsFilter(BaseModel): + Mentioned_Users_Only: bool = False + Following_Users_Only: bool = False + All_Users: bool = False + + +class TweetUserFieldsFilter(BaseModel): + Account_Creation_Date: bool = False + User_Bio: bool = False + User_Entities: bool = False + User_ID: bool = False + User_Location: bool = False + Latest_Tweet_ID: bool = False + Display_Name: bool = False + Pinned_Tweet_ID: bool = False + Profile_Picture_URL: bool = False + Is_Protected_Account: bool = False + Account_Statistics: bool = False + Profile_URL: bool = False + Username: bool = False + Is_Verified: bool = False + Verification_Type: bool = False + Content_Withholding_Info: bool = False + + +class TweetFieldsFilter(BaseModel): + Tweet_Attachments: bool = False + Author_ID: bool = False + Context_Annotations: bool = False + Conversation_ID: bool = False + Creation_Time: bool = False + Edit_Controls: bool = False + Tweet_Entities: bool = False + Geographic_Location: bool = False + Tweet_ID: bool = False + Reply_To_User_ID: bool = False + Language: bool = False + Public_Metrics: bool = False + Sensitive_Content_Flag: bool = False + Referenced_Tweets: bool = False + Reply_Settings: bool = False + Tweet_Source: bool = False + Tweet_Text: bool = False + Withheld_Content: bool = False + + +class PersonalTweetFieldsFilter(BaseModel): + attachments: bool = False + author_id: bool = False + 
context_annotations: bool = False + conversation_id: bool = False + created_at: bool = False + edit_controls: bool = False + entities: bool = False + geo: bool = False + id: bool = False + in_reply_to_user_id: bool = False + lang: bool = False + non_public_metrics: bool = False + public_metrics: bool = False + organic_metrics: bool = False + promoted_metrics: bool = False + possibly_sensitive: bool = False + referenced_tweets: bool = False + reply_settings: bool = False + source: bool = False + text: bool = False + withheld: bool = False + + +class TweetPollFieldsFilter(BaseModel): + Duration_Minutes: bool = False + End_DateTime: bool = False + Poll_ID: bool = False + Poll_Options: bool = False + Voting_Status: bool = False + + +class TweetPlaceFieldsFilter(BaseModel): + Contained_Within_Places: bool = False + Country: bool = False + Country_Code: bool = False + Full_Location_Name: bool = False + Geographic_Coordinates: bool = False + Place_ID: bool = False + Place_Name: bool = False + Place_Type: bool = False + + +class TweetMediaFieldsFilter(BaseModel): + Duration_in_Milliseconds: bool = False + Height: bool = False + Media_Key: bool = False + Preview_Image_URL: bool = False + Media_Type: bool = False + Media_URL: bool = False + Width: bool = False + Public_Metrics: bool = False + Non_Public_Metrics: bool = False + Organic_Metrics: bool = False + Promoted_Metrics: bool = False + Alternative_Text: bool = False + Media_Variants: bool = False + + +class ExpansionFilter(BaseModel): + Poll_IDs: bool = False + Media_Keys: bool = False + Author_User_ID: bool = False + Edit_History_Tweet_IDs: bool = False + Mentioned_Usernames: bool = False + Place_ID: bool = False + Reply_To_User_ID: bool = False + Referenced_Tweet_ID: bool = False + Referenced_Tweet_Author_ID: bool = False + + +class TweetExcludesFilter(BaseModel): + retweets: bool = False + replies: bool = False + + +# -------------- Users ----------------- + + +class UserExpansionsFilter(BaseModel): + pinned_tweet_id: bool = False + + +# -------------- DM's' ----------------- + + +class DMEventFieldFilter(BaseModel): + id: bool = False + text: bool = False + event_type: bool = False + created_at: bool = False + dm_conversation_id: bool = False + sender_id: bool = False + participant_ids: bool = False + referenced_tweets: bool = False + attachments: bool = False + + +class DMEventTypeFilter(BaseModel): + MessageCreate: bool = False + ParticipantsJoin: bool = False + ParticipantsLeave: bool = False + + +class DMEventExpansionFilter(BaseModel): + attachments_media_keys: bool = False + referenced_tweets_id: bool = False + sender_id: bool = False + participant_ids: bool = False + + +class DMMediaFieldFilter(BaseModel): + duration_ms: bool = False + height: bool = False + media_key: bool = False + preview_image_url: bool = False + type: bool = False + url: bool = False + width: bool = False + public_metrics: bool = False + alt_text: bool = False + variants: bool = False + + +class DMTweetFieldFilter(BaseModel): + attachments: bool = False + author_id: bool = False + context_annotations: bool = False + conversation_id: bool = False + created_at: bool = False + edit_controls: bool = False + entities: bool = False + geo: bool = False + id: bool = False + in_reply_to_user_id: bool = False + lang: bool = False + public_metrics: bool = False + possibly_sensitive: bool = False + referenced_tweets: bool = False + reply_settings: bool = False + source: bool = False + text: bool = False + withheld: bool = False + + +# -------------- Spaces ----------------- 
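# --- Illustrative sketch (not part of the diff): how these boolean filter models are consumed.
# A block receives e.g. an ExpansionFilter / TweetUserFieldsFilter as input, and the builders in
# _builders.py turn the fields set to True into Twitter API query params via the _mappers tables.
# Import path as introduced in this diff; the commented result is what the builder produces.
from backend.blocks.twitter._builders import TweetExpansionsBuilder

params = (
    TweetExpansionsBuilder({"id": "1234567890", "user_auth": False})
    .add_expansions(ExpansionFilter(Media_Keys=True, Author_User_ID=True))
    .add_user_fields(TweetUserFieldsFilter(Profile_Picture_URL=True, Username=True))
    .build()
)
# params == {"id": "1234567890", "user_auth": False,
#            "expansions": "attachments.media_keys,author_id",
#            "user.fields": "profile_image_url,username"}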
+ + +class SpaceExpansionsFilter(BaseModel): + Invited_Users: bool = False + Speakers: bool = False + Creator: bool = False + Hosts: bool = False + Topics: bool = False + + +class SpaceFieldsFilter(BaseModel): + Space_ID: bool = False + Space_State: bool = False + Creation_Time: bool = False + End_Time: bool = False + Host_User_IDs: bool = False + Language: bool = False + Is_Ticketed: bool = False + Invited_User_IDs: bool = False + Participant_Count: bool = False + Subscriber_Count: bool = False + Scheduled_Start_Time: bool = False + Speaker_User_IDs: bool = False + Start_Time: bool = False + Space_Title: bool = False + Topic_IDs: bool = False + Last_Updated_Time: bool = False + + +class SpaceStatesFilter(str, Enum): + live = "live" + scheduled = "scheduled" + all = "all" + + +# -------------- List Expansions ----------------- + + +class ListExpansionsFilter(BaseModel): + List_Owner_ID: bool = False + + +class ListFieldsFilter(BaseModel): + List_ID: bool = False + List_Name: bool = False + Creation_Date: bool = False + Description: bool = False + Follower_Count: bool = False + Member_Count: bool = False + Is_Private: bool = False + Owner_ID: bool = False + + +# --------- [Input Types] ------------- +class TweetExpansionInputs(BlockSchema): + + expansions: ExpansionFilter | None = SchemaField( + description="Choose what extra information you want to get with your tweets. For example:\n- Select 'Media_Keys' to get media details\n- Select 'Author_User_ID' to get user information\n- Select 'Place_ID' to get location details", + placeholder="Pick the extra information you want to see", + default=None, + advanced=True, + ) + + media_fields: TweetMediaFieldsFilter | None = SchemaField( + description="Select what media information you want to see (images, videos, etc). To use this, you must first select 'Media_Keys' in the expansions above.", + placeholder="Choose what media details you want to see", + default=None, + advanced=True, + ) + + place_fields: TweetPlaceFieldsFilter | None = SchemaField( + description="Select what location information you want to see (country, coordinates, etc). To use this, you must first select 'Place_ID' in the expansions above.", + placeholder="Choose what location details you want to see", + default=None, + advanced=True, + ) + + poll_fields: TweetPollFieldsFilter | None = SchemaField( + description="Select what poll information you want to see (options, voting status, etc). To use this, you must first select 'Poll_IDs' in the expansions above.", + placeholder="Choose what poll details you want to see", + default=None, + advanced=True, + ) + + tweet_fields: TweetFieldsFilter | None = SchemaField( + description="Select what tweet information you want to see. For referenced tweets (like retweets), select 'Referenced_Tweet_ID' in the expansions above.", + placeholder="Choose what tweet details you want to see", + default=None, + advanced=True, + ) + + user_fields: TweetUserFieldsFilter | None = SchemaField( + description="Select what user information you want to see. 
To use this, you must first select one of these in expansions above:\n- 'Author_User_ID' for tweet authors\n- 'Mentioned_Usernames' for mentioned users\n- 'Reply_To_User_ID' for users being replied to\n- 'Referenced_Tweet_Author_ID' for authors of referenced tweets", + placeholder="Choose what user details you want to see", + default=None, + advanced=True, + ) + + +class DMEventExpansionInputs(BlockSchema): + expansions: DMEventExpansionFilter | None = SchemaField( + description="Select expansions to include related data objects in the 'includes' section.", + placeholder="Enter expansions", + default=None, + advanced=True, + ) + + event_types: DMEventTypeFilter | None = SchemaField( + description="Select DM event types to include in the response.", + placeholder="Enter event types", + default=None, + advanced=True, + ) + + media_fields: DMMediaFieldFilter | None = SchemaField( + description="Select media fields to include in the response (requires expansions=attachments.media_keys).", + placeholder="Enter media fields", + default=None, + advanced=True, + ) + + tweet_fields: DMTweetFieldFilter | None = SchemaField( + description="Select tweet fields to include in the response (requires expansions=referenced_tweets.id).", + placeholder="Enter tweet fields", + default=None, + advanced=True, + ) + + user_fields: TweetUserFieldsFilter | None = SchemaField( + description="Select user fields to include in the response (requires expansions=sender_id or participant_ids).", + placeholder="Enter user fields", + default=None, + advanced=True, + ) + + +class UserExpansionInputs(BlockSchema): + expansions: UserExpansionsFilter | None = SchemaField( + description="Choose what extra information you want to get with user data. Currently only 'pinned_tweet_id' is available to see a user's pinned tweet.", + placeholder="Select extra user information to include", + default=None, + advanced=True, + ) + + tweet_fields: TweetFieldsFilter | None = SchemaField( + description="Select what tweet information you want to see in pinned tweets. This only works if you select 'pinned_tweet_id' in expansions above.", + placeholder="Choose what details to see in pinned tweets", + default=None, + advanced=True, + ) + + user_fields: TweetUserFieldsFilter | None = SchemaField( + description="Select what user information you want to see, like username, bio, profile picture, etc.", + placeholder="Choose what user details you want to see", + default=None, + advanced=True, + ) + + +class SpaceExpansionInputs(BlockSchema): + expansions: SpaceExpansionsFilter | None = SchemaField( + description="Choose additional information you want to get with your Twitter Spaces:\n- Select 'Invited_Users' to see who was invited\n- Select 'Speakers' to see who can speak\n- Select 'Creator' to get details about who made the Space\n- Select 'Hosts' to see who's hosting\n- Select 'Topics' to see Space topics", + placeholder="Pick what extra information you want to see about the Space", + default=None, + advanced=True, + ) + + space_fields: SpaceFieldsFilter | None = SchemaField( + description="Choose what Space details you want to see, such as:\n- Title\n- Start/End times\n- Number of participants\n- Language\n- State (live/scheduled)\n- And more", + placeholder="Choose what Space information you want to get", + default=SpaceFieldsFilter(Space_Title=True, Host_User_IDs=True), + advanced=True, + ) + + user_fields: TweetUserFieldsFilter | None = SchemaField( + description="Choose what user information you want to see. 
This works when you select any of these in expansions above:\n- 'Creator' for Space creator details\n- 'Hosts' for host information\n- 'Speakers' for speaker details\n- 'Invited_Users' for invited user information", + placeholder="Pick what details you want to see about the users", + default=None, + advanced=True, + ) + + +class ListExpansionInputs(BlockSchema): + expansions: ListExpansionsFilter | None = SchemaField( + description="Choose what extra information you want to get with your Twitter Lists:\n- Select 'List_Owner_ID' to get details about who owns the list\n\nThis will let you see more details about the list owner when you also select user fields below.", + placeholder="Pick what extra list information you want to see", + default=ListExpansionsFilter(List_Owner_ID=True), + advanced=True, + ) + + user_fields: TweetUserFieldsFilter | None = SchemaField( + description="Choose what information you want to see about list owners. This only works when you select 'List_Owner_ID' in expansions above.\n\nYou can see things like:\n- Their username\n- Profile picture\n- Account details\n- And more", + placeholder="Select what details you want to see about list owners", + default=TweetUserFieldsFilter(User_ID=True, Username=True), + advanced=True, + ) + + list_fields: ListFieldsFilter | None = SchemaField( + description="Choose what information you want to see about the Twitter Lists themselves, such as:\n- List name\n- Description\n- Number of followers\n- Number of members\n- Whether it's private\n- Creation date\n- And more", + placeholder="Pick what list details you want to see", + default=ListFieldsFilter(Owner_ID=True), + advanced=True, + ) + + +class TweetTimeWindowInputs(BlockSchema): + start_time: datetime | None = SchemaField( + description="Start time in YYYY-MM-DDTHH:mm:ssZ format", + placeholder="Enter start time", + default=None, + advanced=False, + ) + + end_time: datetime | None = SchemaField( + description="End time in YYYY-MM-DDTHH:mm:ssZ format", + placeholder="Enter end time", + default=None, + advanced=False, + ) + + since_id: str | None = SchemaField( + description="Returns results with Tweet ID greater than this (more recent than), we give priority to since_id over start_time", + placeholder="Enter since ID", + default=None, + advanced=True, + ) + + until_id: str | None = SchemaField( + description="Returns results with Tweet ID less than this (that is, older than), and used with since_id", + placeholder="Enter until ID", + default=None, + advanced=True, + ) + + sort_order: str | None = SchemaField( + description="Order of returned tweets (recency or relevancy)", + placeholder="Enter sort order", + default=None, + advanced=True, + ) diff --git a/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py new file mode 100644 index 000000000000..56a62d166eaf --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/direct_message/direct_message_lookup.py @@ -0,0 +1,201 @@ +# Todo : Add new Type support + +# from typing import cast +# import tweepy +# from tweepy.client import Response + +# from backend.blocks.twitter._serializer import IncludesSerializer, ResponseDataSerializer +# from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +# from backend.data.model import SchemaField +# from backend.blocks.twitter._builders import DMExpansionsBuilder +# from backend.blocks.twitter._types import DMEventExpansion, 
DMEventExpansionInputs, DMEventType, DMMediaField, DMTweetField, TweetUserFields +# from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +# from backend.blocks.twitter._auth import ( +# TEST_CREDENTIALS, +# TEST_CREDENTIALS_INPUT, +# TwitterCredentials, +# TwitterCredentialsField, +# TwitterCredentialsInput, +# ) + +# Require Pro or Enterprise plan [Manual Testing Required] +# class TwitterGetDMEventsBlock(Block): +# """ +# Gets a list of Direct Message events for the authenticated user +# """ + +# class Input(DMEventExpansionInputs): +# credentials: TwitterCredentialsInput = TwitterCredentialsField( +# ["dm.read", "offline.access", "user.read", "tweet.read"] +# ) + +# dm_conversation_id: str = SchemaField( +# description="The ID of the Direct Message conversation", +# placeholder="Enter conversation ID", +# required=True +# ) + +# max_results: int = SchemaField( +# description="Maximum number of results to return (1-100)", +# placeholder="Enter max results", +# advanced=True, +# default=10, +# ) + +# pagination_token: str = SchemaField( +# description="Token for pagination", +# placeholder="Enter pagination token", +# advanced=True, +# default="" +# ) + +# class Output(BlockSchema): +# # Common outputs +# event_ids: list[str] = SchemaField(description="DM Event IDs") +# event_texts: list[str] = SchemaField(description="DM Event text contents") +# event_types: list[str] = SchemaField(description="Types of DM events") +# next_token: str = SchemaField(description="Token for next page of results") + +# # Complete outputs +# data: list[dict] = SchemaField(description="Complete DM events data") +# included: dict = SchemaField(description="Additional data requested via expansions") +# meta: dict = SchemaField(description="Metadata about the response") +# error: str = SchemaField(description="Error message if request failed") + +# def __init__(self): +# super().__init__( +# id="dc37a6d4-a62e-11ef-a3a5-03061375737b", +# description="This block retrieves Direct Message events for the authenticated user.", +# categories={BlockCategory.SOCIAL}, +# input_schema=TwitterGetDMEventsBlock.Input, +# output_schema=TwitterGetDMEventsBlock.Output, +# test_input={ +# "dm_conversation_id": "1234567890", +# "max_results": 10, +# "credentials": TEST_CREDENTIALS_INPUT, +# "expansions": [], +# "event_types": [], +# "media_fields": [], +# "tweet_fields": [], +# "user_fields": [] +# }, +# test_credentials=TEST_CREDENTIALS, +# test_output=[ +# ("event_ids", ["1346889436626259968"]), +# ("event_texts", ["Hello just you..."]), +# ("event_types", ["MessageCreate"]), +# ("next_token", None), +# ("data", [{"id": "1346889436626259968", "text": "Hello just you...", "event_type": "MessageCreate"}]), +# ("included", {}), +# ("meta", {}), +# ("error", "") +# ], +# test_mock={ +# "get_dm_events": lambda *args, **kwargs: ( +# [{"id": "1346889436626259968", "text": "Hello just you...", "event_type": "MessageCreate"}], +# {}, +# {}, +# ["1346889436626259968"], +# ["Hello just you..."], +# ["MessageCreate"], +# None +# ) +# } +# ) + +# @staticmethod +# def get_dm_events( +# credentials: TwitterCredentials, +# dm_conversation_id: str, +# max_results: int, +# pagination_token: str, +# expansions: list[DMEventExpansion], +# event_types: list[DMEventType], +# media_fields: list[DMMediaField], +# tweet_fields: list[DMTweetField], +# user_fields: list[TweetUserFields] +# ): +# try: +# client = tweepy.Client( +# bearer_token=credentials.access_token.get_secret_value() +# ) + +# params = { +# "dm_conversation_id": 
dm_conversation_id, +# "max_results": max_results, +# "pagination_token": None if pagination_token == "" else pagination_token, +# "user_auth": False +# } + +# params = (DMExpansionsBuilder(params) +# .add_expansions(expansions) +# .add_event_types(event_types) +# .add_media_fields(media_fields) +# .add_tweet_fields(tweet_fields) +# .add_user_fields(user_fields) +# .build()) + +# response = cast(Response, client.get_direct_message_events(**params)) + +# meta = {} +# event_ids = [] +# event_texts = [] +# event_types = [] +# next_token = None + +# if response.meta: +# meta = response.meta +# next_token = meta.get("next_token") + +# included = IncludesSerializer.serialize(response.includes) +# data = ResponseDataSerializer.serialize_list(response.data) + +# if response.data: +# event_ids = [str(item.id) for item in response.data] +# event_texts = [item.text if hasattr(item, "text") else None for item in response.data] +# event_types = [item.event_type for item in response.data] + +# return data, included, meta, event_ids, event_texts, event_types, next_token + +# raise Exception("No DM events found") + +# except tweepy.TweepyException: +# raise + +# def run( +# self, +# input_data: Input, +# *, +# credentials: TwitterCredentials, +# **kwargs, +# ) -> BlockOutput: +# try: +# event_data, included, meta, event_ids, event_texts, event_types, next_token = self.get_dm_events( +# credentials, +# input_data.dm_conversation_id, +# input_data.max_results, +# input_data.pagination_token, +# input_data.expansions, +# input_data.event_types, +# input_data.media_fields, +# input_data.tweet_fields, +# input_data.user_fields +# ) + +# if event_ids: +# yield "event_ids", event_ids +# if event_texts: +# yield "event_texts", event_texts +# if event_types: +# yield "event_types", event_types +# if next_token: +# yield "next_token", next_token +# if event_data: +# yield "data", event_data +# if included: +# yield "included", included +# if meta: +# yield "meta", meta + +# except Exception as e: +# yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py b/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py new file mode 100644 index 000000000000..f25331db9748 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/direct_message/manage_direct_message.py @@ -0,0 +1,260 @@ +# Todo : Add new Type support + +# from typing import cast + +# import tweepy +# from tweepy.client import Response + +# from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +# from backend.data.model import SchemaField +# from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +# from backend.blocks.twitter._auth import ( +# TEST_CREDENTIALS, +# TEST_CREDENTIALS_INPUT, +# TwitterCredentials, +# TwitterCredentialsField, +# TwitterCredentialsInput, +# ) + +# Pro and Enterprise plan [Manual Testing Required] +# class TwitterSendDirectMessageBlock(Block): +# """ +# Sends a direct message to a Twitter user +# """ + +# class Input(BlockSchema): +# credentials: TwitterCredentialsInput = TwitterCredentialsField( +# ["offline.access", "direct_messages.write"] +# ) + +# participant_id: str = SchemaField( +# description="The User ID of the account to send DM to", +# placeholder="Enter recipient user ID", +# default="", +# advanced=False +# ) + +# dm_conversation_id: str = SchemaField( +# description="The conversation ID to send message to", +# placeholder="Enter conversation 
ID", +# default="", +# advanced=False +# ) + +# text: str = SchemaField( +# description="Text of the Direct Message (up to 10,000 characters)", +# placeholder="Enter message text", +# default="", +# advanced=False +# ) + +# media_id: str = SchemaField( +# description="Media ID to attach to the message", +# placeholder="Enter media ID", +# default="" +# ) + +# class Output(BlockSchema): +# dm_event_id: str = SchemaField(description="ID of the sent direct message") +# dm_conversation_id_: str = SchemaField(description="ID of the conversation") +# error: str = SchemaField(description="Error message if sending failed") + +# def __init__(self): +# super().__init__( +# id="f32f2786-a62e-11ef-a93d-a3ef199dde7f", +# description="This block sends a direct message to a specified Twitter user.", +# categories={BlockCategory.SOCIAL}, +# input_schema=TwitterSendDirectMessageBlock.Input, +# output_schema=TwitterSendDirectMessageBlock.Output, +# test_input={ +# "participant_id": "783214", +# "dm_conversation_id": "", +# "text": "Hello from Twitter API", +# "media_id": "", +# "credentials": TEST_CREDENTIALS_INPUT +# }, +# test_credentials=TEST_CREDENTIALS, +# test_output=[ +# ("dm_event_id", "0987654321"), +# ("dm_conversation_id_", "1234567890"), +# ("error", "") +# ], +# test_mock={ +# "send_direct_message": lambda *args, **kwargs: ( +# "0987654321", +# "1234567890" +# ) +# }, +# ) + +# @staticmethod +# def send_direct_message( +# credentials: TwitterCredentials, +# participant_id: str, +# dm_conversation_id: str, +# text: str, +# media_id: str +# ): +# try: +# client = tweepy.Client( +# bearer_token=credentials.access_token.get_secret_value() +# ) + +# response = cast( +# Response, +# client.create_direct_message( +# participant_id=None if participant_id == "" else participant_id, +# dm_conversation_id=None if dm_conversation_id == "" else dm_conversation_id, +# text=None if text == "" else text, +# media_id=None if media_id == "" else media_id, +# user_auth=False +# ) +# ) + +# if not response.data: +# raise Exception("Failed to send direct message") + +# return response.data["dm_event_id"], response.data["dm_conversation_id"] + +# except tweepy.TweepyException: +# raise +# except Exception as e: +# print(f"Unexpected error: {str(e)}") +# raise + +# def run( +# self, +# input_data: Input, +# *, +# credentials: TwitterCredentials, +# **kwargs, +# ) -> BlockOutput: +# try: +# dm_event_id, dm_conversation_id = self.send_direct_message( +# credentials, +# input_data.participant_id, +# input_data.dm_conversation_id, +# input_data.text, +# input_data.media_id +# ) +# yield "dm_event_id", dm_event_id +# yield "dm_conversation_id", dm_conversation_id + +# except Exception as e: +# yield "error", handle_tweepy_exception(e) + +# class TwitterCreateDMConversationBlock(Block): +# """ +# Creates a new group direct message conversation on Twitter +# """ + +# class Input(BlockSchema): +# credentials: TwitterCredentialsInput = TwitterCredentialsField( +# ["offline.access", "dm.write","dm.read","tweet.read","user.read"] +# ) + +# participant_ids: list[str] = SchemaField( +# description="Array of User IDs to create conversation with (max 50)", +# placeholder="Enter participant user IDs", +# default=[], +# advanced=False +# ) + +# text: str = SchemaField( +# description="Text of the Direct Message (up to 10,000 characters)", +# placeholder="Enter message text", +# default="", +# advanced=False +# ) + +# media_id: str = SchemaField( +# description="Media ID to attach to the message", +# placeholder="Enter media ID", +# 
default="", +# advanced=False +# ) + +# class Output(BlockSchema): +# dm_event_id: str = SchemaField(description="ID of the sent direct message") +# dm_conversation_id: str = SchemaField(description="ID of the conversation") +# error: str = SchemaField(description="Error message if sending failed") + +# def __init__(self): +# super().__init__( +# id="ec11cabc-a62e-11ef-8c0e-3fe37ba2ec92", +# description="This block creates a new group DM conversation with specified Twitter users.", +# categories={BlockCategory.SOCIAL}, +# input_schema=TwitterCreateDMConversationBlock.Input, +# output_schema=TwitterCreateDMConversationBlock.Output, +# test_input={ +# "participant_ids": ["783214", "2244994945"], +# "text": "Hello from Twitter API", +# "media_id": "", +# "credentials": TEST_CREDENTIALS_INPUT +# }, +# test_credentials=TEST_CREDENTIALS, +# test_output=[ +# ("dm_event_id", "0987654321"), +# ("dm_conversation_id", "1234567890"), +# ("error", "") +# ], +# test_mock={ +# "create_dm_conversation": lambda *args, **kwargs: ( +# "0987654321", +# "1234567890" +# ) +# }, +# ) + +# @staticmethod +# def create_dm_conversation( +# credentials: TwitterCredentials, +# participant_ids: list[str], +# text: str, +# media_id: str +# ): +# try: +# client = tweepy.Client( +# bearer_token=credentials.access_token.get_secret_value() +# ) + +# response = cast( +# Response, +# client.create_direct_message_conversation( +# participant_ids=participant_ids, +# text=None if text == "" else text, +# media_id=None if media_id == "" else media_id, +# user_auth=False +# ) +# ) + +# if not response.data: +# raise Exception("Failed to create DM conversation") + +# return response.data["dm_event_id"], response.data["dm_conversation_id"] + +# except tweepy.TweepyException: +# raise +# except Exception as e: +# print(f"Unexpected error: {str(e)}") +# raise + +# def run( +# self, +# input_data: Input, +# *, +# credentials: TwitterCredentials, +# **kwargs, +# ) -> BlockOutput: +# try: +# dm_event_id, dm_conversation_id = self.create_dm_conversation( +# credentials, +# input_data.participant_ids, +# input_data.text, +# input_data.media_id +# ) +# yield "dm_event_id", dm_event_id +# yield "dm_conversation_id", dm_conversation_id + +# except Exception as e: +# yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py new file mode 100644 index 000000000000..99a0108296ae --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_follows.py @@ -0,0 +1,470 @@ +# from typing import cast +import tweepy + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) + +# from backend.blocks.twitter._builders import UserExpansionsBuilder +# from backend.blocks.twitter._types import TweetFields, TweetUserFields, UserExpansionInputs, UserExpansions +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + +# from tweepy.client import Response + + +class TwitterUnfollowListBlock(Block): + """ + Unfollows a Twitter list for the authenticated user + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["follows.write", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of 
the List to unfollow", + placeholder="Enter list ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the unfollow was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="1f43310a-a62f-11ef-8276-2b06a1bbae1a", + description="This block unfollows a specified Twitter list for the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnfollowListBlock.Input, + output_schema=TwitterUnfollowListBlock.Output, + test_input={"list_id": "123456789", "credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"unfollow_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unfollow_list(credentials: TwitterCredentials, list_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unfollow_list(list_id=list_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unfollow_list(credentials, input_data.list_id) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterFollowListBlock(Block): + """ + Follows a Twitter list for the authenticated user + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "list.write", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to follow", + placeholder="Enter list ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the follow was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="03d8acf6-a62f-11ef-b17f-b72b04a09e79", + description="This block follows a specified Twitter list for the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterFollowListBlock.Input, + output_schema=TwitterFollowListBlock.Output, + test_input={"list_id": "123456789", "credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"follow_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def follow_list(credentials: TwitterCredentials, list_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.follow_list(list_id=list_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.follow_list(credentials, input_data.list_id) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +# Enterprise Level [Need to do Manual testing], There is a high possibility that we might get error in this +# Needs Type Input in this + +# class TwitterListGetFollowersBlock(Block): +# """ +# Gets followers of a specified Twitter list +# """ + +# class Input(UserExpansionInputs): +# credentials: TwitterCredentialsInput = TwitterCredentialsField( +# ["tweet.read","users.read", "list.read", "offline.access"] +# ) + +# list_id: str = SchemaField( +# description="The ID of the List to get followers for", +# 
placeholder="Enter list ID", +# required=True +# ) + +# max_results: int = SchemaField( +# description="Max number of results per page (1-100)", +# placeholder="Enter max results", +# default=10, +# advanced=True, +# ) + +# pagination_token: str = SchemaField( +# description="Token for pagination", +# placeholder="Enter pagination token", +# default="", +# advanced=True, +# ) + +# class Output(BlockSchema): +# user_ids: list[str] = SchemaField(description="List of user IDs of followers") +# usernames: list[str] = SchemaField(description="List of usernames of followers") +# next_token: str = SchemaField(description="Token for next page of results") +# data: list[dict] = SchemaField(description="Complete follower data") +# included: dict = SchemaField(description="Additional data requested via expansions") +# meta: dict = SchemaField(description="Metadata about the response") +# error: str = SchemaField(description="Error message if the request failed") + +# def __init__(self): +# super().__init__( +# id="16b289b4-a62f-11ef-95d4-bb29b849eb99", +# description="This block retrieves followers of a specified Twitter list.", +# categories={BlockCategory.SOCIAL}, +# input_schema=TwitterListGetFollowersBlock.Input, +# output_schema=TwitterListGetFollowersBlock.Output, +# test_input={ +# "list_id": "123456789", +# "max_results": 10, +# "pagination_token": None, +# "credentials": TEST_CREDENTIALS_INPUT, +# "expansions": [], +# "tweet_fields": [], +# "user_fields": [] +# }, +# test_credentials=TEST_CREDENTIALS, +# test_output=[ +# ("user_ids", ["2244994945"]), +# ("usernames", ["testuser"]), +# ("next_token", None), +# ("data", {"followers": [{"id": "2244994945", "username": "testuser"}]}), +# ("included", {}), +# ("meta", {}), +# ("error", "") +# ], +# test_mock={ +# "get_list_followers": lambda *args, **kwargs: ({ +# "followers": [{"id": "2244994945", "username": "testuser"}] +# }, {}, {}, ["2244994945"], ["testuser"], None) +# } +# ) + +# @staticmethod +# def get_list_followers( +# credentials: TwitterCredentials, +# list_id: str, +# max_results: int, +# pagination_token: str, +# expansions: list[UserExpansions], +# tweet_fields: list[TweetFields], +# user_fields: list[TweetUserFields] +# ): +# try: +# client = tweepy.Client( +# bearer_token=credentials.access_token.get_secret_value(), +# ) + +# params = { +# "id": list_id, +# "max_results": max_results, +# "pagination_token": None if pagination_token == "" else pagination_token, +# "user_auth": False +# } + +# params = (UserExpansionsBuilder(params) +# .add_expansions(expansions) +# .add_tweet_fields(tweet_fields) +# .add_user_fields(user_fields) +# .build()) + +# response = cast( +# Response, +# client.get_list_followers(**params) +# ) + +# meta = {} +# user_ids = [] +# usernames = [] +# next_token = None + +# if response.meta: +# meta = response.meta +# next_token = meta.get("next_token") + +# included = IncludesSerializer.serialize(response.includes) +# data = ResponseDataSerializer.serialize_list(response.data) + +# if response.data: +# user_ids = [str(item.id) for item in response.data] +# usernames = [item.username for item in response.data] + +# return data, included, meta, user_ids, usernames, next_token + +# raise Exception("No followers found") + +# except tweepy.TweepyException: +# raise + +# def run( +# self, +# input_data: Input, +# *, +# credentials: TwitterCredentials, +# **kwargs, +# ) -> BlockOutput: +# try: +# followers_data, included, meta, user_ids, usernames, next_token = self.get_list_followers( +# credentials, +# 
input_data.list_id, +# input_data.max_results, +# input_data.pagination_token, +# input_data.expansions, +# input_data.tweet_fields, +# input_data.user_fields +# ) + +# if user_ids: +# yield "user_ids", user_ids +# if usernames: +# yield "usernames", usernames +# if next_token: +# yield "next_token", next_token +# if followers_data: +# yield "data", followers_data +# if included: +# yield "included", included +# if meta: +# yield "meta", meta + +# except Exception as e: +# yield "error", handle_tweepy_exception(e) + +# class TwitterGetFollowedListsBlock(Block): +# """ +# Gets lists followed by a specified Twitter user +# """ + +# class Input(UserExpansionInputs): +# credentials: TwitterCredentialsInput = TwitterCredentialsField( +# ["follows.read", "users.read", "list.read", "offline.access"] +# ) + +# user_id: str = SchemaField( +# description="The user ID whose followed Lists to retrieve", +# placeholder="Enter user ID", +# required=True +# ) + +# max_results: int = SchemaField( +# description="Max number of results per page (1-100)", +# placeholder="Enter max results", +# default=10, +# advanced=True, +# ) + +# pagination_token: str = SchemaField( +# description="Token for pagination", +# placeholder="Enter pagination token", +# default="", +# advanced=True, +# ) + +# class Output(BlockSchema): +# list_ids: list[str] = SchemaField(description="List of list IDs") +# list_names: list[str] = SchemaField(description="List of list names") +# data: list[dict] = SchemaField(description="Complete list data") +# includes: dict = SchemaField(description="Additional data requested via expansions") +# meta: dict = SchemaField(description="Metadata about the response") +# next_token: str = SchemaField(description="Token for next page of results") +# error: str = SchemaField(description="Error message if the request failed") + +# def __init__(self): +# super().__init__( +# id="0e18bbfc-a62f-11ef-94fa-1f1e174b809e", +# description="This block retrieves all Lists a specified user follows.", +# categories={BlockCategory.SOCIAL}, +# input_schema=TwitterGetFollowedListsBlock.Input, +# output_schema=TwitterGetFollowedListsBlock.Output, +# test_input={ +# "user_id": "123456789", +# "max_results": 10, +# "pagination_token": None, +# "credentials": TEST_CREDENTIALS_INPUT, +# "expansions": [], +# "tweet_fields": [], +# "user_fields": [] +# }, +# test_credentials=TEST_CREDENTIALS, +# test_output=[ +# ("list_ids", ["12345"]), +# ("list_names", ["Test List"]), +# ("data", {"followed_lists": [{"id": "12345", "name": "Test List"}]}), +# ("includes", {}), +# ("meta", {}), +# ("next_token", None), +# ("error", "") +# ], +# test_mock={ +# "get_followed_lists": lambda *args, **kwargs: ({ +# "followed_lists": [{"id": "12345", "name": "Test List"}] +# }, {}, {}, ["12345"], ["Test List"], None) +# } +# ) + +# @staticmethod +# def get_followed_lists( +# credentials: TwitterCredentials, +# user_id: str, +# max_results: int, +# pagination_token: str, +# expansions: list[UserExpansions], +# tweet_fields: list[TweetFields], +# user_fields: list[TweetUserFields] +# ): +# try: +# client = tweepy.Client( +# bearer_token=credentials.access_token.get_secret_value(), +# ) + +# params = { +# "id": user_id, +# "max_results": max_results, +# "pagination_token": None if pagination_token == "" else pagination_token, +# "user_auth": False +# } + +# params = (UserExpansionsBuilder(params) +# .add_expansions(expansions) +# .add_tweet_fields(tweet_fields) +# .add_user_fields(user_fields) +# .build()) + +# response = cast( +# Response, +# 
client.get_followed_lists(**params) +# ) + +# meta = {} +# list_ids = [] +# list_names = [] +# next_token = None + +# if response.meta: +# meta = response.meta +# next_token = meta.get("next_token") + +# included = IncludesSerializer.serialize(response.includes) +# data = ResponseDataSerializer.serialize_list(response.data) + +# if response.data: +# list_ids = [str(item.id) for item in response.data] +# list_names = [item.name for item in response.data] + +# return data, included, meta, list_ids, list_names, next_token + +# raise Exception("No followed lists found") + +# except tweepy.TweepyException: +# raise + +# def run( +# self, +# input_data: Input, +# *, +# credentials: TwitterCredentials, +# **kwargs, +# ) -> BlockOutput: +# try: +# lists_data, included, meta, list_ids, list_names, next_token = self.get_followed_lists( +# credentials, +# input_data.user_id, +# input_data.max_results, +# input_data.pagination_token, +# input_data.expansions, +# input_data.tweet_fields, +# input_data.user_fields +# ) + +# if list_ids: +# yield "list_ids", list_ids +# if list_names: +# yield "list_names", list_names +# if next_token: +# yield "next_token", next_token +# if lists_data: +# yield "data", lists_data +# if included: +# yield "includes", included +# if meta: +# yield "meta", meta + +# except Exception as e: +# yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py new file mode 100644 index 000000000000..5d5d3da6a40b --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_lookup.py @@ -0,0 +1,348 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ListExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ListExpansionInputs, + ListExpansionsFilter, + ListFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetListBlock(Block): + """ + Gets information about a Twitter List specified by ID + """ + + class Input(ListExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to lookup", + placeholder="Enter list ID", + required=True, + ) + + class Output(BlockSchema): + # Common outputs + id: str = SchemaField(description="ID of the Twitter List") + name: str = SchemaField(description="Name of the Twitter List") + owner_id: str = SchemaField(description="ID of the List owner") + owner_username: str = SchemaField(description="Username of the List owner") + + # Complete outputs + data: dict = SchemaField(description="Complete list data") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata about the response") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + 
id="34ebc80a-a62f-11ef-9c2a-3fcab6c07079", + description="This block retrieves information about a specified Twitter List.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetListBlock.Input, + output_schema=TwitterGetListBlock.Output, + test_input={ + "list_id": "84839422", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "list_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", "84839422"), + ("name", "Official Twitter Accounts"), + ("owner_id", "2244994945"), + ("owner_username", "TwitterAPI"), + ("data", {"id": "84839422", "name": "Official Twitter Accounts"}), + ], + test_mock={ + "get_list": lambda *args, **kwargs: ( + {"id": "84839422", "name": "Official Twitter Accounts"}, + {}, + {}, + "2244994945", + "TwitterAPI", + ) + }, + ) + + @staticmethod + def get_list( + credentials: TwitterCredentials, + list_id: str, + expansions: ListExpansionsFilter | None, + user_fields: TweetUserFieldsFilter | None, + list_fields: ListFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = {"id": list_id, "user_auth": False} + + params = ( + ListExpansionsBuilder(params) + .add_expansions(expansions) + .add_user_fields(user_fields) + .add_list_fields(list_fields) + .build() + ) + + response = cast(Response, client.get_list(**params)) + + meta = {} + owner_id = "" + owner_username = "" + included = {} + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if "users" in included: + owner_id = str(included["users"][0]["id"]) + owner_username = included["users"][0]["username"] + + if response.meta: + meta = response.meta + + if response.data: + data_dict = ResponseDataSerializer.serialize_dict(response.data) + return data_dict, included, meta, owner_id, owner_username + + raise Exception("List not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + list_data, included, meta, owner_id, owner_username = self.get_list( + credentials, + input_data.list_id, + input_data.expansions, + input_data.user_fields, + input_data.list_fields, + ) + + yield "id", str(list_data["id"]) + yield "name", list_data["name"] + if owner_id: + yield "owner_id", owner_id + if owner_username: + yield "owner_username", owner_username + yield "data", {"id": list_data["id"], "name": list_data["name"]} + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetOwnedListsBlock(Block): + """ + Gets all Lists owned by the specified user + """ + + class Input(ListExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "list.read", "offline.access"] + ) + + user_id: str = SchemaField( + description="The user ID whose owned Lists to retrieve", + placeholder="Enter user ID", + required=True, + ) + + max_results: int | None = SchemaField( + description="Maximum number of results per page (1-100)", + placeholder="Enter max results (default 100)", + advanced=True, + default=10, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", + placeholder="Enter pagination token", + advanced=True, + default="", + ) + + class Output(BlockSchema): + # Common outputs + list_ids: list[str] = SchemaField(description="List ids of the owned lists") + 
list_names: list[str] = SchemaField(description="List names of the owned lists") + next_token: str = SchemaField(description="Token for next page of results") + + # Complete outputs + data: list[dict] = SchemaField(description="Complete owned lists data") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata about the response") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="2b6bdb26-a62f-11ef-a9ce-ff89c2568726", + description="This block retrieves all Lists owned by a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetOwnedListsBlock.Input, + output_schema=TwitterGetOwnedListsBlock.Output, + test_input={ + "user_id": "2244994945", + "max_results": 10, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "list_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("list_ids", ["84839422"]), + ("list_names", ["Official Twitter Accounts"]), + ("data", [{"id": "84839422", "name": "Official Twitter Accounts"}]), + ], + test_mock={ + "get_owned_lists": lambda *args, **kwargs: ( + [{"id": "84839422", "name": "Official Twitter Accounts"}], + {}, + {}, + ["84839422"], + ["Official Twitter Accounts"], + None, + ) + }, + ) + + @staticmethod + def get_owned_lists( + credentials: TwitterCredentials, + user_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: ListExpansionsFilter | None, + user_fields: TweetUserFieldsFilter | None, + list_fields: ListFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": user_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + ListExpansionsBuilder(params) + .add_expansions(expansions) + .add_user_fields(user_fields) + .add_list_fields(list_fields) + .build() + ) + + response = cast(Response, client.get_owned_lists(**params)) + + meta = {} + included = {} + list_ids = [] + list_names = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + list_ids = [ + str(item.id) for item in response.data if hasattr(item, "id") + ] + list_names = [ + item.name for item in response.data if hasattr(item, "name") + ] + + return data, included, meta, list_ids, list_names, next_token + + raise Exception("User have no owned list") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + list_data, included, meta, list_ids, list_names, next_token = ( + self.get_owned_lists( + credentials, + input_data.user_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.user_fields, + input_data.list_fields, + ) + ) + + if list_ids: + yield "list_ids", list_ids + if list_names: + yield "list_names", list_names + if next_token: + yield "next_token", next_token + if list_data: + yield "data", list_data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) 
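+
+# Illustrative usage sketch (kept as a comment, not executed): how the pagination
+# outputs of TwitterGetOwnedListsBlock.get_owned_lists could be chained across
+# pages. The `credentials` argument is assumed to be a valid TwitterCredentials
+# instance supplied by the caller; expansion/field filters are left as None for
+# brevity. Note that the helper raises if a page comes back empty.
+#
+# def iterate_owned_lists(credentials, user_id: str):
+#     pagination_token = ""
+#     while True:
+#         data, included, meta, list_ids, list_names, next_token = (
+#             TwitterGetOwnedListsBlock.get_owned_lists(
+#                 credentials=credentials,
+#                 user_id=user_id,
+#                 max_results=100,
+#                 pagination_token=pagination_token,
+#                 expansions=None,
+#                 user_fields=None,
+#                 list_fields=None,
+#             )
+#         )
+#         # Emit (id, name) pairs for this page, then follow the pagination token.
+#         yield from zip(list_ids, list_names)
+#         if not next_token:
+#             break
+#         pagination_token = next_token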
diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py new file mode 100644 index 000000000000..8e0bfd740521 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_members.py @@ -0,0 +1,527 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ( + ListExpansionsBuilder, + UserExpansionsBuilder, +) +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ListExpansionInputs, + ListExpansionsFilter, + ListFieldsFilter, + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterRemoveListMemberBlock(Block): + """ + Removes a member from a Twitter List that the authenticated user owns + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "users.read", "tweet.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to remove the member from", + placeholder="Enter list ID", + required=True, + ) + + user_id: str = SchemaField( + description="The ID of the user to remove from the List", + placeholder="Enter user ID to remove", + required=True, + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the member was successfully removed" + ) + error: str = SchemaField(description="Error message if the removal failed") + + def __init__(self): + super().__init__( + id="5a3d1320-a62f-11ef-b7ce-a79e7656bcb0", + description="This block removes a specified user from a Twitter List owned by the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterRemoveListMemberBlock.Input, + output_schema=TwitterRemoveListMemberBlock.Output, + test_input={ + "list_id": "123456789", + "user_id": "987654321", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"remove_list_member": lambda *args, **kwargs: True}, + ) + + @staticmethod + def remove_list_member(credentials: TwitterCredentials, list_id: str, user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + client.remove_list_member(id=list_id, user_id=user_id, user_auth=False) + return True + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.remove_list_member( + credentials, input_data.list_id, input_data.user_id + ) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterAddListMemberBlock(Block): + """ + Adds a member to a Twitter List that the authenticated user owns + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "users.read", "tweet.read", "offline.access"] + ) + + list_id: str = 
SchemaField( + description="The ID of the List to add the member to", + placeholder="Enter list ID", + required=True, + ) + + user_id: str = SchemaField( + description="The ID of the user to add to the List", + placeholder="Enter user ID to add", + required=True, + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the member was successfully added" + ) + error: str = SchemaField(description="Error message if the addition failed") + + def __init__(self): + super().__init__( + id="3ee8284e-a62f-11ef-84e4-8f6e2cbf0ddb", + description="This block adds a specified user to a Twitter List owned by the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterAddListMemberBlock.Input, + output_schema=TwitterAddListMemberBlock.Output, + test_input={ + "list_id": "123456789", + "user_id": "987654321", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"add_list_member": lambda *args, **kwargs: True}, + ) + + @staticmethod + def add_list_member(credentials: TwitterCredentials, list_id: str, user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + client.add_list_member(id=list_id, user_id=user_id, user_auth=False) + return True + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.add_list_member( + credentials, input_data.list_id, input_data.user_id + ) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetListMembersBlock(Block): + """ + Gets the members of a specified Twitter List + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to get members from", + placeholder="Enter list ID", + required=True, + ) + + max_results: int | None = SchemaField( + description="Maximum number of results per page (1-100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + ids: list[str] = SchemaField(description="List of member user IDs") + usernames: list[str] = SchemaField(description="List of member usernames") + next_token: str = SchemaField(description="Next token for pagination") + + data: list[dict] = SchemaField( + description="Complete user data for list members" + ) + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="4dba046e-a62f-11ef-b69a-87240c84b4c7", + description="This block retrieves the members of a specified Twitter List.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetListMembersBlock.Input, + output_schema=TwitterGetListMembersBlock.Output, + test_input={ + "list_id": "123456789", + "max_results": 2, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + 
test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["12345", "67890"]), + ("usernames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "12345", "username": "testuser1"}, + {"id": "67890", "username": "testuser2"}, + ], + ), + ], + test_mock={ + "get_list_members": lambda *args, **kwargs: ( + ["12345", "67890"], + ["testuser1", "testuser2"], + [ + {"id": "12345", "username": "testuser1"}, + {"id": "67890", "username": "testuser2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_list_members( + credentials: TwitterCredentials, + list_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": list_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_list_members(**params)) + + meta = {} + included = {} + next_token = None + user_ids = [] + usernames = [] + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + user_ids = [str(user.id) for user in response.data] + usernames = [user.username for user in response.data] + return user_ids, usernames, data, included, meta, next_token + + raise Exception("List members not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, usernames, data, included, meta, next_token = self.get_list_members( + credentials, + input_data.list_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + + if ids: + yield "ids", ids + if usernames: + yield "usernames", usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetListMembershipsBlock(Block): + """ + Gets all Lists that a specified user is a member of + """ + + class Input(ListExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.read", "offline.access"] + ) + + user_id: str = SchemaField( + description="The ID of the user whose List memberships to retrieve", + placeholder="Enter user ID", + required=True, + ) + + max_results: int | None = SchemaField( + description="Maximum number of results per page (1-100)", + placeholder="Enter max results", + advanced=True, + default=10, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination of results", + placeholder="Enter pagination token", + advanced=True, + default="", + ) + + class Output(BlockSchema): + list_ids: list[str] = SchemaField(description="List of list IDs") + next_token: str = SchemaField(description="Next token for pagination") + + data: list[dict] = SchemaField(description="List membership data") + 
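+        # `data` holds the serialized List objects; `included` and `meta` below
+        # carry the expansion payloads and pagination info (next_token), respectively.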
included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata about pagination") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="46e6429c-a62f-11ef-81c0-2b55bc7823ba", + description="This block retrieves all Lists that a specified user is a member of.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetListMembershipsBlock.Input, + output_schema=TwitterGetListMembershipsBlock.Output, + test_input={ + "user_id": "123456789", + "max_results": 1, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "list_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("list_ids", ["84839422"]), + ("data", [{"id": "84839422"}]), + ], + test_mock={ + "get_list_memberships": lambda *args, **kwargs: ( + [{"id": "84839422"}], + {}, + {}, + ["84839422"], + None, + ) + }, + ) + + @staticmethod + def get_list_memberships( + credentials: TwitterCredentials, + user_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: ListExpansionsFilter | None, + user_fields: TweetUserFieldsFilter | None, + list_fields: ListFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": user_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + ListExpansionsBuilder(params) + .add_expansions(expansions) + .add_user_fields(user_fields) + .add_list_fields(list_fields) + .build() + ) + + response = cast(Response, client.get_list_memberships(**params)) + + meta = {} + included = {} + next_token = None + list_ids = [] + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + list_ids = [str(lst.id) for lst in response.data] + return data, included, meta, list_ids, next_token + + raise Exception("List memberships not found") + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, meta, list_ids, next_token = self.get_list_memberships( + credentials, + input_data.user_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.user_fields, + input_data.list_fields, + ) + + if list_ids: + yield "list_ids", list_ids + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py new file mode 100644 index 000000000000..99de955cb3aa --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/list_tweets_lookup.py @@ -0,0 +1,217 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + 
TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import TweetExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetListTweetsBlock(Block): + """ + Gets tweets from a specified Twitter list + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List whose Tweets you would like to retrieve", + placeholder="Enter list ID", + required=True, + ) + + max_results: int | None = SchemaField( + description="Maximum number of results per page (1-100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for paginating through results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + # Common outputs + tweet_ids: list[str] = SchemaField(description="List of tweet IDs") + texts: list[str] = SchemaField(description="List of tweet texts") + next_token: str = SchemaField(description="Token for next page of results") + + # Complete outputs + data: list[dict] = SchemaField(description="Complete list tweets data") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField( + description="Response metadata including pagination tokens" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="6657edb0-a62f-11ef-8c10-0326d832467d", + description="This block retrieves tweets from a specified Twitter list.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetListTweetsBlock.Input, + output_schema=TwitterGetListTweetsBlock.Output, + test_input={ + "list_id": "84839422", + "max_results": 1, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("tweet_ids", ["1234567890"]), + ("texts", ["Test tweet"]), + ("data", [{"id": "1234567890", "text": "Test tweet"}]), + ], + test_mock={ + "get_list_tweets": lambda *args, **kwargs: ( + [{"id": "1234567890", "text": "Test tweet"}], + {}, + {}, + ["1234567890"], + ["Test tweet"], + None, + ) + }, + ) + + @staticmethod + def get_list_tweets( + credentials: TwitterCredentials, + list_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": list_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token 
== "" else pagination_token + ), + "user_auth": False, + } + + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_list_tweets(**params)) + + meta = {} + included = {} + tweet_ids = [] + texts = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + tweet_ids = [str(item.id) for item in response.data] + texts = [item.text for item in response.data] + + return data, included, meta, tweet_ids, texts, next_token + + raise Exception("No tweets found in this list") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + list_data, included, meta, tweet_ids, texts, next_token = ( + self.get_list_tweets( + credentials, + input_data.list_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + + if tweet_ids: + yield "tweet_ids", tweet_ids + if texts: + yield "texts", texts + if next_token: + yield "next_token", next_token + if list_data: + yield "data", list_data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py new file mode 100644 index 000000000000..490a841e4aa8 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/manage_lists.py @@ -0,0 +1,278 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterDeleteListBlock(Block): + """ + Deletes a Twitter List owned by the authenticated user + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to be deleted", + placeholder="Enter list ID", + required=True, + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the deletion was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="843c6892-a62f-11ef-a5c8-b71239a78d3b", + description="This block deletes a specified Twitter List owned by the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterDeleteListBlock.Input, + output_schema=TwitterDeleteListBlock.Output, + test_input={"list_id": "1234567890", "credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + 
test_output=[("success", True)], + test_mock={"delete_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def delete_list(credentials: TwitterCredentials, list_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.delete_list(id=list_id, user_auth=False) + return True + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.delete_list(credentials, input_data.list_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterUpdateListBlock(Block): + """ + Updates a Twitter List owned by the authenticated user + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to be updated", + placeholder="Enter list ID", + advanced=False, + ) + + name: str | None = SchemaField( + description="New name for the List", + placeholder="Enter list name", + default="", + advanced=False, + ) + + description: str | None = SchemaField( + description="New description for the List", + placeholder="Enter list description", + default="", + advanced=False, + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the update was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="7d12630a-a62f-11ef-90c9-8f5a996612c3", + description="This block updates a specified Twitter List owned by the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUpdateListBlock.Input, + output_schema=TwitterUpdateListBlock.Output, + test_input={ + "list_id": "1234567890", + "name": "Updated List Name", + "description": "Updated List Description", + "private": True, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"update_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def update_list( + credentials: TwitterCredentials, + list_id: str, + name: str | None, + description: str | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.update_list( + id=list_id, + name=None if name == "" else name, + description=None if description == "" else description, + user_auth=False, + ) + return True + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.update_list( + credentials, input_data.list_id, input_data.name, input_data.description + ) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterCreateListBlock(Block): + """ + Creates a Twitter List owned by the authenticated user + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "offline.access"] + ) + + name: str = SchemaField( + description="The name of the List to be created", + placeholder="Enter list name", + advanced=False, + default="", + ) + + description: str | None = SchemaField( + description="Description of the List", + placeholder="Enter list description", + advanced=False, + 
default="", + ) + + private: bool = SchemaField( + description="Whether the List should be private", + advanced=False, + default=False, + ) + + class Output(BlockSchema): + url: str = SchemaField(description="URL of the created list") + list_id: str = SchemaField(description="ID of the created list") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="724148ba-a62f-11ef-89ba-5349b813ef5f", + description="This block creates a new Twitter List for the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterCreateListBlock.Input, + output_schema=TwitterCreateListBlock.Output, + test_input={ + "name": "New List Name", + "description": "New List Description", + "private": True, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("list_id", "1234567890"), + ("url", "https://twitter.com/i/lists/1234567890"), + ], + test_mock={"create_list": lambda *args, **kwargs: ("1234567890")}, + ) + + @staticmethod + def create_list( + credentials: TwitterCredentials, + name: str, + description: str | None, + private: bool, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + response = cast( + Response, + client.create_list( + name=None if name == "" else name, + description=None if description == "" else description, + private=private, + user_auth=False, + ), + ) + + list_id = str(response.data["id"]) + + return list_id + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + list_id = self.create_list( + credentials, input_data.name, input_data.description, input_data.private + ) + yield "list_id", list_id + yield "url", f"https://twitter.com/i/lists/{list_id}" + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py b/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py new file mode 100644 index 000000000000..f9b54cfdaca2 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/lists/pinned_lists.py @@ -0,0 +1,285 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ListExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ListExpansionInputs, + ListExpansionsFilter, + ListFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterUnpinListBlock(Block): + """ + Enables the authenticated user to unpin a List. 
+ """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "users.read", "tweet.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to unpin", + placeholder="Enter list ID", + required=True, + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the unpin was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="a099c034-a62f-11ef-9622-47d0ceb73555", + description="This block allows the authenticated user to unpin a specified List.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnpinListBlock.Input, + output_schema=TwitterUnpinListBlock.Output, + test_input={"list_id": "123456789", "credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"unpin_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unpin_list(credentials: TwitterCredentials, list_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unpin_list(list_id=list_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unpin_list(credentials, input_data.list_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterPinListBlock(Block): + """ + Enables the authenticated user to pin a List. + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["list.write", "users.read", "tweet.read", "offline.access"] + ) + + list_id: str = SchemaField( + description="The ID of the List to pin", + placeholder="Enter list ID", + required=True, + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the pin was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="8ec16e48-a62f-11ef-9f35-f3d6de43a802", + description="This block allows the authenticated user to pin a specified List.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterPinListBlock.Input, + output_schema=TwitterPinListBlock.Output, + test_input={"list_id": "123456789", "credentials": TEST_CREDENTIALS_INPUT}, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"pin_list": lambda *args, **kwargs: True}, + ) + + @staticmethod + def pin_list(credentials: TwitterCredentials, list_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.pin_list(list_id=list_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.pin_list(credentials, input_data.list_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetPinnedListsBlock(Block): + """ + Returns the Lists pinned by the authenticated user. 
+ """ + + class Input(ListExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["lists.read", "users.read", "offline.access"] + ) + + class Output(BlockSchema): + list_ids: list[str] = SchemaField(description="List IDs of the pinned lists") + list_names: list[str] = SchemaField( + description="List names of the pinned lists" + ) + + data: list[dict] = SchemaField( + description="Response data containing pinned lists" + ) + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata about the response") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="97e03aae-a62f-11ef-bc53-5b89cb02888f", + description="This block returns the Lists pinned by the authenticated user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetPinnedListsBlock.Input, + output_schema=TwitterGetPinnedListsBlock.Output, + test_input={ + "expansions": None, + "list_fields": None, + "user_fields": None, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("list_ids", ["84839422"]), + ("list_names", ["Twitter List"]), + ("data", [{"id": "84839422", "name": "Twitter List"}]), + ], + test_mock={ + "get_pinned_lists": lambda *args, **kwargs: ( + [{"id": "84839422", "name": "Twitter List"}], + {}, + {}, + ["84839422"], + ["Twitter List"], + ) + }, + ) + + @staticmethod + def get_pinned_lists( + credentials: TwitterCredentials, + expansions: ListExpansionsFilter | None, + user_fields: TweetUserFieldsFilter | None, + list_fields: ListFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = {"user_auth": False} + + params = ( + ListExpansionsBuilder(params) + .add_expansions(expansions) + .add_user_fields(user_fields) + .add_list_fields(list_fields) + .build() + ) + + response = cast(Response, client.get_pinned_lists(**params)) + + meta = {} + included = {} + list_ids = [] + list_names = [] + + if response.meta: + meta = response.meta + + if response.includes: + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + list_ids = [str(item.id) for item in response.data] + list_names = [item.name for item in response.data] + return data, included, meta, list_ids, list_names + + raise Exception("Lists not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + list_data, included, meta, list_ids, list_names = self.get_pinned_lists( + credentials, + input_data.expansions, + input_data.user_fields, + input_data.list_fields, + ) + + if list_ids: + yield "list_ids", list_ids + if list_names: + yield "list_names", list_names + if list_data: + yield "data", list_data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py b/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py new file mode 100644 index 000000000000..ad3399c3b119 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/spaces/search_spaces.py @@ -0,0 +1,195 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from 
backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import SpaceExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + SpaceExpansionInputs, + SpaceExpansionsFilter, + SpaceFieldsFilter, + SpaceStatesFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterSearchSpacesBlock(Block): + """ + Returns live or scheduled Spaces matching specified search terms [for a week only] + """ + + class Input(SpaceExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["spaces.read", "users.read", "tweet.read", "offline.access"] + ) + + query: str = SchemaField( + description="Search term to find in Space titles", + placeholder="Enter search query", + ) + + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + state: SpaceStatesFilter = SchemaField( + description="Type of Spaces to return (live, scheduled, or all)", + placeholder="Enter state filter", + default=SpaceStatesFilter.all, + ) + + class Output(BlockSchema): + # Common outputs that user commonly uses + ids: list[str] = SchemaField(description="List of space IDs") + titles: list[str] = SchemaField(description="List of space titles") + host_ids: list = SchemaField(description="List of host IDs") + next_token: str = SchemaField(description="Next token for pagination") + + # Complete outputs for advanced use + data: list[dict] = SchemaField(description="Complete space data") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="aaefdd48-a62f-11ef-a73c-3f44df63e276", + description="This block searches for Twitter Spaces based on specified terms.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterSearchSpacesBlock.Input, + output_schema=TwitterSearchSpacesBlock.Output, + test_input={ + "query": "tech", + "max_results": 1, + "state": "live", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "space_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1234"]), + ("titles", ["Tech Talk"]), + ("host_ids", ["5678"]), + ("data", [{"id": "1234", "title": "Tech Talk", "host_ids": ["5678"]}]), + ], + test_mock={ + "search_spaces": lambda *args, **kwargs: ( + [{"id": "1234", "title": "Tech Talk", "host_ids": ["5678"]}], + {}, + {}, + ["1234"], + ["Tech Talk"], + ["5678"], + None, + ) + }, + ) + + @staticmethod + def search_spaces( + credentials: TwitterCredentials, + query: str, + max_results: int | None, + state: SpaceStatesFilter, + expansions: SpaceExpansionsFilter | None, + space_fields: SpaceFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = {"query": query, "max_results": max_results, "state": state.value} + + 
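# Add any requested expansions and space/user field filters before calling the API +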
params = ( + SpaceExpansionsBuilder(params) + .add_expansions(expansions) + .add_space_fields(space_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.search_spaces(**params)) + + meta = {} + next_token = "" + if response.meta: + meta = response.meta + if "next_token" in meta: + next_token = meta["next_token"] + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + ids = [str(space["id"]) for space in response.data if "id" in space] + titles = [space["title"] for space in data if "title" in space] + host_ids = [space["host_ids"] for space in data if "host_ids" in space] + + return data, included, meta, ids, titles, host_ids, next_token + + raise Exception("Spaces not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, meta, ids, titles, host_ids, next_token = ( + self.search_spaces( + credentials, + input_data.query, + input_data.max_results, + input_data.state, + input_data.expansions, + input_data.space_fields, + input_data.user_fields, + ) + ) + + if ids: + yield "ids", ids + if titles: + yield "titles", titles + if host_ids: + yield "host_ids", host_ids + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "includes", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py new file mode 100644 index 000000000000..d7365e80d8ad --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/spaces/spaces_lookup.py @@ -0,0 +1,651 @@ +from typing import Literal, Union, cast + +import tweepy +from pydantic import BaseModel +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ( + SpaceExpansionsBuilder, + TweetExpansionsBuilder, + UserExpansionsBuilder, +) +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + SpaceExpansionInputs, + SpaceExpansionsFilter, + SpaceFieldsFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class SpaceList(BaseModel): + discriminator: Literal["space_list"] + space_ids: list[str] = SchemaField( + description="List of Space IDs to lookup (up to 100)", + placeholder="Enter Space IDs", + default=[], + advanced=False, + ) + + +class UserList(BaseModel): + discriminator: Literal["user_list"] + user_ids: list[str] = SchemaField( + description="List of user IDs to lookup their Spaces (up to 100)", + placeholder="Enter user IDs", + default=[], + advanced=False, + ) + + +class TwitterGetSpacesBlock(Block): + """ + Gets information about multiple Twitter Spaces specified by Space IDs 
or creator user IDs + """ + + class Input(SpaceExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["spaces.read", "users.read", "offline.access"] + ) + + identifier: Union[SpaceList, UserList] = SchemaField( + discriminator="discriminator", + description="Choose whether to lookup spaces by their IDs or by creator user IDs", + advanced=False, + ) + + class Output(BlockSchema): + # Common outputs + ids: list[str] = SchemaField(description="List of space IDs") + titles: list[str] = SchemaField(description="List of space titles") + + # Complete outputs for advanced use + data: list[dict] = SchemaField(description="Complete space data") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="d75bd7d8-a62f-11ef-b0d8-c7a9496f617f", + description="This block retrieves information about multiple Twitter Spaces.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetSpacesBlock.Input, + output_schema=TwitterGetSpacesBlock.Output, + test_input={ + "identifier": { + "discriminator": "space_list", + "space_ids": ["1DXxyRYNejbKM"], + }, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "space_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1DXxyRYNejbKM"]), + ("titles", ["Test Space"]), + ( + "data", + [ + { + "id": "1DXxyRYNejbKM", + "title": "Test Space", + "host_id": "1234567", + } + ], + ), + ], + test_mock={ + "get_spaces": lambda *args, **kwargs: ( + [ + { + "id": "1DXxyRYNejbKM", + "title": "Test Space", + "host_id": "1234567", + } + ], + {}, + ["1DXxyRYNejbKM"], + ["Test Space"], + ) + }, + ) + + @staticmethod + def get_spaces( + credentials: TwitterCredentials, + identifier: Union[SpaceList, UserList], + expansions: SpaceExpansionsFilter | None, + space_fields: SpaceFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "ids": ( + identifier.space_ids if isinstance(identifier, SpaceList) else None + ), + "user_ids": ( + identifier.user_ids if isinstance(identifier, UserList) else None + ), + } + + params = ( + SpaceExpansionsBuilder(params) + .add_expansions(expansions) + .add_space_fields(space_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_spaces(**params)) + + ids = [] + titles = [] + + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + ids = [space["id"] for space in data if "id" in space] + titles = [space["title"] for space in data if "title" in space] + + return data, included, ids, titles + + raise Exception("No spaces found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, ids, titles = self.get_spaces( + credentials, + input_data.identifier, + input_data.expansions, + input_data.space_fields, + input_data.user_fields, + ) + + if ids: + yield "ids", ids + if titles: + yield "titles", titles + + if data: + yield "data", data + if included: + yield "includes", included + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetSpaceByIdBlock(Block): + """ + Gets information 
about a single Twitter Space specified by Space ID + """ + + class Input(SpaceExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["spaces.read", "users.read", "offline.access"] + ) + + space_id: str = SchemaField( + description="Space ID to lookup", + placeholder="Enter Space ID", + required=True, + ) + + class Output(BlockSchema): + # Common outputs + id: str = SchemaField(description="Space ID") + title: str = SchemaField(description="Space title") + host_ids: list[str] = SchemaField(description="Host ID") + + # Complete outputs for advanced use + data: dict = SchemaField(description="Complete space data") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="c79700de-a62f-11ef-ab20-fb32bf9d5a9d", + description="This block retrieves information about a single Twitter Space.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetSpaceByIdBlock.Input, + output_schema=TwitterGetSpaceByIdBlock.Output, + test_input={ + "space_id": "1DXxyRYNejbKM", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "space_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", "1DXxyRYNejbKM"), + ("title", "Test Space"), + ("host_ids", ["1234567"]), + ( + "data", + { + "id": "1DXxyRYNejbKM", + "title": "Test Space", + "host_ids": ["1234567"], + }, + ), + ], + test_mock={ + "get_space": lambda *args, **kwargs: ( + { + "id": "1DXxyRYNejbKM", + "title": "Test Space", + "host_ids": ["1234567"], + }, + {}, + ) + }, + ) + + @staticmethod + def get_space( + credentials: TwitterCredentials, + space_id: str, + expansions: SpaceExpansionsFilter | None, + space_fields: SpaceFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": space_id, + } + + params = ( + SpaceExpansionsBuilder(params) + .add_expansions(expansions) + .add_space_fields(space_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_space(**params)) + + includes = {} + if response.includes: + for key, value in response.includes.items(): + if isinstance(value, list): + includes[key] = [ + item.data if hasattr(item, "data") else item + for item in value + ] + else: + includes[key] = value.data if hasattr(value, "data") else value + + data = {} + if response.data: + for key, value in response.data.items(): + if isinstance(value, list): + data[key] = [ + item.data if hasattr(item, "data") else item + for item in value + ] + else: + data[key] = value.data if hasattr(value, "data") else value + + return data, includes + + raise Exception("Space not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + space_data, includes = self.get_space( + credentials, + input_data.space_id, + input_data.expansions, + input_data.space_fields, + input_data.user_fields, + ) + + # Common outputs + if space_data: + if "id" in space_data: + yield "id", space_data.get("id") + + if "title" in space_data: + yield "title", space_data.get("title") + + if "host_ids" in space_data: + yield "host_ids", space_data.get("host_ids") + + if space_data: + yield "data", space_data + if includes: + yield "includes", includes + + except 
Exception as e: + yield "error", handle_tweepy_exception(e) + + +# Not tested yet, might have some problem +class TwitterGetSpaceBuyersBlock(Block): + """ + Gets list of users who purchased a ticket to the requested Space + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["spaces.read", "users.read", "offline.access"] + ) + + space_id: str = SchemaField( + description="Space ID to lookup buyers for", + placeholder="Enter Space ID", + required=True, + ) + + class Output(BlockSchema): + # Common outputs + buyer_ids: list[str] = SchemaField(description="List of buyer IDs") + usernames: list[str] = SchemaField(description="List of buyer usernames") + + # Complete outputs for advanced use + data: list[dict] = SchemaField(description="Complete space buyers data") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="c1c121a8-a62f-11ef-8b0e-d7b85f96a46f", + description="This block retrieves a list of users who purchased tickets to a Twitter Space.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetSpaceBuyersBlock.Input, + output_schema=TwitterGetSpaceBuyersBlock.Output, + test_input={ + "space_id": "1DXxyRYNejbKM", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("buyer_ids", ["2244994945"]), + ("usernames", ["testuser"]), + ( + "data", + [{"id": "2244994945", "username": "testuser", "name": "Test User"}], + ), + ], + test_mock={ + "get_space_buyers": lambda *args, **kwargs: ( + [{"id": "2244994945", "username": "testuser", "name": "Test User"}], + {}, + ["2244994945"], + ["testuser"], + ) + }, + ) + + @staticmethod + def get_space_buyers( + credentials: TwitterCredentials, + space_id: str, + expansions: UserExpansionsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": space_id, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_space_buyers(**params)) + + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + buyer_ids = [buyer["id"] for buyer in data] + usernames = [buyer["username"] for buyer in data] + + return data, included, buyer_ids, usernames + + raise Exception("No buyers found for this Space") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + buyers_data, included, buyer_ids, usernames = self.get_space_buyers( + credentials, + input_data.space_id, + input_data.expansions, + input_data.user_fields, + ) + + if buyer_ids: + yield "buyer_ids", buyer_ids + if usernames: + yield "usernames", usernames + + if buyers_data: + yield "data", buyers_data + if included: + yield "includes", included + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetSpaceTweetsBlock(Block): + """ + Gets list of Tweets shared in the requested Space + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["spaces.read", "users.read", 
"offline.access"] + ) + + space_id: str = SchemaField( + description="Space ID to lookup tweets for", + placeholder="Enter Space ID", + required=True, + ) + + class Output(BlockSchema): + # Common outputs + tweet_ids: list[str] = SchemaField(description="List of tweet IDs") + texts: list[str] = SchemaField(description="List of tweet texts") + + # Complete outputs for advanced use + data: list[dict] = SchemaField(description="Complete space tweets data") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Response metadata") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="b69731e6-a62f-11ef-b2d4-1bf14dd6aee4", + description="This block retrieves tweets shared in a Twitter Space.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetSpaceTweetsBlock.Input, + output_schema=TwitterGetSpaceTweetsBlock.Output, + test_input={ + "space_id": "1DXxyRYNejbKM", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("tweet_ids", ["1234567890"]), + ("texts", ["Test tweet"]), + ("data", [{"id": "1234567890", "text": "Test tweet"}]), + ], + test_mock={ + "get_space_tweets": lambda *args, **kwargs: ( + [{"id": "1234567890", "text": "Test tweet"}], # data + {}, + ["1234567890"], + ["Test tweet"], + {}, + ) + }, + ) + + @staticmethod + def get_space_tweets( + credentials: TwitterCredentials, + space_id: str, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": space_id, + } + + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_space_tweets(**params)) + + included = IncludesSerializer.serialize(response.includes) + + if response.data: + data = ResponseDataSerializer.serialize_list(response.data) + tweet_ids = [str(tweet["id"]) for tweet in data] + texts = [tweet["text"] for tweet in data] + + meta = response.meta or {} + + return data, included, tweet_ids, texts, meta + + raise Exception("No tweets found for this Space") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + tweets_data, included, tweet_ids, texts, meta = self.get_space_tweets( + credentials, + input_data.space_id, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + + if tweet_ids: + yield "tweet_ids", tweet_ids + if texts: + yield "texts", texts + + if tweets_data: + yield "data", tweets_data + if included: + yield "includes", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git 
a/autogpt_platform/backend/backend/blocks/twitter/tweepy_exceptions.py b/autogpt_platform/backend/backend/blocks/twitter/tweepy_exceptions.py new file mode 100644 index 000000000000..c1900269982f --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweepy_exceptions.py @@ -0,0 +1,20 @@ +import tweepy + + +def handle_tweepy_exception(e: Exception) -> str: + if isinstance(e, tweepy.BadRequest): + return f"Bad Request (400): {str(e)}" + elif isinstance(e, tweepy.Unauthorized): + return f"Unauthorized (401): {str(e)}" + elif isinstance(e, tweepy.Forbidden): + return f"Forbidden (403): {str(e)}" + elif isinstance(e, tweepy.NotFound): + return f"Not Found (404): {str(e)}" + elif isinstance(e, tweepy.TooManyRequests): + return f"Too Many Requests (429): {str(e)}" + elif isinstance(e, tweepy.TwitterServerError): + return f"Twitter Server Error (5xx): {str(e)}" + elif isinstance(e, tweepy.TweepyException): + return f"Tweepy Error: {str(e)}" + else: + return f"Unexpected error: {str(e)}" diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py new file mode 100644 index 000000000000..9a3147903c10 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/bookmark.py @@ -0,0 +1,372 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import TweetExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterBookmarkTweetBlock(Block): + """ + Bookmark a tweet on Twitter + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "bookmark.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to bookmark", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the bookmark was successful") + error: str = SchemaField(description="Error message if the bookmark failed") + + def __init__(self): + super().__init__( + id="f33d67be-a62f-11ef-a797-ff83ec29ee8e", + description="This block bookmarks a tweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterBookmarkTweetBlock.Input, + output_schema=TwitterBookmarkTweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"bookmark_tweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def bookmark_tweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.bookmark(tweet_id) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, 
+ credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.bookmark_tweet(credentials, input_data.tweet_id) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetBookmarkedTweetsBlock(Block): + """ + Get All your bookmarked tweets from Twitter + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "bookmark.read", "users.read", "offline.access"] + ) + + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + id: list[str] = SchemaField(description="All Tweet IDs") + text: list[str] = SchemaField(description="All Tweet texts") + userId: list[str] = SchemaField(description="IDs of the tweet authors") + userName: list[str] = SchemaField(description="Usernames of the tweet authors") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + next_token: str = SchemaField(description="Next token for pagination") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="ed26783e-a62f-11ef-9a21-c77c57dd8a1f", + description="This block retrieves bookmarked tweets from Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetBookmarkedTweetsBlock.Input, + output_schema=TwitterGetBookmarkedTweetsBlock.Output, + test_input={ + "max_results": 2, + "pagination_token": None, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", ["1234567890"]), + ("text", ["Test tweet"]), + ("userId", ["12345"]), + ("userName", ["testuser"]), + ("data", [{"id": "1234567890", "text": "Test tweet"}]), + ], + test_mock={ + "get_bookmarked_tweets": lambda *args, **kwargs: ( + ["1234567890"], + ["Test tweet"], + ["12345"], + ["testuser"], + [{"id": "1234567890", "text": "Test tweet"}], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_bookmarked_tweets( + credentials: TwitterCredentials, + max_results: int | None, + pagination_token: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + } + + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + 
.add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast( + Response, + client.get_bookmarks(**params), + ) + + meta = {} + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + if "users" in included: + for user in included["users"]: + user_ids.append(str(user["id"])) + user_names.append(user["username"]) + + return ( + tweet_ids, + tweet_texts, + user_ids, + user_names, + data, + included, + meta, + next_token, + ) + + raise Exception("No bookmarked tweets found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta, next_token = ( + self.get_bookmarked_tweets( + credentials, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + if ids: + yield "id", ids + if texts: + yield "text", texts + if user_ids: + yield "userId", user_ids + if user_names: + yield "userName", user_names + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + if next_token: + yield "next_token", next_token + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterRemoveBookmarkTweetBlock(Block): + """ + Remove a bookmark for a tweet on Twitter + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "bookmark.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to remove bookmark from", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the bookmark was successfully removed" + ) + error: str = SchemaField( + description="Error message if the bookmark removal failed" + ) + + def __init__(self): + super().__init__( + id="e4100684-a62f-11ef-9be9-770cb41a2616", + description="This block removes a bookmark from a tweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterRemoveBookmarkTweetBlock.Input, + output_schema=TwitterRemoveBookmarkTweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"remove_bookmark_tweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def remove_bookmark_tweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.remove_bookmark(tweet_id) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.remove_bookmark_tweet(credentials, input_data.tweet_id) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git 
a/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py new file mode 100644 index 000000000000..ae9998e32d9a --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/hide.py @@ -0,0 +1,154 @@ +import tweepy + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterHideReplyBlock(Block): + """ + Hides a reply of one of your tweets + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.moderate.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet reply to hide", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the operation was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="07d58b3e-a630-11ef-a030-93701d1a465e", + description="This block hides a reply to a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterHideReplyBlock.Input, + output_schema=TwitterHideReplyBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"hide_reply": lambda *args, **kwargs: True}, + ) + + @staticmethod + def hide_reply( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.hide_reply(id=tweet_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.hide_reply( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterUnhideReplyBlock(Block): + """ + Unhides a reply to a tweet + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.moderate.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet reply to unhide", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the operation was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="fcf9e4e4-a62f-11ef-9d85-57d3d06b616a", + description="This block unhides a reply to a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnhideReplyBlock.Input, + output_schema=TwitterUnhideReplyBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"unhide_reply": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unhide_reply( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + 
bearer_token=credentials.access_token.get_secret_value() + ) + + client.unhide_reply(id=tweet_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unhide_reply( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py new file mode 100644 index 000000000000..c1816a6ce31c --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/like.py @@ -0,0 +1,576 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ( + TweetExpansionsBuilder, + UserExpansionsBuilder, +) +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterLikeTweetBlock(Block): + """ + Likes a tweet + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "like.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to like", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the operation was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="4d0b4c5c-a630-11ef-8e08-1b14c507b347", + description="This block likes a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterLikeTweetBlock.Input, + output_schema=TwitterLikeTweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"like_tweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def like_tweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.like(tweet_id=tweet_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.like_tweet( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetLikingUsersBlock(Block): + """ + Gets information about users who liked one of your tweets + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "like.read", 
"offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to get liking users for", + placeholder="Enter tweet ID", + ) + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + pagination_token: str | None = SchemaField( + description="Token for getting next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + id: list[str] = SchemaField(description="All User IDs who liked the tweet") + username: list[str] = SchemaField( + description="All User usernames who liked the tweet" + ) + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="34275000-a630-11ef-b01e-5f00d9077c08", + description="This block gets information about users who liked a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetLikingUsersBlock.Input, + output_schema=TwitterGetLikingUsersBlock.Output, + test_input={ + "tweet_id": "1234567890", + "max_results": 1, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", ["1234567890"]), + ("username", ["testuser"]), + ("data", [{"id": "1234567890", "username": "testuser"}]), + ], + test_mock={ + "get_liking_users": lambda *args, **kwargs: ( + ["1234567890"], + ["testuser"], + [{"id": "1234567890", "username": "testuser"}], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_liking_users( + credentials: TwitterCredentials, + tweet_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": tweet_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_liking_users(**params)) + + if not response.data and not response.meta: + raise Exception("No liking users found") + + meta = {} + user_ids = [] + usernames = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + user_ids = [str(user.id) for user in response.data] + usernames = [user.username for user in response.data] + + return user_ids, usernames, data, included, meta, next_token + + raise Exception("No liking users found") + + except tweepy.TweepyException: + 
raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, usernames, data, included, meta, next_token = self.get_liking_users( + credentials, + input_data.tweet_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "id", ids + if usernames: + yield "username", usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetLikedTweetsBlock(Block): + """ + Gets information about tweets liked by you + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "like.read", "offline.access"] + ) + + user_id: str = SchemaField( + description="ID of the user to get liked tweets for", + placeholder="Enter user ID", + ) + max_results: int | None = SchemaField( + description="Maximum number of results to return (5-100)", + placeholder="100", + default=10, + advanced=True, + ) + pagination_token: str | None = SchemaField( + description="Token for getting next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list[str] = SchemaField(description="All Tweet IDs") + texts: list[str] = SchemaField(description="All Tweet texts") + userIds: list[str] = SchemaField( + description="List of user ids that authored the tweets" + ) + userNames: list[str] = SchemaField( + description="List of user names that authored the tweets" + ) + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="292e7c78-a630-11ef-9f40-df5dffaca106", + description="This block gets information about tweets liked by a user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetLikedTweetsBlock.Input, + output_schema=TwitterGetLikedTweetsBlock.Output, + test_input={ + "user_id": "1234567890", + "max_results": 2, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["12345", "67890"]), + ("texts", ["Tweet 1", "Tweet 2"]), + ("userIds", ["67890", "67891"]), + ("userNames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "12345", "text": "Tweet 1"}, + {"id": "67890", "text": "Tweet 2"}, + ], + ), + ], + test_mock={ + "get_liked_tweets": lambda *args, **kwargs: ( + ["12345", "67890"], + ["Tweet 1", "Tweet 2"], + ["67890", "67891"], + ["testuser1", "testuser2"], + [ + {"id": "12345", "text": "Tweet 1"}, + {"id": "67890", "text": "Tweet 2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_liked_tweets( + 
credentials: TwitterCredentials, + user_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": user_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_liked_tweets(**params)) + + if not response.data and not response.meta: + raise Exception("No liked tweets found") + + meta = {} + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + if "users" in response.includes: + user_ids = [str(user["id"]) for user in response.includes["users"]] + user_names = [ + user["username"] for user in response.includes["users"] + ] + + return ( + tweet_ids, + tweet_texts, + user_ids, + user_names, + data, + included, + meta, + next_token, + ) + + raise Exception("No liked tweets found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta, next_token = ( + self.get_liked_tweets( + credentials, + input_data.user_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if user_ids: + yield "userIds", user_ids + if user_names: + yield "userNames", user_names + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterUnlikeTweetBlock(Block): + """ + Unlikes a tweet that was previously liked + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "like.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to unlike", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the operation was successful") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="1ed5eab8-a630-11ef-8e21-cbbbc80cbb85", + description="This block unlikes a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnlikeTweetBlock.Input, + 
output_schema=TwitterUnlikeTweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"unlike_tweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unlike_tweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unlike(tweet_id=tweet_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unlike_tweet( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py new file mode 100644 index 000000000000..7c86c0abcd57 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/manage.py @@ -0,0 +1,545 @@ +from datetime import datetime +from typing import List, Literal, Optional, Union, cast + +import tweepy +from pydantic import BaseModel +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ( + TweetDurationBuilder, + TweetExpansionsBuilder, + TweetPostBuilder, + TweetSearchBuilder, +) +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetReplySettingsFilter, + TweetTimeWindowInputs, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class Media(BaseModel): + discriminator: Literal["media"] + media_ids: Optional[List[str]] = None + media_tagged_user_ids: Optional[List[str]] = None + + +class DeepLink(BaseModel): + discriminator: Literal["deep_link"] + direct_message_deep_link: Optional[str] = None + + +class Poll(BaseModel): + discriminator: Literal["poll"] + poll_options: Optional[List[str]] = None + poll_duration_minutes: Optional[int] = None + + +class Place(BaseModel): + discriminator: Literal["place"] + place_id: Optional[str] = None + + +class Quote(BaseModel): + discriminator: Literal["quote"] + quote_tweet_id: Optional[str] = None + + +class TwitterPostTweetBlock(Block): + """ + Create a tweet on Twitter with the option to include one additional element such as a media, quote, or deep link. 
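+ Only one attachment type (media, deep link, poll, place, or quote) can be supplied per tweet; the attachment input's discriminator selects which one is applied.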
+ """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.write", "users.read", "offline.access"] + ) + + tweet_text: str | None = SchemaField( + description="Text of the tweet to post", + placeholder="Enter your tweet", + default=None, + advanced=False, + ) + + for_super_followers_only: bool = SchemaField( + description="Tweet exclusively for Super Followers", + placeholder="Enter for super followers only", + advanced=True, + default=False, + ) + + attachment: Union[Media, DeepLink, Poll, Place, Quote] | None = SchemaField( + discriminator="discriminator", + description="Additional tweet data (media, deep link, poll, place or quote)", + advanced=True, + ) + + exclude_reply_user_ids: Optional[List[str]] = SchemaField( + description="User IDs to exclude from reply Tweet thread. [ex - 6253282]", + placeholder="Enter user IDs to exclude", + advanced=True, + default=None, + ) + + in_reply_to_tweet_id: Optional[str] = SchemaField( + description="Tweet ID being replied to. Please note that in_reply_to_tweet_id needs to be in the request if exclude_reply_user_ids is present", + default=None, + placeholder="Enter in reply to tweet ID", + advanced=True, + ) + + reply_settings: TweetReplySettingsFilter = SchemaField( + description="Who can reply to the Tweet (mentionedUsers or following)", + placeholder="Enter reply settings", + advanced=True, + default=TweetReplySettingsFilter(All_Users=True), + ) + + class Output(BlockSchema): + tweet_id: str = SchemaField(description="ID of the created tweet") + tweet_url: str = SchemaField(description="URL to the tweet") + error: str = SchemaField( + description="Error message if the tweet posting failed" + ) + + def __init__(self): + super().__init__( + id="7bb0048a-a630-11ef-aeb8-abc0dadb9b12", + description="This block posts a tweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterPostTweetBlock.Input, + output_schema=TwitterPostTweetBlock.Output, + test_input={ + "tweet_text": "This is a test tweet.", + "credentials": TEST_CREDENTIALS_INPUT, + "attachment": { + "discriminator": "deep_link", + "direct_message_deep_link": "https://twitter.com/messages/compose", + }, + "for_super_followers_only": False, + "exclude_reply_user_ids": [], + "in_reply_to_tweet_id": "", + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("tweet_id", "1234567890"), + ("tweet_url", "https://twitter.com/user/status/1234567890"), + ], + test_mock={ + "post_tweet": lambda *args, **kwargs: ( + "1234567890", + "https://twitter.com/user/status/1234567890", + ) + }, + ) + + def post_tweet( + self, + credentials: TwitterCredentials, + input_txt: str | None, + attachment: Union[Media, DeepLink, Poll, Place, Quote] | None, + for_super_followers_only: bool, + exclude_reply_user_ids: Optional[List[str]], + in_reply_to_tweet_id: Optional[str], + reply_settings: TweetReplySettingsFilter, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = ( + TweetPostBuilder() + .add_text(input_txt) + .add_super_followers(for_super_followers_only) + .add_reply_settings( + exclude_reply_user_ids or [], + in_reply_to_tweet_id or "", + reply_settings, + ) + ) + + if isinstance(attachment, Media): + params.add_media( + attachment.media_ids or [], attachment.media_tagged_user_ids or [] + ) + elif isinstance(attachment, DeepLink): + params.add_deep_link(attachment.direct_message_deep_link or "") + elif isinstance(attachment, Poll): + 
params.add_poll_options(attachment.poll_options or []) + params.add_poll_duration(attachment.poll_duration_minutes or 0) + elif isinstance(attachment, Place): + params.add_place(attachment.place_id or "") + elif isinstance(attachment, Quote): + params.add_quote(attachment.quote_tweet_id or "") + + tweet = cast(Response, client.create_tweet(**params.build())) + + if not tweet.data: + raise Exception("Failed to create tweet") + + tweet_id = tweet.data["id"] + tweet_url = f"https://twitter.com/user/status/{tweet_id}" + return str(tweet_id), tweet_url + + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + tweet_id, tweet_url = self.post_tweet( + credentials, + input_data.tweet_text, + input_data.attachment, + input_data.for_super_followers_only, + input_data.exclude_reply_user_ids, + input_data.in_reply_to_tweet_id, + input_data.reply_settings, + ) + yield "tweet_id", tweet_id + yield "tweet_url", tweet_url + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterDeleteTweetBlock(Block): + """ + Deletes a tweet on Twitter using twitter Id + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to delete", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the tweet was successfully deleted" + ) + error: str = SchemaField( + description="Error message if the tweet deletion failed" + ) + + def __init__(self): + super().__init__( + id="761babf0-a630-11ef-a03d-abceb082f58f", + description="This block deletes a tweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterDeleteTweetBlock.Input, + output_schema=TwitterDeleteTweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"delete_tweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def delete_tweet(credentials: TwitterCredentials, tweet_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + client.delete_tweet(id=tweet_id, user_auth=False) + return True + except tweepy.TweepyException: + raise + except Exception: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.delete_tweet( + credentials, + input_data.tweet_id, + ) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterSearchRecentTweetsBlock(Block): + """ + Searches all public Tweets in Twitter history + """ + + class Input(TweetExpansionInputs, TweetTimeWindowInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + query: str = SchemaField( + description="Search query (up to 1024 characters)", + placeholder="Enter search query", + ) + + max_results: int = SchemaField( + description="Maximum number of results per page (10-500)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination: str | None = SchemaField( + description="Token for pagination", + default="", + placeholder="Enter pagination token", + 
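+ # Pass the `next_token` value returned in `meta` here to request the next page of results. + # Note: this block calls the v2 recent-search endpoint, which covers roughly the last seven days of Tweets.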
advanced=True, + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + tweet_ids: list[str] = SchemaField(description="All Tweet IDs") + tweet_texts: list[str] = SchemaField(description="All Tweet texts") + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="53e5cf8e-a630-11ef-ba85-df6d666fa5d5", + description="This block searches all public Tweets in Twitter history.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterSearchRecentTweetsBlock.Input, + output_schema=TwitterSearchRecentTweetsBlock.Output, + test_input={ + "query": "from:twitterapi #twitterapi", + "credentials": TEST_CREDENTIALS_INPUT, + "max_results": 2, + "start_time": "2024-12-14T18:30:00.000Z", + "end_time": "2024-12-17T18:30:00.000Z", + "since_id": None, + "until_id": None, + "sort_order": None, + "pagination": None, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("tweet_ids", ["1373001119480344583", "1372627771717869568"]), + ( + "tweet_texts", + [ + "Looking to get started with the Twitter API but new to APIs in general?", + "Thanks to everyone who joined and made today a great session!", + ], + ), + ( + "data", + [ + { + "id": "1373001119480344583", + "text": "Looking to get started with the Twitter API but new to APIs in general?", + }, + { + "id": "1372627771717869568", + "text": "Thanks to everyone who joined and made today a great session!", + }, + ], + ), + ], + test_mock={ + "search_tweets": lambda *args, **kwargs: ( + ["1373001119480344583", "1372627771717869568"], + [ + "Looking to get started with the Twitter API but new to APIs in general?", + "Thanks to everyone who joined and made today a great session!", + ], + [ + { + "id": "1373001119480344583", + "text": "Looking to get started with the Twitter API but new to APIs in general?", + }, + { + "id": "1372627771717869568", + "text": "Thanks to everyone who joined and made today a great session!", + }, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def search_tweets( + credentials: TwitterCredentials, + query: str, + max_results: int, + start_time: datetime | None, + end_time: datetime | None, + since_id: str | None, + until_id: str | None, + sort_order: str | None, + pagination: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + # Building common params + params = ( + TweetSearchBuilder() + .add_query(query) + .add_pagination(max_results, pagination) + .build() + ) + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + 
.add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + # Adding time window to params If required by the user + params = ( + TweetDurationBuilder(params) + .add_start_time(start_time) + .add_end_time(end_time) + .add_since_id(since_id) + .add_until_id(until_id) + .add_sort_order(sort_order) + .build() + ) + + response = cast(Response, client.search_recent_tweets(**params)) + + if not response.data and not response.meta: + raise Exception("No tweets found") + + meta = {} + tweet_ids = [] + tweet_texts = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + return tweet_ids, tweet_texts, data, included, meta, next_token + + raise Exception("No tweets found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, data, included, meta, next_token = self.search_tweets( + credentials, + input_data.query, + input_data.max_results, + input_data.start_time, + input_data.end_time, + input_data.since_id, + input_data.until_id, + input_data.sort_order, + input_data.pagination, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "tweet_ids", ids + if texts: + yield "tweet_texts", texts + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py new file mode 100644 index 000000000000..d46b5647c9a1 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/quote.py @@ -0,0 +1,222 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import TweetExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExcludesFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetQuoteTweetsBlock(Block): + """ + Gets quote tweets for a specified tweet ID + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to get quotes for", + placeholder="Enter tweet ID", + ) + + max_results: int | None = 
SchemaField( + description="Number of results to return (max 100)", + default=10, + advanced=True, + ) + + exclude: TweetExcludesFilter | None = SchemaField( + description="Types of tweets to exclude", advanced=True, default=None + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", + advanced=True, + default="", + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list = SchemaField(description="All Tweet IDs ") + texts: list = SchemaField(description="All Tweet texts") + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="9fbdd208-a630-11ef-9b97-ab7a3a695ca3", + description="This block gets quote tweets for a specific tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetQuoteTweetsBlock.Input, + output_schema=TwitterGetQuoteTweetsBlock.Output, + test_input={ + "tweet_id": "1234567890", + "max_results": 2, + "pagination_token": None, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["12345", "67890"]), + ("texts", ["Tweet 1", "Tweet 2"]), + ( + "data", + [ + {"id": "12345", "text": "Tweet 1"}, + {"id": "67890", "text": "Tweet 2"}, + ], + ), + ], + test_mock={ + "get_quote_tweets": lambda *args, **kwargs: ( + ["12345", "67890"], + ["Tweet 1", "Tweet 2"], + [ + {"id": "12345", "text": "Tweet 1"}, + {"id": "67890", "text": "Tweet 2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_quote_tweets( + credentials: TwitterCredentials, + tweet_id: str, + max_results: int | None, + exclude: TweetExcludesFilter | None, + pagination_token: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": tweet_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "exclude": None if exclude == TweetExcludesFilter() else exclude, + "user_auth": False, + } + + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_quote_tweets(**params)) + + meta = {} + tweet_ids = [] + tweet_texts = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + return tweet_ids, tweet_texts, data, included, meta, next_token + + 
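+ # Reached only when the response carried no quote tweet data.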
raise Exception("No quote tweets found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, data, included, meta, next_token = self.get_quote_tweets( + credentials, + input_data.tweet_id, + input_data.max_results, + input_data.exclude, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py new file mode 100644 index 000000000000..d46216c0854c --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/retweet.py @@ -0,0 +1,363 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import UserExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterRetweetBlock(Block): + """ + Retweets a tweet on Twitter + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to retweet", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField(description="Whether the retweet was successful") + error: str = SchemaField(description="Error message if the retweet failed") + + def __init__(self): + super().__init__( + id="bd7b8d3a-a630-11ef-be96-6f4aa4c3c4f4", + description="This block retweets a tweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterRetweetBlock.Input, + output_schema=TwitterRetweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"retweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def retweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.retweet( + tweet_id=tweet_id, + user_auth=False, + ) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.retweet( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + 
+class TwitterRemoveRetweetBlock(Block): + """ + Removes a retweet on Twitter + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "tweet.write", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to remove retweet", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the retweet was successfully removed" + ) + error: str = SchemaField(description="Error message if the removal failed") + + def __init__(self): + super().__init__( + id="b6e663f0-a630-11ef-a7f0-8b9b0c542ff8", + description="This block removes a retweet on Twitter.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterRemoveRetweetBlock.Input, + output_schema=TwitterRemoveRetweetBlock.Output, + test_input={ + "tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"remove_retweet": lambda *args, **kwargs: True}, + ) + + @staticmethod + def remove_retweet( + credentials: TwitterCredentials, + tweet_id: str, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unretweet( + source_tweet_id=tweet_id, + user_auth=False, + ) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.remove_retweet( + credentials, + input_data.tweet_id, + ) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetRetweetersBlock(Block): + """ + Gets information about who has retweeted a tweet + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="ID of the tweet to get retweeters for", + placeholder="Enter tweet ID", + ) + + max_results: int | None = SchemaField( + description="Maximum number of results per page (1-100)", + default=10, + placeholder="Enter max results", + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", + placeholder="Enter pagination token", + default="", + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list = SchemaField(description="List of user ids who retweeted") + names: list = SchemaField(description="List of user names who retweeted") + usernames: list = SchemaField( + description="List of user usernames who retweeted" + ) + next_token: str = SchemaField(description="Token for next page of results") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="ad7aa6fa-a630-11ef-a6b0-e7ca640aa030", + description="This block gets information about who has retweeted a tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetRetweetersBlock.Input, + output_schema=TwitterGetRetweetersBlock.Output, + test_input={ + 
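+ # Expansion and field filters below are left as None so only the default request shape is exercised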
"tweet_id": "1234567890", + "credentials": TEST_CREDENTIALS_INPUT, + "max_results": 1, + "pagination_token": "", + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["12345"]), + ("names", ["Test User"]), + ("usernames", ["testuser"]), + ( + "data", + [{"id": "12345", "name": "Test User", "username": "testuser"}], + ), + ], + test_mock={ + "get_retweeters": lambda *args, **kwargs: ( + [{"id": "12345", "name": "Test User", "username": "testuser"}], + {}, + {}, + ["12345"], + ["Test User"], + ["testuser"], + None, + ) + }, + ) + + @staticmethod + def get_retweeters( + credentials: TwitterCredentials, + tweet_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": tweet_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_retweeters(**params)) + + meta = {} + ids = [] + names = [] + usernames = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + ids = [str(user.id) for user in response.data] + names = [user.name for user in response.data] + usernames = [user.username for user in response.data] + return data, included, meta, ids, names, usernames, next_token + + raise Exception("No retweeters found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, meta, ids, names, usernames, next_token = ( + self.get_retweeters( + credentials, + input_data.tweet_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + + if ids: + yield "ids", ids + if names: + yield "names", names + if usernames: + yield "usernames", usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py new file mode 100644 index 000000000000..9bcbcaa347e4 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/timeline.py @@ -0,0 +1,757 @@ +from datetime import datetime +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import ( + TweetDurationBuilder, + TweetExpansionsBuilder, +) +from backend.blocks.twitter._serializer import ( + 
IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetTimeWindowInputs, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetUserMentionsBlock(Block): + """ + Returns Tweets where a single user is mentioned, just put that user id + """ + + class Input(TweetExpansionInputs, TweetTimeWindowInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + user_id: str = SchemaField( + description="Unique identifier of the user for whom to return Tweets mentioning the user", + placeholder="Enter user ID", + ) + + max_results: int | None = SchemaField( + description="Number of tweets to retrieve (5-100)", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", default="", advanced=True + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list[str] = SchemaField(description="List of Tweet IDs") + texts: list[str] = SchemaField(description="All Tweet texts") + + userIds: list[str] = SchemaField( + description="List of user ids that mentioned the user" + ) + userNames: list[str] = SchemaField( + description="List of user names that mentioned the user" + ) + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="e01c890c-a630-11ef-9e20-37da24888bd0", + description="This block retrieves Tweets mentioning a specific user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetUserMentionsBlock.Input, + output_schema=TwitterGetUserMentionsBlock.Output, + test_input={ + "user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + "max_results": 2, + "start_time": "2024-12-14T18:30:00.000Z", + "end_time": "2024-12-17T18:30:00.000Z", + "since_id": "", + "until_id": "", + "sort_order": None, + "pagination_token": None, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1373001119480344583", "1372627771717869568"]), + ("texts", ["Test mention 1", "Test mention 2"]), + ("userIds", ["67890", "67891"]), + ("userNames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "1373001119480344583", "text": "Test mention 1"}, + {"id": "1372627771717869568", "text": "Test mention 2"}, + ], + ), + ], + test_mock={ + "get_mentions": lambda *args, **kwargs: ( + ["1373001119480344583", "1372627771717869568"], + ["Test mention 1", "Test mention 2"], + ["67890", "67891"], + ["testuser1", "testuser2"], + [ + {"id": "1373001119480344583", "text": "Test mention 1"}, + {"id": "1372627771717869568", "text": "Test mention 
2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_mentions( + credentials: TwitterCredentials, + user_id: str, + max_results: int | None, + start_time: datetime | None, + end_time: datetime | None, + since_id: str | None, + until_id: str | None, + sort_order: str | None, + pagination: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": user_id, + "max_results": max_results, + "pagination_token": None if pagination == "" else pagination, + "user_auth": False, + } + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + # Adding time window to params If required by the user + params = ( + TweetDurationBuilder(params) + .add_start_time(start_time) + .add_end_time(end_time) + .add_since_id(since_id) + .add_until_id(until_id) + .add_sort_order(sort_order) + .build() + ) + + response = cast( + Response, + client.get_users_mentions(**params), + ) + + if not response.data and not response.meta: + raise Exception("No tweets found") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + meta = response.meta or {} + next_token = meta.get("next_token", "") + + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + if "users" in included: + user_ids = [str(user["id"]) for user in included["users"]] + user_names = [user["username"] for user in included["users"]] + + return ( + tweet_ids, + tweet_texts, + user_ids, + user_names, + data, + included, + meta, + next_token, + ) + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta, next_token = ( + self.get_mentions( + credentials, + input_data.user_id, + input_data.max_results, + input_data.start_time, + input_data.end_time, + input_data.since_id, + input_data.until_id, + input_data.sort_order, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if user_ids: + yield "userIds", user_ids + if user_names: + yield "userNames", user_names + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetHomeTimelineBlock(Block): + """ + Returns a collection of the most recent Tweets and Retweets posted by you and users you follow + """ + + class Input(TweetExpansionInputs, TweetTimeWindowInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + 
["tweet.read", "users.read", "offline.access"] + ) + + max_results: int | None = SchemaField( + description="Number of tweets to retrieve (5-100)", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", default="", advanced=True + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list[str] = SchemaField(description="List of Tweet IDs") + texts: list[str] = SchemaField(description="All Tweet texts") + + userIds: list[str] = SchemaField( + description="List of user ids that authored the tweets" + ) + userNames: list[str] = SchemaField( + description="List of user names that authored the tweets" + ) + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="d222a070-a630-11ef-a18a-3f52f76c6962", + description="This block retrieves the authenticated user's home timeline.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetHomeTimelineBlock.Input, + output_schema=TwitterGetHomeTimelineBlock.Output, + test_input={ + "credentials": TEST_CREDENTIALS_INPUT, + "max_results": 2, + "start_time": "2024-12-14T18:30:00.000Z", + "end_time": "2024-12-17T18:30:00.000Z", + "since_id": None, + "until_id": None, + "sort_order": None, + "pagination_token": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1373001119480344583", "1372627771717869568"]), + ("texts", ["Test tweet 1", "Test tweet 2"]), + ("userIds", ["67890", "67891"]), + ("userNames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "1373001119480344583", "text": "Test tweet 1"}, + {"id": "1372627771717869568", "text": "Test tweet 2"}, + ], + ), + ], + test_mock={ + "get_timeline": lambda *args, **kwargs: ( + ["1373001119480344583", "1372627771717869568"], + ["Test tweet 1", "Test tweet 2"], + ["67890", "67891"], + ["testuser1", "testuser2"], + [ + {"id": "1373001119480344583", "text": "Test tweet 1"}, + {"id": "1372627771717869568", "text": "Test tweet 2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_timeline( + credentials: TwitterCredentials, + max_results: int | None, + start_time: datetime | None, + end_time: datetime | None, + since_id: str | None, + until_id: str | None, + sort_order: str | None, + pagination: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "max_results": max_results, + "pagination_token": None if pagination == "" else pagination, + "user_auth": False, + } + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + 
.add_user_fields(user_fields) + .build() + ) + + # Adding time window to params If required by the user + params = ( + TweetDurationBuilder(params) + .add_start_time(start_time) + .add_end_time(end_time) + .add_since_id(since_id) + .add_until_id(until_id) + .add_sort_order(sort_order) + .build() + ) + + response = cast( + Response, + client.get_home_timeline(**params), + ) + + if not response.data and not response.meta: + raise Exception("No tweets found") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + meta = response.meta or {} + next_token = meta.get("next_token", "") + + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + if "users" in included: + user_ids = [str(user["id"]) for user in included["users"]] + user_names = [user["username"] for user in included["users"]] + + return ( + tweet_ids, + tweet_texts, + user_ids, + user_names, + data, + included, + meta, + next_token, + ) + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta, next_token = ( + self.get_timeline( + credentials, + input_data.max_results, + input_data.start_time, + input_data.end_time, + input_data.since_id, + input_data.until_id, + input_data.sort_order, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if user_ids: + yield "userIds", user_ids + if user_names: + yield "userNames", user_names + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetUserTweetsBlock(Block): + """ + Returns Tweets composed by a single user, specified by the requested user ID + """ + + class Input(TweetExpansionInputs, TweetTimeWindowInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + user_id: str = SchemaField( + description="Unique identifier of the Twitter account (user ID) for whom to return results", + placeholder="Enter user ID", + ) + + max_results: int | None = SchemaField( + description="Number of tweets to retrieve (5-100)", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for pagination", default="", advanced=True + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list[str] = SchemaField(description="List of Tweet IDs") + texts: list[str] = SchemaField(description="All Tweet texts") + + userIds: list[str] = SchemaField( + description="List of user ids that authored the tweets" + ) + userNames: list[str] = SchemaField( + description="List of user names that authored the tweets" + ) + next_token: str = SchemaField(description="Next token for pagination") + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions 
field" + ) + meta: dict = SchemaField( + description="Provides metadata such as pagination info (next_token) or result counts" + ) + + # error + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="c44c3ef2-a630-11ef-9ff7-eb7b5ea3a5cb", + description="This block retrieves Tweets composed by a single user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetUserTweetsBlock.Input, + output_schema=TwitterGetUserTweetsBlock.Output, + test_input={ + "user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + "max_results": 2, + "start_time": "2024-12-14T18:30:00.000Z", + "end_time": "2024-12-17T18:30:00.000Z", + "since_id": None, + "until_id": None, + "sort_order": None, + "pagination_token": None, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1373001119480344583", "1372627771717869568"]), + ("texts", ["Test tweet 1", "Test tweet 2"]), + ("userIds", ["67890", "67891"]), + ("userNames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "1373001119480344583", "text": "Test tweet 1"}, + {"id": "1372627771717869568", "text": "Test tweet 2"}, + ], + ), + ], + test_mock={ + "get_user_tweets": lambda *args, **kwargs: ( + ["1373001119480344583", "1372627771717869568"], + ["Test tweet 1", "Test tweet 2"], + ["67890", "67891"], + ["testuser1", "testuser2"], + [ + {"id": "1373001119480344583", "text": "Test tweet 1"}, + {"id": "1372627771717869568", "text": "Test tweet 2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_user_tweets( + credentials: TwitterCredentials, + user_id: str, + max_results: int | None, + start_time: datetime | None, + end_time: datetime | None, + since_id: str | None, + until_id: str | None, + sort_order: str | None, + pagination: str | None, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": user_id, + "max_results": max_results, + "pagination_token": None if pagination == "" else pagination, + "user_auth": False, + } + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + # Adding time window to params If required by the user + params = ( + TweetDurationBuilder(params) + .add_start_time(start_time) + .add_end_time(end_time) + .add_since_id(since_id) + .add_until_id(until_id) + .add_sort_order(sort_order) + .build() + ) + + response = cast( + Response, + client.get_users_tweets(**params), + ) + + if not response.data and not response.meta: + raise Exception("No tweets found") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + meta = response.meta or {} + next_token = meta.get("next_token", "") + + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + 
tweet_texts = [tweet.text for tweet in response.data] + + if "users" in included: + user_ids = [str(user["id"]) for user in included["users"]] + user_names = [user["username"] for user in included["users"]] + + return ( + tweet_ids, + tweet_texts, + user_ids, + user_names, + data, + included, + meta, + next_token, + ) + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta, next_token = ( + self.get_user_tweets( + credentials, + input_data.user_id, + input_data.max_results, + input_data.start_time, + input_data.end_time, + input_data.since_id, + input_data.until_id, + input_data.sort_order, + input_data.pagination_token, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if user_ids: + yield "userIds", user_ids + if user_names: + yield "userNames", user_names + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py new file mode 100644 index 000000000000..54d4f5b43ef5 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/tweets/tweet_lookup.py @@ -0,0 +1,361 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import TweetExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + ExpansionFilter, + TweetExpansionInputs, + TweetFieldsFilter, + TweetMediaFieldsFilter, + TweetPlaceFieldsFilter, + TweetPollFieldsFilter, + TweetUserFieldsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetTweetBlock(Block): + """ + Returns information about a single Tweet specified by the requested ID + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", "offline.access"] + ) + + tweet_id: str = SchemaField( + description="Unique identifier of the Tweet to request (ex: 1460323737035677698)", + placeholder="Enter tweet ID", + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + id: str = SchemaField(description="Tweet ID") + text: str = SchemaField(description="Tweet text") + userId: str = SchemaField(description="ID of the tweet author") + userName: str = SchemaField(description="Username of the tweet author") + + # Complete Outputs for advanced use + data: dict = SchemaField(description="Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField(description="Metadata about the tweet") + + error: str = 
SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="f5155c3a-a630-11ef-9cc1-a309988b4d92", + description="This block retrieves information about a specific Tweet.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetTweetBlock.Input, + output_schema=TwitterGetTweetBlock.Output, + test_input={ + "tweet_id": "1460323737035677698", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", "1460323737035677698"), + ("text", "Test tweet content"), + ("userId", "12345"), + ("userName", "testuser"), + ("data", {"id": "1460323737035677698", "text": "Test tweet content"}), + ("included", {"users": [{"id": "12345", "username": "testuser"}]}), + ("meta", {"result_count": 1}), + ], + test_mock={ + "get_tweet": lambda *args, **kwargs: ( + {"id": "1460323737035677698", "text": "Test tweet content"}, + {"users": [{"id": "12345", "username": "testuser"}]}, + {"result_count": 1}, + "12345", + "testuser", + ) + }, + ) + + @staticmethod + def get_tweet( + credentials: TwitterCredentials, + tweet_id: str, + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + params = {"id": tweet_id, "user_auth": False} + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_tweet(**params)) + + meta = {} + user_id = "" + user_name = "" + + if response.meta: + meta = response.meta + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_dict(response.data) + + if included and "users" in included: + user_id = str(included["users"][0]["id"]) + user_name = included["users"][0]["username"] + + if response.data: + return data, included, meta, user_id, user_name + + raise Exception("Tweet not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + + tweet_data, included, meta, user_id, user_name = self.get_tweet( + credentials, + input_data.tweet_id, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + + yield "id", str(tweet_data["id"]) + yield "text", tweet_data["text"] + if user_id: + yield "userId", user_id + if user_name: + yield "userName", user_name + yield "data", tweet_data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetTweetsBlock(Block): + """ + Returns information about multiple Tweets specified by the requested IDs + """ + + class Input(TweetExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["tweet.read", "users.read", 
"offline.access"] + ) + + tweet_ids: list[str] = SchemaField( + description="List of Tweet IDs to request (up to 100)", + placeholder="Enter tweet IDs", + ) + + class Output(BlockSchema): + # Common Outputs that user commonly uses + ids: list[str] = SchemaField(description="All Tweet IDs") + texts: list[str] = SchemaField(description="All Tweet texts") + userIds: list[str] = SchemaField( + description="List of user ids that authored the tweets" + ) + userNames: list[str] = SchemaField( + description="List of user names that authored the tweets" + ) + + # Complete Outputs for advanced use + data: list[dict] = SchemaField(description="Complete Tweet data") + included: dict = SchemaField( + description="Additional data that you have requested (Optional) via Expansions field" + ) + meta: dict = SchemaField(description="Metadata about the tweets") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="e7cc5420-a630-11ef-bfaf-13bdd8096a51", + description="This block retrieves information about multiple Tweets.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetTweetsBlock.Input, + output_schema=TwitterGetTweetsBlock.Output, + test_input={ + "tweet_ids": ["1460323737035677698"], + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "media_fields": None, + "place_fields": None, + "poll_fields": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1460323737035677698"]), + ("texts", ["Test tweet content"]), + ("userIds", ["67890"]), + ("userNames", ["testuser1"]), + ("data", [{"id": "1460323737035677698", "text": "Test tweet content"}]), + ("included", {"users": [{"id": "67890", "username": "testuser1"}]}), + ("meta", {"result_count": 1}), + ], + test_mock={ + "get_tweets": lambda *args, **kwargs: ( + ["1460323737035677698"], # ids + ["Test tweet content"], # texts + ["67890"], # user_ids + ["testuser1"], # user_names + [ + {"id": "1460323737035677698", "text": "Test tweet content"} + ], # data + {"users": [{"id": "67890", "username": "testuser1"}]}, # included + {"result_count": 1}, # meta + ) + }, + ) + + @staticmethod + def get_tweets( + credentials: TwitterCredentials, + tweet_ids: list[str], + expansions: ExpansionFilter | None, + media_fields: TweetMediaFieldsFilter | None, + place_fields: TweetPlaceFieldsFilter | None, + poll_fields: TweetPollFieldsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + params = {"ids": tweet_ids, "user_auth": False} + + # Adding expansions to params If required by the user + params = ( + TweetExpansionsBuilder(params) + .add_expansions(expansions) + .add_media_fields(media_fields) + .add_place_fields(place_fields) + .add_poll_fields(poll_fields) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_tweets(**params)) + + if not response.data and not response.meta: + raise Exception("No tweets found") + + tweet_ids = [] + tweet_texts = [] + user_ids = [] + user_names = [] + meta = {} + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + tweet_ids = [str(tweet.id) for tweet in response.data] + tweet_texts = [tweet.text for tweet in response.data] + + if included and "users" in included: + 
for user in included["users"]: + user_ids.append(str(user["id"])) + user_names.append(user["username"]) + + if response.meta: + meta = response.meta + + return tweet_ids, tweet_texts, user_ids, user_names, data, included, meta + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, texts, user_ids, user_names, data, included, meta = self.get_tweets( + credentials, + input_data.tweet_ids, + input_data.expansions, + input_data.media_fields, + input_data.place_fields, + input_data.poll_fields, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if texts: + yield "texts", texts + if user_ids: + yield "userIds", user_ids + if user_names: + yield "userNames", user_names + if data: + yield "data", data + if included: + yield "included", included + if meta: + yield "meta", meta + + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py b/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py new file mode 100644 index 000000000000..62575976ee16 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/users/blocks.py @@ -0,0 +1,175 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import UserExpansionsBuilder +from backend.blocks.twitter._serializer import IncludesSerializer +from backend.blocks.twitter._types import ( + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterGetBlockedUsersBlock(Block): + """ + Get a list of users who are blocked by the authenticating user + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access", "block.read"] + ) + + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-1000, default 100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for retrieving next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + user_ids: list[str] = SchemaField(description="List of blocked user IDs") + usernames_: list[str] = SchemaField(description="List of blocked usernames") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + next_token: str = SchemaField(description="Next token for pagination") + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="05f409e8-a631-11ef-ae89-93de863ee30d", + description="This block retrieves a list of users blocked by the authenticating user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetBlockedUsersBlock.Input, + output_schema=TwitterGetBlockedUsersBlock.Output, + test_input={ + "max_results": 10, + 
"pagination_token": "", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("user_ids", ["12345", "67890"]), + ("usernames_", ["testuser1", "testuser2"]), + ], + test_mock={ + "get_blocked_users": lambda *args, **kwargs: ( + {}, # included + {}, # meta + ["12345", "67890"], # user_ids + ["testuser1", "testuser2"], # usernames + None, # next_token + ) + }, + ) + + @staticmethod + def get_blocked_users( + credentials: TwitterCredentials, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_blocked(**params)) + + meta = {} + user_ids = [] + usernames = [] + next_token = None + + included = IncludesSerializer.serialize(response.includes) + + if response.data: + for user in response.data: + user_ids.append(str(user.id)) + usernames.append(user.username) + + if response.meta: + meta = response.meta + if "next_token" in meta: + next_token = meta["next_token"] + + if user_ids and usernames: + return included, meta, user_ids, usernames, next_token + else: + raise tweepy.TweepyException("No blocked users found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + included, meta, user_ids, usernames, next_token = self.get_blocked_users( + credentials, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if user_ids: + yield "user_ids", user_ids + if usernames: + yield "usernames_", usernames + if included: + yield "included", included + if meta: + yield "meta", meta + if next_token: + yield "next_token", next_token + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/follows.py b/autogpt_platform/backend/backend/blocks/twitter/users/follows.py new file mode 100644 index 000000000000..3ffafa606291 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/users/follows.py @@ -0,0 +1,510 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import UserExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterUnfollowUserBlock(Block): + """ + Allows a user to unfollow 
another user specified by target user ID. + The request succeeds with no action when the authenticated user sends a request to a user they're not following or have already unfollowed. + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "users.write", "follows.write", "offline.access"] + ) + + target_user_id: str = SchemaField( + description="The user ID of the user that you would like to unfollow", + placeholder="Enter target user ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the unfollow action was successful" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="37e386a4-a631-11ef-b7bd-b78204b35fa4", + description="This block unfollows a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnfollowUserBlock.Input, + output_schema=TwitterUnfollowUserBlock.Output, + test_input={ + "target_user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"unfollow_user": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unfollow_user(credentials: TwitterCredentials, target_user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unfollow_user(target_user_id=target_user_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unfollow_user(credentials, input_data.target_user_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterFollowUserBlock(Block): + """ + Allows a user to follow another user specified by target user ID. If the target user does not have public Tweets, + this endpoint will send a follow request. The request succeeds with no action when the authenticated user sends a + request to a user they're already following, or if they're sending a follower request to a user that does not have + public Tweets. 
+ """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "users.write", "follows.write", "offline.access"] + ) + + target_user_id: str = SchemaField( + description="The user ID of the user that you would like to follow", + placeholder="Enter target user ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the follow action was successful" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="1aae6a5e-a631-11ef-a090-435900c6d429", + description="This block follows a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterFollowUserBlock.Input, + output_schema=TwitterFollowUserBlock.Output, + test_input={ + "target_user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[("success", True)], + test_mock={"follow_user": lambda *args, **kwargs: True}, + ) + + @staticmethod + def follow_user(credentials: TwitterCredentials, target_user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.follow_user(target_user_id=target_user_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.follow_user(credentials, input_data.target_user_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetFollowersBlock(Block): + """ + Retrieves a list of followers for a specified Twitter user ID + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access", "follows.read"] + ) + + target_user_id: str = SchemaField( + description="The user ID whose followers you would like to retrieve", + placeholder="Enter target user ID", + ) + + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-1000, default 100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for retrieving next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + ids: list[str] = SchemaField(description="List of follower user IDs") + usernames: list[str] = SchemaField(description="List of follower usernames") + next_token: str = SchemaField(description="Next token for pagination") + + data: list[dict] = SchemaField(description="Complete user data for followers") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="30f66410-a631-11ef-8fe7-d7f888b4f43c", + description="This block retrieves followers of a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetFollowersBlock.Input, + output_schema=TwitterGetFollowersBlock.Output, + test_input={ + "target_user_id": "12345", + "max_results": 1, + "pagination_token": "", + "expansions": None, + "tweet_fields": None, + "user_fields": None, + "credentials": TEST_CREDENTIALS_INPUT, + }, + 
test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1234567890"]), + ("usernames", ["testuser"]), + ("data", [{"id": "1234567890", "username": "testuser"}]), + ], + test_mock={ + "get_followers": lambda *args, **kwargs: ( + ["1234567890"], + ["testuser"], + [{"id": "1234567890", "username": "testuser"}], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_followers( + credentials: TwitterCredentials, + target_user_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": target_user_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_users_followers(**params)) + + meta = {} + follower_ids = [] + follower_usernames = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + follower_ids = [str(user.id) for user in response.data] + follower_usernames = [user.username for user in response.data] + + return ( + follower_ids, + follower_usernames, + data, + included, + meta, + next_token, + ) + + raise Exception("Followers not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, usernames, data, includes, meta, next_token = self.get_followers( + credentials, + input_data.target_user_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if usernames: + yield "usernames", usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if includes: + yield "includes", includes + if meta: + yield "meta", meta + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetFollowingBlock(Block): + """ + Retrieves a list of users that a specified Twitter user ID is following + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access", "follows.read"] + ) + + target_user_id: str = SchemaField( + description="The user ID whose following you would like to retrieve", + placeholder="Enter target user ID", + ) + + max_results: int | None = SchemaField( + description="Maximum number of results to return (1-1000, default 100)", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token for retrieving next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + ids: list[str] = SchemaField(description="List of following user IDs") + usernames: list[str] = SchemaField(description="List of following usernames") + next_token: str = SchemaField(description="Next token for pagination") + + data: list[dict] = 
SchemaField(description="Complete user data for following") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="264a399c-a631-11ef-a97d-bfde4ca91173", + description="This block retrieves the users that a specified Twitter user is following.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetFollowingBlock.Input, + output_schema=TwitterGetFollowingBlock.Output, + test_input={ + "target_user_id": "12345", + "max_results": 1, + "pagination_token": None, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["1234567890"]), + ("usernames", ["testuser"]), + ("data", [{"id": "1234567890", "username": "testuser"}]), + ], + test_mock={ + "get_following": lambda *args, **kwargs: ( + ["1234567890"], + ["testuser"], + [{"id": "1234567890", "username": "testuser"}], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_following( + credentials: TwitterCredentials, + target_user_id: str, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": target_user_id, + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_users_following(**params)) + + meta = {} + following_ids = [] + following_usernames = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + following_ids = [str(user.id) for user in response.data] + following_usernames = [user.username for user in response.data] + + return ( + following_ids, + following_usernames, + data, + included, + meta, + next_token, + ) + + raise Exception("Following not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, usernames, data, includes, meta, next_token = self.get_following( + credentials, + input_data.target_user_id, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if usernames: + yield "usernames", usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if includes: + yield "includes", includes + if meta: + yield "meta", meta + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py b/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py new file mode 100644 index 000000000000..175a39011d33 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/users/mutes.py 
@@ -0,0 +1,328 @@ +from typing import cast + +import tweepy +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import UserExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TwitterUnmuteUserBlock(Block): + """ + Allows a user to unmute another user specified by target user ID. + The request succeeds with no action when the user sends a request to a user they're not muting or have already unmuted. + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "users.write", "offline.access"] + ) + + target_user_id: str = SchemaField( + description="The user ID of the user that you would like to unmute", + placeholder="Enter target user ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the unmute action was successful" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="40458504-a631-11ef-940b-eff92be55422", + description="This block unmutes a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterUnmuteUserBlock.Input, + output_schema=TwitterUnmuteUserBlock.Output, + test_input={ + "target_user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"unmute_user": lambda *args, **kwargs: True}, + ) + + @staticmethod + def unmute_user(credentials: TwitterCredentials, target_user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.unmute(target_user_id=target_user_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.unmute_user(credentials, input_data.target_user_id) + yield "success", success + + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterGetMutedUsersBlock(Block): + """ + Returns a list of users who are muted by the authenticating user + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access"] + ) + + max_results: int | None = SchemaField( + description="The maximum number of results to be returned per page (1-1000). 
Default is 100.", + placeholder="Enter max results", + default=10, + advanced=True, + ) + + pagination_token: str | None = SchemaField( + description="Token to request next/previous page of results", + placeholder="Enter pagination token", + default="", + advanced=True, + ) + + class Output(BlockSchema): + ids: list[str] = SchemaField(description="List of muted user IDs") + usernames: list[str] = SchemaField(description="List of muted usernames") + next_token: str = SchemaField(description="Next token for pagination") + + data: list[dict] = SchemaField(description="Complete user data for muted users") + includes: dict = SchemaField( + description="Additional data requested via expansions" + ) + meta: dict = SchemaField(description="Metadata including pagination info") + + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="475024da-a631-11ef-9ccd-f724b8b03cda", + description="This block gets a list of users muted by the authenticating user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetMutedUsersBlock.Input, + output_schema=TwitterGetMutedUsersBlock.Output, + test_input={ + "max_results": 2, + "pagination_token": "", + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["12345", "67890"]), + ("usernames", ["testuser1", "testuser2"]), + ( + "data", + [ + {"id": "12345", "username": "testuser1"}, + {"id": "67890", "username": "testuser2"}, + ], + ), + ], + test_mock={ + "get_muted_users": lambda *args, **kwargs: ( + ["12345", "67890"], + ["testuser1", "testuser2"], + [ + {"id": "12345", "username": "testuser1"}, + {"id": "67890", "username": "testuser2"}, + ], + {}, + {}, + None, + ) + }, + ) + + @staticmethod + def get_muted_users( + credentials: TwitterCredentials, + max_results: int | None, + pagination_token: str | None, + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "max_results": max_results, + "pagination_token": ( + None if pagination_token == "" else pagination_token + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_muted(**params)) + + meta = {} + user_ids = [] + usernames = [] + next_token = None + + if response.meta: + meta = response.meta + next_token = meta.get("next_token") + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_list(response.data) + + if response.data: + user_ids = [str(item.id) for item in response.data] + usernames = [item.username for item in response.data] + + return user_ids, usernames, data, included, meta, next_token + + raise Exception("Muted users not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + ids, usernames, data, includes, meta, next_token = self.get_muted_users( + credentials, + input_data.max_results, + input_data.pagination_token, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if usernames: + yield "usernames", 
usernames + if next_token: + yield "next_token", next_token + if data: + yield "data", data + if includes: + yield "includes", includes + if meta: + yield "meta", meta + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class TwitterMuteUserBlock(Block): + """ + Allows a user to mute another user specified by target user ID + """ + + class Input(BlockSchema): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "users.write", "offline.access"] + ) + + target_user_id: str = SchemaField( + description="The user ID of the user that you would like to mute", + placeholder="Enter target user ID", + ) + + class Output(BlockSchema): + success: bool = SchemaField( + description="Whether the mute action was successful" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="4d1919d0-a631-11ef-90ab-3b73af9ce8f1", + description="This block mutes a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterMuteUserBlock.Input, + output_schema=TwitterMuteUserBlock.Output, + test_input={ + "target_user_id": "12345", + "credentials": TEST_CREDENTIALS_INPUT, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("success", True), + ], + test_mock={"mute_user": lambda *args, **kwargs: True}, + ) + + @staticmethod + def mute_user(credentials: TwitterCredentials, target_user_id: str): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + client.mute(target_user_id=target_user_id, user_auth=False) + + return True + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + success = self.mute_user(credentials, input_data.target_user_id) + yield "success", success + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py b/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py new file mode 100644 index 000000000000..16c207f6dd87 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/twitter/users/user_lookup.py @@ -0,0 +1,383 @@ +from typing import Literal, Union, cast + +import tweepy +from pydantic import BaseModel +from tweepy.client import Response + +from backend.blocks.twitter._auth import ( + TEST_CREDENTIALS, + TEST_CREDENTIALS_INPUT, + TwitterCredentials, + TwitterCredentialsField, + TwitterCredentialsInput, +) +from backend.blocks.twitter._builders import UserExpansionsBuilder +from backend.blocks.twitter._serializer import ( + IncludesSerializer, + ResponseDataSerializer, +) +from backend.blocks.twitter._types import ( + TweetFieldsFilter, + TweetUserFieldsFilter, + UserExpansionInputs, + UserExpansionsFilter, +) +from backend.blocks.twitter.tweepy_exceptions import handle_tweepy_exception +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class UserId(BaseModel): + discriminator: Literal["user_id"] + user_id: str = SchemaField(description="The ID of the user to lookup", default="") + + +class Username(BaseModel): + discriminator: Literal["username"] + username: str = SchemaField( + description="The Twitter username (handle) of the user", default="" + ) + + +class TwitterGetUserBlock(Block): + """ + Gets information about a single Twitter user specified by ID or username + """ + + class 
Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access"] + ) + + identifier: Union[UserId, Username] = SchemaField( + discriminator="discriminator", + description="Choose whether to identify the user by their unique Twitter ID or by their username", + advanced=False, + ) + + class Output(BlockSchema): + # Common outputs + id: str = SchemaField(description="User ID") + username_: str = SchemaField(description="User username") + name_: str = SchemaField(description="User name") + + # Complete outputs + data: dict = SchemaField(description="Complete user data") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="5446db8e-a631-11ef-812a-cf315d373ee9", + description="This block retrieves information about a specified Twitter user.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetUserBlock.Input, + output_schema=TwitterGetUserBlock.Output, + test_input={ + "identifier": {"discriminator": "username", "username": "twitter"}, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("id", "783214"), + ("username_", "twitter"), + ("name_", "Twitter"), + ( + "data", + { + "user": { + "id": "783214", + "username": "twitter", + "name": "Twitter", + } + }, + ), + ], + test_mock={ + "get_user": lambda *args, **kwargs: ( + { + "user": { + "id": "783214", + "username": "twitter", + "name": "Twitter", + } + }, + {}, + "twitter", + "783214", + "Twitter", + ) + }, + ) + + @staticmethod + def get_user( + credentials: TwitterCredentials, + identifier: Union[UserId, Username], + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "id": identifier.user_id if isinstance(identifier, UserId) else None, + "username": ( + identifier.username if isinstance(identifier, Username) else None + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_user(**params)) + + username = "" + id = "" + name = "" + + included = IncludesSerializer.serialize(response.includes) + data = ResponseDataSerializer.serialize_dict(response.data) + + if response.data: + username = response.data.username + id = str(response.data.id) + name = response.data.name + + if username and id: + return data, included, username, id, name + else: + raise tweepy.TweepyException("User not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, username, id, name = self.get_user( + credentials, + input_data.identifier, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if id: + yield "id", id + if username: + yield "username_", username + if name: + yield "name_", name + if data: + yield "data", data + if included: + yield "included", included + except Exception as e: + yield "error", handle_tweepy_exception(e) + + +class UserIdList(BaseModel): + discriminator: 
Literal["user_id_list"] + user_ids: list[str] = SchemaField( + description="List of user IDs to lookup (max 100)", + placeholder="Enter user IDs", + default=[], + advanced=False, + ) + + +class UsernameList(BaseModel): + discriminator: Literal["username_list"] + usernames: list[str] = SchemaField( + description="List of Twitter usernames/handles to lookup (max 100)", + placeholder="Enter usernames", + default=[], + advanced=False, + ) + + +class TwitterGetUsersBlock(Block): + """ + Gets information about multiple Twitter users specified by IDs or usernames + """ + + class Input(UserExpansionInputs): + credentials: TwitterCredentialsInput = TwitterCredentialsField( + ["users.read", "offline.access"] + ) + + identifier: Union[UserIdList, UsernameList] = SchemaField( + discriminator="discriminator", + description="Choose whether to identify users by their unique Twitter IDs or by their usernames", + advanced=False, + ) + + class Output(BlockSchema): + # Common outputs + ids: list[str] = SchemaField(description="User IDs") + usernames_: list[str] = SchemaField(description="User usernames") + names_: list[str] = SchemaField(description="User names") + + # Complete outputs + data: list[dict] = SchemaField(description="Complete users data") + included: dict = SchemaField( + description="Additional data requested via expansions" + ) + error: str = SchemaField(description="Error message if the request failed") + + def __init__(self): + super().__init__( + id="5abc857c-a631-11ef-8cfc-f7b79354f7a1", + description="This block retrieves information about multiple Twitter users.", + categories={BlockCategory.SOCIAL}, + input_schema=TwitterGetUsersBlock.Input, + output_schema=TwitterGetUsersBlock.Output, + test_input={ + "identifier": { + "discriminator": "username_list", + "usernames": ["twitter", "twitterdev"], + }, + "credentials": TEST_CREDENTIALS_INPUT, + "expansions": None, + "tweet_fields": None, + "user_fields": None, + }, + test_credentials=TEST_CREDENTIALS, + test_output=[ + ("ids", ["783214", "2244994945"]), + ("usernames_", ["twitter", "twitterdev"]), + ("names_", ["Twitter", "Twitter Dev"]), + ( + "data", + [ + {"id": "783214", "username": "twitter", "name": "Twitter"}, + { + "id": "2244994945", + "username": "twitterdev", + "name": "Twitter Dev", + }, + ], + ), + ], + test_mock={ + "get_users": lambda *args, **kwargs: ( + [ + {"id": "783214", "username": "twitter", "name": "Twitter"}, + { + "id": "2244994945", + "username": "twitterdev", + "name": "Twitter Dev", + }, + ], + {}, + ["twitter", "twitterdev"], + ["783214", "2244994945"], + ["Twitter", "Twitter Dev"], + ) + }, + ) + + @staticmethod + def get_users( + credentials: TwitterCredentials, + identifier: Union[UserIdList, UsernameList], + expansions: UserExpansionsFilter | None, + tweet_fields: TweetFieldsFilter | None, + user_fields: TweetUserFieldsFilter | None, + ): + try: + client = tweepy.Client( + bearer_token=credentials.access_token.get_secret_value() + ) + + params = { + "ids": ( + ",".join(identifier.user_ids) + if isinstance(identifier, UserIdList) + else None + ), + "usernames": ( + ",".join(identifier.usernames) + if isinstance(identifier, UsernameList) + else None + ), + "user_auth": False, + } + + params = ( + UserExpansionsBuilder(params) + .add_expansions(expansions) + .add_tweet_fields(tweet_fields) + .add_user_fields(user_fields) + .build() + ) + + response = cast(Response, client.get_users(**params)) + + usernames = [] + ids = [] + names = [] + + included = IncludesSerializer.serialize(response.includes) + data = 
ResponseDataSerializer.serialize_list(response.data) + + if response.data: + for user in response.data: + usernames.append(user.username) + ids.append(str(user.id)) + names.append(user.name) + + if usernames and ids: + return data, included, usernames, ids, names + else: + raise tweepy.TweepyException("Users not found") + + except tweepy.TweepyException: + raise + + def run( + self, + input_data: Input, + *, + credentials: TwitterCredentials, + **kwargs, + ) -> BlockOutput: + try: + data, included, usernames, ids, names = self.get_users( + credentials, + input_data.identifier, + input_data.expansions, + input_data.tweet_fields, + input_data.user_fields, + ) + if ids: + yield "ids", ids + if usernames: + yield "usernames_", usernames + if names: + yield "names_", names + if data: + yield "data", data + if included: + yield "included", included + except Exception as e: + yield "error", handle_tweepy_exception(e) diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py new file mode 100644 index 000000000000..648d9d6dae07 --- /dev/null +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -0,0 +1,90 @@ +from urllib.parse import parse_qs, urlparse + +from youtube_transcript_api import YouTubeTranscriptApi +from youtube_transcript_api.formatters import TextFormatter + +from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField + + +class TranscribeYoutubeVideoBlock(Block): + class Input(BlockSchema): + youtube_url: str = SchemaField( + title="YouTube URL", + description="The URL of the YouTube video to transcribe", + placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ", + ) + + class Output(BlockSchema): + video_id: str = SchemaField(description="The extracted YouTube video ID") + transcript: str = SchemaField(description="The transcribed text of the video") + error: str = SchemaField( + description="Any error message if the transcription fails" + ) + + def __init__(self): + super().__init__( + id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c", + input_schema=TranscribeYoutubeVideoBlock.Input, + output_schema=TranscribeYoutubeVideoBlock.Output, + description="Transcribes a YouTube video.", + categories={BlockCategory.SOCIAL}, + test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}, + test_output=[ + ("video_id", "dQw4w9WgXcQ"), + ( + "transcript", + "Never gonna give you up\nNever gonna let you down", + ), + ], + test_mock={ + "get_transcript": lambda video_id: [ + {"text": "Never gonna give you up"}, + {"text": "Never gonna let you down"}, + ], + }, + ) + + @staticmethod + def extract_video_id(url: str) -> str: + parsed_url = urlparse(url) + if parsed_url.netloc == "youtu.be": + return parsed_url.path[1:] + if parsed_url.netloc in ("www.youtube.com", "youtube.com"): + if parsed_url.path == "/watch": + p = parse_qs(parsed_url.query) + return p["v"][0] + if parsed_url.path[:7] == "/embed/": + return parsed_url.path.split("/")[2] + if parsed_url.path[:3] == "/v/": + return parsed_url.path.split("/")[2] + raise ValueError(f"Invalid YouTube URL: {url}") + + @staticmethod + def get_transcript(video_id: str): + try: + transcript_list = YouTubeTranscriptApi.list_transcripts(video_id) + + if not transcript_list: + raise ValueError(f"No transcripts found for the video: {video_id}") + + for transcript in transcript_list: + first_transcript = transcript_list.find_transcript( + [transcript.language_code] + ) + return YouTubeTranscriptApi.get_transcript( + 
video_id, languages=[first_transcript.language_code] + ) + + except Exception: + raise ValueError(f"No transcripts found for the video: {video_id}") + + def run(self, input_data: Input, **kwargs) -> BlockOutput: + video_id = self.extract_video_id(input_data.youtube_url) + yield "video_id", video_id + + transcript = self.get_transcript(video_id) + formatter = TextFormatter() + transcript_text = formatter.format_transcript(transcript) + + yield "transcript", transcript_text diff --git a/autogpt_platform/backend/backend/cli.py b/autogpt_platform/backend/backend/cli.py new file mode 100755 index 000000000000..efaadd02b282 --- /dev/null +++ b/autogpt_platform/backend/backend/cli.py @@ -0,0 +1,257 @@ +""" +The command line interface for the agent server +""" + +import os +import pathlib + +import click +import psutil + +from backend import app +from backend.util.process import AppProcess + + +def get_pid_path() -> pathlib.Path: + home_dir = pathlib.Path.home() + new_dir = home_dir / ".config" / "agpt" + file_path = new_dir / "running.tmp" + return file_path + + +def get_pid() -> int | None: + file_path = get_pid_path() + if not file_path.exists(): + return None + + os.makedirs(file_path.parent, exist_ok=True) + with open(file_path, "r", encoding="utf-8") as file: + pid = file.read() + try: + return int(pid) + except ValueError: + return None + + +def write_pid(pid: int): + file_path = get_pid_path() + os.makedirs(file_path.parent, exist_ok=True) + with open(file_path, "w", encoding="utf-8") as file: + file.write(str(pid)) + + +class MainApp(AppProcess): + def run(self): + app.main(silent=True) + + +@click.group() +def main(): + """AutoGPT Server CLI Tool""" + pass + + +@main.command() +def start(): + """ + Starts the server in the background and saves the PID + """ + # Define the path for the new directory and file + pid = get_pid() + if pid and psutil.pid_exists(pid): + print("Server is already running") + exit(1) + elif pid: + print("PID does not exist deleting file") + os.remove(get_pid_path()) + + print("Starting server") + pid = MainApp().start(background=True, silent=True) + print(f"Server running in process: {pid}") + + write_pid(pid) + print("done") + os._exit(status=0) + + +@main.command() +def stop(): + """ + Stops the server + """ + pid = get_pid() + if not pid: + print("Server is not running") + return + + os.remove(get_pid_path()) + process = psutil.Process(int(pid)) + for child in process.children(recursive=True): + child.terminate() + process.terminate() + + print("Server Stopped") + + +@main.command() +def gen_encrypt_key(): + """ + Generate a new encryption key + """ + from cryptography.fernet import Fernet + + print(Fernet.generate_key().decode()) + + +@click.group() +def test(): + """ + Group for test commands + """ + pass + + +@test.command() +@click.argument("server_address") +def reddit(server_address: str): + """ + Create an event graph + """ + import requests + + from backend.usecases.reddit_marketing import create_test_graph + + test_graph = create_test_graph() + url = f"{server_address}/graphs" + headers = {"Content-Type": "application/json"} + data = test_graph.model_dump_json() + + response = requests.post(url, headers=headers, data=data) + + graph_id = response.json()["id"] + print(f"Graph created with ID: {graph_id}") + + +@test.command() +@click.argument("server_address") +def populate_db(server_address: str): + """ + Create an event graph + """ + import requests + + from backend.usecases.sample import create_test_graph + + test_graph = create_test_graph() + url = 
f"{server_address}/graphs" + headers = {"Content-Type": "application/json"} + data = test_graph.model_dump_json() + + response = requests.post(url, headers=headers, data=data) + + graph_id = response.json()["id"] + + if response.status_code == 200: + execute_url = f"{server_address}/graphs/{response.json()['id']}/execute" + text = "Hello, World!" + input_data = {"input": text} + response = requests.post(execute_url, headers=headers, json=input_data) + + schedule_url = f"{server_address}/graphs/{graph_id}/schedules" + data = { + "graph_id": graph_id, + "cron": "*/5 * * * *", + "input_data": {"input": "Hello, World!"}, + } + response = requests.post(schedule_url, headers=headers, json=data) + + print("Database populated with: \n- graph\n- execution\n- schedule") + + +@test.command() +@click.argument("server_address") +def graph(server_address: str): + """ + Create an event graph + """ + import requests + + from backend.usecases.sample import create_test_graph + + url = f"{server_address}/graphs" + headers = {"Content-Type": "application/json"} + data = create_test_graph().model_dump_json() + response = requests.post(url, headers=headers, data=data) + + if response.status_code == 200: + print(response.json()["id"]) + execute_url = f"{server_address}/graphs/{response.json()['id']}/execute" + text = "Hello, World!" + input_data = {"input": text} + response = requests.post(execute_url, headers=headers, json=input_data) + + else: + print("Failed to send graph") + print(f"Response: {response.text}") + + +@test.command() +@click.argument("graph_id") +@click.argument("content") +def execute(graph_id: str, content: dict): + """ + Create an event graph + """ + import requests + + headers = {"Content-Type": "application/json"} + + execute_url = f"http://0.0.0.0:8000/graphs/{graph_id}/execute" + requests.post(execute_url, headers=headers, json=content) + + +@test.command() +def event(): + """ + Send an event to the running server + """ + print("Event sent") + + +@test.command() +@click.argument("server_address") +@click.argument("graph_id") +def websocket(server_address: str, graph_id: str): + """ + Tests the websocket connection. 
+ """ + import asyncio + + import websockets.asyncio.client + + from backend.server.ws_api import ExecutionSubscription, Methods, WsMessage + + async def send_message(server_address: str): + uri = f"ws://{server_address}" + async with websockets.asyncio.client.connect(uri) as websocket: + try: + msg = WsMessage( + method=Methods.SUBSCRIBE, + data=ExecutionSubscription(graph_id=graph_id).model_dump(), + ).model_dump_json() + await websocket.send(msg) + print(f"Sending: {msg}") + while True: + response = await websocket.recv() + print(f"Response from server: {response}") + except InterruptedError: + exit(0) + + asyncio.run(send_message(server_address)) + print("Testing WS") + + +main.add_command(test) + +if __name__ == "__main__": + main() diff --git a/autogpt_platform/backend/backend/data/analytics.py b/autogpt_platform/backend/backend/data/analytics.py new file mode 100644 index 000000000000..e0ee6bc4e2b5 --- /dev/null +++ b/autogpt_platform/backend/backend/data/analytics.py @@ -0,0 +1,43 @@ +import logging + +import prisma.types + +logger = logging.getLogger(__name__) + + +async def log_raw_analytics( + user_id: str, + type: str, + data: dict, + data_index: str, +): + details = await prisma.models.AnalyticsDetails.prisma().create( + data={ + "userId": user_id, + "type": type, + "data": prisma.Json(data), + "dataIndex": data_index, + } + ) + return details + + +async def log_raw_metric( + user_id: str, + metric_name: str, + metric_value: float, + data_string: str, +): + if metric_value < 0: + raise ValueError("metric_value must be non-negative") + + result = await prisma.models.AnalyticsMetrics.prisma().create( + data={ + "value": metric_value, + "analyticMetric": metric_name, + "userId": user_id, + "dataString": data_string, + }, + ) + + return result diff --git a/autogpt_platform/backend/backend/data/api_key.py b/autogpt_platform/backend/backend/data/api_key.py new file mode 100644 index 000000000000..91fb33622e28 --- /dev/null +++ b/autogpt_platform/backend/backend/data/api_key.py @@ -0,0 +1,325 @@ +import logging +import uuid +from datetime import datetime, timezone +from typing import List, Optional + +from autogpt_libs.api_key.key_manager import APIKeyManager +from prisma.enums import APIKeyPermission, APIKeyStatus +from prisma.errors import PrismaError +from prisma.models import APIKey as PrismaAPIKey +from prisma.types import ( + APIKeyCreateInput, + APIKeyUpdateInput, + APIKeyWhereInput, + APIKeyWhereUniqueInput, +) +from pydantic import BaseModel + +from backend.data.db import BaseDbModel + +logger = logging.getLogger(__name__) + + +# Some basic exceptions +class APIKeyError(Exception): + """Base exception for API key operations""" + + pass + + +class APIKeyNotFoundError(APIKeyError): + """Raised when an API key is not found""" + + pass + + +class APIKeyPermissionError(APIKeyError): + """Raised when there are permission issues with API key operations""" + + pass + + +class APIKeyValidationError(APIKeyError): + """Raised when API key validation fails""" + + pass + + +class APIKey(BaseDbModel): + name: str + prefix: str + key: str + status: APIKeyStatus = APIKeyStatus.ACTIVE + permissions: List[APIKeyPermission] + postfix: str + created_at: datetime + last_used_at: Optional[datetime] = None + revoked_at: Optional[datetime] = None + description: Optional[str] = None + user_id: str + + @staticmethod + def from_db(api_key: PrismaAPIKey): + try: + return APIKey( + id=api_key.id, + name=api_key.name, + prefix=api_key.prefix, + postfix=api_key.postfix, + key=api_key.key, + 
status=APIKeyStatus(api_key.status), + permissions=[APIKeyPermission(p) for p in api_key.permissions], + created_at=api_key.createdAt, + last_used_at=api_key.lastUsedAt, + revoked_at=api_key.revokedAt, + description=api_key.description, + user_id=api_key.userId, + ) + except Exception as e: + logger.error(f"Error creating APIKey from db: {str(e)}") + raise APIKeyError(f"Failed to create API key object: {str(e)}") + + +class APIKeyWithoutHash(BaseModel): + id: str + name: str + prefix: str + postfix: str + status: APIKeyStatus + permissions: List[APIKeyPermission] + created_at: datetime + last_used_at: Optional[datetime] + revoked_at: Optional[datetime] + description: Optional[str] + user_id: str + + @staticmethod + def from_db(api_key: PrismaAPIKey): + try: + return APIKeyWithoutHash( + id=api_key.id, + name=api_key.name, + prefix=api_key.prefix, + postfix=api_key.postfix, + status=APIKeyStatus(api_key.status), + permissions=[APIKeyPermission(p) for p in api_key.permissions], + created_at=api_key.createdAt, + last_used_at=api_key.lastUsedAt, + revoked_at=api_key.revokedAt, + description=api_key.description, + user_id=api_key.userId, + ) + except Exception as e: + logger.error(f"Error creating APIKeyWithoutHash from db: {str(e)}") + raise APIKeyError(f"Failed to create API key object: {str(e)}") + + +async def generate_api_key( + name: str, + user_id: str, + permissions: List[APIKeyPermission], + description: Optional[str] = None, +) -> tuple[APIKeyWithoutHash, str]: + """ + Generate a new API key and store it in the database. + Returns the API key object (without hash) and the plain text key. + """ + try: + api_manager = APIKeyManager() + key = api_manager.generate_api_key() + + api_key = await PrismaAPIKey.prisma().create( + data=APIKeyCreateInput( + id=str(uuid.uuid4()), + name=name, + prefix=key.prefix, + postfix=key.postfix, + key=key.hash, + permissions=[p for p in permissions], + description=description, + userId=user_id, + ) + ) + + api_key_without_hash = APIKeyWithoutHash.from_db(api_key) + return api_key_without_hash, key.raw + except PrismaError as e: + logger.error(f"Database error while generating API key: {str(e)}") + raise APIKeyError(f"Failed to generate API key: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while generating API key: {str(e)}") + raise APIKeyError(f"Failed to generate API key: {str(e)}") + + +async def validate_api_key(plain_text_key: str) -> Optional[APIKey]: + """ + Validate an API key and return the API key object if valid. 
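+    Returns None if the key has an invalid format, no matching active key exists, or hash verification fails.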
+ """ + try: + if not plain_text_key.startswith(APIKeyManager.PREFIX): + logger.warning("Invalid API key format") + return None + + prefix = plain_text_key[: APIKeyManager.PREFIX_LENGTH] + api_manager = APIKeyManager() + + api_key = await PrismaAPIKey.prisma().find_first( + where=APIKeyWhereInput(prefix=prefix, status=(APIKeyStatus.ACTIVE)) + ) + + if not api_key: + logger.warning(f"No active API key found with prefix {prefix}") + return None + + is_valid = api_manager.verify_api_key(plain_text_key, api_key.key) + if not is_valid: + logger.warning("API key verification failed") + return None + + return APIKey.from_db(api_key) + except Exception as e: + logger.error(f"Error validating API key: {str(e)}") + raise APIKeyValidationError(f"Failed to validate API key: {str(e)}") + + +async def revoke_api_key(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]: + try: + api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id}) + + if not api_key: + raise APIKeyNotFoundError(f"API key with id {key_id} not found") + + if api_key.userId != user_id: + raise APIKeyPermissionError( + "You do not have permission to revoke this API key." + ) + + where_clause: APIKeyWhereUniqueInput = {"id": key_id} + updated_api_key = await PrismaAPIKey.prisma().update( + where=where_clause, + data=APIKeyUpdateInput( + status=APIKeyStatus.REVOKED, revokedAt=datetime.now(timezone.utc) + ), + ) + + if updated_api_key: + return APIKeyWithoutHash.from_db(updated_api_key) + return None + except (APIKeyNotFoundError, APIKeyPermissionError) as e: + raise e + except PrismaError as e: + logger.error(f"Database error while revoking API key: {str(e)}") + raise APIKeyError(f"Failed to revoke API key: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while revoking API key: {str(e)}") + raise APIKeyError(f"Failed to revoke API key: {str(e)}") + + +async def list_user_api_keys(user_id: str) -> List[APIKeyWithoutHash]: + try: + where_clause: APIKeyWhereInput = {"userId": user_id} + + api_keys = await PrismaAPIKey.prisma().find_many( + where=where_clause, order={"createdAt": "desc"} + ) + + return [APIKeyWithoutHash.from_db(key) for key in api_keys] + except PrismaError as e: + logger.error(f"Database error while listing API keys: {str(e)}") + raise APIKeyError(f"Failed to list API keys: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while listing API keys: {str(e)}") + raise APIKeyError(f"Failed to list API keys: {str(e)}") + + +async def suspend_api_key(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]: + try: + api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id}) + + if not api_key: + raise APIKeyNotFoundError(f"API key with id {key_id} not found") + + if api_key.userId != user_id: + raise APIKeyPermissionError( + "You do not have permission to suspend this API key." 
+ ) + + where_clause: APIKeyWhereUniqueInput = {"id": key_id} + updated_api_key = await PrismaAPIKey.prisma().update( + where=where_clause, + data=APIKeyUpdateInput(status=APIKeyStatus.SUSPENDED), + ) + + if updated_api_key: + return APIKeyWithoutHash.from_db(updated_api_key) + return None + except (APIKeyNotFoundError, APIKeyPermissionError) as e: + raise e + except PrismaError as e: + logger.error(f"Database error while suspending API key: {str(e)}") + raise APIKeyError(f"Failed to suspend API key: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while suspending API key: {str(e)}") + raise APIKeyError(f"Failed to suspend API key: {str(e)}") + + +def has_permission(api_key: APIKey, required_permission: APIKeyPermission) -> bool: + try: + return required_permission in api_key.permissions + except Exception as e: + logger.error(f"Error checking API key permissions: {str(e)}") + return False + + +async def get_api_key_by_id(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]: + try: + api_key = await PrismaAPIKey.prisma().find_first( + where=APIKeyWhereInput(id=key_id, userId=user_id) + ) + + if not api_key: + return None + + return APIKeyWithoutHash.from_db(api_key) + except PrismaError as e: + logger.error(f"Database error while getting API key: {str(e)}") + raise APIKeyError(f"Failed to get API key: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while getting API key: {str(e)}") + raise APIKeyError(f"Failed to get API key: {str(e)}") + + +async def update_api_key_permissions( + key_id: str, user_id: str, permissions: List[APIKeyPermission] +) -> Optional[APIKeyWithoutHash]: + """ + Update the permissions of an API key. + """ + try: + api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id}) + + if api_key is None: + raise APIKeyNotFoundError("No such API key found.") + + if api_key.userId != user_id: + raise APIKeyPermissionError( + "You do not have permission to update this API key." + ) + + where_clause: APIKeyWhereUniqueInput = {"id": key_id} + updated_api_key = await PrismaAPIKey.prisma().update( + where=where_clause, + data=APIKeyUpdateInput(permissions=permissions), + ) + + if updated_api_key: + return APIKeyWithoutHash.from_db(updated_api_key) + return None + except (APIKeyNotFoundError, APIKeyPermissionError) as e: + raise e + except PrismaError as e: + logger.error(f"Database error while updating API key permissions: {str(e)}") + raise APIKeyError(f"Failed to update API key permissions: {str(e)}") + except Exception as e: + logger.error(f"Unexpected error while updating API key permissions: {str(e)}") + raise APIKeyError(f"Failed to update API key permissions: {str(e)}") diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py new file mode 100644 index 000000000000..5592af8dc7ae --- /dev/null +++ b/autogpt_platform/backend/backend/data/block.py @@ -0,0 +1,466 @@ +import inspect +from abc import ABC, abstractmethod +from enum import Enum +from typing import ( + Any, + ClassVar, + Generator, + Generic, + Optional, + Type, + TypeVar, + cast, + get_origin, +) + +import jsonref +import jsonschema +from prisma.models import AgentBlock +from pydantic import BaseModel + +from backend.util import json +from backend.util.settings import Config + +from .model import ( + ContributorDetails, + Credentials, + CredentialsMetaInput, + is_credentials_field_name, +) + +app_config = Config() + +BlockData = tuple[str, Any] # Input & Output data should be a tuple of (name, data). 
+BlockInput = dict[str, Any] # Input: 1 input pin consumes 1 data. +BlockOutput = Generator[BlockData, None, None] # Output: 1 output pin produces n data. +CompletedBlockOutput = dict[str, list[Any]] # Completed stream, collected as a dict. + + +class BlockType(Enum): + STANDARD = "Standard" + INPUT = "Input" + OUTPUT = "Output" + NOTE = "Note" + WEBHOOK = "Webhook" + WEBHOOK_MANUAL = "Webhook (manual)" + AGENT = "Agent" + + +class BlockCategory(Enum): + AI = "Block that leverages AI to perform a task." + SOCIAL = "Block that interacts with social media platforms." + TEXT = "Block that processes text data." + SEARCH = "Block that searches or extracts information from the internet." + BASIC = "Block that performs basic operations." + INPUT = "Block that interacts with input of the graph." + OUTPUT = "Block that interacts with output of the graph." + LOGIC = "Programming logic to control the flow of your agent" + COMMUNICATION = "Block that interacts with communication platforms." + DEVELOPER_TOOLS = "Developer tools such as GitHub blocks." + DATA = "Block that interacts with structured data." + HARDWARE = "Block that interacts with hardware." + AGENT = "Block that interacts with other agents." + CRM = "Block that interacts with CRM services." + SAFETY = ( + "Block that provides AI safety mechanisms such as detecting harmful content" + ) + PRODUCTIVITY = "Block that helps with productivity" + ISSUE_TRACKING = "Block that helps with issue tracking" + MULTIMEDIA = "Block that interacts with multimedia content" + + def dict(self) -> dict[str, str]: + return {"category": self.name, "description": self.value} + + +class BlockSchema(BaseModel): + cached_jsonschema: ClassVar[dict[str, Any]] + + @classmethod + def jsonschema(cls) -> dict[str, Any]: + if cls.cached_jsonschema: + return cls.cached_jsonschema + + model = jsonref.replace_refs(cls.model_json_schema(), merge_props=True) + + def ref_to_dict(obj): + if isinstance(obj, dict): + # OpenAPI <3.1 does not support sibling fields that has a $ref key + # So sometimes, the schema has an "allOf"/"anyOf"/"oneOf" with 1 item. + keys = {"allOf", "anyOf", "oneOf"} + one_key = next((k for k in keys if k in obj and len(obj[k]) == 1), None) + if one_key: + obj.update(obj[one_key][0]) + + return { + key: ref_to_dict(value) + for key, value in obj.items() + if not key.startswith("$") and key != one_key + } + elif isinstance(obj, list): + return [ref_to_dict(item) for item in obj] + + return obj + + cls.cached_jsonschema = cast(dict[str, Any], ref_to_dict(model)) + + return cls.cached_jsonschema + + @classmethod + def validate_data(cls, data: BlockInput) -> str | None: + return json.validate_with_jsonschema(schema=cls.jsonschema(), data=data) + + @classmethod + def validate_field(cls, field_name: str, data: BlockInput) -> str | None: + """ + Validate the data against a specific property (one of the input/output name). + Returns the validation error message if the data does not match the schema. 
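+        Returns None if the data is valid for the given property.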
+ """ + model_schema = cls.jsonschema().get("properties", {}) + if not model_schema: + return f"Invalid model schema {cls}" + + property_schema = model_schema.get(field_name) + if not property_schema: + return f"Invalid property name {field_name}" + + try: + jsonschema.validate(json.to_dict(data), property_schema) + return None + except jsonschema.ValidationError as e: + return str(e) + + @classmethod + def get_fields(cls) -> set[str]: + return set(cls.model_fields.keys()) + + @classmethod + def get_required_fields(cls) -> set[str]: + return { + field + for field, field_info in cls.model_fields.items() + if field_info.is_required() + } + + @classmethod + def __pydantic_init_subclass__(cls, **kwargs): + """Validates the schema definition. Rules: + - Fields with annotation `CredentialsMetaInput` MUST be + named `credentials` or `*_credentials` + - Fields named `credentials` or `*_credentials` MUST be + of type `CredentialsMetaInput` + """ + super().__pydantic_init_subclass__(**kwargs) + + # Reset cached JSON schema to prevent inheriting it from parent class + cls.cached_jsonschema = {} + + credentials_fields = cls.get_credentials_fields() + + for field_name in cls.get_fields(): + if is_credentials_field_name(field_name): + if field_name not in credentials_fields: + raise TypeError( + f"Credentials field '{field_name}' on {cls.__qualname__} " + f"is not of type {CredentialsMetaInput.__name__}" + ) + + credentials_fields[field_name].validate_credentials_field_schema(cls) + + elif field_name in credentials_fields: + raise KeyError( + f"Credentials field '{field_name}' on {cls.__qualname__} " + "has invalid name: must be 'credentials' or *_credentials" + ) + + @classmethod + def get_credentials_fields(cls) -> dict[str, type[CredentialsMetaInput]]: + return { + field_name: info.annotation + for field_name, info in cls.model_fields.items() + if ( + inspect.isclass(info.annotation) + and issubclass( + get_origin(info.annotation) or info.annotation, + CredentialsMetaInput, + ) + ) + } + + +BlockSchemaInputType = TypeVar("BlockSchemaInputType", bound=BlockSchema) +BlockSchemaOutputType = TypeVar("BlockSchemaOutputType", bound=BlockSchema) + + +class EmptySchema(BlockSchema): + pass + + +# --8<-- [start:BlockWebhookConfig] +class BlockManualWebhookConfig(BaseModel): + """ + Configuration model for webhook-triggered blocks on which + the user has to manually set up the webhook at the provider. + """ + + provider: str + """The service provider that the webhook connects to""" + + webhook_type: str + """ + Identifier for the webhook type. E.g. GitHub has repo and organization level hooks. + + Only for use in the corresponding `WebhooksManager`. + """ + + event_filter_input: str = "" + """ + Name of the block's event filter input. + Leave empty if the corresponding webhook doesn't have distinct event/payload types. + """ + + event_format: str = "{event}" + """ + Template string for the event(s) that a block instance subscribes to. + Applied individually to each event selected in the event filter input. + + Example: `"pull_request.{event}"` -> `"pull_request.opened"` + """ + + +class BlockWebhookConfig(BlockManualWebhookConfig): + """ + Configuration model for webhook-triggered blocks for which + the webhook can be automatically set up through the provider's API. + """ + + resource_format: str + """ + Template string for the resource that a block instance subscribes to. + Fields will be filled from the block's inputs (except `payload`). 
+ + Example: `f"{repo}/pull_requests"` (note: not how it's actually implemented) + + Only for use in the corresponding `WebhooksManager`. + """ + # --8<-- [end:BlockWebhookConfig] + + +class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]): + def __init__( + self, + id: str = "", + description: str = "", + contributors: list[ContributorDetails] = [], + categories: set[BlockCategory] | None = None, + input_schema: Type[BlockSchemaInputType] = EmptySchema, + output_schema: Type[BlockSchemaOutputType] = EmptySchema, + test_input: BlockInput | list[BlockInput] | None = None, + test_output: BlockData | list[BlockData] | None = None, + test_mock: dict[str, Any] | None = None, + test_credentials: Optional[Credentials | dict[str, Credentials]] = None, + disabled: bool = False, + static_output: bool = False, + block_type: BlockType = BlockType.STANDARD, + webhook_config: Optional[BlockWebhookConfig | BlockManualWebhookConfig] = None, + ): + """ + Initialize the block with the given schema. + + Args: + id: The unique identifier for the block, this value will be persisted in the + DB. So it should be a unique and constant across the application run. + Use the UUID format for the ID. + description: The description of the block, explaining what the block does. + contributors: The list of contributors who contributed to the block. + input_schema: The schema, defined as a Pydantic model, for the input data. + output_schema: The schema, defined as a Pydantic model, for the output data. + test_input: The list or single sample input data for the block, for testing. + test_output: The list or single expected output if the test_input is run. + test_mock: function names on the block implementation to mock on test run. + disabled: If the block is disabled, it will not be available for execution. + static_output: Whether the output links of the block are static by default. 
+ """ + self.id = id + self.input_schema = input_schema + self.output_schema = output_schema + self.test_input = test_input + self.test_output = test_output + self.test_mock = test_mock + self.test_credentials = test_credentials + self.description = description + self.categories = categories or set() + self.contributors = contributors or set() + self.disabled = disabled + self.static_output = static_output + self.block_type = block_type + self.webhook_config = webhook_config + self.execution_stats = {} + + if self.webhook_config: + if isinstance(self.webhook_config, BlockWebhookConfig): + # Enforce presence of credentials field on auto-setup webhook blocks + if not (cred_fields := self.input_schema.get_credentials_fields()): + raise TypeError( + "credentials field is required on auto-setup webhook blocks" + ) + # Disallow multiple credentials inputs on webhook blocks + elif len(cred_fields) > 1: + raise ValueError( + "Multiple credentials inputs not supported on webhook blocks" + ) + + self.block_type = BlockType.WEBHOOK + else: + self.block_type = BlockType.WEBHOOK_MANUAL + + # Enforce shape of webhook event filter, if present + if self.webhook_config.event_filter_input: + event_filter_field = self.input_schema.model_fields[ + self.webhook_config.event_filter_input + ] + if not ( + isinstance(event_filter_field.annotation, type) + and issubclass(event_filter_field.annotation, BaseModel) + and all( + field.annotation is bool + for field in event_filter_field.annotation.model_fields.values() + ) + ): + raise NotImplementedError( + f"{self.name} has an invalid webhook event selector: " + "field must be a BaseModel and all its fields must be boolean" + ) + + # Enforce presence of 'payload' input + if "payload" not in self.input_schema.model_fields: + raise TypeError( + f"{self.name} is webhook-triggered but has no 'payload' input" + ) + + # Disable webhook-triggered block if webhook functionality not available + if not app_config.platform_base_url: + self.disabled = True + + @classmethod + def create(cls: Type["Block"]) -> "Block": + return cls() + + @abstractmethod + def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput: + """ + Run the block with the given input data. + Args: + input_data: The input data with the structure of input_schema. + Returns: + A Generator that yields (output_name, output_data). + output_name: One of the output name defined in Block's output_schema. + output_data: The data for the output_name, matching the defined schema. 
+ """ + pass + + def run_once(self, input_data: BlockSchemaInputType, output: str, **kwargs) -> Any: + for name, data in self.run(input_data, **kwargs): + if name == output: + return data + raise ValueError(f"{self.name} did not produce any output for {output}") + + def merge_stats(self, stats: dict[str, Any]) -> dict[str, Any]: + for key, value in stats.items(): + if isinstance(value, dict): + self.execution_stats.setdefault(key, {}).update(value) + elif isinstance(value, (int, float)): + self.execution_stats.setdefault(key, 0) + self.execution_stats[key] += value + elif isinstance(value, list): + self.execution_stats.setdefault(key, []) + self.execution_stats[key].extend(value) + else: + self.execution_stats[key] = value + return self.execution_stats + + @property + def name(self): + return self.__class__.__name__ + + def to_dict(self): + return { + "id": self.id, + "name": self.name, + "inputSchema": self.input_schema.jsonschema(), + "outputSchema": self.output_schema.jsonschema(), + "description": self.description, + "categories": [category.dict() for category in self.categories], + "contributors": [ + contributor.model_dump() for contributor in self.contributors + ], + "staticOutput": self.static_output, + "uiType": self.block_type.value, + } + + def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput: + # Merge the input data with the extra execution arguments, preferring the args for security + if error := self.input_schema.validate_data(input_data): + raise ValueError( + f"Unable to execute block with invalid input data: {error}" + ) + + for output_name, output_data in self.run( + self.input_schema(**input_data), **kwargs + ): + if output_name == "error": + raise RuntimeError(output_data) + if self.block_type == BlockType.STANDARD and ( + error := self.output_schema.validate_field(output_name, output_data) + ): + raise ValueError(f"Block produced an invalid output data: {error}") + yield output_name, output_data + + +# ======================= Block Helper Functions ======================= # + + +def get_blocks() -> dict[str, Type[Block]]: + from backend.blocks import AVAILABLE_BLOCKS # noqa: E402 + + return AVAILABLE_BLOCKS + + +async def initialize_blocks() -> None: + for cls in get_blocks().values(): + block = cls() + existing_block = await AgentBlock.prisma().find_first( + where={"OR": [{"id": block.id}, {"name": block.name}]} + ) + if not existing_block: + await AgentBlock.prisma().create( + data={ + "id": block.id, + "name": block.name, + "inputSchema": json.dumps(block.input_schema.jsonschema()), + "outputSchema": json.dumps(block.output_schema.jsonschema()), + } + ) + continue + + input_schema = json.dumps(block.input_schema.jsonschema()) + output_schema = json.dumps(block.output_schema.jsonschema()) + if ( + block.id != existing_block.id + or block.name != existing_block.name + or input_schema != existing_block.inputSchema + or output_schema != existing_block.outputSchema + ): + await AgentBlock.prisma().update( + where={"id": existing_block.id}, + data={ + "id": block.id, + "name": block.name, + "inputSchema": input_schema, + "outputSchema": output_schema, + }, + ) + + +def get_block(block_id: str) -> Block | None: + cls = get_blocks().get(block_id) + return cls() if cls else None diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py new file mode 100644 index 000000000000..8209a2d95a9f --- /dev/null +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -0,0 +1,265 @@ 
+from typing import Type + +from backend.blocks.ai_music_generator import AIMusicGeneratorBlock +from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock +from backend.blocks.ideogram import IdeogramModelBlock +from backend.blocks.jina.embeddings import JinaEmbeddingBlock +from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock +from backend.blocks.llm import ( + MODEL_METADATA, + AIConversationBlock, + AIListGeneratorBlock, + AIStructuredResponseGeneratorBlock, + AITextGeneratorBlock, + AITextSummarizerBlock, + LlmModel, +) +from backend.blocks.replicate_flux_advanced import ReplicateFluxAdvancedModelBlock +from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock +from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock +from backend.data.block import Block +from backend.data.cost import BlockCost, BlockCostType +from backend.integrations.credentials_store import ( + anthropic_credentials, + did_credentials, + groq_credentials, + ideogram_credentials, + jina_credentials, + open_router_credentials, + openai_credentials, + replicate_credentials, + revid_credentials, + unreal_credentials, +) + +# =============== Configure the cost for each LLM Model call =============== # + +MODEL_COST: dict[LlmModel, int] = { + LlmModel.O1_PREVIEW: 16, + LlmModel.O1_MINI: 4, + LlmModel.GPT4O_MINI: 1, + LlmModel.GPT4O: 3, + LlmModel.GPT4_TURBO: 10, + LlmModel.GPT3_5_TURBO: 1, + LlmModel.CLAUDE_3_5_SONNET: 4, + LlmModel.CLAUDE_3_HAIKU: 1, + LlmModel.LLAMA3_8B: 1, + LlmModel.LLAMA3_70B: 1, + LlmModel.MIXTRAL_8X7B: 1, + LlmModel.GEMMA_7B: 1, + LlmModel.GEMMA2_9B: 1, + LlmModel.LLAMA3_1_405B: 1, + LlmModel.LLAMA3_1_70B: 1, + LlmModel.LLAMA3_1_8B: 1, + LlmModel.OLLAMA_LLAMA3_2: 1, + LlmModel.OLLAMA_LLAMA3_8B: 1, + LlmModel.OLLAMA_LLAMA3_405B: 1, + LlmModel.OLLAMA_DOLPHIN: 1, + LlmModel.GEMINI_FLASH_1_5_8B: 1, + LlmModel.GROK_BETA: 5, + LlmModel.MISTRAL_NEMO: 1, + LlmModel.COHERE_COMMAND_R_08_2024: 1, + LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3, + LlmModel.EVA_QWEN_2_5_32B: 1, + LlmModel.DEEPSEEK_CHAT: 2, + LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: 1, + LlmModel.QWEN_QWQ_32B_PREVIEW: 2, + LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: 1, + LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B: 1, + LlmModel.AMAZON_NOVA_LITE_V1: 1, + LlmModel.AMAZON_NOVA_MICRO_V1: 1, + LlmModel.AMAZON_NOVA_PRO_V1: 1, + LlmModel.MICROSOFT_WIZARDLM_2_8X22B: 1, + LlmModel.GRYPHE_MYTHOMAX_L2_13B: 1, +} + +for model in LlmModel: + if model not in MODEL_COST: + raise ValueError(f"Missing MODEL_COST for model: {model}") + + +LLM_COST = ( + # Anthropic Models + [ + BlockCost( + cost_type=BlockCostType.RUN, + cost_filter={ + "model": model, + "credentials": { + "id": anthropic_credentials.id, + "provider": anthropic_credentials.provider, + "type": anthropic_credentials.type, + }, + }, + cost_amount=cost, + ) + for model, cost in MODEL_COST.items() + if MODEL_METADATA[model].provider == "anthropic" + ] + # OpenAI Models + + [ + BlockCost( + cost_type=BlockCostType.RUN, + cost_filter={ + "model": model, + "credentials": { + "id": openai_credentials.id, + "provider": openai_credentials.provider, + "type": openai_credentials.type, + }, + }, + cost_amount=cost, + ) + for model, cost in MODEL_COST.items() + if MODEL_METADATA[model].provider == "openai" + ] + # Groq Models + + [ + BlockCost( + cost_type=BlockCostType.RUN, + cost_filter={ + "model": model, + "credentials": {"id": groq_credentials.id}, + }, + cost_amount=cost, + ) + for model, cost in 
MODEL_COST.items() + if MODEL_METADATA[model].provider == "groq" + ] + # Open Router Models + + [ + BlockCost( + cost_type=BlockCostType.RUN, + cost_filter={ + "model": model, + "credentials": { + "id": open_router_credentials.id, + "provider": open_router_credentials.provider, + "type": open_router_credentials.type, + }, + }, + cost_amount=cost, + ) + for model, cost in MODEL_COST.items() + if MODEL_METADATA[model].provider == "open_router" + ] +) + +# =============== This is the exhaustive list of cost for each Block =============== # + +BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = { + AIConversationBlock: LLM_COST, + AITextGeneratorBlock: LLM_COST, + AIStructuredResponseGeneratorBlock: LLM_COST, + AITextSummarizerBlock: LLM_COST, + AIListGeneratorBlock: LLM_COST, + CreateTalkingAvatarVideoBlock: [ + BlockCost( + cost_amount=15, + cost_filter={ + "credentials": { + "id": did_credentials.id, + "provider": did_credentials.provider, + "type": did_credentials.type, + } + }, + ) + ], + SearchTheWebBlock: [ + BlockCost( + cost_amount=1, + cost_filter={ + "credentials": { + "id": jina_credentials.id, + "provider": jina_credentials.provider, + "type": jina_credentials.type, + } + }, + ) + ], + ExtractWebsiteContentBlock: [ + BlockCost( + cost_amount=1, + cost_filter={ + "raw_content": False, + "credentials": { + "id": jina_credentials.id, + "provider": jina_credentials.provider, + "type": jina_credentials.type, + }, + }, + ) + ], + IdeogramModelBlock: [ + BlockCost( + cost_amount=16, + cost_filter={ + "credentials": { + "id": ideogram_credentials.id, + "provider": ideogram_credentials.provider, + "type": ideogram_credentials.type, + } + }, + ) + ], + AIShortformVideoCreatorBlock: [ + BlockCost( + cost_amount=50, + cost_filter={ + "credentials": { + "id": revid_credentials.id, + "provider": revid_credentials.provider, + "type": revid_credentials.type, + } + }, + ) + ], + ReplicateFluxAdvancedModelBlock: [ + BlockCost( + cost_amount=10, + cost_filter={ + "credentials": { + "id": replicate_credentials.id, + "provider": replicate_credentials.provider, + "type": replicate_credentials.type, + } + }, + ) + ], + AIMusicGeneratorBlock: [ + BlockCost( + cost_amount=11, + cost_filter={ + "credentials": { + "id": replicate_credentials.id, + "provider": replicate_credentials.provider, + "type": replicate_credentials.type, + } + }, + ) + ], + JinaEmbeddingBlock: [ + BlockCost( + cost_amount=12, + cost_filter={ + "credentials": { + "id": jina_credentials.id, + "provider": jina_credentials.provider, + "type": jina_credentials.type, + } + }, + ) + ], + UnrealTextToSpeechBlock: [ + BlockCost( + cost_amount=5, + cost_filter={ + "credentials": { + "id": unreal_credentials.id, + "provider": unreal_credentials.provider, + "type": unreal_credentials.type, + } + }, + ) + ], +} diff --git a/autogpt_platform/backend/backend/data/cost.py b/autogpt_platform/backend/backend/data/cost.py new file mode 100644 index 000000000000..3e3e9bae65aa --- /dev/null +++ b/autogpt_platform/backend/backend/data/cost.py @@ -0,0 +1,33 @@ +from enum import Enum +from typing import Any, Optional + +from pydantic import BaseModel + +from backend.data.block import BlockInput + + +class BlockCostType(str, Enum): + RUN = "run" # cost X credits per run + BYTE = "byte" # cost X credits per byte + SECOND = "second" # cost X credits per second + DOLLAR = "dollar" # cost X dollars per run + + +class BlockCost(BaseModel): + cost_amount: int + cost_filter: BlockInput + cost_type: BlockCostType + + def __init__( + self, + cost_amount: int, + 
cost_type: BlockCostType = BlockCostType.RUN, + cost_filter: Optional[BlockInput] = None, + **data: Any, + ) -> None: + super().__init__( + cost_amount=cost_amount, + cost_filter=cost_filter or {}, + cost_type=cost_type, + **data, + ) diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py new file mode 100644 index 000000000000..5feb5f929221 --- /dev/null +++ b/autogpt_platform/backend/backend/data/credit.py @@ -0,0 +1,537 @@ +import logging +from abc import ABC, abstractmethod +from datetime import datetime, timezone + +import stripe +from prisma import Json +from prisma.enums import CreditTransactionType +from prisma.errors import UniqueViolationError +from prisma.models import CreditTransaction, User +from prisma.types import CreditTransactionCreateInput, CreditTransactionWhereInput + +from backend.data import db +from backend.data.block import Block, BlockInput, get_block +from backend.data.block_cost_config import BLOCK_COSTS +from backend.data.cost import BlockCost, BlockCostType +from backend.data.execution import NodeExecutionEntry +from backend.data.model import AutoTopUpConfig +from backend.data.user import get_user_by_id +from backend.util.settings import Settings + +settings = Settings() +stripe.api_key = settings.secrets.stripe_api_key +logger = logging.getLogger(__name__) + + +class UserCreditBase(ABC): + @abstractmethod + async def get_credits(self, user_id: str) -> int: + """ + Get the current credits for the user. + + Returns: + int: The current credits for the user. + """ + pass + + @abstractmethod + async def spend_credits( + self, + entry: NodeExecutionEntry, + data_size: float, + run_time: float, + ) -> int: + """ + Spend the credits for the user based on the block usage. + + Args: + entry (NodeExecutionEntry): The node execution identifiers & data. + data_size (float): The size of the data being processed. + run_time (float): The time taken to run the block. + + Returns: + int: amount of credit spent + """ + pass + + @abstractmethod + async def top_up_credits(self, user_id: str, amount: int): + """ + Top up the credits for the user immediately. + + Args: + user_id (str): The user ID. + amount (int): The amount to top up. + """ + pass + + @abstractmethod + async def top_up_intent(self, user_id: str, amount: int) -> str: + """ + Create a payment intent to top up the credits for the user. + + Args: + user_id (str): The user ID. + amount (int): The amount of credits to top up. + + Returns: + str: The redirect url to the payment page. + """ + pass + + @abstractmethod + async def fulfill_checkout( + self, *, session_id: str | None = None, user_id: str | None = None + ): + """ + Fulfill the Stripe checkout session. + + Args: + session_id (str | None): The checkout session ID. Will try to fulfill most recent if None. + user_id (str | None): The user ID must be provided if session_id is None. + """ + pass + + @staticmethod + def time_now() -> datetime: + return datetime.now(timezone.utc) + + # ====== Transaction Helper Methods ====== # + # Any modifications to the transaction table should only be done through these methods # + + async def _get_credits(self, user_id: str) -> tuple[int, datetime]: + """ + Returns the current balance of the user & the latest balance snapshot time. 
+ """ + top_time = self.time_now() + snapshot = await CreditTransaction.prisma().find_first( + where={ + "userId": user_id, + "createdAt": {"lte": top_time}, + "isActive": True, + "runningBalance": {"not": None}, # type: ignore + }, + order={"createdAt": "desc"}, + ) + datetime_min = datetime.min.replace(tzinfo=timezone.utc) + snapshot_balance = snapshot.runningBalance or 0 if snapshot else 0 + snapshot_time = snapshot.createdAt if snapshot else datetime_min + + # Get transactions after the snapshot, this should not exist, but just in case. + transactions = await CreditTransaction.prisma().group_by( + by=["userId"], + sum={"amount": True}, + max={"createdAt": True}, + where={ + "userId": user_id, + "createdAt": { + "gt": snapshot_time, + "lte": top_time, + }, + "isActive": True, + }, + ) + transaction_balance = ( + int(transactions[0].get("_sum", {}).get("amount", 0) + snapshot_balance) + if transactions + else snapshot_balance + ) + transaction_time = ( + datetime.fromisoformat( + str(transactions[0].get("_max", {}).get("createdAt", datetime_min)) + ) + if transactions + else snapshot_time + ) + return transaction_balance, transaction_time + + async def _enable_transaction( + self, transaction_key: str, user_id: str, metadata: Json + ): + + transaction = await CreditTransaction.prisma().find_first_or_raise( + where={"transactionKey": transaction_key, "userId": user_id} + ) + + if transaction.isActive: + return + + async with db.locked_transaction(f"usr_trx_{user_id}"): + user_balance, _ = await self._get_credits(user_id) + await CreditTransaction.prisma().update( + where={ + "creditTransactionIdentifier": { + "transactionKey": transaction_key, + "userId": user_id, + } + }, + data={ + "isActive": True, + "runningBalance": user_balance + transaction.amount, + "createdAt": self.time_now(), + "metadata": metadata, + }, + ) + + async def _add_transaction( + self, + user_id: str, + amount: int, + transaction_type: CreditTransactionType, + is_active: bool = True, + transaction_key: str | None = None, + metadata: Json = Json({}), + ) -> int: + async with db.locked_transaction(f"usr_trx_{user_id}"): + # Get latest balance snapshot + user_balance, _ = await self._get_credits(user_id) + + if amount < 0 and user_balance < abs(amount): + raise ValueError( + f"Insufficient balance for user {user_id}, balance: {user_balance}, amount: {amount}" + ) + + # Create the transaction + transaction_data: CreditTransactionCreateInput = { + "userId": user_id, + "amount": amount, + "runningBalance": user_balance + amount, + "type": transaction_type, + "metadata": metadata, + "isActive": is_active, + "createdAt": self.time_now(), + } + if transaction_key: + transaction_data["transactionKey"] = transaction_key + await CreditTransaction.prisma().create(data=transaction_data) + + return user_balance + amount + + +class UserCredit(UserCreditBase): + + def _block_usage_cost( + self, + block: Block, + input_data: BlockInput, + data_size: float, + run_time: float, + ) -> tuple[int, BlockInput]: + block_costs = BLOCK_COSTS.get(type(block)) + if not block_costs: + return 0, {} + + for block_cost in block_costs: + if not self._is_cost_filter_match(block_cost.cost_filter, input_data): + continue + + if block_cost.cost_type == BlockCostType.RUN: + return block_cost.cost_amount, block_cost.cost_filter + + if block_cost.cost_type == BlockCostType.SECOND: + return ( + int(run_time * block_cost.cost_amount), + block_cost.cost_filter, + ) + + if block_cost.cost_type == BlockCostType.BYTE: + return ( + int(data_size * 
block_cost.cost_amount), + block_cost.cost_filter, + ) + + return 0, {} + + def _is_cost_filter_match( + self, cost_filter: BlockInput, input_data: BlockInput + ) -> bool: + """ + Filter rules: + - If cost_filter is an object, then check if cost_filter is the subset of input_data + - Otherwise, check if cost_filter is equal to input_data. + - Undefined, null, and empty string are considered as equal. + """ + if not isinstance(cost_filter, dict) or not isinstance(input_data, dict): + return cost_filter == input_data + + return all( + (not input_data.get(k) and not v) + or (input_data.get(k) and self._is_cost_filter_match(v, input_data[k])) + for k, v in cost_filter.items() + ) + + async def spend_credits( + self, + entry: NodeExecutionEntry, + data_size: float, + run_time: float, + ) -> int: + block = get_block(entry.block_id) + if not block: + raise ValueError(f"Block not found: {entry.block_id}") + + cost, matching_filter = self._block_usage_cost( + block=block, input_data=entry.data, data_size=data_size, run_time=run_time + ) + if cost == 0: + return 0 + + balance = await self._add_transaction( + user_id=entry.user_id, + amount=-cost, + transaction_type=CreditTransactionType.USAGE, + metadata=Json( + { + "graph_exec_id": entry.graph_exec_id, + "graph_id": entry.graph_id, + "node_id": entry.node_id, + "node_exec_id": entry.node_exec_id, + "block_id": entry.block_id, + "block": block.name, + "input": matching_filter, + } + ), + ) + user_id = entry.user_id + + # Auto top-up if balance just went below threshold due to this transaction. + auto_top_up = await get_auto_top_up(user_id) + if balance < auto_top_up.threshold <= balance - cost: + try: + await self.top_up_credits(user_id=user_id, amount=auto_top_up.amount) + except Exception as e: + # Failed top-up is not critical, we can move on. 
+ logger.error( + f"Auto top-up failed for user {user_id}, balance: {balance}, amount: {auto_top_up.amount}, error: {e}" + ) + + return cost + + async def top_up_credits(self, user_id: str, amount: int): + if amount < 0: + raise ValueError(f"Top up amount must not be negative: {amount}") + + customer_id = await get_stripe_customer_id(user_id) + + payment_methods = stripe.PaymentMethod.list(customer=customer_id, type="card") + if not payment_methods: + raise ValueError("No payment method found, please add it on the platform.") + + for payment_method in payment_methods: + if amount == 0: + setup_intent = stripe.SetupIntent.create( + customer=customer_id, + usage="off_session", + confirm=True, + payment_method=payment_method.id, + automatic_payment_methods={ + "enabled": True, + "allow_redirects": "never", + }, + ) + if setup_intent.status == "succeeded": + return + + else: + payment_intent = stripe.PaymentIntent.create( + amount=amount, + currency="usd", + description="AutoGPT Platform Credits", + customer=customer_id, + off_session=True, + confirm=True, + payment_method=payment_method.id, + automatic_payment_methods={ + "enabled": True, + "allow_redirects": "never", + }, + ) + if payment_intent.status == "succeeded": + await self._add_transaction( + user_id=user_id, + amount=amount, + transaction_type=CreditTransactionType.TOP_UP, + transaction_key=payment_intent.id, + metadata=Json({"payment_intent": payment_intent}), + is_active=True, + ) + return + + raise ValueError( + f"Out of {len(payment_methods)} payment methods tried, none is supported" + ) + + async def top_up_intent(self, user_id: str, amount: int) -> str: + # Create checkout session + # https://docs.stripe.com/checkout/quickstart?client=react + # unit_amount param is always in the smallest currency unit (so cents for usd) + # which is equal to amount of credits + checkout_session = stripe.checkout.Session.create( + customer=await get_stripe_customer_id(user_id), + line_items=[ + { + "price_data": { + "currency": "usd", + "product_data": { + "name": "AutoGPT Platform Credits", + }, + "unit_amount": amount, + }, + "quantity": 1, + } + ], + mode="payment", + payment_intent_data={"setup_future_usage": "off_session"}, + saved_payment_method_options={"payment_method_save": "enabled"}, + success_url=settings.config.platform_base_url + + "/marketplace/credits?topup=success", + cancel_url=settings.config.platform_base_url + + "/marketplace/credits?topup=cancel", + ) + + await self._add_transaction( + user_id=user_id, + amount=amount, + transaction_type=CreditTransactionType.TOP_UP, + transaction_key=checkout_session.id, + is_active=False, + metadata=Json({"checkout_session": checkout_session}), + ) + + return checkout_session.url or "" + + # https://docs.stripe.com/checkout/fulfillment + async def fulfill_checkout( + self, *, session_id: str | None = None, user_id: str | None = None + ): + if (not session_id and not user_id) or (session_id and user_id): + raise ValueError("Either session_id or user_id must be provided") + + # Retrieve CreditTransaction + find_filter: CreditTransactionWhereInput = { + "type": CreditTransactionType.TOP_UP, + "isActive": False, + } + if session_id: + find_filter["transactionKey"] = session_id + if user_id: + find_filter["userId"] = user_id + + # Find the most recent inactive top-up transaction + credit_transaction = await CreditTransaction.prisma().find_first( + where=find_filter, + order={"createdAt": "desc"}, + ) + + # This can be called multiple times for one id, so ignore if already fulfilled + if not 
credit_transaction: + return + + # Retrieve the Checkout Session from the API + checkout_session = stripe.checkout.Session.retrieve( + credit_transaction.transactionKey + ) + + # Check the Checkout Session's payment_status property + # to determine if fulfillment should be performed + if checkout_session.payment_status in ["paid", "no_payment_required"]: + await self._enable_transaction( + transaction_key=credit_transaction.transactionKey, + user_id=credit_transaction.userId, + metadata=Json({"checkout_session": checkout_session}), + ) + + async def get_credits(self, user_id: str) -> int: + balance, _ = await self._get_credits(user_id) + return balance + + +class BetaUserCredit(UserCredit): + """ + This is a temporary class to handle the test user utilizing monthly credit refill. + TODO: Remove this class & its feature toggle. + """ + + def __init__(self, num_user_credits_refill: int): + self.num_user_credits_refill = num_user_credits_refill + + async def get_credits(self, user_id: str) -> int: + cur_time = self.time_now().date() + balance, snapshot_time = await self._get_credits(user_id) + if (snapshot_time.year, snapshot_time.month) == (cur_time.year, cur_time.month): + return balance + + try: + return await self._add_transaction( + user_id=user_id, + amount=max(self.num_user_credits_refill - balance, 0), + transaction_type=CreditTransactionType.TOP_UP, + transaction_key=f"MONTHLY-CREDIT-TOP-UP-{cur_time}", + ) + except UniqueViolationError: + # Already refilled this month + return (await self._get_credits(user_id))[0] + + +class DisabledUserCredit(UserCreditBase): + async def get_credits(self, *args, **kwargs) -> int: + return 0 + + async def spend_credits(self, *args, **kwargs) -> int: + return 0 + + async def top_up_credits(self, *args, **kwargs): + pass + + async def top_up_intent(self, *args, **kwargs) -> str: + return "" + + async def fulfill_checkout(self, *args, **kwargs): + pass + + +def get_user_credit_model() -> UserCreditBase: + if not settings.config.enable_credit: + return DisabledUserCredit() + + if settings.config.enable_beta_monthly_credit: + return BetaUserCredit(settings.config.num_user_credits_refill) + + return UserCredit() + + +def get_block_costs() -> dict[str, list[BlockCost]]: + return {block().id: costs for block, costs in BLOCK_COSTS.items()} + + +async def get_stripe_customer_id(user_id: str) -> str: + user = await get_user_by_id(user_id) + if not user: + raise ValueError(f"User not found: {user_id}") + + if user.stripeCustomerId: + return user.stripeCustomerId + + customer = stripe.Customer.create(name=user.name or "", email=user.email) + await User.prisma().update( + where={"id": user_id}, data={"stripeCustomerId": customer.id} + ) + return customer.id + + +async def set_auto_top_up(user_id: str, config: AutoTopUpConfig): + await User.prisma().update( + where={"id": user_id}, + data={"topUpConfig": Json(config.model_dump())}, + ) + + +async def get_auto_top_up(user_id: str) -> AutoTopUpConfig: + user = await get_user_by_id(user_id) + if not user: + raise ValueError("Invalid user ID") + + if not user.topUpConfig: + return AutoTopUpConfig(threshold=0, amount=0) + + return AutoTopUpConfig.model_validate(user.topUpConfig) diff --git a/autogpt_platform/backend/backend/data/db.py b/autogpt_platform/backend/backend/data/db.py new file mode 100644 index 000000000000..66d4f167ada9 --- /dev/null +++ b/autogpt_platform/backend/backend/data/db.py @@ -0,0 +1,72 @@ +import logging +import os +import zlib +from contextlib import asynccontextmanager +from uuid import uuid4 
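Stepping back to the auto top-up helpers that close out credit.py above, a sketch of how they might be exercised; the user ID, function name, and amounts here are invented, while `AutoTopUpConfig` is the model imported at the top of credit.py:

```python
from backend.data.credit import get_auto_top_up, set_auto_top_up
from backend.data.model import AutoTopUpConfig


async def enable_auto_top_up(user_id: str) -> None:
    # Persist a config on the user (stored as JSON in User.topUpConfig):
    # top up by 500 credits whenever a charge drops the balance below 100.
    await set_auto_top_up(user_id, AutoTopUpConfig(threshold=100, amount=500))

    config = await get_auto_top_up(user_id)
    assert (config.threshold, config.amount) == (100, 500)
```

`spend_credits` consults this config after each charge and attempts `top_up_credits` when the balance crosses below the configured threshold.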
+ +from dotenv import load_dotenv +from prisma import Prisma +from pydantic import BaseModel, Field, field_validator + +from backend.util.retry import conn_retry + +load_dotenv() + +PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma") +os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA + +prisma = Prisma(auto_register=True) + +logger = logging.getLogger(__name__) + + +@conn_retry("Prisma", "Acquiring connection") +async def connect(): + if prisma.is_connected(): + return + + await prisma.connect() + + if not prisma.is_connected(): + raise ConnectionError("Failed to connect to Prisma.") + + # Connection acquired from a pool like Supabase somehow still possibly allows + # the db client obtains a connection but still reject query connection afterward. + try: + await prisma.execute_raw("SELECT 1") + except Exception as e: + raise ConnectionError("Failed to connect to Prisma.") from e + + +@conn_retry("Prisma", "Releasing connection") +async def disconnect(): + if not prisma.is_connected(): + return + + await prisma.disconnect() + + if prisma.is_connected(): + raise ConnectionError("Failed to disconnect from Prisma.") + + +@asynccontextmanager +async def transaction(): + async with prisma.tx() as tx: + yield tx + + +@asynccontextmanager +async def locked_transaction(key: str): + lock_key = zlib.crc32(key.encode("utf-8")) + async with transaction() as tx: + await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})") + yield tx + + +class BaseDbModel(BaseModel): + id: str = Field(default_factory=lambda: str(uuid4())) + + @field_validator("id", mode="before") + def set_model_id(cls, id: str) -> str: + # In case an empty ID is submitted + return id or str(uuid4()) diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py new file mode 100644 index 000000000000..714ea0f51c6c --- /dev/null +++ b/autogpt_platform/backend/backend/data/execution.py @@ -0,0 +1,510 @@ +from collections import defaultdict +from datetime import datetime, timezone +from multiprocessing import Manager +from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar + +from prisma.enums import AgentExecutionStatus +from prisma.errors import PrismaError +from prisma.models import ( + AgentGraphExecution, + AgentNodeExecution, + AgentNodeExecutionInputOutput, +) +from pydantic import BaseModel + +from backend.data.block import BlockData, BlockInput, CompletedBlockOutput +from backend.data.includes import EXECUTION_RESULT_INCLUDE, GRAPH_EXECUTION_INCLUDE +from backend.data.queue import AsyncRedisEventBus, RedisEventBus +from backend.util import json, mock +from backend.util.settings import Config + + +class GraphExecutionEntry(BaseModel): + user_id: str + graph_exec_id: str + graph_id: str + start_node_execs: list["NodeExecutionEntry"] + + +class NodeExecutionEntry(BaseModel): + user_id: str + graph_exec_id: str + graph_id: str + node_exec_id: str + node_id: str + block_id: str + data: BlockInput + + +ExecutionStatus = AgentExecutionStatus + +T = TypeVar("T") + + +class ExecutionQueue(Generic[T]): + """ + Queue for managing the execution of agents. 
+ This will be shared between different processes + """ + + def __init__(self): + self.queue = Manager().Queue() + + def add(self, execution: T) -> T: + self.queue.put(execution) + return execution + + def get(self) -> T: + return self.queue.get() + + def empty(self) -> bool: + return self.queue.empty() + + +class ExecutionResult(BaseModel): + graph_id: str + graph_version: int + graph_exec_id: str + node_exec_id: str + node_id: str + block_id: str + status: ExecutionStatus + input_data: BlockInput + output_data: CompletedBlockOutput + add_time: datetime + queue_time: datetime | None + start_time: datetime | None + end_time: datetime | None + + @staticmethod + def from_graph(graph: AgentGraphExecution): + return ExecutionResult( + graph_id=graph.agentGraphId, + graph_version=graph.agentGraphVersion, + graph_exec_id=graph.id, + node_exec_id="", + node_id="", + block_id="", + status=graph.executionStatus, + # TODO: Populate input_data & output_data from AgentNodeExecutions + # Input & Output comes AgentInputBlock & AgentOutputBlock. + input_data={}, + output_data={}, + add_time=graph.createdAt, + queue_time=graph.createdAt, + start_time=graph.startedAt, + end_time=graph.updatedAt, + ) + + @staticmethod + def from_db(execution: AgentNodeExecution): + if execution.executionData: + # Execution that has been queued for execution will persist its data. + input_data = json.loads(execution.executionData, target_type=dict[str, Any]) + else: + # For incomplete execution, executionData will not be yet available. + input_data: BlockInput = defaultdict() + for data in execution.Input or []: + input_data[data.name] = json.loads(data.data) + + output_data: CompletedBlockOutput = defaultdict(list) + for data in execution.Output or []: + output_data[data.name].append(json.loads(data.data)) + + graph_execution: AgentGraphExecution | None = execution.AgentGraphExecution + + return ExecutionResult( + graph_id=graph_execution.agentGraphId if graph_execution else "", + graph_version=graph_execution.agentGraphVersion if graph_execution else 0, + graph_exec_id=execution.agentGraphExecutionId, + block_id=execution.AgentNode.agentBlockId if execution.AgentNode else "", + node_exec_id=execution.id, + node_id=execution.agentNodeId, + status=execution.executionStatus, + input_data=input_data, + output_data=output_data, + add_time=execution.addedTime, + queue_time=execution.queuedTime, + start_time=execution.startedTime, + end_time=execution.endedTime, + ) + + +# --------------------- Model functions --------------------- # + + +async def create_graph_execution( + graph_id: str, + graph_version: int, + nodes_input: list[tuple[str, BlockInput]], + user_id: str, +) -> tuple[str, list[ExecutionResult]]: + """ + Create a new AgentGraphExecution record. + Returns: + The id of the AgentGraphExecution and the list of ExecutionResult for each node. 
+ """ + result = await AgentGraphExecution.prisma().create( + data={ + "agentGraphId": graph_id, + "agentGraphVersion": graph_version, + "executionStatus": ExecutionStatus.QUEUED, + "AgentNodeExecutions": { + "create": [ # type: ignore + { + "agentNodeId": node_id, + "executionStatus": ExecutionStatus.INCOMPLETE, + "Input": { + "create": [ + {"name": name, "data": json.dumps(data)} + for name, data in node_input.items() + ] + }, + } + for node_id, node_input in nodes_input + ] + }, + "userId": user_id, + }, + include=GRAPH_EXECUTION_INCLUDE, + ) + + return result.id, [ + ExecutionResult.from_db(execution) + for execution in result.AgentNodeExecutions or [] + ] + + +async def upsert_execution_input( + node_id: str, + graph_exec_id: str, + input_name: str, + input_data: Any, + node_exec_id: str | None = None, +) -> tuple[str, BlockInput]: + """ + Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Input. + If there is no AgentNodeExecution that has no `input_name` as input, create new one. + + Args: + node_id: The id of the AgentNode. + graph_exec_id: The id of the AgentGraphExecution. + input_name: The name of the input data. + input_data: The input data to be inserted. + node_exec_id: [Optional] The id of the AgentNodeExecution that has no `input_name` as input. If not provided, it will find the eligible incomplete AgentNodeExecution or create a new one. + + Returns: + * The id of the created or existing AgentNodeExecution. + * Dict of node input data, key is the input name, value is the input data. + """ + existing_execution = await AgentNodeExecution.prisma().find_first( + where={ # type: ignore + **({"id": node_exec_id} if node_exec_id else {}), + "agentNodeId": node_id, + "agentGraphExecutionId": graph_exec_id, + "executionStatus": ExecutionStatus.INCOMPLETE, + "Input": {"every": {"name": {"not": input_name}}}, + }, + order={"addedTime": "asc"}, + include={"Input": True}, + ) + json_input_data = json.dumps(input_data) + + if existing_execution: + await AgentNodeExecutionInputOutput.prisma().create( + data={ + "name": input_name, + "data": json_input_data, + "referencedByInputExecId": existing_execution.id, + } + ) + return existing_execution.id, { + **{ + input_data.name: json.loads(input_data.data) + for input_data in existing_execution.Input or [] + }, + input_name: input_data, + } + + elif not node_exec_id: + result = await AgentNodeExecution.prisma().create( + data={ + "agentNodeId": node_id, + "agentGraphExecutionId": graph_exec_id, + "executionStatus": ExecutionStatus.INCOMPLETE, + "Input": {"create": {"name": input_name, "data": json_input_data}}, + } + ) + return result.id, {input_name: input_data} + + else: + raise ValueError( + f"NodeExecution {node_exec_id} not found or already has input {input_name}." + ) + + +async def upsert_execution_output( + node_exec_id: str, + output_name: str, + output_data: Any, +) -> None: + """ + Insert AgentNodeExecutionInputOutput record for as one of AgentNodeExecution.Output. 
+ """ + await AgentNodeExecutionInputOutput.prisma().create( + data={ + "name": output_name, + "data": json.dumps(output_data), + "referencedByOutputExecId": node_exec_id, + } + ) + + +async def update_graph_execution_start_time(graph_exec_id: str): + await AgentGraphExecution.prisma().update( + where={"id": graph_exec_id}, + data={ + "executionStatus": ExecutionStatus.RUNNING, + "startedAt": datetime.now(tz=timezone.utc), + }, + ) + + +async def update_graph_execution_stats( + graph_exec_id: str, + status: ExecutionStatus, + stats: dict[str, Any], +) -> ExecutionResult: + res = await AgentGraphExecution.prisma().update( + where={"id": graph_exec_id}, + data={ + "executionStatus": status, + "stats": json.dumps(stats), + }, + ) + if not res: + raise ValueError(f"Execution {graph_exec_id} not found.") + + return ExecutionResult.from_graph(res) + + +async def update_node_execution_stats(node_exec_id: str, stats: dict[str, Any]): + await AgentNodeExecution.prisma().update( + where={"id": node_exec_id}, + data={"stats": json.dumps(stats)}, + ) + + +async def update_execution_status( + node_exec_id: str, + status: ExecutionStatus, + execution_data: BlockInput | None = None, + stats: dict[str, Any] | None = None, +) -> ExecutionResult: + if status == ExecutionStatus.QUEUED and execution_data is None: + raise ValueError("Execution data must be provided when queuing an execution.") + + now = datetime.now(tz=timezone.utc) + data = { + **({"executionStatus": status}), + **({"queuedTime": now} if status == ExecutionStatus.QUEUED else {}), + **({"startedTime": now} if status == ExecutionStatus.RUNNING else {}), + **({"endedTime": now} if status == ExecutionStatus.FAILED else {}), + **({"endedTime": now} if status == ExecutionStatus.COMPLETED else {}), + **({"executionData": json.dumps(execution_data)} if execution_data else {}), + **({"stats": json.dumps(stats)} if stats else {}), + } + + res = await AgentNodeExecution.prisma().update( + where={"id": node_exec_id}, + data=data, # type: ignore + include=EXECUTION_RESULT_INCLUDE, + ) + if not res: + raise ValueError(f"Execution {node_exec_id} not found.") + + return ExecutionResult.from_db(res) + + +async def get_execution( + execution_id: str, user_id: str +) -> Optional[AgentNodeExecution]: + """ + Get an execution by ID. Returns None if not found. + + Args: + execution_id: The ID of the execution to retrieve + + Returns: + The execution if found, None otherwise + """ + try: + execution = await AgentNodeExecution.prisma().find_unique( + where={ + "id": execution_id, + "userId": user_id, + } + ) + return execution + except PrismaError: + return None + + +async def get_execution_results(graph_exec_id: str) -> list[ExecutionResult]: + executions = await AgentNodeExecution.prisma().find_many( + where={"agentGraphExecutionId": graph_exec_id}, + include=EXECUTION_RESULT_INCLUDE, + order=[ + {"queuedTime": "asc"}, + {"addedTime": "asc"}, # Fallback: Incomplete execs has no queuedTime. + ], + ) + res = [ExecutionResult.from_db(execution) for execution in executions] + return res + + +LIST_SPLIT = "_$_" +DICT_SPLIT = "_#_" +OBJC_SPLIT = "_@_" + + +def parse_execution_output(output: BlockData, name: str) -> Any | None: + # Allow extracting partial output data by name. 
+ output_name, output_data = output + + if name == output_name: + return output_data + + if name.startswith(f"{output_name}{LIST_SPLIT}"): + index = int(name.split(LIST_SPLIT)[1]) + if not isinstance(output_data, list) or len(output_data) <= index: + return None + return output_data[int(name.split(LIST_SPLIT)[1])] + + if name.startswith(f"{output_name}{DICT_SPLIT}"): + index = name.split(DICT_SPLIT)[1] + if not isinstance(output_data, dict) or index not in output_data: + return None + return output_data[index] + + if name.startswith(f"{output_name}{OBJC_SPLIT}"): + index = name.split(OBJC_SPLIT)[1] + if isinstance(output_data, object) and hasattr(output_data, index): + return getattr(output_data, index) + return None + + return None + + +def merge_execution_input(data: BlockInput) -> BlockInput: + """ + Merge all dynamic input pins which described by the following pattern: + - _$_ for list input. + - _#_ for dict input. + - _@_ for object input. + This function will construct pins with the same name into a single list/dict/object. + """ + + # Merge all input with _$_ into a single list. + items = list(data.items()) + + for key, value in items: + if LIST_SPLIT not in key: + continue + name, index = key.split(LIST_SPLIT) + if not index.isdigit(): + raise ValueError(f"Invalid key: {key}, #{index} index must be an integer.") + + data[name] = data.get(name, []) + if int(index) >= len(data[name]): + # Pad list with empty string on missing indices. + data[name].extend([""] * (int(index) - len(data[name]) + 1)) + data[name][int(index)] = value + + # Merge all input with _#_ into a single dict. + for key, value in items: + if DICT_SPLIT not in key: + continue + name, index = key.split(DICT_SPLIT) + data[name] = data.get(name, {}) + data[name][index] = value + + # Merge all input with _@_ into a single object. 
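    # For example (illustrative): given {"profile_@_name": "Ada", "profile_@_age": 36},
    # this pass adds data["profile"] as a MockObject with .name == "Ada" and .age == 36,
    # just as the _$_ and _#_ passes above fold "items_$_0"/"items_$_1" into a list and
    # "cfg_#_key" entries into a dict under the shared base name.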
+ for key, value in items: + if OBJC_SPLIT not in key: + continue + name, index = key.split(OBJC_SPLIT) + if name not in data or not isinstance(data[name], object): + data[name] = mock.MockObject() + setattr(data[name], index, value) + + return data + + +async def get_latest_execution(node_id: str, graph_eid: str) -> ExecutionResult | None: + execution = await AgentNodeExecution.prisma().find_first( + where={ + "agentNodeId": node_id, + "agentGraphExecutionId": graph_eid, + "executionStatus": {"not": ExecutionStatus.INCOMPLETE}, + "executionData": {"not": None}, # type: ignore + }, + order={"queuedTime": "desc"}, + include=EXECUTION_RESULT_INCLUDE, + ) + if not execution: + return None + return ExecutionResult.from_db(execution) + + +async def get_incomplete_executions( + node_id: str, graph_eid: str +) -> list[ExecutionResult]: + executions = await AgentNodeExecution.prisma().find_many( + where={ + "agentNodeId": node_id, + "agentGraphExecutionId": graph_eid, + "executionStatus": ExecutionStatus.INCOMPLETE, + }, + include=EXECUTION_RESULT_INCLUDE, + ) + return [ExecutionResult.from_db(execution) for execution in executions] + + +# --------------------- Event Bus --------------------- # + +config = Config() + + +class RedisExecutionEventBus(RedisEventBus[ExecutionResult]): + Model = ExecutionResult + + @property + def event_bus_name(self) -> str: + return config.execution_event_bus_name + + def publish(self, res: ExecutionResult): + self.publish_event(res, f"{res.graph_id}/{res.graph_exec_id}") + + def listen( + self, graph_id: str = "*", graph_exec_id: str = "*" + ) -> Generator[ExecutionResult, None, None]: + for execution_result in self.listen_events(f"{graph_id}/{graph_exec_id}"): + yield execution_result + + +class AsyncRedisExecutionEventBus(AsyncRedisEventBus[ExecutionResult]): + Model = ExecutionResult + + @property + def event_bus_name(self) -> str: + return config.execution_event_bus_name + + async def publish(self, res: ExecutionResult): + await self.publish_event(res, f"{res.graph_id}/{res.graph_exec_id}") + + async def listen( + self, graph_id: str = "*", graph_exec_id: str = "*" + ) -> AsyncGenerator[ExecutionResult, None]: + async for execution_result in self.listen_events(f"{graph_id}/{graph_exec_id}"): + yield execution_result diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py new file mode 100644 index 000000000000..403a364c18a1 --- /dev/null +++ b/autogpt_platform/backend/backend/data/graph.py @@ -0,0 +1,782 @@ +import asyncio +import logging +import uuid +from collections import defaultdict +from datetime import datetime, timezone +from typing import Any, Literal, Optional, Type + +import prisma +from prisma.models import ( + AgentGraph, + AgentGraphExecution, + AgentNode, + AgentNodeLink, + StoreListingVersion, +) +from prisma.types import AgentGraphWhereInput +from pydantic.fields import computed_field + +from backend.blocks.agent import AgentExecutorBlock +from backend.blocks.basic import AgentInputBlock, AgentOutputBlock +from backend.util import json + +from .block import BlockInput, BlockType, get_block, get_blocks +from .db import BaseDbModel, transaction +from .execution import ExecutionStatus +from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE +from .integrations import Webhook + +logger = logging.getLogger(__name__) + + +class Link(BaseDbModel): + source_id: str + sink_id: str + source_name: str + sink_name: str + is_static: bool = False + + @staticmethod + def from_db(link: AgentNodeLink): + 
return Link( + id=link.id, + source_name=link.sourceName, + source_id=link.agentNodeSourceId, + sink_name=link.sinkName, + sink_id=link.agentNodeSinkId, + is_static=link.isStatic, + ) + + def __hash__(self): + return hash((self.source_id, self.sink_id, self.source_name, self.sink_name)) + + +class Node(BaseDbModel): + block_id: str + input_default: BlockInput = {} # dict[input_name, default_value] + metadata: dict[str, Any] = {} + input_links: list[Link] = [] + output_links: list[Link] = [] + + webhook_id: Optional[str] = None + + +class NodeModel(Node): + graph_id: str + graph_version: int + + webhook: Optional[Webhook] = None + + @staticmethod + def from_db(node: AgentNode): + if not node.AgentBlock: + raise ValueError(f"Invalid node {node.id}, invalid AgentBlock.") + obj = NodeModel( + id=node.id, + block_id=node.AgentBlock.id, + input_default=json.loads(node.constantInput, target_type=dict[str, Any]), + metadata=json.loads(node.metadata, target_type=dict[str, Any]), + graph_id=node.agentGraphId, + graph_version=node.agentGraphVersion, + webhook_id=node.webhookId, + webhook=Webhook.from_db(node.Webhook) if node.Webhook else None, + ) + obj.input_links = [Link.from_db(link) for link in node.Input or []] + obj.output_links = [Link.from_db(link) for link in node.Output or []] + return obj + + def is_triggered_by_event_type(self, event_type: str) -> bool: + if not (block := get_block(self.block_id)): + raise ValueError(f"Block #{self.block_id} not found for node #{self.id}") + if not block.webhook_config: + raise TypeError("This method can't be used on non-webhook blocks") + if not block.webhook_config.event_filter_input: + return True + event_filter = self.input_default.get(block.webhook_config.event_filter_input) + if not event_filter: + raise ValueError(f"Event filter is not configured on node #{self.id}") + return event_type in [ + block.webhook_config.event_format.format(event=k) + for k in event_filter + if event_filter[k] is True + ] + + +# Fix 2-way reference Node <-> Webhook +Webhook.model_rebuild() + + +class GraphExecution(BaseDbModel): + execution_id: str + started_at: datetime + ended_at: datetime + duration: float + total_run_time: float + status: ExecutionStatus + graph_id: str + graph_version: int + + @staticmethod + def from_db(execution: AgentGraphExecution): + now = datetime.now(timezone.utc) + start_time = execution.startedAt or execution.createdAt + end_time = execution.updatedAt or now + duration = (end_time - start_time).total_seconds() + total_run_time = duration + + try: + stats = json.loads(execution.stats or "{}", target_type=dict[str, Any]) + except ValueError: + stats = {} + + duration = stats.get("walltime", duration) + total_run_time = stats.get("nodes_walltime", total_run_time) + + return GraphExecution( + id=execution.id, + execution_id=execution.id, + started_at=start_time, + ended_at=end_time, + duration=duration, + total_run_time=total_run_time, + status=ExecutionStatus(execution.executionStatus), + graph_id=execution.agentGraphId, + graph_version=execution.agentGraphVersion, + ) + + +class Graph(BaseDbModel): + version: int = 1 + is_active: bool = True + is_template: bool = False + name: str + description: str + nodes: list[Node] = [] + links: list[Link] = [] + + @computed_field + @property + def input_schema(self) -> dict[str, Any]: + return self._generate_schema( + AgentInputBlock.Input, + [ + node.input_default + for node in self.nodes + if (b := get_block(node.block_id)) + and b.block_type == BlockType.INPUT + and "name" in node.input_default + ], + 
) + + @computed_field + @property + def output_schema(self) -> dict[str, Any]: + return self._generate_schema( + AgentOutputBlock.Input, + [ + node.input_default + for node in self.nodes + if (b := get_block(node.block_id)) + and b.block_type == BlockType.OUTPUT + and "name" in node.input_default + ], + ) + + @staticmethod + def _generate_schema( + type_class: Type[AgentInputBlock.Input] | Type[AgentOutputBlock.Input], + data: list[dict], + ) -> dict[str, Any]: + props = [] + for p in data: + try: + props.append(type_class(**p)) + except Exception as e: + logger.warning(f"Invalid {type_class}: {p}, {e}") + + return { + "type": "object", + "properties": { + p.name: { + "secret": p.secret, + # Default value has to be set for advanced fields. + "advanced": p.advanced and p.value is not None, + "title": p.title or p.name, + **({"description": p.description} if p.description else {}), + **({"default": p.value} if p.value is not None else {}), + } + for p in props + }, + "required": [p.name for p in props if p.value is None], + } + + +class GraphModel(Graph): + user_id: str + nodes: list[NodeModel] = [] # type: ignore + + @property + def starting_nodes(self) -> list[Node]: + outbound_nodes = {link.sink_id for link in self.links} + input_nodes = { + v.id + for v in self.nodes + if (b := get_block(v.block_id)) and b.block_type == BlockType.INPUT + } + return [ + node + for node in self.nodes + if node.id not in outbound_nodes or node.id in input_nodes + ] + + def reassign_ids(self, user_id: str, reassign_graph_id: bool = False): + """ + Reassigns all IDs in the graph to new UUIDs. + This method can be used before storing a new graph to the database. + """ + + # Reassign Graph ID + id_map = {node.id: str(uuid.uuid4()) for node in self.nodes} + if reassign_graph_id: + self.id = str(uuid.uuid4()) + + # Reassign Node IDs + for node in self.nodes: + node.id = id_map[node.id] + + # Reassign Link IDs + for link in self.links: + link.source_id = id_map[link.source_id] + link.sink_id = id_map[link.sink_id] + + # Reassign User IDs for agent blocks + for node in self.nodes: + if node.block_id != AgentExecutorBlock().id: + continue + node.input_default["user_id"] = user_id + node.input_default.setdefault("data", {}) + + self.validate_graph() + + def validate_graph(self, for_run: bool = False): + def sanitize(name): + return name.split("_#_")[0].split("_@_")[0].split("_$_")[0] + + input_links = defaultdict(list) + for link in self.links: + input_links[link.sink_id].append(link) + + # Nodes: required fields are filled or connected and dependencies are satisfied + for node in self.nodes: + block = get_block(node.block_id) + if block is None: + raise ValueError(f"Invalid block {node.block_id} for node #{node.id}") + + provided_inputs = set( + [sanitize(name) for name in node.input_default] + + [sanitize(link.sink_name) for link in input_links.get(node.id, [])] + ) + for name in block.input_schema.get_required_fields(): + if ( + name not in provided_inputs + and not ( + name == "payload" + and block.block_type + in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL) + ) + and ( + for_run # Skip input completion validation, unless when executing. 
+ or block.block_type == BlockType.INPUT + or block.block_type == BlockType.OUTPUT + or block.block_type == BlockType.AGENT + ) + ): + raise ValueError( + f"Node {block.name} #{node.id} required input missing: `{name}`" + ) + + # Get input schema properties and check dependencies + input_schema = block.input_schema.model_fields + required_fields = block.input_schema.get_required_fields() + + def has_value(name): + return ( + node is not None + and name in node.input_default + and node.input_default[name] is not None + and str(node.input_default[name]).strip() != "" + ) or (name in input_schema and input_schema[name].default is not None) + + # Validate dependencies between fields + for field_name, field_info in input_schema.items(): + # Apply input dependency validation only on run & field with depends_on + json_schema_extra = field_info.json_schema_extra or {} + dependencies = json_schema_extra.get("depends_on", []) + if not for_run or not dependencies: + continue + + # Check if dependent field has value in input_default + field_has_value = has_value(field_name) + field_is_required = field_name in required_fields + + # Check for missing dependencies when dependent field is present + missing_deps = [dep for dep in dependencies if not has_value(dep)] + if missing_deps and (field_has_value or field_is_required): + raise ValueError( + f"Node {block.name} #{node.id}: Field `{field_name}` requires [{', '.join(missing_deps)}] to be set" + ) + + node_map = {v.id: v for v in self.nodes} + + def is_static_output_block(nid: str) -> bool: + bid = node_map[nid].block_id + b = get_block(bid) + return b.static_output if b else False + + # Links: links are connected and the connected pin data type are compatible. + for link in self.links: + source = (link.source_id, link.source_name) + sink = (link.sink_id, link.sink_name) + suffix = f"Link {source} <-> {sink}" + + for i, (node_id, name) in enumerate([source, sink]): + node = node_map.get(node_id) + if not node: + raise ValueError( + f"{suffix}, {node_id} is invalid node id, available nodes: {node_map.keys()}" + ) + + block = get_block(node.block_id) + if not block: + blocks = {v().id: v().name for v in get_blocks().values()} + raise ValueError( + f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}" + ) + + sanitized_name = sanitize(name) + vals = node.input_default + if i == 0: + fields = ( + block.output_schema.get_fields() + if block.block_type != BlockType.AGENT + else vals.get("output_schema", {}).get("properties", {}).keys() + ) + else: + fields = ( + block.input_schema.get_fields() + if block.block_type != BlockType.AGENT + else vals.get("input_schema", {}).get("properties", {}).keys() + ) + if sanitized_name not in fields: + fields_msg = f"Allowed fields: {fields}" + raise ValueError(f"{suffix}, `{name}` invalid, {fields_msg}") + + if is_static_output_block(link.source_id): + link.is_static = True # Each value block output should be static. 
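The required-input check in validate_graph above treats dynamic pins as satisfying their base field: the suffixes _#_, _@_ and _$_ are stripped before names are compared against the block's required fields. A minimal standalone sketch of that rule (re-implemented here purely for illustration; the field names are invented):

def sanitize(name: str) -> str:
    # Strip the dynamic-pin suffixes, so e.g. "values_#_1" counts as providing "values".
    return name.split("_#_")[0].split("_@_")[0].split("_$_")[0]

# Inputs a node receives either as node defaults or via incoming links:
provided_inputs = {sanitize(n) for n in ["values_#_1", "values_#_2", "format"]}

# A hypothetical block requiring these two fields would pass the check:
required_fields = {"values", "format"}
assert required_fields.issubset(provided_inputs)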
+ + @staticmethod + def from_db(graph: AgentGraph, for_export: bool = False): + return GraphModel( + id=graph.id, + user_id=graph.userId, + version=graph.version, + is_active=graph.isActive, + is_template=graph.isTemplate, + name=graph.name or "", + description=graph.description or "", + nodes=[ + NodeModel.from_db(GraphModel._process_node(node, for_export)) + for node in graph.AgentNodes or [] + ], + links=list( + { + Link.from_db(link) + for node in graph.AgentNodes or [] + for link in (node.Input or []) + (node.Output or []) + } + ), + ) + + @staticmethod + def _process_node(node: AgentNode, for_export: bool) -> AgentNode: + if for_export: + # Remove credentials from node input + if node.constantInput: + constant_input = json.loads( + node.constantInput, target_type=dict[str, Any] + ) + constant_input = GraphModel._hide_node_input_credentials(constant_input) + node.constantInput = json.dumps(constant_input) + + # Remove webhook info + node.webhookId = None + node.Webhook = None + + return node + + @staticmethod + def _hide_node_input_credentials(input_data: dict[str, Any]) -> dict[str, Any]: + sensitive_keys = ["credentials", "api_key", "password", "token", "secret"] + result = {} + for key, value in input_data.items(): + if isinstance(value, dict): + result[key] = GraphModel._hide_node_input_credentials(value) + elif isinstance(value, str) and any( + sensitive_key in key.lower() for sensitive_key in sensitive_keys + ): + # Skip this key-value pair in the result + continue + else: + result[key] = value + return result + + def clean_graph(self): + blocks = [block() for block in get_blocks().values()] + + input_blocks = [ + node + for node in self.nodes + if next( + ( + b + for b in blocks + if b.id == node.block_id and b.block_type == BlockType.INPUT + ), + None, + ) + ] + + for node in self.nodes: + if any(input_block.id == node.id for input_block in input_blocks): + node.input_default["value"] = "" + + +# --------------------- CRUD functions --------------------- # + + +async def get_node(node_id: str) -> NodeModel: + node = await AgentNode.prisma().find_unique_or_raise( + where={"id": node_id}, + include=AGENT_NODE_INCLUDE, + ) + return NodeModel.from_db(node) + + +async def set_node_webhook(node_id: str, webhook_id: str | None) -> NodeModel: + node = await AgentNode.prisma().update( + where={"id": node_id}, + data=( + {"Webhook": {"connect": {"id": webhook_id}}} + if webhook_id + else {"Webhook": {"disconnect": True}} + ), + include=AGENT_NODE_INCLUDE, + ) + if not node: + raise ValueError(f"Node #{node_id} not found") + return NodeModel.from_db(node) + + +async def get_graphs( + user_id: str, + filter_by: Literal["active", "template"] | None = "active", +) -> list[GraphModel]: + """ + Retrieves graph metadata objects. + Default behaviour is to get all currently active graphs. + + Args: + filter_by: An optional filter to either select templates or active graphs. + user_id: The ID of the user that owns the graph. + + Returns: + list[GraphModel]: A list of objects representing the retrieved graphs. 
+ """ + where_clause: AgentGraphWhereInput = {"userId": user_id} + + if filter_by == "active": + where_clause["isActive"] = True + elif filter_by == "template": + where_clause["isTemplate"] = True + + graphs = await AgentGraph.prisma().find_many( + where=where_clause, + distinct=["id"], + order={"version": "desc"}, + include=AGENT_GRAPH_INCLUDE, + ) + + graph_models = [] + for graph in graphs: + try: + graph_models.append(GraphModel.from_db(graph)) + except Exception as e: + logger.error(f"Error processing graph {graph.id}: {e}") + continue + + return graph_models + + +async def get_executions(user_id: str) -> list[GraphExecution]: + executions = await AgentGraphExecution.prisma().find_many( + where={"userId": user_id}, + order={"createdAt": "desc"}, + ) + return [GraphExecution.from_db(execution) for execution in executions] + + +async def get_execution(user_id: str, execution_id: str) -> GraphExecution | None: + execution = await AgentGraphExecution.prisma().find_first( + where={"id": execution_id, "userId": user_id} + ) + return GraphExecution.from_db(execution) if execution else None + + +async def get_graph( + graph_id: str, + version: int | None = None, + template: bool = False, + user_id: str | None = None, + for_export: bool = False, +) -> GraphModel | None: + """ + Retrieves a graph from the DB. + Defaults to the version with `is_active` if `version` is not passed, + or the latest version with `is_template` if `template=True`. + + Returns `None` if the record is not found. + """ + where_clause: AgentGraphWhereInput = { + "id": graph_id, + } + + if version is not None: + where_clause["version"] = version + elif not template: + where_clause["isActive"] = True + + graph = await AgentGraph.prisma().find_first( + where=where_clause, + include=AGENT_GRAPH_INCLUDE, + order={"version": "desc"}, + ) + + # For access, the graph must be owned by the user or listed in the store + if graph is None or ( + graph.userId != user_id + and not ( + await StoreListingVersion.prisma().find_first( + where={ + "agentId": graph_id, + "agentVersion": version or graph.version, + "isDeleted": False, + "StoreListing": {"is": {"isApproved": True}}, + } + ) + ) + ): + return None + + return GraphModel.from_db(graph, for_export) + + +async def set_graph_active_version(graph_id: str, version: int, user_id: str) -> None: + # Activate the requested version if it exists and is owned by the user. + updated_count = await AgentGraph.prisma().update_many( + data={"isActive": True}, + where={ + "id": graph_id, + "version": version, + "userId": user_id, + }, + ) + if updated_count == 0: + raise Exception(f"Graph #{graph_id} v{version} not found or not owned by user") + + # Deactivate all other versions. 
+ await AgentGraph.prisma().update_many( + data={"isActive": False}, + where={ + "id": graph_id, + "version": {"not": version}, + "userId": user_id, + "isActive": True, + }, + ) + + +async def get_graph_all_versions(graph_id: str, user_id: str) -> list[GraphModel]: + graph_versions = await AgentGraph.prisma().find_many( + where={"id": graph_id, "userId": user_id}, + order={"version": "desc"}, + include=AGENT_GRAPH_INCLUDE, + ) + + if not graph_versions: + return [] + + return [GraphModel.from_db(graph) for graph in graph_versions] + + +async def delete_graph(graph_id: str, user_id: str) -> int: + entries_count = await AgentGraph.prisma().delete_many( + where={"id": graph_id, "userId": user_id} + ) + if entries_count: + logger.info(f"Deleted {entries_count} graph entries for Graph #{graph_id}") + return entries_count + + +async def create_graph(graph: Graph, user_id: str) -> GraphModel: + async with transaction() as tx: + await __create_graph(tx, graph, user_id) + + if created_graph := await get_graph( + graph.id, graph.version, graph.is_template, user_id=user_id + ): + return created_graph + + raise ValueError(f"Created graph {graph.id} v{graph.version} is not in DB") + + +async def __create_graph(tx, graph: Graph, user_id: str): + await AgentGraph.prisma(tx).create( + data={ + "id": graph.id, + "version": graph.version, + "name": graph.name, + "description": graph.description, + "isTemplate": graph.is_template, + "isActive": graph.is_active, + "userId": user_id, + "AgentNodes": { + "create": [ + { + "id": node.id, + "agentBlockId": node.block_id, + "constantInput": json.dumps(node.input_default), + "metadata": json.dumps(node.metadata), + } + for node in graph.nodes + ] + }, + } + ) + + await asyncio.gather( + *[ + AgentNodeLink.prisma(tx).create( + { + "id": str(uuid.uuid4()), + "sourceName": link.source_name, + "sinkName": link.sink_name, + "agentNodeSourceId": link.source_id, + "agentNodeSinkId": link.sink_id, + "isStatic": link.is_static, + } + ) + for link in graph.links + ] + ) + + +# ------------------------ UTILITIES ------------------------ # + + +def make_graph_model(creatable_graph: Graph, user_id: str) -> GraphModel: + """ + Convert a Graph to a GraphModel, setting graph_id and graph_version on all nodes. + + Args: + creatable_graph (Graph): The creatable graph to convert. + user_id (str): The ID of the user creating the graph. + + Returns: + GraphModel: The converted Graph object. 
+ """ + # Create a new Graph object, inheriting properties from CreatableGraph + return GraphModel( + **creatable_graph.model_dump(exclude={"nodes"}), + user_id=user_id, + nodes=[ + NodeModel( + **creatable_node.model_dump(), + graph_id=creatable_graph.id, + graph_version=creatable_graph.version, + ) + for creatable_node in creatable_graph.nodes + ], + ) + + +async def fix_llm_provider_credentials(): + """Fix node credentials with provider `llm`""" + from backend.integrations.credentials_store import IntegrationCredentialsStore + + from .user import get_user_integrations + + store = IntegrationCredentialsStore() + + broken_nodes = await prisma.get_client().query_raw( + """ + SELECT graph."userId" user_id, + node.id node_id, + node."constantInput" node_preset_input + FROM platform."AgentNode" node + LEFT JOIN platform."AgentGraph" graph + ON node."agentGraphId" = graph.id + WHERE node."constantInput"::jsonb->'credentials'->>'provider' = 'llm' + ORDER BY graph."userId"; + """ + ) + logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes") + + user_id: str = "" + user_integrations = None + for node in broken_nodes: + if node["user_id"] != user_id: + # Save queries by only fetching once per user + user_id = node["user_id"] + user_integrations = await get_user_integrations(user_id) + elif not user_integrations: + raise RuntimeError(f"Impossible state while processing node {node}") + + node_id: str = node["node_id"] + node_preset_input: dict = json.loads(node["node_preset_input"]) + credentials_meta: dict = node_preset_input["credentials"] + + credentials = next( + ( + c + for c in user_integrations.credentials + if c.id == credentials_meta["id"] + ), + None, + ) + if not credentials: + continue + if credentials.type != "api_key": + logger.warning( + f"User {user_id} credentials {credentials.id} with provider 'llm' " + f"has invalid type '{credentials.type}'" + ) + continue + + api_key = credentials.api_key.get_secret_value() + if api_key.startswith("sk-ant-api03-"): + credentials.provider = credentials_meta["provider"] = "anthropic" + elif api_key.startswith("sk-"): + credentials.provider = credentials_meta["provider"] = "openai" + elif api_key.startswith("gsk_"): + credentials.provider = credentials_meta["provider"] = "groq" + else: + logger.warning( + f"Could not identify provider from key prefix {api_key[:13]}*****" + ) + continue + + store.update_creds(user_id, credentials) + await AgentNode.prisma().update( + where={"id": node_id}, + data={"constantInput": json.dumps(node_preset_input)}, + ) diff --git a/autogpt_platform/backend/backend/data/includes.py b/autogpt_platform/backend/backend/data/includes.py new file mode 100644 index 000000000000..0b791f502a1e --- /dev/null +++ b/autogpt_platform/backend/backend/data/includes.py @@ -0,0 +1,34 @@ +import prisma + +AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = { + "Input": True, + "Output": True, + "Webhook": True, + "AgentBlock": True, +} + +AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = { + "AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore +} + +EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = { + "Input": True, + "Output": True, + "AgentNode": True, + "AgentGraphExecution": True, +} + +GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = { + "AgentNodeExecutions": { + "include": { + "Input": True, + "Output": True, + "AgentNode": True, + "AgentGraphExecution": True, + } + } +} + +INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = { + "AgentNodes": 
{"include": AGENT_NODE_INCLUDE} # type: ignore +} diff --git a/autogpt_platform/backend/backend/data/integrations.py b/autogpt_platform/backend/backend/data/integrations.py new file mode 100644 index 000000000000..2ed06e201500 --- /dev/null +++ b/autogpt_platform/backend/backend/data/integrations.py @@ -0,0 +1,196 @@ +import logging +from typing import TYPE_CHECKING, AsyncGenerator, Optional + +from prisma import Json +from prisma.models import IntegrationWebhook +from pydantic import Field, computed_field + +from backend.data.includes import INTEGRATION_WEBHOOK_INCLUDE +from backend.data.queue import AsyncRedisEventBus +from backend.integrations.providers import ProviderName +from backend.integrations.webhooks.utils import webhook_ingress_url + +from .db import BaseDbModel + +if TYPE_CHECKING: + from .graph import NodeModel + +logger = logging.getLogger(__name__) + + +class Webhook(BaseDbModel): + user_id: str + provider: ProviderName + credentials_id: str + webhook_type: str + resource: str + events: list[str] + config: dict = Field(default_factory=dict) + secret: str + + provider_webhook_id: str + + attached_nodes: Optional[list["NodeModel"]] = None + + @computed_field + @property + def url(self) -> str: + return webhook_ingress_url(self.provider, self.id) + + @staticmethod + def from_db(webhook: IntegrationWebhook): + from .graph import NodeModel + + return Webhook( + id=webhook.id, + user_id=webhook.userId, + provider=ProviderName(webhook.provider), + credentials_id=webhook.credentialsId, + webhook_type=webhook.webhookType, + resource=webhook.resource, + events=webhook.events, + config=dict(webhook.config), + secret=webhook.secret, + provider_webhook_id=webhook.providerWebhookId, + attached_nodes=( + [NodeModel.from_db(node) for node in webhook.AgentNodes] + if webhook.AgentNodes is not None + else None + ), + ) + + +# --------------------- CRUD functions --------------------- # + + +async def create_webhook(webhook: Webhook) -> Webhook: + created_webhook = await IntegrationWebhook.prisma().create( + data={ + "id": webhook.id, + "userId": webhook.user_id, + "provider": webhook.provider.value, + "credentialsId": webhook.credentials_id, + "webhookType": webhook.webhook_type, + "resource": webhook.resource, + "events": webhook.events, + "config": Json(webhook.config), + "secret": webhook.secret, + "providerWebhookId": webhook.provider_webhook_id, + } + ) + return Webhook.from_db(created_webhook) + + +async def get_webhook(webhook_id: str) -> Webhook: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + webhook = await IntegrationWebhook.prisma().find_unique_or_raise( + where={"id": webhook_id}, + include=INTEGRATION_WEBHOOK_INCLUDE, + ) + return Webhook.from_db(webhook) + + +async def get_all_webhooks_by_creds(credentials_id: str) -> list[Webhook]: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + if not credentials_id: + raise ValueError("credentials_id must not be empty") + webhooks = await IntegrationWebhook.prisma().find_many( + where={"credentialsId": credentials_id}, + include=INTEGRATION_WEBHOOK_INCLUDE, + ) + return [Webhook.from_db(webhook) for webhook in webhooks] + + +async def find_webhook_by_credentials_and_props( + credentials_id: str, webhook_type: str, resource: str, events: list[str] +) -> Webhook | None: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + webhook = await IntegrationWebhook.prisma().find_first( + where={ + "credentialsId": credentials_id, + "webhookType": 
webhook_type, + "resource": resource, + "events": {"has_every": events}, + }, + include=INTEGRATION_WEBHOOK_INCLUDE, + ) + return Webhook.from_db(webhook) if webhook else None + + +async def find_webhook_by_graph_and_props( + graph_id: str, provider: str, webhook_type: str, events: list[str] +) -> Webhook | None: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + webhook = await IntegrationWebhook.prisma().find_first( + where={ + "provider": provider, + "webhookType": webhook_type, + "events": {"has_every": events}, + "AgentNodes": {"some": {"agentGraphId": graph_id}}, + }, + include=INTEGRATION_WEBHOOK_INCLUDE, + ) + return Webhook.from_db(webhook) if webhook else None + + +async def update_webhook_config(webhook_id: str, updated_config: dict) -> Webhook: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + _updated_webhook = await IntegrationWebhook.prisma().update( + where={"id": webhook_id}, + data={"config": Json(updated_config)}, + include=INTEGRATION_WEBHOOK_INCLUDE, + ) + if _updated_webhook is None: + raise ValueError(f"Webhook #{webhook_id} not found") + return Webhook.from_db(_updated_webhook) + + +async def delete_webhook(webhook_id: str) -> None: + """⚠️ No `user_id` check: DO NOT USE without check in user-facing endpoints.""" + deleted = await IntegrationWebhook.prisma().delete(where={"id": webhook_id}) + if not deleted: + raise ValueError(f"Webhook #{webhook_id} not found") + + +# --------------------- WEBHOOK EVENTS --------------------- # + + +class WebhookEvent(BaseDbModel): + provider: str + webhook_id: str + event_type: str + payload: dict + + +class WebhookEventBus(AsyncRedisEventBus[WebhookEvent]): + Model = WebhookEvent + + @property + def event_bus_name(self) -> str: + return "webhooks" + + +_webhook_event_bus = WebhookEventBus() + + +async def publish_webhook_event(event: WebhookEvent): + await _webhook_event_bus.publish_event( + event, f"{event.webhook_id}/{event.event_type}" + ) + + +async def listen_for_webhook_events( + webhook_id: str, event_type: Optional[str] = None +) -> AsyncGenerator[WebhookEvent, None]: + async for event in _webhook_event_bus.listen_events( + f"{webhook_id}/{event_type or '*'}" + ): + yield event + + +async def wait_for_webhook_event( + webhook_id: str, event_type: Optional[str] = None, timeout: Optional[float] = None +) -> WebhookEvent | None: + return await _webhook_event_bus.wait_for_event( + f"{webhook_id}/{event_type or '*'}", timeout + ) diff --git a/autogpt_platform/backend/backend/data/model.py b/autogpt_platform/backend/backend/data/model.py new file mode 100644 index 000000000000..181da2df93ce --- /dev/null +++ b/autogpt_platform/backend/backend/data/model.py @@ -0,0 +1,372 @@ +from __future__ import annotations + +import base64 +import logging +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + ClassVar, + Generic, + Literal, + Optional, + TypedDict, + TypeVar, + get_args, +) +from uuid import uuid4 + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + GetCoreSchemaHandler, + SecretStr, + field_serializer, +) +from pydantic_core import ( + CoreSchema, + PydanticUndefined, + PydanticUndefinedType, + ValidationError, + core_schema, +) + +from backend.integrations.providers import ProviderName +from backend.util.settings import Secrets + +if TYPE_CHECKING: + from backend.data.block import BlockSchema + +T = TypeVar("T") +logger = logging.getLogger(__name__) + + +class BlockSecret: + def __init__(self, key: Optional[str] = None, value: 
Optional[str] = None): + if value is not None: + trimmed_value = value.strip() + if value != trimmed_value: + logger.debug(BlockSecret.TRIMMING_VALUE_MSG) + self._value = trimmed_value + return + + self._value = self.__get_secret(key) + if self._value is None: + raise ValueError(f"Secret {key} not found.") + trimmed_value = self._value.strip() + if self._value != trimmed_value: + logger.debug(BlockSecret.TRIMMING_VALUE_MSG) + self._value = trimmed_value + + TRIMMING_VALUE_MSG: ClassVar[str] = "Provided secret value got trimmed." + STR: ClassVar[str] = "" + SECRETS: ClassVar[Secrets] = Secrets() + + def __repr__(self): + return BlockSecret.STR + + def __str__(self): + return BlockSecret.STR + + @staticmethod + def __get_secret(key: str | None): + if not key or not hasattr(BlockSecret.SECRETS, key): + return None + return getattr(BlockSecret.SECRETS, key) + + def get_secret_value(self): + trimmed_value = str(self._value).strip() + if self._value != trimmed_value: + logger.info(BlockSecret.TRIMMING_VALUE_MSG) + return trimmed_value + + @classmethod + def parse_value(cls, value: Any) -> BlockSecret: + if isinstance(value, BlockSecret): + return value + return BlockSecret(value=value) + + @classmethod + def __get_pydantic_json_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> dict[str, Any]: + return { + "type": "string", + } + + @classmethod + def __get_pydantic_core_schema__( + cls, source_type: Any, handler: GetCoreSchemaHandler + ) -> CoreSchema: + validate_fun = core_schema.no_info_plain_validator_function(cls.parse_value) + return core_schema.json_or_python_schema( + json_schema=validate_fun, + python_schema=validate_fun, + serialization=core_schema.plain_serializer_function_ser_schema( + lambda val: BlockSecret.STR + ), + ) + + +def SecretField( + value: Optional[str] = None, + key: Optional[str] = None, + title: Optional[str] = None, + description: Optional[str] = None, + placeholder: Optional[str] = None, + **kwargs, +) -> BlockSecret: + return SchemaField( + BlockSecret(key=key, value=value), + title=title, + description=description, + placeholder=placeholder, + secret=True, + **kwargs, + ) + + +def SchemaField( + default: T | PydanticUndefinedType = PydanticUndefined, + *args, + default_factory: Optional[Callable[[], T]] = None, + title: Optional[str] = None, + description: Optional[str] = None, + placeholder: Optional[str] = None, + advanced: Optional[bool] = None, + secret: bool = False, + exclude: bool = False, + hidden: Optional[bool] = None, + depends_on: list[str] | None = None, + image_upload: Optional[bool] = None, + image_output: Optional[bool] = None, + **kwargs, +) -> T: + if default is PydanticUndefined and default_factory is None: + advanced = False + elif advanced is None: + advanced = True + + json_extra = { + k: v + for k, v in { + "placeholder": placeholder, + "secret": secret, + "advanced": advanced, + "hidden": hidden, + "depends_on": depends_on, + "image_upload": image_upload, + "image_output": image_output, + }.items() + if v is not None + } + + return Field( + default, + *args, + default_factory=default_factory, + title=title, + description=description, + exclude=exclude, + json_schema_extra=json_extra, + **kwargs, + ) # type: ignore + + +class _BaseCredentials(BaseModel): + id: str = Field(default_factory=lambda: str(uuid4())) + provider: str + title: Optional[str] + + @field_serializer("*") + def dump_secret_strings(value: Any, _info): + if isinstance(value, SecretStr): + return value.get_secret_value() + return value + + +class 
OAuth2Credentials(_BaseCredentials): + type: Literal["oauth2"] = "oauth2" + username: Optional[str] + """Username of the third-party service user that these credentials belong to""" + access_token: SecretStr + access_token_expires_at: Optional[int] + """Unix timestamp (seconds) indicating when the access token expires (if at all)""" + refresh_token: Optional[SecretStr] + refresh_token_expires_at: Optional[int] + """Unix timestamp (seconds) indicating when the refresh token expires (if at all)""" + scopes: list[str] + metadata: dict[str, Any] = Field(default_factory=dict) + + def auth_header(self) -> str: + return f"Bearer {self.access_token.get_secret_value()}" + + +class APIKeyCredentials(_BaseCredentials): + type: Literal["api_key"] = "api_key" + api_key: SecretStr + expires_at: Optional[int] = Field( + default=None, + description="Unix timestamp (seconds) indicating when the API key expires (if at all)", + ) + """Unix timestamp (seconds) indicating when the API key expires (if at all)""" + + def auth_header(self) -> str: + return f"Bearer {self.api_key.get_secret_value()}" + + +class UserPasswordCredentials(_BaseCredentials): + type: Literal["user_password"] = "user_password" + username: SecretStr + password: SecretStr + + def auth_header(self) -> str: + # Converting the string to bytes using encode() + # Base64 encoding it with base64.b64encode() + # Converting the resulting bytes back to a string with decode() + return f"Basic {base64.b64encode(f'{self.username.get_secret_value()}:{self.password.get_secret_value()}'.encode()).decode()}" + + +Credentials = Annotated[ + OAuth2Credentials | APIKeyCredentials | UserPasswordCredentials, + Field(discriminator="type"), +] + + +CredentialsType = Literal["api_key", "oauth2", "user_password"] + + +class OAuthState(BaseModel): + token: str + provider: str + expires_at: int + code_verifier: Optional[str] = None + """Unix timestamp (seconds) indicating when this OAuth state expires""" + scopes: list[str] + + +class UserMetadata(BaseModel): + integration_credentials: list[Credentials] = Field(default_factory=list) + integration_oauth_states: list[OAuthState] = Field(default_factory=list) + + +class UserMetadataRaw(TypedDict, total=False): + integration_credentials: list[dict] + integration_oauth_states: list[dict] + + +class UserIntegrations(BaseModel): + credentials: list[Credentials] = Field(default_factory=list) + oauth_states: list[OAuthState] = Field(default_factory=list) + + +CP = TypeVar("CP", bound=ProviderName) +CT = TypeVar("CT", bound=CredentialsType) + + +def is_credentials_field_name(field_name: str) -> bool: + return field_name == "credentials" or field_name.endswith("_credentials") + + +class CredentialsMetaInput(BaseModel, Generic[CP, CT]): + id: str + title: Optional[str] = None + provider: CP + type: CT + + @classmethod + def allowed_providers(cls) -> tuple[ProviderName, ...]: + return get_args(cls.model_fields["provider"].annotation) + + @classmethod + def allowed_cred_types(cls) -> tuple[CredentialsType, ...]: + return get_args(cls.model_fields["type"].annotation) + + @classmethod + def validate_credentials_field_schema(cls, model: type["BlockSchema"]): + """Validates the schema of a credentials input field""" + field_name = next( + name for name, type in model.get_credentials_fields().items() if type is cls + ) + field_schema = model.jsonschema()["properties"][field_name] + try: + schema_extra = _CredentialsFieldSchemaExtra[CP, CT].model_validate( + field_schema + ) + except ValidationError as e: + if "Field required 
[type=missing" not in str(e): + raise + + raise TypeError( + "Field 'credentials' JSON schema lacks required extra items: " + f"{field_schema}" + ) from e + + if len(cls.allowed_providers()) > 1 and not schema_extra.discriminator: + raise TypeError( + f"Multi-provider CredentialsField '{field_name}' " + "requires discriminator!" + ) + + @staticmethod + def _add_json_schema_extra(schema, cls: CredentialsMetaInput): + schema["credentials_provider"] = cls.allowed_providers() + schema["credentials_types"] = cls.allowed_cred_types() + + model_config = ConfigDict( + json_schema_extra=_add_json_schema_extra, # type: ignore + ) + + +class _CredentialsFieldSchemaExtra(BaseModel, Generic[CP, CT]): + # TODO: move discrimination mechanism out of CredentialsField (frontend + backend) + credentials_provider: list[CP] + credentials_scopes: Optional[list[str]] = None + credentials_types: list[CT] + discriminator: Optional[str] = None + discriminator_mapping: Optional[dict[str, CP]] = None + + +def CredentialsField( + required_scopes: set[str] = set(), + *, + discriminator: Optional[str] = None, + discriminator_mapping: Optional[dict[str, Any]] = None, + title: Optional[str] = None, + description: Optional[str] = None, + **kwargs, +) -> CredentialsMetaInput: + """ + `CredentialsField` must and can only be used on fields named `credentials`. + This is enforced by the `BlockSchema` base class. + """ + + field_schema_extra = { + k: v + for k, v in { + "credentials_scopes": list(required_scopes) or None, + "discriminator": discriminator, + "discriminator_mapping": discriminator_mapping, + }.items() + if v is not None + } + + return Field( + title=title, + description=description, + json_schema_extra=field_schema_extra, # validated on BlockSchema init + **kwargs, + ) + + +class ContributorDetails(BaseModel): + name: str = Field(title="Name", description="The name of the contributor.") + + +class AutoTopUpConfig(BaseModel): + amount: int + """Amount of credits to top up.""" + threshold: int + """Threshold to trigger auto top up.""" diff --git a/autogpt_platform/backend/backend/data/queue.py b/autogpt_platform/backend/backend/data/queue.py new file mode 100644 index 000000000000..8fbdb03cd6f0 --- /dev/null +++ b/autogpt_platform/backend/backend/data/queue.py @@ -0,0 +1,122 @@ +import asyncio +import json +import logging +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar + +from pydantic import BaseModel +from redis.asyncio.client import PubSub as AsyncPubSub +from redis.client import PubSub + +from backend.data import redis + +logger = logging.getLogger(__name__) + + +class DateTimeEncoder(json.JSONEncoder): + def default(self, o): + if isinstance(o, datetime): + return o.isoformat() + return super().default(o) + + +M = TypeVar("M", bound=BaseModel) + + +class BaseRedisEventBus(Generic[M], ABC): + Model: type[M] + + @property + @abstractmethod + def event_bus_name(self) -> str: + pass + + def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]: + message = json.dumps(item.model_dump(), cls=DateTimeEncoder) + channel_name = f"{self.event_bus_name}/{channel_key}" + logger.info(f"[{channel_name}] Publishing an event to Redis {message}") + return message, channel_name + + def _deserialize_message(self, msg: Any, channel_key: str) -> M | None: + message_type = "pmessage" if "*" in channel_key else "message" + if msg["type"] != message_type: + return None + try: + data = json.loads(msg["data"]) + 
logger.info(f"Consuming an event from Redis {data}") + return self.Model(**data) + except Exception as e: + logger.error(f"Failed to parse event result from Redis {msg} {e}") + + def _get_pubsub_channel( + self, connection: redis.Redis | redis.AsyncRedis, channel_key: str + ) -> tuple[PubSub | AsyncPubSub, str]: + full_channel_name = f"{self.event_bus_name}/{channel_key}" + pubsub = connection.pubsub() + return pubsub, full_channel_name + + +class RedisEventBus(BaseRedisEventBus[M], ABC): + Model: type[M] + + @property + def connection(self) -> redis.Redis: + return redis.get_redis() + + def publish_event(self, event: M, channel_key: str): + message, full_channel_name = self._serialize_message(event, channel_key) + self.connection.publish(full_channel_name, message) + + def listen_events(self, channel_key: str) -> Generator[M, None, None]: + pubsub, full_channel_name = self._get_pubsub_channel( + self.connection, channel_key + ) + assert isinstance(pubsub, PubSub) + + if "*" in channel_key: + pubsub.psubscribe(full_channel_name) + else: + pubsub.subscribe(full_channel_name) + + for message in pubsub.listen(): + if event := self._deserialize_message(message, channel_key): + yield event + + +class AsyncRedisEventBus(BaseRedisEventBus[M], ABC): + Model: type[M] + + @property + async def connection(self) -> redis.AsyncRedis: + return await redis.get_redis_async() + + async def publish_event(self, event: M, channel_key: str): + message, full_channel_name = self._serialize_message(event, channel_key) + connection = await self.connection + await connection.publish(full_channel_name, message) + + async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]: + pubsub, full_channel_name = self._get_pubsub_channel( + await self.connection, channel_key + ) + assert isinstance(pubsub, AsyncPubSub) + + if "*" in channel_key: + await pubsub.psubscribe(full_channel_name) + else: + await pubsub.subscribe(full_channel_name) + + async for message in pubsub.listen(): + if event := self._deserialize_message(message, channel_key): + yield event + + async def wait_for_event( + self, channel_key: str, timeout: Optional[float] = None + ) -> M | None: + try: + return await asyncio.wait_for( + anext(aiter(self.listen_events(channel_key))), timeout + ) + except TimeoutError: + return None diff --git a/autogpt_platform/backend/backend/data/redis.py b/autogpt_platform/backend/backend/data/redis.py new file mode 100644 index 000000000000..36410fe29cb2 --- /dev/null +++ b/autogpt_platform/backend/backend/data/redis.py @@ -0,0 +1,84 @@ +import logging +import os + +from dotenv import load_dotenv +from redis import Redis +from redis.asyncio import Redis as AsyncRedis + +from backend.util.retry import conn_retry + +load_dotenv() + +HOST = os.getenv("REDIS_HOST", "localhost") +PORT = int(os.getenv("REDIS_PORT", "6379")) +PASSWORD = os.getenv("REDIS_PASSWORD", "password") + +logger = logging.getLogger(__name__) +connection: Redis | None = None +connection_async: AsyncRedis | None = None + + +@conn_retry("Redis", "Acquiring connection") +def connect() -> Redis: + global connection + if connection: + return connection + + c = Redis( + host=HOST, + port=PORT, + password=PASSWORD, + decode_responses=True, + ) + c.ping() + connection = c + return connection + + +@conn_retry("Redis", "Releasing connection") +def disconnect(): + global connection + if connection: + connection.close() + connection = None + + +def get_redis(auto_connect: bool = True) -> Redis: + if connection: + return connection + if auto_connect: + 
return connect() + raise RuntimeError("Redis connection is not established") + + +@conn_retry("AsyncRedis", "Acquiring connection") +async def connect_async() -> AsyncRedis: + global connection_async + if connection_async: + return connection_async + + c = AsyncRedis( + host=HOST, + port=PORT, + password=PASSWORD, + decode_responses=True, + ) + await c.ping() + connection_async = c + return connection_async + + +@conn_retry("AsyncRedis", "Releasing connection") +async def disconnect_async(): + global connection_async + if connection_async: + await connection_async.close() + connection_async = None + + +async def get_redis_async(auto_connect: bool = True) -> AsyncRedis: + if connection_async: + return connection_async + if auto_connect: + return await connect_async() + raise RuntimeError("AsyncRedis connection is not established") diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py new file mode 100644 index 000000000000..8602d0f3b136 --- /dev/null +++ b/autogpt_platform/backend/backend/data/user.py @@ -0,0 +1,130 @@ +import logging +from typing import Optional, cast + +from autogpt_libs.auth.models import DEFAULT_USER_ID +from fastapi import HTTPException +from prisma import Json +from prisma.models import User + +from backend.data.db import prisma +from backend.data.model import UserIntegrations, UserMetadata, UserMetadataRaw +from backend.util.encryption import JSONCryptor + +logger = logging.getLogger(__name__) + + +async def get_or_create_user(user_data: dict) -> User: + user_id = user_data.get("sub") + if not user_id: + raise HTTPException(status_code=401, detail="User ID not found in token") + + user_email = user_data.get("email") + if not user_email: + raise HTTPException(status_code=401, detail="Email not found in token") + + user = await prisma.user.find_unique(where={"id": user_id}) + if not user: + user = await prisma.user.create( + data={ + "id": user_id, + "email": user_email, + "name": user_data.get("user_metadata", {}).get("name"), + } + ) + return User.model_validate(user) + + +async def get_user_by_id(user_id: str) -> Optional[User]: + user = await prisma.user.find_unique(where={"id": user_id}) + return User.model_validate(user) if user else None + + +async def create_default_user() -> Optional[User]: + user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID}) + if not user: + user = await prisma.user.create( + data={ + "id": DEFAULT_USER_ID, + "email": "default@example.com", + "name": "Default User", + } + ) + return User.model_validate(user) + + +async def get_user_metadata(user_id: str) -> UserMetadata: + user = await User.prisma().find_unique_or_raise( + where={"id": user_id}, + ) + + metadata = cast(UserMetadataRaw, user.metadata) + return UserMetadata.model_validate(metadata) + + +async def update_user_metadata(user_id: str, metadata: UserMetadata): + await User.prisma().update( + where={"id": user_id}, + data={"metadata": Json(metadata.model_dump())}, + ) + + +async def get_user_integrations(user_id: str) -> UserIntegrations: + user = await User.prisma().find_unique_or_raise( + where={"id": user_id}, + ) + + encrypted_integrations = user.integrations + if not encrypted_integrations: + return UserIntegrations() + else: + return UserIntegrations.model_validate( + JSONCryptor().decrypt(encrypted_integrations) + ) + + +async def update_user_integrations(user_id: str, data: UserIntegrations): + encrypted_data = JSONCryptor().encrypt(data.model_dump()) + await User.prisma().update( + where={"id": user_id}, + 
data={"integrations": encrypted_data}, + ) + + +async def migrate_and_encrypt_user_integrations(): + """Migrate integration credentials and OAuth states from metadata to integrations column.""" + users = await User.prisma().find_many( + where={ + "metadata": { + "path": ["integration_credentials"], + "not": Json({"a": "yolo"}), # bogus value works to check if key exists + } # type: ignore + } + ) + logger.info(f"Migrating integration credentials for {len(users)} users") + + for user in users: + raw_metadata = cast(UserMetadataRaw, user.metadata) + metadata = UserMetadata.model_validate(raw_metadata) + + # Get existing integrations data + integrations = await get_user_integrations(user_id=user.id) + + # Copy credentials and oauth states from metadata if they exist + if metadata.integration_credentials and not integrations.credentials: + integrations.credentials = metadata.integration_credentials + if metadata.integration_oauth_states: + integrations.oauth_states = metadata.integration_oauth_states + + # Save to integrations column + await update_user_integrations(user_id=user.id, data=integrations) + + # Remove from metadata + raw_metadata = dict(raw_metadata) + raw_metadata.pop("integration_credentials", None) + raw_metadata.pop("integration_oauth_states", None) + + # Update metadata without integration data + await User.prisma().update( + where={"id": user.id}, + data={"metadata": Json(raw_metadata)}, + ) diff --git a/autogpt_platform/backend/backend/exec.py b/autogpt_platform/backend/backend/exec.py new file mode 100644 index 000000000000..6e902c64df58 --- /dev/null +++ b/autogpt_platform/backend/backend/exec.py @@ -0,0 +1,16 @@ +from backend.app import run_processes +from backend.executor import DatabaseManager, ExecutionManager + + +def main(): + """ + Run all the processes required for the AutoGPT-server REST API. 
+ """ + run_processes( + DatabaseManager(), + ExecutionManager(), + ) + + +if __name__ == "__main__": + main() diff --git a/autogpt_platform/backend/backend/executor/__init__.py b/autogpt_platform/backend/backend/executor/__init__.py new file mode 100644 index 000000000000..59a3595eea29 --- /dev/null +++ b/autogpt_platform/backend/backend/executor/__init__.py @@ -0,0 +1,9 @@ +from .database import DatabaseManager +from .manager import ExecutionManager +from .scheduler import ExecutionScheduler + +__all__ = [ + "DatabaseManager", + "ExecutionManager", + "ExecutionScheduler", +] diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py new file mode 100644 index 000000000000..1dee046ccca3 --- /dev/null +++ b/autogpt_platform/backend/backend/executor/database.py @@ -0,0 +1,91 @@ +from functools import wraps +from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast + +from backend.data.credit import get_user_credit_model +from backend.data.execution import ( + ExecutionResult, + NodeExecutionEntry, + RedisExecutionEventBus, + create_graph_execution, + get_execution_results, + get_incomplete_executions, + get_latest_execution, + update_execution_status, + update_graph_execution_stats, + update_node_execution_stats, + upsert_execution_input, + upsert_execution_output, +) +from backend.data.graph import get_graph, get_node +from backend.data.user import ( + get_user_integrations, + get_user_metadata, + update_user_integrations, + update_user_metadata, +) +from backend.util.service import AppService, expose, register_pydantic_serializers +from backend.util.settings import Config + +P = ParamSpec("P") +R = TypeVar("R") +config = Config() + + +class DatabaseManager(AppService): + def __init__(self): + super().__init__() + self.use_db = True + self.use_redis = True + self.event_queue = RedisExecutionEventBus() + + @classmethod + def get_port(cls) -> int: + return config.database_api_port + + @expose + def send_execution_update(self, execution_result: ExecutionResult): + self.event_queue.publish(execution_result) + + @staticmethod + def exposed_run_and_wait( + f: Callable[P, Coroutine[None, None, R]] + ) -> Callable[Concatenate[object, P], R]: + @expose + @wraps(f) + def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R: + coroutine = f(*args, **kwargs) + res = self.run_and_wait(coroutine) + return res + + # Register serializers for annotations on bare function + register_pydantic_serializers(f) + + return wrapper + + # Executions + create_graph_execution = exposed_run_and_wait(create_graph_execution) + get_execution_results = exposed_run_and_wait(get_execution_results) + get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions) + get_latest_execution = exposed_run_and_wait(get_latest_execution) + update_execution_status = exposed_run_and_wait(update_execution_status) + update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats) + update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats) + upsert_execution_input = exposed_run_and_wait(upsert_execution_input) + upsert_execution_output = exposed_run_and_wait(upsert_execution_output) + + # Graphs + get_node = exposed_run_and_wait(get_node) + get_graph = exposed_run_and_wait(get_graph) + + # Credits + user_credit_model = get_user_credit_model() + spend_credits = cast( + Callable[[Any, NodeExecutionEntry, float, float], int], + exposed_run_and_wait(user_credit_model.spend_credits), + ) + + # User + 
User Metadata + User Integrations + get_user_metadata = exposed_run_and_wait(get_user_metadata) + update_user_metadata = exposed_run_and_wait(update_user_metadata) + get_user_integrations = exposed_run_and_wait(get_user_integrations) + update_user_integrations = exposed_run_and_wait(update_user_integrations) diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py new file mode 100644 index 000000000000..ae0166e03263 --- /dev/null +++ b/autogpt_platform/backend/backend/executor/manager.py @@ -0,0 +1,988 @@ +import atexit +import logging +import multiprocessing +import os +import signal +import sys +import threading +from concurrent.futures import Future, ProcessPoolExecutor +from contextlib import contextmanager +from multiprocessing.pool import AsyncResult, Pool +from typing import TYPE_CHECKING, Any, Generator, Optional, TypeVar, cast + +from redis.lock import Lock as RedisLock + +if TYPE_CHECKING: + from backend.executor import DatabaseManager + +from autogpt_libs.utils.cache import thread_cached + +from backend.blocks.agent import AgentExecutorBlock +from backend.data import redis +from backend.data.block import ( + Block, + BlockData, + BlockInput, + BlockSchema, + BlockType, + get_block, +) +from backend.data.execution import ( + ExecutionQueue, + ExecutionResult, + ExecutionStatus, + GraphExecutionEntry, + NodeExecutionEntry, + merge_execution_input, + parse_execution_output, +) +from backend.data.graph import GraphModel, Link, Node +from backend.integrations.creds_manager import IntegrationCredentialsManager +from backend.util import json +from backend.util.decorator import error_logged, time_measured +from backend.util.file import clean_exec_files +from backend.util.logging import configure_logging +from backend.util.process import set_service_name +from backend.util.service import ( + AppService, + close_service_client, + expose, + get_service_client, +) +from backend.util.settings import Settings +from backend.util.type import convert + +logger = logging.getLogger(__name__) +settings = Settings() + + +class LogMetadata: + def __init__( + self, + user_id: str, + graph_eid: str, + graph_id: str, + node_eid: str, + node_id: str, + block_name: str, + ): + self.metadata = { + "component": "ExecutionManager", + "user_id": user_id, + "graph_eid": graph_eid, + "graph_id": graph_id, + "node_eid": node_eid, + "node_id": node_id, + "block_name": block_name, + } + self.prefix = f"[ExecutionManager|uid:{user_id}|gid:{graph_id}|nid:{node_id}]|geid:{graph_eid}|nid:{node_eid}|{block_name}]" + + def info(self, msg: str, **extra): + msg = self._wrap(msg, **extra) + logger.info(msg, extra={"json_fields": {**self.metadata, **extra}}) + + def warning(self, msg: str, **extra): + msg = self._wrap(msg, **extra) + logger.warning(msg, extra={"json_fields": {**self.metadata, **extra}}) + + def error(self, msg: str, **extra): + msg = self._wrap(msg, **extra) + logger.error(msg, extra={"json_fields": {**self.metadata, **extra}}) + + def debug(self, msg: str, **extra): + msg = self._wrap(msg, **extra) + logger.debug(msg, extra={"json_fields": {**self.metadata, **extra}}) + + def exception(self, msg: str, **extra): + msg = self._wrap(msg, **extra) + logger.exception(msg, extra={"json_fields": {**self.metadata, **extra}}) + + def _wrap(self, msg: str, **extra): + return f"{self.prefix} {msg} {extra}" + + +T = TypeVar("T") +ExecutionStream = Generator[NodeExecutionEntry, None, None] + + +def execute_node( + db_client: "DatabaseManager", + 
creds_manager: IntegrationCredentialsManager, + data: NodeExecutionEntry, + execution_stats: dict[str, Any] | None = None, +) -> ExecutionStream: + """ + Execute a node in the graph. This will trigger a block execution on a node, + persist the execution result, and return the subsequent node to be executed. + + Args: + db_client: The client to send execution updates to the server. + creds_manager: The manager to acquire and release credentials. + data: The execution data for executing the current node. + execution_stats: The execution statistics to be updated. + + Returns: + The subsequent node to be enqueued, or None if there is no subsequent node. + """ + user_id = data.user_id + graph_exec_id = data.graph_exec_id + graph_id = data.graph_id + node_exec_id = data.node_exec_id + node_id = data.node_id + + def update_execution(status: ExecutionStatus) -> ExecutionResult: + exec_update = db_client.update_execution_status(node_exec_id, status) + db_client.send_execution_update(exec_update) + return exec_update + + node = db_client.get_node(node_id) + + node_block = get_block(node.block_id) + if not node_block: + logger.error(f"Block {node.block_id} not found.") + return + + log_metadata = LogMetadata( + user_id=user_id, + graph_eid=graph_exec_id, + graph_id=graph_id, + node_eid=node_exec_id, + node_id=node_id, + block_name=node_block.name, + ) + + # Sanity check: validate the execution input. + input_data, error = validate_exec(node, data.data, resolve_input=False) + if input_data is None: + log_metadata.error(f"Skip execution, input validation error: {error}") + db_client.upsert_execution_output(node_exec_id, "error", error) + update_execution(ExecutionStatus.FAILED) + return + + # Re-shape the input data for agent block. + # AgentExecutorBlock specially separate the node input_data & its input_default. + if isinstance(node_block, AgentExecutorBlock): + input_data = {**node.input_default, "data": input_data} + + # Execute the node + input_data_str = json.dumps(input_data) + input_size = len(input_data_str) + log_metadata.info("Executed node with input", input=input_data_str) + update_execution(ExecutionStatus.RUNNING) + + # Inject extra execution arguments for the blocks via kwargs + extra_exec_kwargs: dict = { + "graph_id": graph_id, + "node_id": node_id, + "graph_exec_id": graph_exec_id, + "node_exec_id": node_exec_id, + "user_id": user_id, + } + + # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent + # changes during execution. ⚠️ This means a set of credentials can only be used by + # one (running) block at a time; simultaneous execution of blocks using same + # credentials is not supported. 
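+    # creds_lock is released in the finally block at the end of this function, even if the block raises.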
+ creds_lock = None + input_model = cast(type[BlockSchema], node_block.input_schema) + for field_name, input_type in input_model.get_credentials_fields().items(): + credentials_meta = input_type(**input_data[field_name]) + credentials, creds_lock = creds_manager.acquire(user_id, credentials_meta.id) + extra_exec_kwargs[field_name] = credentials + + output_size = 0 + try: + for output_name, output_data in node_block.execute( + input_data, **extra_exec_kwargs + ): + output_size += len(json.dumps(output_data)) + log_metadata.info("Node produced output", **{output_name: output_data}) + db_client.upsert_execution_output(node_exec_id, output_name, output_data) + + for execution in _enqueue_next_nodes( + db_client=db_client, + node=node, + output=(output_name, output_data), + user_id=user_id, + graph_exec_id=graph_exec_id, + graph_id=graph_id, + log_metadata=log_metadata, + ): + yield execution + + # Update execution status and spend credits + res = update_execution(ExecutionStatus.COMPLETED) + s = input_size + output_size + t = ( + (res.end_time - res.start_time).total_seconds() + if res.end_time and res.start_time + else 0 + ) + data.data = input_data + db_client.spend_credits(data, s, t) + + except Exception as e: + error_msg = str(e) + db_client.upsert_execution_output(node_exec_id, "error", error_msg) + update_execution(ExecutionStatus.FAILED) + + for execution in _enqueue_next_nodes( + db_client=db_client, + node=node, + output=("error", error_msg), + user_id=user_id, + graph_exec_id=graph_exec_id, + graph_id=graph_id, + log_metadata=log_metadata, + ): + yield execution + + raise e + finally: + # Ensure credentials are released even if execution fails + if creds_lock: + try: + creds_lock.release() + except Exception as e: + log_metadata.error(f"Failed to release credentials lock: {e}") + + # Update execution stats + if execution_stats is not None: + execution_stats.update(node_block.execution_stats) + execution_stats["input_size"] = input_size + execution_stats["output_size"] = output_size + + +def _enqueue_next_nodes( + db_client: "DatabaseManager", + node: Node, + output: BlockData, + user_id: str, + graph_exec_id: str, + graph_id: str, + log_metadata: LogMetadata, +) -> list[NodeExecutionEntry]: + def add_enqueued_execution( + node_exec_id: str, node_id: str, block_id: str, data: BlockInput + ) -> NodeExecutionEntry: + exec_update = db_client.update_execution_status( + node_exec_id, ExecutionStatus.QUEUED, data + ) + db_client.send_execution_update(exec_update) + return NodeExecutionEntry( + user_id=user_id, + graph_exec_id=graph_exec_id, + graph_id=graph_id, + node_exec_id=node_exec_id, + node_id=node_id, + block_id=block_id, + data=data, + ) + + def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]: + enqueued_executions = [] + next_output_name = node_link.source_name + next_input_name = node_link.sink_name + next_node_id = node_link.sink_id + + next_data = parse_execution_output(output, next_output_name) + if next_data is None: + return enqueued_executions + + next_node = db_client.get_node(next_node_id) + + # Multiple node can register the same next node, we need this to be atomic + # To avoid same execution to be enqueued multiple times, + # Or the same input to be consumed multiple times. + with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"): + # Add output data to the earliest incomplete execution, or create a new one. 
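+            # upsert_execution_input returns that execution's id together with all of the input collected for it so far.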
+ next_node_exec_id, next_node_input = db_client.upsert_execution_input( + node_id=next_node_id, + graph_exec_id=graph_exec_id, + input_name=next_input_name, + input_data=next_data, + ) + + # Complete missing static input pins data using the last execution input. + static_link_names = { + link.sink_name + for link in next_node.input_links + if link.is_static and link.sink_name not in next_node_input + } + if static_link_names and ( + latest_execution := db_client.get_latest_execution( + next_node_id, graph_exec_id + ) + ): + for name in static_link_names: + next_node_input[name] = latest_execution.input_data.get(name) + + # Validate the input data for the next node. + next_node_input, validation_msg = validate_exec(next_node, next_node_input) + suffix = f"{next_output_name}>{next_input_name}~{next_node_exec_id}:{validation_msg}" + + # Incomplete input data, skip queueing the execution. + if not next_node_input: + log_metadata.warning(f"Skipped queueing {suffix}") + return enqueued_executions + + # Input is complete, enqueue the execution. + log_metadata.info(f"Enqueued {suffix}") + enqueued_executions.append( + add_enqueued_execution( + node_exec_id=next_node_exec_id, + node_id=next_node_id, + block_id=next_node.block_id, + data=next_node_input, + ) + ) + + # Next execution stops here if the link is not static. + if not node_link.is_static: + return enqueued_executions + + # If link is static, there could be some incomplete executions waiting for it. + # Load and complete the input missing input data, and try to re-enqueue them. + for iexec in db_client.get_incomplete_executions( + next_node_id, graph_exec_id + ): + idata = iexec.input_data + ineid = iexec.node_exec_id + + static_link_names = { + link.sink_name + for link in next_node.input_links + if link.is_static and link.sink_name not in idata + } + for input_name in static_link_names: + idata[input_name] = next_node_input[input_name] + + idata, msg = validate_exec(next_node, idata) + suffix = f"{next_output_name}>{next_input_name}~{ineid}:{msg}" + if not idata: + log_metadata.info(f"Enqueueing static-link skipped: {suffix}") + continue + log_metadata.info(f"Enqueueing static-link execution {suffix}") + enqueued_executions.append( + add_enqueued_execution( + node_exec_id=iexec.node_exec_id, + node_id=next_node_id, + block_id=next_node.block_id, + data=idata, + ) + ) + return enqueued_executions + + return [ + execution + for link in node.output_links + for execution in register_next_executions(link) + ] + + +def validate_exec( + node: Node, + data: BlockInput, + resolve_input: bool = True, +) -> tuple[BlockInput | None, str]: + """ + Validate the input data for a node execution. + + Args: + node: The node to execute. + data: The input data for the node execution. + resolve_input: Whether to resolve dynamic pins into dict/list/object. + + Returns: + A tuple of the validated data and the block name. + If the data is invalid, the first element will be None, and the second element + will be an error message. + If the data is valid, the first element will be the resolved input data, and + the second element will be the block name. + """ + node_block: Block | None = get_block(node.block_id) + if not node_block: + return None, f"Block for {node.block_id} not found." + + if isinstance(node_block, AgentExecutorBlock): + # Validate the execution metadata for the agent executor block. 
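+        # The node's input_default carries the sub-graph's input_schema (a JSON schema with a "required" list) and a "data" dict of preset values; those are used below in place of the block's own schema.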
+ try: + exec_data = AgentExecutorBlock.Input(**node.input_default) + except Exception as e: + return None, f"Input data doesn't match {node_block.name}: {str(e)}" + + # Validation input + input_schema = exec_data.input_schema + required_fields = set(input_schema["required"]) + input_default = exec_data.data + else: + # Convert non-matching data types to the expected input schema. + for name, data_type in node_block.input_schema.__annotations__.items(): + if (value := data.get(name)) and (type(value) is not data_type): + data[name] = convert(value, data_type) + + # Validation input + input_schema = node_block.input_schema.jsonschema() + required_fields = node_block.input_schema.get_required_fields() + input_default = node.input_default + + # Input data (without default values) should contain all required fields. + error_prefix = f"Input data missing or mismatch for `{node_block.name}`:" + input_fields_from_nodes = {link.sink_name for link in node.input_links} + if not input_fields_from_nodes.issubset(data): + return None, f"{error_prefix} {input_fields_from_nodes - set(data)}" + + # Merge input data with default values and resolve dynamic dict/list/object pins. + data = {**input_default, **data} + if resolve_input: + data = merge_execution_input(data) + + # Input data post-merge should contain all required fields from the schema. + if not required_fields.issubset(data): + return None, f"{error_prefix} {required_fields - set(data)}" + + # Last validation: Validate the input values against the schema. + if error := json.validate_with_jsonschema(schema=input_schema, data=data): + error_message = f"{error_prefix} {error}" + logger.error(error_message) + return None, error_message + + return data, node_block.name + + +class Executor: + """ + This class contains event handlers for the process pool executor events. + + The main events are: + on_node_executor_start: Initialize the process that executes the node. + on_node_execution: Execution logic for a node. + + on_graph_executor_start: Initialize the process that executes the graph. + on_graph_execution: Execution logic for a graph. + + The execution flow: + 1. Graph execution request is added to the queue. + 2. Graph executor loop picks the request from the queue. + 3. Graph executor loop submits the graph execution request to the executor pool. + [on_graph_execution] + 4. Graph executor initialize the node execution queue. + 5. Graph executor adds the starting nodes to the node execution queue. + 6. Graph executor waits for all nodes to be executed. + [on_node_execution] + 7. Node executor picks the node execution request from the queue. + 8. Node executor executes the node. + 9. Node executor enqueues the next executed nodes to the node execution queue. 
+ """ + + @classmethod + def on_node_executor_start(cls): + configure_logging() + set_service_name("NodeExecutor") + redis.connect() + cls.pid = os.getpid() + cls.db_client = get_db_client() + cls.creds_manager = IntegrationCredentialsManager() + + # Set up shutdown handlers + cls.shutdown_lock = threading.Lock() + atexit.register(cls.on_node_executor_stop) # handle regular shutdown + signal.signal( # handle termination + signal.SIGTERM, lambda _, __: cls.on_node_executor_sigterm() + ) + + @classmethod + def on_node_executor_stop(cls): + if not cls.shutdown_lock.acquire(blocking=False): + return # already shutting down + + logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Releasing locks...") + cls.creds_manager.release_all_locks() + logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting Redis...") + redis.disconnect() + logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting DB manager...") + close_service_client(cls.db_client) + logger.info(f"[on_node_executor_stop {cls.pid}] ✅ Finished cleanup") + + @classmethod + def on_node_executor_sigterm(cls): + llprint(f"[on_node_executor_sigterm {cls.pid}] ⚠️ SIGTERM received") + if not cls.shutdown_lock.acquire(blocking=False): + return # already shutting down + + llprint(f"[on_node_executor_stop {cls.pid}] ⏳ Releasing locks...") + cls.creds_manager.release_all_locks() + llprint(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting Redis...") + redis.disconnect() + llprint(f"[on_node_executor_stop {cls.pid}] ✅ Finished cleanup") + sys.exit(0) + + @classmethod + @error_logged + def on_node_execution( + cls, + q: ExecutionQueue[NodeExecutionEntry], + node_exec: NodeExecutionEntry, + ) -> dict[str, Any]: + log_metadata = LogMetadata( + user_id=node_exec.user_id, + graph_eid=node_exec.graph_exec_id, + graph_id=node_exec.graph_id, + node_eid=node_exec.node_exec_id, + node_id=node_exec.node_id, + block_name="-", + ) + + execution_stats = {} + timing_info, _ = cls._on_node_execution( + q, node_exec, log_metadata, execution_stats + ) + execution_stats["walltime"] = timing_info.wall_time + execution_stats["cputime"] = timing_info.cpu_time + + cls.db_client.update_node_execution_stats( + node_exec.node_exec_id, execution_stats + ) + return execution_stats + + @classmethod + @time_measured + def _on_node_execution( + cls, + q: ExecutionQueue[NodeExecutionEntry], + node_exec: NodeExecutionEntry, + log_metadata: LogMetadata, + stats: dict[str, Any] | None = None, + ): + try: + log_metadata.info(f"Start node execution {node_exec.node_exec_id}") + for execution in execute_node( + cls.db_client, cls.creds_manager, node_exec, stats + ): + q.add(execution) + log_metadata.info(f"Finished node execution {node_exec.node_exec_id}") + except Exception as e: + # Avoid user error being marked as an actual error. 
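+ # A ValueError (e.g. failed input validation) is logged at info level; any other exception is logged with a full stack trace.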
+ if isinstance(e, ValueError): + log_metadata.info( + f"Failed node execution {node_exec.node_exec_id}: {e}" + ) + else: + log_metadata.exception( + f"Failed node execution {node_exec.node_exec_id}: {e}" + ) + + @classmethod + def on_graph_executor_start(cls): + configure_logging() + set_service_name("GraphExecutor") + + cls.db_client = get_db_client() + cls.pool_size = settings.config.num_node_workers + cls.pid = os.getpid() + cls._init_node_executor_pool() + logger.info( + f"Graph executor {cls.pid} started with {cls.pool_size} node workers" + ) + + # Set up shutdown handler + atexit.register(cls.on_graph_executor_stop) + + @classmethod + def on_graph_executor_stop(cls): + prefix = f"[on_graph_executor_stop {cls.pid}]" + logger.info(f"{prefix} ⏳ Terminating node executor pool...") + cls.executor.terminate() + logger.info(f"{prefix} ⏳ Disconnecting DB manager...") + close_service_client(cls.db_client) + logger.info(f"{prefix} ✅ Finished cleanup") + + @classmethod + def _init_node_executor_pool(cls): + cls.executor = Pool( + processes=cls.pool_size, + initializer=cls.on_node_executor_start, + ) + + @classmethod + @error_logged + def on_graph_execution( + cls, graph_exec: GraphExecutionEntry, cancel: threading.Event + ): + log_metadata = LogMetadata( + user_id=graph_exec.user_id, + graph_eid=graph_exec.graph_exec_id, + graph_id=graph_exec.graph_id, + node_id="*", + node_eid="*", + block_name="-", + ) + timing_info, (exec_stats, status, error) = cls._on_graph_execution( + graph_exec, cancel, log_metadata + ) + exec_stats["walltime"] = timing_info.wall_time + exec_stats["cputime"] = timing_info.cpu_time + exec_stats["error"] = str(error) if error else None + result = cls.db_client.update_graph_execution_stats( + graph_exec_id=graph_exec.graph_exec_id, + status=status, + stats=exec_stats, + ) + cls.db_client.send_execution_update(result) + + @classmethod + @time_measured + def _on_graph_execution( + cls, + graph_exec: GraphExecutionEntry, + cancel: threading.Event, + log_metadata: LogMetadata, + ) -> tuple[dict[str, Any], ExecutionStatus, Exception | None]: + """ + Returns: + dict: The execution statistics of the graph execution. + ExecutionStatus: The final status of the graph execution. + Exception | None: The error that occurred during the execution, if any. 
+ """ + log_metadata.info(f"Start graph execution {graph_exec.graph_exec_id}") + exec_stats = { + "nodes_walltime": 0, + "nodes_cputime": 0, + "node_count": 0, + } + error = None + finished = False + + def cancel_handler(): + while not cancel.is_set(): + cancel.wait(1) + if finished: + return + cls.executor.terminate() + log_metadata.info(f"Terminated graph execution {graph_exec.graph_exec_id}") + cls._init_node_executor_pool() + + cancel_thread = threading.Thread(target=cancel_handler) + cancel_thread.start() + + try: + queue = ExecutionQueue[NodeExecutionEntry]() + for node_exec in graph_exec.start_node_execs: + exec_update = cls.db_client.update_execution_status( + node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.data + ) + cls.db_client.send_execution_update(exec_update) + queue.add(node_exec) + + running_executions: dict[str, AsyncResult] = {} + + def make_exec_callback(exec_data: NodeExecutionEntry): + node_id = exec_data.node_id + + def callback(result: object): + running_executions.pop(node_id) + nonlocal exec_stats + if isinstance(result, dict): + exec_stats["node_count"] += 1 + exec_stats["nodes_cputime"] += result.get("cputime", 0) + exec_stats["nodes_walltime"] += result.get("walltime", 0) + + return callback + + while not queue.empty(): + if cancel.is_set(): + return exec_stats, ExecutionStatus.TERMINATED, error + + exec_data = queue.get() + + # Avoid parallel execution of the same node. + execution = running_executions.get(exec_data.node_id) + if execution and not execution.ready(): + # TODO (performance improvement): + # Wait for the completion of the same node execution is blocking. + # To improve this we need a separate queue for each node. + # Re-enqueueing the data back to the queue will disrupt the order. + execution.wait() + + log_metadata.debug( + f"Dispatching node execution {exec_data.node_exec_id} " + f"for node {exec_data.node_id}", + ) + running_executions[exec_data.node_id] = cls.executor.apply_async( + cls.on_node_execution, + (queue, exec_data), + callback=make_exec_callback(exec_data), + ) + + # Avoid terminating graph execution when some nodes are still running. 
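+ # Poll the in-flight node executions so that any new entries they enqueue are picked up by the outer loop.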
+ while queue.empty() and running_executions: + log_metadata.debug( + f"Queue empty; running nodes: {list(running_executions.keys())}" + ) + for node_id, execution in list(running_executions.items()): + if cancel.is_set(): + return exec_stats, ExecutionStatus.TERMINATED, error + + if not queue.empty(): + break # yield to parent loop to execute new queue items + + log_metadata.debug(f"Waiting on execution of node {node_id}") + execution.wait(3) + + log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}") + except Exception as e: + log_metadata.exception( + f"Failed graph execution {graph_exec.graph_exec_id}: {e}" + ) + error = e + finally: + if not cancel.is_set(): + finished = True + cancel.set() + cancel_thread.join() + clean_exec_files(graph_exec.graph_exec_id) + + return ( + exec_stats, + ExecutionStatus.FAILED if error else ExecutionStatus.COMPLETED, + error, + ) + + +class ExecutionManager(AppService): + def __init__(self): + super().__init__() + self.use_redis = True + self.use_supabase = True + self.pool_size = settings.config.num_graph_workers + self.queue = ExecutionQueue[GraphExecutionEntry]() + self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {} + + @classmethod + def get_port(cls) -> int: + return settings.config.execution_manager_port + + def run_service(self): + from backend.integrations.credentials_store import IntegrationCredentialsStore + + self.credentials_store = IntegrationCredentialsStore() + self.executor = ProcessPoolExecutor( + max_workers=self.pool_size, + initializer=Executor.on_graph_executor_start, + ) + sync_manager = multiprocessing.Manager() + logger.info( + f"[{self.service_name}] Started with max-{self.pool_size} graph workers" + ) + while True: + graph_exec_data = self.queue.get() + graph_exec_id = graph_exec_data.graph_exec_id + logger.debug( + f"[ExecutionManager] Dispatching graph execution {graph_exec_id}" + ) + cancel_event = sync_manager.Event() + future = self.executor.submit( + Executor.on_graph_execution, graph_exec_data, cancel_event + ) + self.active_graph_runs[graph_exec_id] = (future, cancel_event) + future.add_done_callback( + lambda _: self.active_graph_runs.pop(graph_exec_id, None) + ) + + def cleanup(self): + logger.info(f"[{__class__.__name__}] ⏳ Shutting down graph executor pool...") + self.executor.shutdown(cancel_futures=True) + + super().cleanup() + + @property + def db_client(self) -> "DatabaseManager": + return get_db_client() + + @expose + def add_execution( + self, + graph_id: str, + data: BlockInput, + user_id: str, + graph_version: Optional[int] = None, + ) -> GraphExecutionEntry: + graph: GraphModel | None = self.db_client.get_graph( + graph_id=graph_id, user_id=user_id, version=graph_version + ) + if not graph: + raise ValueError(f"Graph #{graph_id} not found.") + + graph.validate_graph(for_run=True) + self._validate_node_input_credentials(graph, user_id) + + nodes_input = [] + for node in graph.starting_nodes: + input_data = {} + block = get_block(node.block_id) + + # Invalid block & Note block should never be executed. + if not block or block.block_type == BlockType.NOTE: + continue + + # Extract request input data, and assign it to the input pin. 
+ if block.block_type == BlockType.INPUT: + name = node.input_default.get("name") + if name in data.get("node_input", {}): + input_data = {"value": data["node_input"][name]} + + # Extract webhook payload, and assign it to the input pin + webhook_payload_key = f"webhook_{node.webhook_id}_payload" + if ( + block.block_type in (BlockType.WEBHOOK, BlockType.WEBHOOK_MANUAL) + and node.webhook_id + ): + if webhook_payload_key not in data: + raise ValueError( + f"Node {block.name} #{node.id} webhook payload is missing" + ) + input_data = {"payload": data[webhook_payload_key]} + + input_data, error = validate_exec(node, input_data) + if input_data is None: + raise ValueError(error) + else: + nodes_input.append((node.id, input_data)) + + graph_exec_id, node_execs = self.db_client.create_graph_execution( + graph_id=graph_id, + graph_version=graph.version, + nodes_input=nodes_input, + user_id=user_id, + ) + + starting_node_execs = [] + for node_exec in node_execs: + starting_node_execs.append( + NodeExecutionEntry( + user_id=user_id, + graph_exec_id=node_exec.graph_exec_id, + graph_id=node_exec.graph_id, + node_exec_id=node_exec.node_exec_id, + node_id=node_exec.node_id, + block_id=node_exec.block_id, + data=node_exec.input_data, + ) + ) + + graph_exec = GraphExecutionEntry( + user_id=user_id, + graph_id=graph_id, + graph_exec_id=graph_exec_id, + start_node_execs=starting_node_execs, + ) + self.queue.add(graph_exec) + + return graph_exec + + @expose + def cancel_execution(self, graph_exec_id: str) -> None: + """ + Mechanism: + 1. Set the cancel event + 2. Graph executor's cancel handler thread detects the event, terminates workers, + reinitializes worker pool, and returns. + 3. Update execution statuses in DB and set `error` outputs to `"TERMINATED"`. + """ + if graph_exec_id not in self.active_graph_runs: + raise Exception( + f"Graph execution #{graph_exec_id} not active/running: " + "possibly already completed/cancelled." 
+ ) + + future, cancel_event = self.active_graph_runs[graph_exec_id] + if cancel_event.is_set(): + return + + cancel_event.set() + future.result() + + # Update the status of the unfinished node executions + node_execs = self.db_client.get_execution_results(graph_exec_id) + for node_exec in node_execs: + if node_exec.status not in ( + ExecutionStatus.COMPLETED, + ExecutionStatus.FAILED, + ): + exec_update = self.db_client.update_execution_status( + node_exec.node_exec_id, ExecutionStatus.TERMINATED + ) + self.db_client.send_execution_update(exec_update) + + def _validate_node_input_credentials(self, graph: GraphModel, user_id: str): + """Checks all credentials for all nodes of the graph""" + + for node in graph.nodes: + block = get_block(node.block_id) + if not block: + raise ValueError(f"Unknown block {node.block_id} for node #{node.id}") + + # Find any fields of type CredentialsMetaInput + credentials_fields = cast( + type[BlockSchema], block.input_schema + ).get_credentials_fields() + if not credentials_fields: + continue + + for field_name, credentials_meta_type in credentials_fields.items(): + credentials_meta = credentials_meta_type.model_validate( + node.input_default[field_name] + ) + # Fetch the corresponding Credentials and perform sanity checks + credentials = self.credentials_store.get_creds_by_id( + user_id, credentials_meta.id + ) + if not credentials: + raise ValueError( + f"Unknown credentials #{credentials_meta.id} " + f"for node #{node.id} input '{field_name}'" + ) + if ( + credentials.provider != credentials_meta.provider + or credentials.type != credentials_meta.type + ): + logger.warning( + f"Invalid credentials #{credentials.id} for node #{node.id}: " + "type/provider mismatch: " + f"{credentials_meta.type}<>{credentials.type};" + f"{credentials_meta.provider}<>{credentials.provider}" + ) + raise ValueError( + f"Invalid credentials #{credentials.id} for node #{node.id}: " + "type/provider mismatch" + ) + + +# ------- UTILITIES ------- # + + +@thread_cached +def get_db_client() -> "DatabaseManager": + from backend.executor import DatabaseManager + + return get_service_client(DatabaseManager) + + +@contextmanager +def synchronized(key: str, timeout: int = 60): + lock: RedisLock = redis.get_redis().lock(f"lock:{key}", timeout=timeout) + try: + lock.acquire() + yield + finally: + if lock.locked(): + lock.release() + + +def llprint(message: str): + """ + Low-level print/log helper function for use in signal handlers. + Regular log/print statements are not allowed in signal handlers. 
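+ Writing directly to the stdout file descriptor via os.write avoids the buffering and locks that make print/logging unsafe inside signal handlers.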
+ """ + if logger.getEffectiveLevel() == logging.DEBUG: + os.write(sys.stdout.fileno(), (message + "\n").encode()) diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py new file mode 100644 index 000000000000..eee45819c5fd --- /dev/null +++ b/autogpt_platform/backend/backend/executor/scheduler.py @@ -0,0 +1,186 @@ +import logging +import os +from urllib.parse import parse_qs, urlencode, urlparse, urlunparse + +from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED +from apscheduler.job import Job as JobObj +from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore +from apscheduler.schedulers.blocking import BlockingScheduler +from apscheduler.triggers.cron import CronTrigger +from autogpt_libs.utils.cache import thread_cached +from dotenv import load_dotenv +from pydantic import BaseModel +from sqlalchemy import MetaData, create_engine + +from backend.data.block import BlockInput +from backend.executor.manager import ExecutionManager +from backend.util.service import AppService, expose, get_service_client +from backend.util.settings import Config + + +def _extract_schema_from_url(database_url) -> tuple[str, str]: + """ + Extracts the schema from the DATABASE_URL and returns the schema and cleaned URL. + """ + parsed_url = urlparse(database_url) + query_params = parse_qs(parsed_url.query) + + # Extract the 'schema' parameter + schema_list = query_params.pop("schema", None) + schema = schema_list[0] if schema_list else "public" + + # Reconstruct the query string without the 'schema' parameter + new_query = urlencode(query_params, doseq=True) + new_parsed_url = parsed_url._replace(query=new_query) + database_url_clean = str(urlunparse(new_parsed_url)) + + return schema, database_url_clean + + +logger = logging.getLogger(__name__) +config = Config() + + +def log(msg, **kwargs): + logger.info("[ExecutionScheduler] " + msg, **kwargs) + + +def job_listener(event): + """Logs job execution outcomes for better monitoring.""" + if event.exception: + log(f"Job {event.job_id} failed.") + else: + log(f"Job {event.job_id} completed successfully.") + + +@thread_cached +def get_execution_client() -> ExecutionManager: + return get_service_client(ExecutionManager) + + +def execute_graph(**kwargs): + args = JobArgs(**kwargs) + try: + log(f"Executing recurring job for graph #{args.graph_id}") + get_execution_client().add_execution( + graph_id=args.graph_id, + data=args.input_data, + user_id=args.user_id, + graph_version=args.graph_version, + ) + except Exception as e: + logger.exception(f"Error executing graph {args.graph_id}: {e}") + + +class JobArgs(BaseModel): + graph_id: str + input_data: BlockInput + user_id: str + graph_version: int + cron: str + + +class JobInfo(JobArgs): + id: str + name: str + next_run_time: str + + @staticmethod + def from_db(job_args: JobArgs, job_obj: JobObj) -> "JobInfo": + return JobInfo( + id=job_obj.id, + name=job_obj.name, + next_run_time=job_obj.next_run_time.isoformat(), + **job_args.model_dump(), + ) + + +class ExecutionScheduler(AppService): + scheduler: BlockingScheduler + + @classmethod + def get_port(cls) -> int: + return config.execution_scheduler_port + + @classmethod + def db_pool_size(cls) -> int: + return config.scheduler_db_pool_size + + @property + @thread_cached + def execution_client(self) -> ExecutionManager: + return get_service_client(ExecutionManager) + + def run_service(self): + load_dotenv() + db_schema, db_url = _extract_schema_from_url(os.getenv("DATABASE_URL")) + 
self.scheduler = BlockingScheduler( + jobstores={ + "default": SQLAlchemyJobStore( + engine=create_engine( + url=db_url, + pool_size=self.db_pool_size(), + max_overflow=0, + ), + metadata=MetaData(schema=db_schema), + ) + } + ) + self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR) + self.scheduler.start() + + @expose + def add_execution_schedule( + self, + graph_id: str, + graph_version: int, + cron: str, + input_data: BlockInput, + user_id: str, + ) -> JobInfo: + job_args = JobArgs( + graph_id=graph_id, + input_data=input_data, + user_id=user_id, + graph_version=graph_version, + cron=cron, + ) + job = self.scheduler.add_job( + execute_graph, + CronTrigger.from_crontab(cron), + kwargs=job_args.model_dump(), + replace_existing=True, + ) + log(f"Added job {job.id} with cron schedule '{cron}' input data: {input_data}") + return JobInfo.from_db(job_args, job) + + @expose + def delete_schedule(self, schedule_id: str, user_id: str) -> JobInfo: + job = self.scheduler.get_job(schedule_id) + if not job: + log(f"Job {schedule_id} not found.") + raise ValueError(f"Job #{schedule_id} not found.") + + job_args = JobArgs(**job.kwargs) + if job_args.user_id != user_id: + raise ValueError("User ID does not match the job's user ID.") + + log(f"Deleting job {schedule_id}") + job.remove() + + return JobInfo.from_db(job_args, job) + + @expose + def get_execution_schedules( + self, graph_id: str | None = None, user_id: str | None = None + ) -> list[JobInfo]: + schedules = [] + for job in self.scheduler.get_jobs(): + job_args = JobArgs(**job.kwargs) + if ( + job.next_run_time is not None + and (graph_id is None or job_args.graph_id == graph_id) + and (user_id is None or job_args.user_id == user_id) + ): + schedules.append(JobInfo.from_db(job_args, job)) + return schedules diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py new file mode 100644 index 000000000000..79edc769b7f6 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -0,0 +1,363 @@ +import base64 +import hashlib +import secrets +from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING, Optional + +from pydantic import SecretStr + +if TYPE_CHECKING: + from backend.executor.database import DatabaseManager + +from autogpt_libs.utils.cache import thread_cached +from autogpt_libs.utils.synchronize import RedisKeyedMutex + +from backend.data.model import ( + APIKeyCredentials, + Credentials, + OAuth2Credentials, + OAuthState, + UserIntegrations, +) +from backend.util.settings import Settings + +settings = Settings() + +# This is an overrride since ollama doesn't actually require an API key, but the creddential system enforces one be attached +ollama_credentials = APIKeyCredentials( + id="744fdc56-071a-4761-b5a5-0af0ce10a2b5", + provider="ollama", + api_key=SecretStr("FAKE_API_KEY"), + title="Use Credits for Ollama", + expires_at=None, +) + +revid_credentials = APIKeyCredentials( + id="fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", + provider="revid", + api_key=SecretStr(settings.secrets.revid_api_key), + title="Use Credits for Revid", + expires_at=None, +) +ideogram_credentials = APIKeyCredentials( + id="760f84fc-b270-42de-91f6-08efe1b512d0", + provider="ideogram", + api_key=SecretStr(settings.secrets.ideogram_api_key), + title="Use Credits for Ideogram", + expires_at=None, +) +replicate_credentials = APIKeyCredentials( + id="6b9fc200-4726-4973-86c9-cd526f5ce5db", + 
provider="replicate", + api_key=SecretStr(settings.secrets.replicate_api_key), + title="Use Credits for Replicate", + expires_at=None, +) +openai_credentials = APIKeyCredentials( + id="53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", + provider="openai", + api_key=SecretStr(settings.secrets.openai_api_key), + title="Use Credits for OpenAI", + expires_at=None, +) +anthropic_credentials = APIKeyCredentials( + id="24e5d942-d9e3-4798-8151-90143ee55629", + provider="anthropic", + api_key=SecretStr(settings.secrets.anthropic_api_key), + title="Use Credits for Anthropic", + expires_at=None, +) +groq_credentials = APIKeyCredentials( + id="4ec22295-8f97-4dd1-b42b-2c6957a02545", + provider="groq", + api_key=SecretStr(settings.secrets.groq_api_key), + title="Use Credits for Groq", + expires_at=None, +) +did_credentials = APIKeyCredentials( + id="7f7b0654-c36b-4565-8fa7-9a52575dfae2", + provider="d_id", + api_key=SecretStr(settings.secrets.did_api_key), + title="Use Credits for D-ID", + expires_at=None, +) +jina_credentials = APIKeyCredentials( + id="7f26de70-ba0d-494e-ba76-238e65e7b45f", + provider="jina", + api_key=SecretStr(settings.secrets.jina_api_key), + title="Use Credits for Jina", + expires_at=None, +) +unreal_credentials = APIKeyCredentials( + id="66f20754-1b81-48e4-91d0-f4f0dd82145f", + provider="unreal", + api_key=SecretStr(settings.secrets.unreal_speech_api_key), + title="Use Credits for Unreal", + expires_at=None, +) +open_router_credentials = APIKeyCredentials( + id="b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", + provider="open_router", + api_key=SecretStr(settings.secrets.open_router_api_key), + title="Use Credits for Open Router", + expires_at=None, +) +fal_credentials = APIKeyCredentials( + id="6c0f5bd0-9008-4638-9d79-4b40b631803e", + provider="fal", + api_key=SecretStr(settings.secrets.fal_api_key), + title="Use Credits for FAL", + expires_at=None, +) +exa_credentials = APIKeyCredentials( + id="96153e04-9c6c-4486-895f-5bb683b1ecec", + provider="exa", + api_key=SecretStr(settings.secrets.exa_api_key), + title="Use Credits for Exa search", + expires_at=None, +) +e2b_credentials = APIKeyCredentials( + id="78d19fd7-4d59-4a16-8277-3ce310acf2b7", + provider="e2b", + api_key=SecretStr(settings.secrets.e2b_api_key), + title="Use Credits for E2B", + expires_at=None, +) +nvidia_credentials = APIKeyCredentials( + id="96b83908-2789-4dec-9968-18f0ece4ceb3", + provider="nvidia", + api_key=SecretStr(settings.secrets.nvidia_api_key), + title="Use Credits for Nvidia", + expires_at=None, +) +mem0_credentials = APIKeyCredentials( + id="ed55ac19-356e-4243-a6cb-bc599e9b716f", + provider="mem0", + api_key=SecretStr(settings.secrets.mem0_api_key), + title="Use Credits for Mem0", + expires_at=None, +) + + +DEFAULT_CREDENTIALS = [ + ollama_credentials, + revid_credentials, + ideogram_credentials, + replicate_credentials, + openai_credentials, + anthropic_credentials, + groq_credentials, + did_credentials, + jina_credentials, + unreal_credentials, + open_router_credentials, + fal_credentials, + exa_credentials, + e2b_credentials, + nvidia_credentials, + mem0_credentials, +] + + +class IntegrationCredentialsStore: + def __init__(self): + from backend.data.redis import get_redis + + self.locks = RedisKeyedMutex(get_redis()) + + @property + @thread_cached + def db_manager(self) -> "DatabaseManager": + from backend.executor.database import DatabaseManager + from backend.util.service import get_service_client + + return get_service_client(DatabaseManager) + + def add_creds(self, user_id: str, credentials: Credentials) -> None: + 
with self.locked_user_integrations(user_id): + if self.get_creds_by_id(user_id, credentials.id): + raise ValueError( + f"Can not re-create existing credentials #{credentials.id} " + f"for user #{user_id}" + ) + self._set_user_integration_creds( + user_id, [*self.get_all_creds(user_id), credentials] + ) + + def get_all_creds(self, user_id: str) -> list[Credentials]: + users_credentials = self._get_user_integrations(user_id).credentials + all_credentials = users_credentials + # These will always be added + all_credentials.append(ollama_credentials) + + # These will only be added if the API key is set + if settings.secrets.revid_api_key: + all_credentials.append(revid_credentials) + if settings.secrets.ideogram_api_key: + all_credentials.append(ideogram_credentials) + if settings.secrets.groq_api_key: + all_credentials.append(groq_credentials) + if settings.secrets.replicate_api_key: + all_credentials.append(replicate_credentials) + if settings.secrets.openai_api_key: + all_credentials.append(openai_credentials) + if settings.secrets.anthropic_api_key: + all_credentials.append(anthropic_credentials) + if settings.secrets.did_api_key: + all_credentials.append(did_credentials) + if settings.secrets.jina_api_key: + all_credentials.append(jina_credentials) + if settings.secrets.unreal_speech_api_key: + all_credentials.append(unreal_credentials) + if settings.secrets.open_router_api_key: + all_credentials.append(open_router_credentials) + if settings.secrets.fal_api_key: + all_credentials.append(fal_credentials) + if settings.secrets.exa_api_key: + all_credentials.append(exa_credentials) + if settings.secrets.e2b_api_key: + all_credentials.append(e2b_credentials) + if settings.secrets.nvidia_api_key: + all_credentials.append(nvidia_credentials) + if settings.secrets.mem0_api_key: + all_credentials.append(mem0_credentials) + return all_credentials + + def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None: + all_credentials = self.get_all_creds(user_id) + return next((c for c in all_credentials if c.id == credentials_id), None) + + def get_creds_by_provider(self, user_id: str, provider: str) -> list[Credentials]: + credentials = self.get_all_creds(user_id) + return [c for c in credentials if c.provider == provider] + + def get_authorized_providers(self, user_id: str) -> list[str]: + credentials = self.get_all_creds(user_id) + return list(set(c.provider for c in credentials)) + + def update_creds(self, user_id: str, updated: Credentials) -> None: + with self.locked_user_integrations(user_id): + current = self.get_creds_by_id(user_id, updated.id) + if not current: + raise ValueError( + f"Credentials with ID {updated.id} " + f"for user with ID {user_id} not found" + ) + if type(current) is not type(updated): + raise TypeError( + f"Can not update credentials with ID {updated.id} " + f"from type {type(current)} " + f"to type {type(updated)}" + ) + + # Ensure no scopes are removed when updating credentials + if ( + isinstance(updated, OAuth2Credentials) + and isinstance(current, OAuth2Credentials) + and not set(updated.scopes).issuperset(current.scopes) + ): + raise ValueError( + f"Can not update credentials with ID {updated.id} " + f"and scopes {current.scopes} " + f"to more restrictive set of scopes {updated.scopes}" + ) + + # Update the credentials + updated_credentials_list = [ + updated if c.id == updated.id else c + for c in self.get_all_creds(user_id) + ] + self._set_user_integration_creds(user_id, updated_credentials_list) + + def delete_creds_by_id(self, user_id: str, 
credentials_id: str) -> None: + with self.locked_user_integrations(user_id): + filtered_credentials = [ + c for c in self.get_all_creds(user_id) if c.id != credentials_id + ] + self._set_user_integration_creds(user_id, filtered_credentials) + + def store_state_token( + self, user_id: str, provider: str, scopes: list[str], use_pkce: bool = False + ) -> tuple[str, str]: + token = secrets.token_urlsafe(32) + expires_at = datetime.now(timezone.utc) + timedelta(minutes=10) + + (code_challenge, code_verifier) = self._generate_code_challenge() + + state = OAuthState( + token=token, + provider=provider, + code_verifier=code_verifier, + expires_at=int(expires_at.timestamp()), + scopes=scopes, + ) + + with self.locked_user_integrations(user_id): + + user_integrations = self._get_user_integrations(user_id) + oauth_states = user_integrations.oauth_states + oauth_states.append(state) + user_integrations.oauth_states = oauth_states + + self.db_manager.update_user_integrations( + user_id=user_id, data=user_integrations + ) + + return token, code_challenge + + def _generate_code_challenge(self) -> tuple[str, str]: + """ + Generate code challenge using SHA256 from the code verifier. + Currently only SHA256 is supported.(In future if we want to support more methods we can add them here) + """ + code_verifier = secrets.token_urlsafe(128) + sha256_hash = hashlib.sha256(code_verifier.encode("utf-8")).digest() + code_challenge = base64.urlsafe_b64encode(sha256_hash).decode("utf-8") + return code_challenge.replace("=", ""), code_verifier + + def verify_state_token( + self, user_id: str, token: str, provider: str + ) -> Optional[OAuthState]: + with self.locked_user_integrations(user_id): + user_integrations = self._get_user_integrations(user_id) + oauth_states = user_integrations.oauth_states + + now = datetime.now(timezone.utc) + valid_state = next( + ( + state + for state in oauth_states + if state.token == token + and state.provider == provider + and state.expires_at > now.timestamp() + ), + None, + ) + + if valid_state: + # Remove the used state + oauth_states.remove(valid_state) + user_integrations.oauth_states = oauth_states + self.db_manager.update_user_integrations(user_id, user_integrations) + return valid_state + + return None + + def _set_user_integration_creds( + self, user_id: str, credentials: list[Credentials] + ) -> None: + integrations = self._get_user_integrations(user_id) + # Remove default credentials from the list + credentials = [c for c in credentials if c not in DEFAULT_CREDENTIALS] + integrations.credentials = credentials + self.db_manager.update_user_integrations(user_id, integrations) + + def _get_user_integrations(self, user_id: str) -> UserIntegrations: + integrations: UserIntegrations = self.db_manager.get_user_integrations( + user_id=user_id + ) + return integrations + + def locked_user_integrations(self, user_id: str): + key = (f"user:{user_id}", "integrations") + return self.locks.locked(key) diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py new file mode 100644 index 000000000000..89bdb5bc9081 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/creds_manager.py @@ -0,0 +1,175 @@ +import logging +from contextlib import contextmanager +from datetime import datetime +from typing import TYPE_CHECKING + +from autogpt_libs.utils.synchronize import RedisKeyedMutex +from redis.lock import Lock as RedisLock + +from backend.data import redis +from backend.data.model import Credentials 
+from backend.integrations.credentials_store import IntegrationCredentialsStore +from backend.integrations.oauth import HANDLERS_BY_NAME +from backend.util.exceptions import MissingConfigError +from backend.util.settings import Settings + +if TYPE_CHECKING: + from backend.integrations.oauth import BaseOAuthHandler + +logger = logging.getLogger(__name__) +settings = Settings() + + +class IntegrationCredentialsManager: + """ + Handles the lifecycle of integration credentials. + - Automatically refreshes requested credentials if needed. + - Uses locking mechanisms to ensure system-wide consistency and + prevent invalidation of in-use tokens. + + ### ⚠️ Gotcha + With `acquire(..)`, credentials can only be in use in one place at a time (e.g. one + block execution). + + ### Locking mechanism + - Because *getting* credentials can result in a refresh (= *invalidation* + + *replacement*) of the stored credentials, *getting* is an operation that + potentially requires read/write access. + - Checking whether a token has to be refreshed is subject to an additional `refresh` + scoped lock to prevent unnecessary sequential refreshes when multiple executions + try to access the same credentials simultaneously. + - We MUST lock credentials while in use to prevent them from being invalidated while + they are in use, e.g. because they are being refreshed by a different part + of the system. + - The `!time_sensitive` lock in `acquire(..)` is part of a two-tier locking + mechanism in which *updating* gets priority over *getting* credentials. + This is to prevent a long queue of waiting *get* requests from blocking essential + credential refreshes or user-initiated updates. + + It is possible to implement a reader/writer locking system where either multiple + readers or a single writer can have simultaneous access, but this would add a lot of + complexity to the mechanism. I don't expect the current ("simple") mechanism to + cause so much latency that it's worth implementing. 
+ """ + + def __init__(self): + redis_conn = redis.get_redis() + self._locks = RedisKeyedMutex(redis_conn) + self.store = IntegrationCredentialsStore() + + def create(self, user_id: str, credentials: Credentials) -> None: + return self.store.add_creds(user_id, credentials) + + def exists(self, user_id: str, credentials_id: str) -> bool: + return self.store.get_creds_by_id(user_id, credentials_id) is not None + + def get( + self, user_id: str, credentials_id: str, lock: bool = True + ) -> Credentials | None: + credentials = self.store.get_creds_by_id(user_id, credentials_id) + if not credentials: + return None + + # Refresh OAuth credentials if needed + if credentials.type == "oauth2" and credentials.access_token_expires_at: + logger.debug( + f"Credentials #{credentials.id} expire at " + f"{datetime.fromtimestamp(credentials.access_token_expires_at)}; " + f"current time is {datetime.now()}" + ) + + with self._locked(user_id, credentials_id, "refresh"): + oauth_handler = _get_provider_oauth_handler(credentials.provider) + if oauth_handler.needs_refresh(credentials): + logger.debug( + f"Refreshing '{credentials.provider}' " + f"credentials #{credentials.id}" + ) + _lock = None + if lock: + # Wait until the credentials are no longer in use anywhere + _lock = self._acquire_lock(user_id, credentials_id) + + fresh_credentials = oauth_handler.refresh_tokens(credentials) + self.store.update_creds(user_id, fresh_credentials) + if _lock and _lock.locked(): + _lock.release() + + credentials = fresh_credentials + else: + logger.debug(f"Credentials #{credentials.id} never expire") + + return credentials + + def acquire( + self, user_id: str, credentials_id: str + ) -> tuple[Credentials, RedisLock]: + """ + ⚠️ WARNING: this locks credentials system-wide and blocks both acquiring + and updating them elsewhere until the lock is released. + See the class docstring for more info. + """ + # Use a low-priority (!time_sensitive) locking queue on top of the general lock + # to allow priority access for refreshing/updating the tokens. 
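+ # The caller must release the returned RedisLock once the credentials are no longer in use (see the finally block in execute_node).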
+ with self._locked(user_id, credentials_id, "!time_sensitive"): + lock = self._acquire_lock(user_id, credentials_id) + credentials = self.get(user_id, credentials_id, lock=False) + if not credentials: + raise ValueError( + f"Credentials #{credentials_id} for user #{user_id} not found" + ) + return credentials, lock + + def update(self, user_id: str, updated: Credentials) -> None: + with self._locked(user_id, updated.id): + self.store.update_creds(user_id, updated) + + def delete(self, user_id: str, credentials_id: str) -> None: + with self._locked(user_id, credentials_id): + self.store.delete_creds_by_id(user_id, credentials_id) + + # -- Locking utilities -- # + + def _acquire_lock(self, user_id: str, credentials_id: str, *args: str) -> RedisLock: + key = ( + f"user:{user_id}", + f"credentials:{credentials_id}", + *args, + ) + return self._locks.acquire(key) + + @contextmanager + def _locked(self, user_id: str, credentials_id: str, *args: str): + lock = self._acquire_lock(user_id, credentials_id, *args) + try: + yield + finally: + if lock.locked(): + lock.release() + + def release_all_locks(self): + """Call this on process termination to ensure all locks are released""" + self._locks.release_all_locks() + self.store.locks.release_all_locks() + + +def _get_provider_oauth_handler(provider_name: str) -> "BaseOAuthHandler": + if provider_name not in HANDLERS_BY_NAME: + raise KeyError(f"Unknown provider '{provider_name}'") + + client_id = getattr(settings.secrets, f"{provider_name}_client_id") + client_secret = getattr(settings.secrets, f"{provider_name}_client_secret") + if not (client_id and client_secret): + raise MissingConfigError( + f"Integration with provider '{provider_name}' is not configured", + ) + + handler_class = HANDLERS_BY_NAME[provider_name] + frontend_base_url = ( + settings.config.frontend_base_url or settings.config.platform_base_url + ) + return handler_class( + client_id=client_id, + client_secret=client_secret, + redirect_uri=f"{frontend_base_url}/auth/integrations/oauth_callback", + ) diff --git a/autogpt_platform/backend/backend/integrations/oauth/__init__.py b/autogpt_platform/backend/backend/integrations/oauth/__init__.py new file mode 100644 index 000000000000..50f8a155a6f6 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/__init__.py @@ -0,0 +1,26 @@ +from typing import TYPE_CHECKING + +from .github import GitHubOAuthHandler +from .google import GoogleOAuthHandler +from .linear import LinearOAuthHandler +from .notion import NotionOAuthHandler +from .twitter import TwitterOAuthHandler + +if TYPE_CHECKING: + from ..providers import ProviderName + from .base import BaseOAuthHandler + +# --8<-- [start:HANDLERS_BY_NAMEExample] +HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = { + handler.PROVIDER_NAME: handler + for handler in [ + GitHubOAuthHandler, + GoogleOAuthHandler, + NotionOAuthHandler, + TwitterOAuthHandler, + LinearOAuthHandler, + ] +} +# --8<-- [end:HANDLERS_BY_NAMEExample] + +__all__ = ["HANDLERS_BY_NAME"] diff --git a/autogpt_platform/backend/backend/integrations/oauth/base.py b/autogpt_platform/backend/backend/integrations/oauth/base.py new file mode 100644 index 000000000000..fc6c68c16177 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/base.py @@ -0,0 +1,86 @@ +import logging +import time +from abc import ABC, abstractmethod +from typing import ClassVar, Optional + +from backend.data.model import OAuth2Credentials +from backend.integrations.providers import ProviderName + +logger = 
logging.getLogger(__name__) + + +class BaseOAuthHandler(ABC): + # --8<-- [start:BaseOAuthHandler1] + PROVIDER_NAME: ClassVar[ProviderName] + DEFAULT_SCOPES: ClassVar[list[str]] = [] + # --8<-- [end:BaseOAuthHandler1] + + @abstractmethod + # --8<-- [start:BaseOAuthHandler2] + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ... + + # --8<-- [end:BaseOAuthHandler2] + + @abstractmethod + # --8<-- [start:BaseOAuthHandler3] + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + # --8<-- [end:BaseOAuthHandler3] + """Constructs a login URL that the user can be redirected to""" + ... + + @abstractmethod + # --8<-- [start:BaseOAuthHandler4] + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + # --8<-- [end:BaseOAuthHandler4] + """Exchanges the acquired authorization code from login for a set of tokens""" + ... + + @abstractmethod + # --8<-- [start:BaseOAuthHandler5] + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + # --8<-- [end:BaseOAuthHandler5] + """Implements the token refresh mechanism""" + ... + + @abstractmethod + # --8<-- [start:BaseOAuthHandler6] + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + # --8<-- [end:BaseOAuthHandler6] + """Revokes the given token at provider, + returns False provider does not support it""" + ... + + def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + if credentials.provider != self.PROVIDER_NAME: + raise ValueError( + f"{self.__class__.__name__} can not refresh tokens " + f"for other provider '{credentials.provider}'" + ) + return self._refresh_tokens(credentials) + + def get_access_token(self, credentials: OAuth2Credentials) -> str: + """Returns a valid access token, refreshing it first if needed""" + if self.needs_refresh(credentials): + credentials = self.refresh_tokens(credentials) + return credentials.access_token.get_secret_value() + + def needs_refresh(self, credentials: OAuth2Credentials) -> bool: + """Indicates whether the given tokens need to be refreshed""" + return ( + credentials.access_token_expires_at is not None + and credentials.access_token_expires_at < int(time.time()) + 300 + ) + + def handle_default_scopes(self, scopes: list[str]) -> list[str]: + """Handles the default scopes for the provider""" + # If scopes are empty, use the default scopes for the provider + if not scopes: + logger.debug( + f"Using default scopes for provider {self.PROVIDER_NAME.value}" + ) + scopes = self.DEFAULT_SCOPES + return scopes diff --git a/autogpt_platform/backend/backend/integrations/oauth/github.py b/autogpt_platform/backend/backend/integrations/oauth/github.py new file mode 100644 index 000000000000..e6b3db37b41f --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/github.py @@ -0,0 +1,144 @@ +import time +from typing import Optional +from urllib.parse import urlencode + +from backend.data.model import OAuth2Credentials +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +from .base import BaseOAuthHandler + + +# --8<-- [start:GithubOAuthHandlerExample] +class GitHubOAuthHandler(BaseOAuthHandler): + """ + Based on the documentation at: + - [Authorizing OAuth apps - GitHub Docs](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps) + - [Refreshing user access tokens - GitHub 
Docs](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/refreshing-user-access-tokens) + + Notes: + - By default, token expiration is disabled on GitHub Apps. This means the access + token doesn't expire and no refresh token is returned by the authorization flow. + - When token expiration gets enabled, any existing tokens will remain non-expiring. + - When token expiration gets disabled, token refreshes will return a non-expiring + access token *with no refresh token*. + """ # noqa + + PROVIDER_NAME = ProviderName.GITHUB + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + self.auth_base_url = "https://github.com/login/oauth/authorize" + self.token_url = "https://github.com/login/oauth/access_token" + self.revoke_url = "https://api.github.com/applications/{client_id}/token" + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + params = { + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "scope": " ".join(scopes), + "state": state, + } + return f"{self.auth_base_url}?{urlencode(params)}" + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + return self._request_tokens({"code": code, "redirect_uri": self.redirect_uri}) + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + if not credentials.access_token: + raise ValueError("No access token to revoke") + + headers = { + "Accept": "application/vnd.github+json", + "X-GitHub-Api-Version": "2022-11-28", + } + + requests.delete( + url=self.revoke_url.format(client_id=self.client_id), + auth=(self.client_id, self.client_secret), + headers=headers, + json={"access_token": credentials.access_token.get_secret_value()}, + ) + return True + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + if not credentials.refresh_token: + return credentials + + return self._request_tokens( + { + "refresh_token": credentials.refresh_token.get_secret_value(), + "grant_type": "refresh_token", + } + ) + + def _request_tokens( + self, + params: dict[str, str], + current_credentials: Optional[OAuth2Credentials] = None, + ) -> OAuth2Credentials: + request_body = { + "client_id": self.client_id, + "client_secret": self.client_secret, + **params, + } + headers = {"Accept": "application/json"} + response = requests.post(self.token_url, data=request_body, headers=headers) + token_data: dict = response.json() + + username = self._request_username(token_data["access_token"]) + + now = int(time.time()) + new_credentials = OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=current_credentials.title if current_credentials else None, + username=username, + access_token=token_data["access_token"], + # Token refresh responses have an empty `scope` property (see docs), + # so we have to get the scope from the existing credentials object. + scopes=( + token_data.get("scope", "").split(",") + or (current_credentials.scopes if current_credentials else []) + ), + # Refresh token and expiration intervals are only given if token expiration + # is enabled in the GitHub App's settings. 
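+ # When they are absent, refresh_token and both *_expires_at fields below fall back to None.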
+ refresh_token=token_data.get("refresh_token"), + access_token_expires_at=( + now + expires_in + if (expires_in := token_data.get("expires_in", None)) + else None + ), + refresh_token_expires_at=( + now + expires_in + if (expires_in := token_data.get("refresh_token_expires_in", None)) + else None + ), + ) + if current_credentials: + new_credentials.id = current_credentials.id + return new_credentials + + def _request_username(self, access_token: str) -> str | None: + url = "https://api.github.com/user" + headers = { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {access_token}", + "X-GitHub-Api-Version": "2022-11-28", + } + + response = requests.get(url, headers=headers) + + if not response.ok: + return None + + # Get the login (username) + return response.json().get("login") + + +# --8<-- [end:GithubOAuthHandlerExample] diff --git a/autogpt_platform/backend/backend/integrations/oauth/google.py b/autogpt_platform/backend/backend/integrations/oauth/google.py new file mode 100644 index 000000000000..310eb5ae7326 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/google.py @@ -0,0 +1,172 @@ +import logging +from typing import Optional + +from google.auth.external_account_authorized_user import ( + Credentials as ExternalAccountCredentials, +) +from google.auth.transport.requests import AuthorizedSession, Request +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import Flow +from pydantic import SecretStr + +from backend.data.model import OAuth2Credentials +from backend.integrations.providers import ProviderName + +from .base import BaseOAuthHandler + +logger = logging.getLogger(__name__) + + +# --8<-- [start:GoogleOAuthHandlerExample] +class GoogleOAuthHandler(BaseOAuthHandler): + """ + Based on the documentation at https://developers.google.com/identity/protocols/oauth2/web-server + """ # noqa + + PROVIDER_NAME = ProviderName.GOOGLE + EMAIL_ENDPOINT = "https://www.googleapis.com/oauth2/v2/userinfo" + DEFAULT_SCOPES = [ + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile", + "openid", + ] + # --8<-- [end:GoogleOAuthHandlerExample] + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + self.token_uri = "https://oauth2.googleapis.com/token" + self.revoke_uri = "https://oauth2.googleapis.com/revoke" + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + all_scopes = list(set(scopes + self.DEFAULT_SCOPES)) + logger.debug(f"Setting up OAuth flow with scopes: {all_scopes}") + flow = self._setup_oauth_flow(all_scopes) + flow.redirect_uri = self.redirect_uri + authorization_url, _ = flow.authorization_url( + access_type="offline", + include_granted_scopes="true", + state=state, + prompt="consent", + ) + return authorization_url + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + logger.debug(f"Exchanging code for tokens with scopes: {scopes}") + + # Use the scopes from the initial request + flow = self._setup_oauth_flow(scopes) + flow.redirect_uri = self.redirect_uri + + logger.debug("Fetching token from Google") + + # Disable scope check in fetch_token + flow.oauth2session.scope = None + token = flow.fetch_token(code=code) + logger.debug("Token fetched successfully") + + # Get the actual scopes granted by Google + 
granted_scopes: list[str] = token.get("scope", []) + + logger.debug(f"Scopes granted by Google: {granted_scopes}") + + google_creds = flow.credentials + logger.debug(f"Received credentials: {google_creds}") + + logger.debug("Requesting user email") + username = self._request_email(google_creds) + logger.debug(f"User email retrieved: {username}") + + assert google_creds.token + assert google_creds.refresh_token + assert google_creds.expiry + assert granted_scopes + + # Create OAuth2Credentials with the granted scopes + credentials = OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=None, + username=username, + access_token=SecretStr(google_creds.token), + refresh_token=(SecretStr(google_creds.refresh_token)), + access_token_expires_at=( + int(google_creds.expiry.timestamp()) if google_creds.expiry else None + ), + refresh_token_expires_at=None, + scopes=granted_scopes, + ) + logger.debug( + f"OAuth2Credentials object created successfully with scopes: {credentials.scopes}" + ) + + return credentials + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + session = AuthorizedSession(credentials) + session.post( + self.revoke_uri, + params={"token": credentials.access_token.get_secret_value()}, + headers={"content-type": "application/x-www-form-urlencoded"}, + ) + return True + + def _request_email( + self, creds: Credentials | ExternalAccountCredentials + ) -> str | None: + session = AuthorizedSession(creds) + response = session.get(self.EMAIL_ENDPOINT) + if not response.ok: + logger.error( + f"Failed to get user email. Status code: {response.status_code}" + ) + return None + return response.json()["email"] + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + # Google credentials should ALWAYS have a refresh token + assert credentials.refresh_token + + google_creds = Credentials( + token=credentials.access_token.get_secret_value(), + refresh_token=credentials.refresh_token.get_secret_value(), + token_uri=self.token_uri, + client_id=self.client_id, + client_secret=self.client_secret, + scopes=credentials.scopes, + ) + # Google's OAuth library is poorly typed so we need some of these: + assert google_creds.refresh_token + assert google_creds.scopes + + google_creds.refresh(Request()) + assert google_creds.expiry + + return OAuth2Credentials( + provider=self.PROVIDER_NAME, + id=credentials.id, + title=credentials.title, + username=credentials.username, + access_token=SecretStr(google_creds.token), + refresh_token=SecretStr(google_creds.refresh_token), + access_token_expires_at=int(google_creds.expiry.timestamp()), + refresh_token_expires_at=None, + scopes=google_creds.scopes, + ) + + def _setup_oauth_flow(self, scopes: list[str]) -> Flow: + return Flow.from_client_config( + { + "web": { + "client_id": self.client_id, + "client_secret": self.client_secret, + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": self.token_uri, + } + }, + scopes=scopes, + ) diff --git a/autogpt_platform/backend/backend/integrations/oauth/linear.py b/autogpt_platform/backend/backend/integrations/oauth/linear.py new file mode 100644 index 000000000000..fd9d379c1e0e --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/linear.py @@ -0,0 +1,165 @@ +import json +from typing import Optional +from urllib.parse import urlencode + +from pydantic import SecretStr + +from backend.blocks.linear._api import LinearAPIException +from backend.data.model import APIKeyCredentials, OAuth2Credentials +from backend.integrations.providers import 
ProviderName +from backend.util.request import requests + +from .base import BaseOAuthHandler + + +class LinearOAuthHandler(BaseOAuthHandler): + """ + OAuth2 handler for Linear. + """ + + PROVIDER_NAME = ProviderName.LINEAR + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + self.auth_base_url = "https://linear.app/oauth/authorize" + self.token_url = "https://api.linear.app/oauth/token" # Correct token URL + self.revoke_url = "https://api.linear.app/oauth/revoke" + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + + params = { + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "response_type": "code", # Important: include "response_type" + "scope": ",".join(scopes), # Comma-separated, not space-separated + "state": state, + } + return f"{self.auth_base_url}?{urlencode(params)}" + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + return self._request_tokens({"code": code, "redirect_uri": self.redirect_uri}) + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + if not credentials.access_token: + raise ValueError("No access token to revoke") + + headers = { + "Authorization": f"Bearer {credentials.access_token.get_secret_value()}" + } + + response = requests.post(self.revoke_url, headers=headers) + if not response.ok: + try: + error_data = response.json() + error_message = error_data.get("error", "Unknown error") + except json.JSONDecodeError: + error_message = response.text + raise LinearAPIException( + f"Failed to revoke Linear tokens ({response.status_code}): {error_message}", + response.status_code, + ) + + return True # Linear doesn't return JSON on successful revoke + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + if not credentials.refresh_token: + raise ValueError( + "No refresh token available." 
+ ) # Linear uses non-expiring tokens + + return self._request_tokens( + { + "refresh_token": credentials.refresh_token.get_secret_value(), + "grant_type": "refresh_token", + } + ) + + def _request_tokens( + self, + params: dict[str, str], + current_credentials: Optional[OAuth2Credentials] = None, + ) -> OAuth2Credentials: + request_body = { + "client_id": self.client_id, + "client_secret": self.client_secret, + "grant_type": "authorization_code", # Ensure grant_type is correct + **params, + } + + headers = { + "Content-Type": "application/x-www-form-urlencoded" + } # Correct header for token request + response = requests.post(self.token_url, data=request_body, headers=headers) + + if not response.ok: + try: + error_data = response.json() + error_message = error_data.get("error", "Unknown error") + + except json.JSONDecodeError: + error_message = response.text + raise LinearAPIException( + f"Failed to fetch Linear tokens ({response.status_code}): {error_message}", + response.status_code, + ) + + token_data = response.json() + + # Note: Linear access tokens do not expire, so we set expires_at to None + new_credentials = OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=current_credentials.title if current_credentials else None, + username=token_data.get("user", {}).get( + "name", "Unknown User" + ), # extract name or set appropriate + access_token=token_data["access_token"], + scopes=token_data["scope"].split( + "," + ), # Linear returns comma-separated scopes + refresh_token=token_data.get( + "refresh_token" + ), # Linear uses non-expiring tokens so this might be null + access_token_expires_at=None, + refresh_token_expires_at=None, + ) + if current_credentials: + new_credentials.id = current_credentials.id + return new_credentials + + def _request_username(self, access_token: str) -> Optional[str]: + + # Use the LinearClient to fetch user details using GraphQL + from backend.blocks.linear._api import LinearClient + + try: + + linear_client = LinearClient( + APIKeyCredentials( + api_key=SecretStr(access_token), + title="temp", + provider=self.PROVIDER_NAME, + expires_at=None, + ) + ) # Temporary credentials for this request + + query = """ + query Viewer { + viewer { + name + } + } + """ + + response = linear_client.query(query) + return response["viewer"]["name"] + + except Exception as e: # Handle any errors + + print(f"Error fetching username: {e}") + return None diff --git a/autogpt_platform/backend/backend/integrations/oauth/notion.py b/autogpt_platform/backend/backend/integrations/oauth/notion.py new file mode 100644 index 000000000000..3cd3249fef04 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/notion.py @@ -0,0 +1,93 @@ +from base64 import b64encode +from typing import Optional +from urllib.parse import urlencode + +from backend.data.model import OAuth2Credentials +from backend.integrations.providers import ProviderName +from backend.util.request import requests + +from .base import BaseOAuthHandler + + +class NotionOAuthHandler(BaseOAuthHandler): + """ + Based on the documentation at https://developers.notion.com/docs/authorization + + Notes: + - Notion uses non-expiring access tokens and therefore doesn't have a refresh flow + - Notion doesn't use scopes + """ + + PROVIDER_NAME = ProviderName.NOTION + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + self.auth_base_url = "https://api.notion.com/v1/oauth/authorize" + 
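The Linear handler above notes that Linear returns scopes as a single comma-separated string, while most other OAuth providers (and the generic callback route later in this diff) expect space-separated scopes. Below is a minimal, standalone sketch of how such scope strings could be normalized; the helper name is illustrative and is not part of this patch.

```python
# Illustrative helper, not project code: normalize scope strings that may be
# comma-separated (as Linear returns them) or space-separated (most providers).
def normalize_scopes(raw: str | list[str]) -> list[str]:
    """Return a flat list of scopes from comma- or space-separated input."""
    if isinstance(raw, list):
        # A list may still hold a single "a b c" entry; flatten it first.
        raw = " ".join(raw)
    return [scope for scope in raw.replace(",", " ").split() if scope]


assert normalize_scopes("read,write,issues:create") == ["read", "write", "issues:create"]
assert normalize_scopes(["admin read"]) == ["admin", "read"]
```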
self.token_url = "https://api.notion.com/v1/oauth/token" + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + params = { + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "response_type": "code", + "owner": "user", + "state": state, + } + return f"{self.auth_base_url}?{urlencode(params)}" + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + request_body = { + "grant_type": "authorization_code", + "code": code, + "redirect_uri": self.redirect_uri, + } + auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode() + headers = { + "Authorization": f"Basic {auth_str}", + "Accept": "application/json", + } + response = requests.post(self.token_url, json=request_body, headers=headers) + token_data = response.json() + # Email is only available for non-bot users + email = ( + token_data["owner"]["person"]["email"] + if "person" in token_data["owner"] + and "email" in token_data["owner"]["person"] + else None + ) + + return OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=token_data.get("workspace_name"), + username=email, + access_token=token_data["access_token"], + refresh_token=None, + access_token_expires_at=None, # Notion tokens don't expire + refresh_token_expires_at=None, + scopes=[], + metadata={ + "owner": token_data["owner"], + "bot_id": token_data["bot_id"], + "workspace_id": token_data["workspace_id"], + "workspace_name": token_data.get("workspace_name"), + "workspace_icon": token_data.get("workspace_icon"), + }, + ) + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + # Notion doesn't support token revocation + return False + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + # Notion doesn't support token refresh + return credentials + + def needs_refresh(self, credentials: OAuth2Credentials) -> bool: + # Notion access tokens don't expire + return False diff --git a/autogpt_platform/backend/backend/integrations/oauth/twitter.py b/autogpt_platform/backend/backend/integrations/oauth/twitter.py new file mode 100644 index 000000000000..519ccd354ecf --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/oauth/twitter.py @@ -0,0 +1,171 @@ +import time +import urllib.parse +from typing import ClassVar, Optional + +import requests + +from backend.data.model import OAuth2Credentials, ProviderName +from backend.integrations.oauth.base import BaseOAuthHandler + + +class TwitterOAuthHandler(BaseOAuthHandler): + PROVIDER_NAME = ProviderName.TWITTER + DEFAULT_SCOPES: ClassVar[list[str]] = [ + "tweet.read", + "tweet.write", + "tweet.moderate.write", + "users.read", + "follows.read", + "follows.write", + "offline.access", + "space.read", + "mute.read", + "mute.write", + "like.read", + "like.write", + "list.read", + "list.write", + "block.read", + "block.write", + "bookmark.read", + "bookmark.write", + ] + + AUTHORIZE_URL = "https://twitter.com/i/oauth2/authorize" + TOKEN_URL = "https://api.x.com/2/oauth2/token" + USERNAME_URL = "https://api.x.com/2/users/me" + REVOKE_URL = "https://api.x.com/2/oauth2/revoke" + + def __init__(self, client_id: str, client_secret: str, redirect_uri: str): + self.client_id = client_id + self.client_secret = client_secret + self.redirect_uri = redirect_uri + + def get_login_url( + self, scopes: list[str], state: str, code_challenge: Optional[str] + ) -> str: + """Generate Twitter OAuth 2.0 authorization URL""" + # scopes = 
self.handle_default_scopes(scopes) + + if code_challenge is None: + raise ValueError("code_challenge is required for Twitter OAuth") + + params = { + "response_type": "code", + "client_id": self.client_id, + "redirect_uri": self.redirect_uri, + "scope": " ".join(self.DEFAULT_SCOPES), + "state": state, + "code_challenge": code_challenge, + "code_challenge_method": "S256", + } + + return f"{self.AUTHORIZE_URL}?{urllib.parse.urlencode(params)}" + + def exchange_code_for_tokens( + self, code: str, scopes: list[str], code_verifier: Optional[str] + ) -> OAuth2Credentials: + """Exchange authorization code for access tokens""" + + headers = {"Content-Type": "application/x-www-form-urlencoded"} + + data = { + "code": code, + "grant_type": "authorization_code", + "redirect_uri": self.redirect_uri, + "code_verifier": code_verifier, + } + + auth = (self.client_id, self.client_secret) + + response = requests.post(self.TOKEN_URL, headers=headers, data=data, auth=auth) + response.raise_for_status() + + tokens = response.json() + + username = self._get_username(tokens["access_token"]) + + return OAuth2Credentials( + provider=self.PROVIDER_NAME, + title=None, + username=username, + access_token=tokens["access_token"], + refresh_token=tokens.get("refresh_token"), + access_token_expires_at=int(time.time()) + tokens["expires_in"], + refresh_token_expires_at=None, + scopes=scopes, + ) + + def _get_username(self, access_token: str) -> str: + """Get the username from the access token""" + headers = {"Authorization": f"Bearer {access_token}"} + + params = {"user.fields": "username"} + + response = requests.get( + f"{self.USERNAME_URL}?{urllib.parse.urlencode(params)}", headers=headers + ) + response.raise_for_status() + + return response.json()["data"]["username"] + + def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials: + """Refresh access tokens using refresh token""" + if not credentials.refresh_token: + raise ValueError("No refresh token available") + + header = {"Content-Type": "application/x-www-form-urlencoded"} + data = { + "grant_type": "refresh_token", + "refresh_token": credentials.refresh_token.get_secret_value(), + } + + auth = (self.client_id, self.client_secret) + + response = requests.post(self.TOKEN_URL, headers=header, data=data, auth=auth) + + try: + response.raise_for_status() + except requests.exceptions.HTTPError as e: + print("HTTP Error:", e) + print("Response Content:", response.text) + raise + + tokens = response.json() + + username = self._get_username(tokens["access_token"]) + + return OAuth2Credentials( + id=credentials.id, + provider=self.PROVIDER_NAME, + title=None, + username=username, + access_token=tokens["access_token"], + refresh_token=tokens["refresh_token"], + access_token_expires_at=int(time.time()) + tokens["expires_in"], + scopes=credentials.scopes, + refresh_token_expires_at=None, + ) + + def revoke_tokens(self, credentials: OAuth2Credentials) -> bool: + """Revoke the access token""" + + header = {"Content-Type": "application/x-www-form-urlencoded"} + + data = { + "token": credentials.access_token.get_secret_value(), + "token_type_hint": "access_token", + } + + auth = (self.client_id, self.client_secret) + + response = requests.post(self.REVOKE_URL, headers=header, data=data, auth=auth) + + try: + response.raise_for_status() + except requests.exceptions.HTTPError as e: + print("HTTP Error:", e) + print("Response Content:", response.text) + raise + + return response.status_code == 200 diff --git 
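The Twitter handler above requires a PKCE `code_challenge` and rejects login attempts without one. As a point of reference, here is a minimal standard-library sketch (not taken from the codebase) of how a caller could derive an S256 verifier/challenge pair per RFC 7636.

```python
# Standalone sketch: generate a PKCE code_verifier/code_challenge pair using
# the S256 method that the Twitter OAuth handler mandates.
import base64
import hashlib
import secrets


def generate_pkce_pair() -> tuple[str, str]:
    """Return (code_verifier, code_challenge) per RFC 7636, S256 method."""
    # 32 random bytes -> 43-character URL-safe verifier (within the 43-128 limit).
    code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode()
    digest = hashlib.sha256(code_verifier.encode("ascii")).digest()
    code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
    return code_verifier, code_challenge


verifier, challenge = generate_pkce_pair()
# `challenge` is what get_login_url() receives as code_challenge;
# `verifier` is what exchange_code_for_tokens() later sends as code_verifier.
```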
a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py new file mode 100644 index 000000000000..a9f810fbc9a0 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -0,0 +1,37 @@ +from enum import Enum + + +# --8<-- [start:ProviderName] +class ProviderName(str, Enum): + ANTHROPIC = "anthropic" + COMPASS = "compass" + DISCORD = "discord" + D_ID = "d_id" + E2B = "e2b" + EXA = "exa" + FAL = "fal" + GITHUB = "github" + GOOGLE = "google" + GOOGLE_MAPS = "google_maps" + GROQ = "groq" + HUBSPOT = "hubspot" + IDEOGRAM = "ideogram" + JINA = "jina" + LINEAR = "linear" + MEDIUM = "medium" + MEM0 = "mem0" + NOTION = "notion" + NVIDIA = "nvidia" + OLLAMA = "ollama" + OPENAI = "openai" + OPENWEATHERMAP = "openweathermap" + OPEN_ROUTER = "open_router" + PINECONE = "pinecone" + REDDIT = "reddit" + REPLICATE = "replicate" + REVID = "revid" + SLANT3D = "slant3d" + SMTP = "smtp" + TWITTER = "twitter" + UNREAL_SPEECH = "unreal_speech" + # --8<-- [end:ProviderName] diff --git a/autogpt_platform/backend/backend/integrations/webhooks/__init__.py b/autogpt_platform/backend/backend/integrations/webhooks/__init__.py new file mode 100644 index 000000000000..4ff4f8b5e0c5 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/__init__.py @@ -0,0 +1,22 @@ +from typing import TYPE_CHECKING + +from .compass import CompassWebhookManager +from .github import GithubWebhooksManager +from .slant3d import Slant3DWebhooksManager + +if TYPE_CHECKING: + from ..providers import ProviderName + from ._base import BaseWebhooksManager + +# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME] +WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = { + handler.PROVIDER_NAME: handler + for handler in [ + CompassWebhookManager, + GithubWebhooksManager, + Slant3DWebhooksManager, + ] +} +# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME] + +__all__ = ["WEBHOOK_MANAGERS_BY_NAME"] diff --git a/autogpt_platform/backend/backend/integrations/webhooks/_base.py b/autogpt_platform/backend/backend/integrations/webhooks/_base.py new file mode 100644 index 000000000000..be3a71055238 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/_base.py @@ -0,0 +1,195 @@ +import logging +import secrets +from abc import ABC, abstractmethod +from typing import ClassVar, Generic, Optional, TypeVar +from uuid import uuid4 + +from fastapi import Request +from strenum import StrEnum + +from backend.data import integrations +from backend.data.model import Credentials +from backend.integrations.providers import ProviderName +from backend.integrations.webhooks.utils import webhook_ingress_url +from backend.util.exceptions import MissingConfigError +from backend.util.settings import Config + +logger = logging.getLogger(__name__) +app_config = Config() + +WT = TypeVar("WT", bound=StrEnum) + + +class BaseWebhooksManager(ABC, Generic[WT]): + # --8<-- [start:BaseWebhooksManager1] + PROVIDER_NAME: ClassVar[ProviderName] + # --8<-- [end:BaseWebhooksManager1] + + WebhookType: WT + + async def get_suitable_auto_webhook( + self, + user_id: str, + credentials: Credentials, + webhook_type: WT, + resource: str, + events: list[str], + ) -> integrations.Webhook: + if not app_config.platform_base_url: + raise MissingConfigError( + "PLATFORM_BASE_URL must be set to use Webhook functionality" + ) + + if webhook := await integrations.find_webhook_by_credentials_and_props( + credentials.id, webhook_type, resource, events + ): + return webhook + return await 
self._create_webhook( + user_id, webhook_type, events, resource, credentials + ) + + async def get_manual_webhook( + self, + user_id: str, + graph_id: str, + webhook_type: WT, + events: list[str], + ): + if current_webhook := await integrations.find_webhook_by_graph_and_props( + graph_id, self.PROVIDER_NAME, webhook_type, events + ): + return current_webhook + return await self._create_webhook( + user_id, + webhook_type, + events, + register=False, + ) + + async def prune_webhook_if_dangling( + self, webhook_id: str, credentials: Optional[Credentials] + ) -> bool: + webhook = await integrations.get_webhook(webhook_id) + if webhook.attached_nodes is None: + raise ValueError("Error retrieving webhook including attached nodes") + if webhook.attached_nodes: + # Don't prune webhook if in use + return False + + if credentials: + await self._deregister_webhook(webhook, credentials) + await integrations.delete_webhook(webhook.id) + return True + + # --8<-- [start:BaseWebhooksManager3] + @classmethod + @abstractmethod + async def validate_payload( + cls, webhook: integrations.Webhook, request: Request + ) -> tuple[dict, str]: + """ + Validates an incoming webhook request and returns its payload and type. + + Params: + webhook: Object representing the configured webhook and its properties in our system. + request: Incoming FastAPI `Request` + + Returns: + dict: The validated payload + str: The event type associated with the payload + """ + + # --8<-- [end:BaseWebhooksManager3] + + # --8<-- [start:BaseWebhooksManager5] + async def trigger_ping( + self, webhook: integrations.Webhook, credentials: Credentials | None + ) -> None: + """ + Triggers a ping to the given webhook. + + Raises: + NotImplementedError: if the provider doesn't support pinging + """ + # --8<-- [end:BaseWebhooksManager5] + raise NotImplementedError(f"{self.__class__.__name__} doesn't support pinging") + + # --8<-- [start:BaseWebhooksManager2] + @abstractmethod + async def _register_webhook( + self, + credentials: Credentials, + webhook_type: WT, + resource: str, + events: list[str], + ingress_url: str, + secret: str, + ) -> tuple[str, dict]: + """ + Registers a new webhook with the provider. + + Params: + credentials: The credentials with which to create the webhook + webhook_type: The provider-specific webhook type to create + resource: The resource to receive events for + events: The events to subscribe to + ingress_url: The ingress URL for webhook payloads + secret: Secret used to verify webhook payloads + + Returns: + str: Webhook ID assigned by the provider + config: Provider-specific configuration for the webhook + """ + ... + + # --8<-- [end:BaseWebhooksManager2] + + # --8<-- [start:BaseWebhooksManager4] + @abstractmethod + async def _deregister_webhook( + self, webhook: integrations.Webhook, credentials: Credentials + ) -> None: ... 
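To make the abstract interface above concrete, the sketch below outlines what a provider-specific webhooks manager has to supply: `validate_payload`, `_register_webhook`, and `_deregister_webhook`. It is schematic only; the `api.example.com` endpoint, the `ExampleWebhookType` enum, and the reuse of `ProviderName.EXA` are placeholders rather than a real integration. A real manager would also be added to `WEBHOOK_MANAGERS_BY_NAME` in `webhooks/__init__.py`, shown earlier in this diff, to be picked up by the platform.

```python
# Schematic example of a BaseWebhooksManager subclass; endpoints and names are
# placeholders, not real provider APIs.
import requests
from fastapi import Request
from strenum import StrEnum

from backend.data import integrations
from backend.data.model import Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager


class ExampleWebhookType(StrEnum):
    RESOURCE = "resource"


class ExampleWebhooksManager(BaseWebhooksManager):
    PROVIDER_NAME = ProviderName.EXA  # placeholder; a real integration uses its own entry
    WebhookType = ExampleWebhookType

    @classmethod
    async def validate_payload(
        cls, webhook: integrations.Webhook, request: Request
    ) -> tuple[dict, str]:
        # A real manager would verify a signature or shared secret here.
        payload = await request.json()
        return payload, payload.get("event", "resource.updated")

    async def _register_webhook(
        self,
        credentials: Credentials,
        webhook_type: ExampleWebhookType,
        resource: str,
        events: list[str],
        ingress_url: str,
        secret: str,
    ) -> tuple[str, dict]:
        # Hypothetical provider endpoint; returns the provider's webhook ID + config.
        response = requests.post(
            "https://api.example.com/webhooks",
            json={"url": ingress_url, "resource": resource, "events": events, "secret": secret},
            headers={"Authorization": credentials.auth_header()},
        )
        response.raise_for_status()
        return str(response.json()["id"]), response.json()

    async def _deregister_webhook(
        self, webhook: integrations.Webhook, credentials: Credentials
    ) -> None:
        requests.delete(
            f"https://api.example.com/webhooks/{webhook.provider_webhook_id}",
            headers={"Authorization": credentials.auth_header()},
        )
```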
+ + # --8<-- [end:BaseWebhooksManager4] + + async def _create_webhook( + self, + user_id: str, + webhook_type: WT, + events: list[str], + resource: str = "", + credentials: Optional[Credentials] = None, + register: bool = True, + ) -> integrations.Webhook: + if not app_config.platform_base_url: + raise MissingConfigError( + "PLATFORM_BASE_URL must be set to use Webhook functionality" + ) + + id = str(uuid4()) + secret = secrets.token_hex(32) + provider_name: ProviderName = self.PROVIDER_NAME + ingress_url = webhook_ingress_url(provider_name=provider_name, webhook_id=id) + if register: + if not credentials: + raise TypeError("credentials are required if register = True") + provider_webhook_id, config = await self._register_webhook( + credentials, webhook_type, resource, events, ingress_url, secret + ) + else: + provider_webhook_id, config = "", {} + + return await integrations.create_webhook( + integrations.Webhook( + id=id, + user_id=user_id, + provider=provider_name, + credentials_id=credentials.id if credentials else "", + webhook_type=webhook_type, + resource=resource, + events=events, + provider_webhook_id=provider_webhook_id, + config=config, + secret=secret, + ) + ) diff --git a/autogpt_platform/backend/backend/integrations/webhooks/_manual_base.py b/autogpt_platform/backend/backend/integrations/webhooks/_manual_base.py new file mode 100644 index 000000000000..cf749a3cf9b5 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/_manual_base.py @@ -0,0 +1,30 @@ +import logging + +from backend.data import integrations +from backend.data.model import Credentials + +from ._base import WT, BaseWebhooksManager + +logger = logging.getLogger(__name__) + + +class ManualWebhookManagerBase(BaseWebhooksManager[WT]): + async def _register_webhook( + self, + credentials: Credentials, + webhook_type: WT, + resource: str, + events: list[str], + ingress_url: str, + secret: str, + ) -> tuple[str, dict]: + print(ingress_url) # FIXME: pass URL to user in front end + + return "", {} + + async def _deregister_webhook( + self, + webhook: integrations.Webhook, + credentials: Credentials, + ) -> None: + pass diff --git a/autogpt_platform/backend/backend/integrations/webhooks/compass.py b/autogpt_platform/backend/backend/integrations/webhooks/compass.py new file mode 100644 index 000000000000..8a2076a1dab1 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/compass.py @@ -0,0 +1,30 @@ +import logging + +from fastapi import Request +from strenum import StrEnum + +from backend.data import integrations +from backend.integrations.providers import ProviderName + +from ._manual_base import ManualWebhookManagerBase + +logger = logging.getLogger(__name__) + + +class CompassWebhookType(StrEnum): + TRANSCRIPTION = "transcription" + TASK = "task" + + +class CompassWebhookManager(ManualWebhookManagerBase): + PROVIDER_NAME = ProviderName.COMPASS + WebhookType = CompassWebhookType + + @classmethod + async def validate_payload( + cls, webhook: integrations.Webhook, request: Request + ) -> tuple[dict, str]: + payload = await request.json() + event_type = CompassWebhookType.TRANSCRIPTION # currently the only type + + return payload, event_type diff --git a/autogpt_platform/backend/backend/integrations/webhooks/github.py b/autogpt_platform/backend/backend/integrations/webhooks/github.py new file mode 100644 index 000000000000..6a391920453c --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/github.py @@ -0,0 +1,181 @@ +import hashlib +import hmac +import logging 
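The GithubWebhooksManager that follows uses these `hashlib`/`hmac` imports to verify GitHub's `X-Hub-Signature-256` header before trusting a payload. A self-contained illustration of that verification pattern (not project code):

```python
# GitHub signs each webhook delivery with HMAC-SHA256 over the raw request body
# using the webhook secret, and sends it as "sha256=<hexdigest>".
import hashlib
import hmac


def verify_github_signature(secret: str, payload_body: bytes, signature_header: str) -> bool:
    """Return True if the X-Hub-Signature-256 header matches the payload."""
    expected = "sha256=" + hmac.new(
        secret.encode("utf-8"), msg=payload_body, digestmod=hashlib.sha256
    ).hexdigest()
    # compare_digest avoids timing side channels when comparing signatures.
    return hmac.compare_digest(expected, signature_header)


body = b'{"action": "opened"}'
secret = "s3cr3t"
header = "sha256=" + hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
assert verify_github_signature(secret, body, header)
```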
+ +import requests +from fastapi import HTTPException, Request +from strenum import StrEnum + +from backend.data import integrations +from backend.data.model import Credentials +from backend.integrations.providers import ProviderName + +from ._base import BaseWebhooksManager + +logger = logging.getLogger(__name__) + + +# --8<-- [start:GithubWebhooksManager] +class GithubWebhookType(StrEnum): + REPO = "repo" + + +class GithubWebhooksManager(BaseWebhooksManager): + PROVIDER_NAME = ProviderName.GITHUB + + WebhookType = GithubWebhookType + + GITHUB_API_URL = "https://api.github.com" + GITHUB_API_DEFAULT_HEADERS = {"Accept": "application/vnd.github.v3+json"} + + @classmethod + async def validate_payload( + cls, webhook: integrations.Webhook, request: Request + ) -> tuple[dict, str]: + if not (event_type := request.headers.get("X-GitHub-Event")): + raise HTTPException( + status_code=400, detail="X-GitHub-Event header is missing!" + ) + + if not (signature_header := request.headers.get("X-Hub-Signature-256")): + raise HTTPException( + status_code=403, detail="X-Hub-Signature-256 header is missing!" + ) + + payload_body = await request.body() + hash_object = hmac.new( + webhook.secret.encode("utf-8"), msg=payload_body, digestmod=hashlib.sha256 + ) + expected_signature = "sha256=" + hash_object.hexdigest() + + if not hmac.compare_digest(expected_signature, signature_header): + raise HTTPException( + status_code=403, detail="Request signatures didn't match!" + ) + + payload = await request.json() + if action := payload.get("action"): + event_type += f".{action}" + + return payload, event_type + + async def trigger_ping( + self, webhook: integrations.Webhook, credentials: Credentials | None + ) -> None: + if not credentials: + raise ValueError("Credentials are required but were not passed") + + headers = { + **self.GITHUB_API_DEFAULT_HEADERS, + "Authorization": credentials.auth_header(), + } + + repo, github_hook_id = webhook.resource, webhook.provider_webhook_id + ping_url = f"{self.GITHUB_API_URL}/repos/{repo}/hooks/{github_hook_id}/pings" + + response = requests.post(ping_url, headers=headers) + + if response.status_code != 204: + error_msg = extract_github_error_msg(response) + raise ValueError(f"Failed to ping GitHub webhook: {error_msg}") + + async def _register_webhook( + self, + credentials: Credentials, + webhook_type: GithubWebhookType, + resource: str, + events: list[str], + ingress_url: str, + secret: str, + ) -> tuple[str, dict]: + if webhook_type == self.WebhookType.REPO and resource.count("/") > 1: + raise ValueError("Invalid repo format: expected 'owner/repo'") + + # Extract main event, e.g. 
`pull_request.opened` -> `pull_request` + github_events = list({event.split(".")[0] for event in events}) + + headers = { + **self.GITHUB_API_DEFAULT_HEADERS, + "Authorization": credentials.auth_header(), + } + webhook_data = { + "name": "web", + "active": True, + "events": github_events, + "config": { + "url": ingress_url, + "content_type": "json", + "insecure_ssl": "0", + "secret": secret, + }, + } + + response = requests.post( + f"{self.GITHUB_API_URL}/repos/{resource}/hooks", + headers=headers, + json=webhook_data, + ) + + if response.status_code != 201: + error_msg = extract_github_error_msg(response) + if "not found" in error_msg.lower(): + error_msg = ( + f"{error_msg} " + "(Make sure the GitHub account or API key has 'repo' or " + f"webhook create permissions to '{resource}')" + ) + raise ValueError(f"Failed to create GitHub webhook: {error_msg}") + + webhook_id = response.json()["id"] + config = response.json()["config"] + + return str(webhook_id), config + + async def _deregister_webhook( + self, webhook: integrations.Webhook, credentials: Credentials + ) -> None: + webhook_type = self.WebhookType(webhook.webhook_type) + if webhook.credentials_id != credentials.id: + raise ValueError( + f"Webhook #{webhook.id} does not belong to credentials {credentials.id}" + ) + + headers = { + **self.GITHUB_API_DEFAULT_HEADERS, + "Authorization": credentials.auth_header(), + } + + if webhook_type == self.WebhookType.REPO: + repo = webhook.resource + delete_url = f"{self.GITHUB_API_URL}/repos/{repo}/hooks/{webhook.provider_webhook_id}" # noqa + else: + raise NotImplementedError( + f"Unsupported webhook type '{webhook.webhook_type}'" + ) + + response = requests.delete(delete_url, headers=headers) + + if response.status_code not in [204, 404]: + # 204 means successful deletion, 404 means the webhook was already deleted + error_msg = extract_github_error_msg(response) + raise ValueError(f"Failed to delete GitHub webhook: {error_msg}") + + # If we reach here, the webhook was successfully deleted or didn't exist + + +# --8<-- [end:GithubWebhooksManager] + + +def extract_github_error_msg(response: requests.Response) -> str: + error_msgs = [] + resp = response.json() + if resp.get("message"): + error_msgs.append(resp["message"]) + if resp.get("errors"): + error_msgs.extend(f"* {err.get('message', err)}" for err in resp["errors"]) + if resp.get("error"): + if isinstance(resp["error"], dict): + error_msgs.append(resp["error"].get("message", resp["error"])) + else: + error_msgs.append(resp["error"]) + return "\n".join(error_msgs) diff --git a/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py b/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py new file mode 100644 index 000000000000..ef3bad02ae22 --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/graph_lifecycle_hooks.py @@ -0,0 +1,274 @@ +import logging +from typing import TYPE_CHECKING, Callable, Optional, cast + +from backend.data.block import BlockSchema, BlockWebhookConfig, get_block +from backend.data.graph import set_node_webhook +from backend.integrations.webhooks import WEBHOOK_MANAGERS_BY_NAME + +if TYPE_CHECKING: + from backend.data.graph import GraphModel, NodeModel + from backend.data.model import Credentials + + from ._base import BaseWebhooksManager + +logger = logging.getLogger(__name__) + + +async def on_graph_activate( + graph: "GraphModel", get_credentials: Callable[[str], "Credentials | None"] +): + """ + Hook to be called when a graph is 
activated/created. + + ⚠️ Assuming node entities are not re-used between graph versions, ⚠️ + this hook calls `on_node_activate` on all nodes in this graph. + + Params: + get_credentials: `credentials_id` -> Credentials + """ + # Compare nodes in new_graph_version with previous_graph_version + updated_nodes = [] + for new_node in graph.nodes: + block = get_block(new_node.block_id) + if not block: + raise ValueError( + f"Node #{new_node.id} is instance of unknown block #{new_node.block_id}" + ) + block_input_schema = cast(BlockSchema, block.input_schema) + + node_credentials = None + if ( + # Webhook-triggered blocks are only allowed to have 1 credentials input + ( + creds_field_name := next( + iter(block_input_schema.get_credentials_fields()), None + ) + ) + and (creds_meta := new_node.input_default.get(creds_field_name)) + and not (node_credentials := get_credentials(creds_meta["id"])) + ): + raise ValueError( + f"Node #{new_node.id} input '{creds_field_name}' updated with " + f"non-existent credentials #{creds_meta['id']}" + ) + + updated_node = await on_node_activate( + graph.user_id, new_node, credentials=node_credentials + ) + updated_nodes.append(updated_node) + + graph.nodes = updated_nodes + return graph + + +async def on_graph_deactivate( + graph: "GraphModel", get_credentials: Callable[[str], "Credentials | None"] +): + """ + Hook to be called when a graph is deactivated/deleted. + + ⚠️ Assuming node entities are not re-used between graph versions, ⚠️ + this hook calls `on_node_deactivate` on all nodes in `graph`. + + Params: + get_credentials: `credentials_id` -> Credentials + """ + updated_nodes = [] + for node in graph.nodes: + block = get_block(node.block_id) + if not block: + raise ValueError( + f"Node #{node.id} is instance of unknown block #{node.block_id}" + ) + block_input_schema = cast(BlockSchema, block.input_schema) + + node_credentials = None + if ( + # Webhook-triggered blocks are only allowed to have 1 credentials input + ( + creds_field_name := next( + iter(block_input_schema.get_credentials_fields()), None + ) + ) + and (creds_meta := node.input_default.get(creds_field_name)) + and not (node_credentials := get_credentials(creds_meta["id"])) + ): + logger.error( + f"Node #{node.id} input '{creds_field_name}' referenced non-existent " + f"credentials #{creds_meta['id']}" + ) + + updated_node = await on_node_deactivate(node, credentials=node_credentials) + updated_nodes.append(updated_node) + + graph.nodes = updated_nodes + return graph + + +async def on_node_activate( + user_id: str, + node: "NodeModel", + *, + credentials: Optional["Credentials"] = None, +) -> "NodeModel": + """Hook to be called when the node is activated/created""" + + block = get_block(node.block_id) + if not block: + raise ValueError( + f"Node #{node.id} is instance of unknown block #{node.block_id}" + ) + + if not block.webhook_config: + return node + + provider = block.webhook_config.provider + if provider not in WEBHOOK_MANAGERS_BY_NAME: + raise ValueError( + f"Block #{block.id} has webhook_config for provider {provider} " + "which does not support webhooks" + ) + + logger.debug( + f"Activating webhook node #{node.id} with config {block.webhook_config}" + ) + + webhooks_manager = WEBHOOK_MANAGERS_BY_NAME[provider]() + + if auto_setup_webhook := isinstance(block.webhook_config, BlockWebhookConfig): + try: + resource = block.webhook_config.resource_format.format(**node.input_default) + except KeyError: + resource = None + logger.debug( + f"Constructed resource string {resource} from input 
{node.input_default}" + ) + else: + resource = "" # not relevant for manual webhooks + + block_input_schema = cast(BlockSchema, block.input_schema) + credentials_field_name = next(iter(block_input_schema.get_credentials_fields()), "") + credentials_meta = ( + node.input_default.get(credentials_field_name) + if credentials_field_name + else None + ) + event_filter_input_name = block.webhook_config.event_filter_input + has_everything_for_webhook = ( + resource is not None + and (credentials_meta or not credentials_field_name) + and ( + not event_filter_input_name + or ( + event_filter_input_name in node.input_default + and any( + is_on + for is_on in node.input_default[event_filter_input_name].values() + ) + ) + ) + ) + + if has_everything_for_webhook and resource is not None: + logger.debug(f"Node #{node} has everything for a webhook!") + if credentials_meta and not credentials: + raise ValueError( + f"Cannot set up webhook for node #{node.id}: " + f"credentials #{credentials_meta['id']} not available" + ) + + if event_filter_input_name: + # Shape of the event filter is enforced in Block.__init__ + event_filter = cast(dict, node.input_default[event_filter_input_name]) + events = [ + block.webhook_config.event_format.format(event=event) + for event, enabled in event_filter.items() + if enabled is True + ] + logger.debug(f"Webhook events to subscribe to: {', '.join(events)}") + else: + events = [] + + # Find/make and attach a suitable webhook to the node + if auto_setup_webhook: + assert credentials is not None + new_webhook = await webhooks_manager.get_suitable_auto_webhook( + user_id, + credentials, + block.webhook_config.webhook_type, + resource, + events, + ) + else: + # Manual webhook -> no credentials -> don't register but do create + new_webhook = await webhooks_manager.get_manual_webhook( + user_id, + node.graph_id, + block.webhook_config.webhook_type, + events, + ) + logger.debug(f"Acquired webhook: {new_webhook}") + return await set_node_webhook(node.id, new_webhook.id) + else: + logger.debug(f"Node #{node.id} does not have everything for a webhook") + + return node + + +async def on_node_deactivate( + node: "NodeModel", + *, + credentials: Optional["Credentials"] = None, + webhooks_manager: Optional["BaseWebhooksManager"] = None, +) -> "NodeModel": + """Hook to be called when node is deactivated/deleted""" + + logger.debug(f"Deactivating node #{node.id}") + block = get_block(node.block_id) + if not block: + raise ValueError( + f"Node #{node.id} is instance of unknown block #{node.block_id}" + ) + + if not block.webhook_config: + return node + + provider = block.webhook_config.provider + if provider not in WEBHOOK_MANAGERS_BY_NAME: + raise ValueError( + f"Block #{block.id} has webhook_config for provider {provider} " + "which does not support webhooks" + ) + + webhooks_manager = WEBHOOK_MANAGERS_BY_NAME[provider]() + + if node.webhook_id: + logger.debug(f"Node #{node.id} has webhook_id {node.webhook_id}") + if not node.webhook: + logger.error(f"Node #{node.id} has webhook_id but no webhook object") + raise ValueError("node.webhook not included") + + # Detach webhook from node + logger.debug(f"Detaching webhook from node #{node.id}") + updated_node = await set_node_webhook(node.id, None) + + # Prune and deregister the webhook if it is no longer used anywhere + webhook = node.webhook + logger.debug( + f"Pruning{' and deregistering' if credentials else ''} " + f"webhook #{webhook.id}" + ) + await webhooks_manager.prune_webhook_if_dangling(webhook.id, credentials) + if ( + 
cast(BlockSchema, block.input_schema).get_credentials_fields() + and not credentials + ): + logger.warning( + f"Cannot deregister webhook #{webhook.id}: credentials " + f"#{webhook.credentials_id} not available " + f"({webhook.provider.value} webhook ID: {webhook.provider_webhook_id})" + ) + return updated_node + + logger.debug(f"Node #{node.id} has no webhook_id, returning") + return node diff --git a/autogpt_platform/backend/backend/integrations/webhooks/slant3d.py b/autogpt_platform/backend/backend/integrations/webhooks/slant3d.py new file mode 100644 index 000000000000..189ab72083ef --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/slant3d.py @@ -0,0 +1,99 @@ +import logging + +import requests +from fastapi import Request + +from backend.data import integrations +from backend.data.model import APIKeyCredentials, Credentials +from backend.integrations.providers import ProviderName +from backend.integrations.webhooks._base import BaseWebhooksManager + +logger = logging.getLogger(__name__) + + +class Slant3DWebhooksManager(BaseWebhooksManager): + """Manager for Slant3D webhooks""" + + PROVIDER_NAME = ProviderName.SLANT3D + BASE_URL = "https://www.slant3dapi.com/api" + + async def _register_webhook( + self, + credentials: Credentials, + webhook_type: str, + resource: str, + events: list[str], + ingress_url: str, + secret: str, + ) -> tuple[str, dict]: + """Register a new webhook with Slant3D""" + + if not isinstance(credentials, APIKeyCredentials): + raise ValueError("API key is required to register a webhook") + + headers = { + "api-key": credentials.api_key.get_secret_value(), + "Content-Type": "application/json", + } + + # Slant3D's API doesn't use events list, just register for all order updates + payload = {"endPoint": ingress_url} + + response = requests.post( + f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload + ) + + if not response.ok: + error = response.json().get("error", "Unknown error") + raise RuntimeError(f"Failed to register webhook: {error}") + + webhook_config = { + "endpoint": ingress_url, + "provider": self.PROVIDER_NAME, + "events": ["order.shipped"], # Currently the only supported event + "type": webhook_type, + } + + return "", webhook_config + + @classmethod + async def validate_payload( + cls, webhook: integrations.Webhook, request: Request + ) -> tuple[dict, str]: + """Validate incoming webhook payload from Slant3D""" + + payload = await request.json() + + # Validate required fields from Slant3D API spec + required_fields = ["orderId", "status", "trackingNumber", "carrierCode"] + missing_fields = [field for field in required_fields if field not in payload] + + if missing_fields: + raise ValueError(f"Missing required fields: {', '.join(missing_fields)}") + + # Normalize payload structure + normalized_payload = { + "orderId": payload["orderId"], + "status": payload["status"], + "trackingNumber": payload["trackingNumber"], + "carrierCode": payload["carrierCode"], + } + + # Currently Slant3D only sends shipping notifications + # Convert status to lowercase for event format compatibility + event_type = f"order.{payload['status'].lower()}" + + return normalized_payload, event_type + + async def _deregister_webhook( + self, webhook: integrations.Webhook, credentials: Credentials + ) -> None: + """ + Note: Slant3D API currently doesn't provide a deregistration endpoint. + This would need to be handled through support. 
+ """ + # Log warning since we can't properly deregister + logger.warning( + f"Warning: Manual deregistration required for webhook {webhook.id}" + ) + pass diff --git a/autogpt_platform/backend/backend/integrations/webhooks/utils.py b/autogpt_platform/backend/backend/integrations/webhooks/utils.py new file mode 100644 index 000000000000..e53a18f0fb1c --- /dev/null +++ b/autogpt_platform/backend/backend/integrations/webhooks/utils.py @@ -0,0 +1,12 @@ +from backend.integrations.providers import ProviderName +from backend.util.settings import Config + +app_config = Config() + + +# TODO: add test to assert this matches the actual API route +def webhook_ingress_url(provider_name: ProviderName, webhook_id: str) -> str: + return ( + f"{app_config.platform_base_url}/api/integrations/{provider_name.value}" + f"/webhooks/{webhook_id}/ingress" + ) diff --git a/autogpt_platform/backend/backend/rest.py b/autogpt_platform/backend/backend/rest.py new file mode 100644 index 000000000000..e0da452ca2be --- /dev/null +++ b/autogpt_platform/backend/backend/rest.py @@ -0,0 +1,18 @@ +from backend.app import run_processes +from backend.executor import DatabaseManager, ExecutionScheduler +from backend.server.rest_api import AgentServer + + +def main(): + """ + Run all the processes required for the AutoGPT-server REST API. + """ + run_processes( + DatabaseManager(), + ExecutionScheduler(), + AgentServer(), + ) + + +if __name__ == "__main__": + main() diff --git a/autogpts/autogpt/autogpt/json_utils/__init__.py b/autogpt_platform/backend/backend/server/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/json_utils/__init__.py rename to autogpt_platform/backend/backend/server/__init__.py diff --git a/autogpt_platform/backend/backend/server/conn_manager.py b/autogpt_platform/backend/backend/server/conn_manager.py new file mode 100644 index 000000000000..6a4fb8a57860 --- /dev/null +++ b/autogpt_platform/backend/backend/server/conn_manager.py @@ -0,0 +1,43 @@ +from typing import Dict, Set + +from fastapi import WebSocket + +from backend.data import execution +from backend.server.model import Methods, WsMessage + + +class ConnectionManager: + def __init__(self): + self.active_connections: Set[WebSocket] = set() + self.subscriptions: Dict[str, Set[WebSocket]] = {} + + async def connect(self, websocket: WebSocket): + await websocket.accept() + self.active_connections.add(websocket) + + def disconnect(self, websocket: WebSocket): + self.active_connections.remove(websocket) + for subscribers in self.subscriptions.values(): + subscribers.discard(websocket) + + async def subscribe(self, graph_id: str, websocket: WebSocket): + if graph_id not in self.subscriptions: + self.subscriptions[graph_id] = set() + self.subscriptions[graph_id].add(websocket) + + async def unsubscribe(self, graph_id: str, websocket: WebSocket): + if graph_id in self.subscriptions: + self.subscriptions[graph_id].discard(websocket) + if not self.subscriptions[graph_id]: + del self.subscriptions[graph_id] + + async def send_execution_result(self, result: execution.ExecutionResult): + graph_id = result.graph_id + if graph_id in self.subscriptions: + message = WsMessage( + method=Methods.EXECUTION_EVENT, + channel=graph_id, + data=result.model_dump(), + ).model_dump_json() + for connection in self.subscriptions[graph_id]: + await connection.send_text(message) diff --git a/autogpt_platform/backend/backend/server/external/api.py b/autogpt_platform/backend/backend/server/external/api.py new file mode 100644 index 000000000000..3236766fddf1 
--- /dev/null +++ b/autogpt_platform/backend/backend/server/external/api.py @@ -0,0 +1,11 @@ +from fastapi import FastAPI + +from .routes.v1 import v1_router + +external_app = FastAPI( + title="AutoGPT External API", + description="External API for AutoGPT integrations", + docs_url="/docs", + version="1.0", +) +external_app.include_router(v1_router, prefix="/v1") diff --git a/autogpt_platform/backend/backend/server/external/middleware.py b/autogpt_platform/backend/backend/server/external/middleware.py new file mode 100644 index 000000000000..2878e3d310d1 --- /dev/null +++ b/autogpt_platform/backend/backend/server/external/middleware.py @@ -0,0 +1,37 @@ +from fastapi import Depends, HTTPException, Request +from fastapi.security import APIKeyHeader +from prisma.enums import APIKeyPermission + +from backend.data.api_key import has_permission, validate_api_key + +api_key_header = APIKeyHeader(name="X-API-Key") + + +async def require_api_key(request: Request): + """Base middleware for API key authentication""" + api_key = await api_key_header(request) + + if api_key is None: + raise HTTPException(status_code=401, detail="Missing API key") + + api_key_obj = await validate_api_key(api_key) + + if not api_key_obj: + raise HTTPException(status_code=401, detail="Invalid API key") + + request.state.api_key = api_key_obj + return api_key_obj + + +def require_permission(permission: APIKeyPermission): + """Dependency function for checking specific permissions""" + + async def check_permission(api_key=Depends(require_api_key)): + if not has_permission(api_key, permission): + raise HTTPException( + status_code=403, + detail=f"API key missing required permission: {permission}", + ) + return api_key + + return check_permission diff --git a/autogpts/autogpt/autogpt/llm/providers/__init__.py b/autogpt_platform/backend/backend/server/external/routes/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/llm/providers/__init__.py rename to autogpt_platform/backend/backend/server/external/routes/__init__.py diff --git a/autogpt_platform/backend/backend/server/external/routes/v1.py b/autogpt_platform/backend/backend/server/external/routes/v1.py new file mode 100644 index 000000000000..ecc64a60375d --- /dev/null +++ b/autogpt_platform/backend/backend/server/external/routes/v1.py @@ -0,0 +1,111 @@ +import logging +from collections import defaultdict +from typing import Any, Sequence + +from autogpt_libs.utils.cache import thread_cached +from fastapi import APIRouter, Depends, HTTPException +from prisma.enums import APIKeyPermission + +import backend.data.block +from backend.data import execution as execution_db +from backend.data import graph as graph_db +from backend.data.api_key import APIKey +from backend.data.block import BlockInput, CompletedBlockOutput +from backend.executor import ExecutionManager +from backend.server.external.middleware import require_permission +from backend.util.service import get_service_client +from backend.util.settings import Settings + + +@thread_cached +def execution_manager_client() -> ExecutionManager: + return get_service_client(ExecutionManager) + + +settings = Settings() +logger = logging.getLogger(__name__) + +v1_router = APIRouter() + + +@v1_router.get( + path="/blocks", + tags=["blocks"], + dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))], +) +def get_graph_blocks() -> Sequence[dict[Any, Any]]: + blocks = [block() for block in backend.data.block.get_blocks().values()] + return [b.to_dict() for b in blocks] + + +@v1_router.post( + 
path="/blocks/{block_id}/execute", + tags=["blocks"], + dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))], +) +def execute_graph_block( + block_id: str, + data: BlockInput, + api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)), +) -> CompletedBlockOutput: + obj = backend.data.block.get_block(block_id) + if not obj: + raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + + output = defaultdict(list) + for name, data in obj.execute(data): + output[name].append(data) + return output + + +@v1_router.post( + path="/graphs/{graph_id}/execute", + tags=["graphs"], +) +def execute_graph( + graph_id: str, + node_input: dict[Any, Any], + api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)), +) -> dict[str, Any]: + try: + graph_exec = execution_manager_client().add_execution( + graph_id, node_input, user_id=api_key.user_id + ) + return {"id": graph_exec.graph_exec_id} + except Exception as e: + msg = e.__str__().encode().decode("unicode_escape") + raise HTTPException(status_code=400, detail=msg) + + +@v1_router.get( + path="/graphs/{graph_id}/executions/{graph_exec_id}/results", + tags=["graphs"], +) +async def get_graph_execution_results( + graph_id: str, + graph_exec_id: str, + api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)), +) -> dict: + graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id) + if not graph: + raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + + results = await execution_db.get_execution_results(graph_exec_id) + + return { + "execution_id": graph_exec_id, + "nodes": [ + { + "node_id": result.node_id, + "input": ( + result.input_data.get("value") + if "value" in result.input_data + else result.input_data + ), + "output": result.output_data.get( + "response", result.output_data.get("result", []) + ), + } + for result in results + ], + } diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py new file mode 100644 index 000000000000..4ab01866b1ba --- /dev/null +++ b/autogpt_platform/backend/backend/server/integrations/router.py @@ -0,0 +1,416 @@ +import logging +from typing import TYPE_CHECKING, Annotated, Literal + +from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request +from pydantic import BaseModel, Field + +from backend.data.graph import set_node_webhook +from backend.data.integrations import ( + WebhookEvent, + get_all_webhooks_by_creds, + get_webhook, + publish_webhook_event, + wait_for_webhook_event, +) +from backend.data.model import Credentials, CredentialsType, OAuth2Credentials +from backend.executor.manager import ExecutionManager +from backend.integrations.creds_manager import IntegrationCredentialsManager +from backend.integrations.oauth import HANDLERS_BY_NAME +from backend.integrations.providers import ProviderName +from backend.integrations.webhooks import WEBHOOK_MANAGERS_BY_NAME +from backend.util.exceptions import NeedConfirmation +from backend.util.service import get_service_client +from backend.util.settings import Settings + +if TYPE_CHECKING: + from backend.integrations.oauth import BaseOAuthHandler + +from ..utils import get_user_id + +logger = logging.getLogger(__name__) +settings = Settings() +router = APIRouter() + +creds_manager = IntegrationCredentialsManager() + + +class LoginResponse(BaseModel): + login_url: str + state_token: str + + +@router.get("/{provider}/login") +def 
login( + provider: Annotated[ + ProviderName, Path(title="The provider to initiate an OAuth flow for") + ], + user_id: Annotated[str, Depends(get_user_id)], + request: Request, + scopes: Annotated[ + str, Query(title="Comma-separated list of authorization scopes") + ] = "", +) -> LoginResponse: + handler = _get_provider_oauth_handler(request, provider) + + requested_scopes = scopes.split(",") if scopes else [] + + # Generate and store a secure random state token along with the scopes + state_token, code_challenge = creds_manager.store.store_state_token( + user_id, provider, requested_scopes + ) + login_url = handler.get_login_url( + requested_scopes, state_token, code_challenge=code_challenge + ) + + return LoginResponse(login_url=login_url, state_token=state_token) + + +class CredentialsMetaResponse(BaseModel): + id: str + provider: str + type: CredentialsType + title: str | None + scopes: list[str] | None + username: str | None + + +@router.post("/{provider}/callback") +def callback( + provider: Annotated[ + ProviderName, Path(title="The target provider for this OAuth exchange") + ], + code: Annotated[str, Body(title="Authorization code acquired by user login")], + state_token: Annotated[str, Body(title="Anti-CSRF nonce")], + user_id: Annotated[str, Depends(get_user_id)], + request: Request, +) -> CredentialsMetaResponse: + logger.debug(f"Received OAuth callback for provider: {provider}") + handler = _get_provider_oauth_handler(request, provider) + + # Verify the state token + valid_state = creds_manager.store.verify_state_token(user_id, state_token, provider) + + if not valid_state: + logger.warning(f"Invalid or expired state token for user {user_id}") + raise HTTPException(status_code=400, detail="Invalid or expired state token") + try: + scopes = valid_state.scopes + logger.debug(f"Retrieved scopes from state token: {scopes}") + + scopes = handler.handle_default_scopes(scopes) + + credentials = handler.exchange_code_for_tokens( + code, scopes, valid_state.code_verifier + ) + + logger.debug(f"Received credentials with final scopes: {credentials.scopes}") + + # Linear returns scopes as a single string with spaces, so we need to split them + # TODO: make a bypass of this part of the OAuth handler + if len(credentials.scopes) == 1 and " " in credentials.scopes[0]: + credentials.scopes = credentials.scopes[0].split(" ") + + # Check if the granted scopes are sufficient for the requested scopes + if not set(scopes).issubset(set(credentials.scopes)): + # For now, we'll just log the warning and continue + logger.warning( + f"Granted scopes {credentials.scopes} for provider {provider.value} " + f"do not include all requested scopes {scopes}" + ) + + except Exception as e: + logger.error(f"Code->Token exchange failed for provider {provider.value}: {e}") + raise HTTPException( + status_code=400, detail=f"Failed to exchange code for tokens: {str(e)}" + ) + + # TODO: Allow specifying `title` to set on `credentials` + creds_manager.create(user_id, credentials) + + logger.debug( + f"Successfully processed OAuth callback for user {user_id} " + f"and provider {provider.value}" + ) + return CredentialsMetaResponse( + id=credentials.id, + provider=credentials.provider, + type=credentials.type, + title=credentials.title, + scopes=credentials.scopes, + username=credentials.username, + ) + + +@router.get("/credentials") +def list_credentials( + user_id: Annotated[str, Depends(get_user_id)], +) -> list[CredentialsMetaResponse]: + credentials = creds_manager.store.get_all_creds(user_id) + return [ + 
CredentialsMetaResponse( + id=cred.id, + provider=cred.provider, + type=cred.type, + title=cred.title, + scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None, + username=cred.username if isinstance(cred, OAuth2Credentials) else None, + ) + for cred in credentials + ] + + +@router.get("/{provider}/credentials") +def list_credentials_by_provider( + provider: Annotated[ + ProviderName, Path(title="The provider to list credentials for") + ], + user_id: Annotated[str, Depends(get_user_id)], +) -> list[CredentialsMetaResponse]: + credentials = creds_manager.store.get_creds_by_provider(user_id, provider) + return [ + CredentialsMetaResponse( + id=cred.id, + provider=cred.provider, + type=cred.type, + title=cred.title, + scopes=cred.scopes if isinstance(cred, OAuth2Credentials) else None, + username=cred.username if isinstance(cred, OAuth2Credentials) else None, + ) + for cred in credentials + ] + + +@router.get("/{provider}/credentials/{cred_id}") +def get_credential( + provider: Annotated[ + ProviderName, Path(title="The provider to retrieve credentials for") + ], + cred_id: Annotated[str, Path(title="The ID of the credentials to retrieve")], + user_id: Annotated[str, Depends(get_user_id)], +) -> Credentials: + credential = creds_manager.get(user_id, cred_id) + if not credential: + raise HTTPException(status_code=404, detail="Credentials not found") + if credential.provider != provider: + raise HTTPException( + status_code=404, detail="Credentials do not match the specified provider" + ) + return credential + + +@router.post("/{provider}/credentials", status_code=201) +def create_credentials( + user_id: Annotated[str, Depends(get_user_id)], + provider: Annotated[ + ProviderName, Path(title="The provider to create credentials for") + ], + credentials: Credentials, +) -> Credentials: + credentials.provider = provider + try: + creds_manager.create(user_id, credentials) + except Exception as e: + raise HTTPException( + status_code=500, detail=f"Failed to store credentials: {str(e)}" + ) + return credentials + + +class CredentialsDeletionResponse(BaseModel): + deleted: Literal[True] = True + revoked: bool | None = Field( + description="Indicates whether the credentials were also revoked by their " + "provider. `None`/`null` if not applicable, e.g. when deleting " + "non-revocable credentials such as API keys." 
+ ) + + +class CredentialsDeletionNeedsConfirmationResponse(BaseModel): + deleted: Literal[False] = False + need_confirmation: Literal[True] = True + message: str + + +@router.delete("/{provider}/credentials/{cred_id}") +async def delete_credentials( + request: Request, + provider: Annotated[ + ProviderName, Path(title="The provider to delete credentials for") + ], + cred_id: Annotated[str, Path(title="The ID of the credentials to delete")], + user_id: Annotated[str, Depends(get_user_id)], + force: Annotated[ + bool, Query(title="Whether to proceed if any linked webhooks are still in use") + ] = False, +) -> CredentialsDeletionResponse | CredentialsDeletionNeedsConfirmationResponse: + creds = creds_manager.store.get_creds_by_id(user_id, cred_id) + if not creds: + raise HTTPException(status_code=404, detail="Credentials not found") + if creds.provider != provider: + raise HTTPException( + status_code=404, detail="Credentials do not match the specified provider" + ) + + try: + await remove_all_webhooks_for_credentials(creds, force) + except NeedConfirmation as e: + return CredentialsDeletionNeedsConfirmationResponse(message=str(e)) + + creds_manager.delete(user_id, cred_id) + + tokens_revoked = None + if isinstance(creds, OAuth2Credentials): + handler = _get_provider_oauth_handler(request, provider) + tokens_revoked = handler.revoke_tokens(creds) + + return CredentialsDeletionResponse(revoked=tokens_revoked) + + +# ------------------------- WEBHOOK STUFF -------------------------- # + + +# ⚠️ Note +# No user auth check because this endpoint is for webhook ingress and relies on +# validation by the provider-specific `WebhooksManager`. +@router.post("/{provider}/webhooks/{webhook_id}/ingress") +async def webhook_ingress_generic( + request: Request, + provider: Annotated[ + ProviderName, Path(title="Provider where the webhook was registered") + ], + webhook_id: Annotated[str, Path(title="Our ID for the webhook")], +): + logger.debug(f"Received {provider.value} webhook ingress for ID {webhook_id}") + webhook_manager = WEBHOOK_MANAGERS_BY_NAME[provider]() + webhook = await get_webhook(webhook_id) + logger.debug(f"Webhook #{webhook_id}: {webhook}") + payload, event_type = await webhook_manager.validate_payload(webhook, request) + logger.debug( + f"Validated {provider.value} {webhook.webhook_type} {event_type} event " + f"with payload {payload}" + ) + + webhook_event = WebhookEvent( + provider=provider, + webhook_id=webhook_id, + event_type=event_type, + payload=payload, + ) + await publish_webhook_event(webhook_event) + logger.debug(f"Webhook event published: {webhook_event}") + + if not webhook.attached_nodes: + return + + executor = get_service_client(ExecutionManager) + for node in webhook.attached_nodes: + logger.debug(f"Webhook-attached node: {node}") + if not node.is_triggered_by_event_type(event_type): + logger.debug(f"Node #{node.id} doesn't trigger on event {event_type}") + continue + logger.debug(f"Executing graph #{node.graph_id} node #{node.id}") + executor.add_execution( + graph_id=node.graph_id, + graph_version=node.graph_version, + data={f"webhook_{webhook_id}_payload": payload}, + user_id=webhook.user_id, + ) + + +@router.post("/webhooks/{webhook_id}/ping") +async def webhook_ping( + webhook_id: Annotated[str, Path(title="Our ID for the webhook")], + user_id: Annotated[str, Depends(get_user_id)], # require auth +): + webhook = await get_webhook(webhook_id) + webhook_manager = WEBHOOK_MANAGERS_BY_NAME[webhook.provider]() + + credentials = ( + creds_manager.get(user_id, 
webhook.credentials_id) + if webhook.credentials_id + else None + ) + try: + await webhook_manager.trigger_ping(webhook, credentials) + except NotImplementedError: + return False + + if not await wait_for_webhook_event(webhook_id, event_type="ping", timeout=10): + raise HTTPException(status_code=504, detail="Webhook ping timed out") + + return True + + +# --------------------------- UTILITIES ---------------------------- # + + +async def remove_all_webhooks_for_credentials( + credentials: Credentials, force: bool = False +) -> None: + """ + Remove and deregister all webhooks that were registered using the given credentials. + + Params: + credentials: The credentials for which to remove the associated webhooks. + force: Whether to proceed if any of the webhooks are still in use. + + Raises: + NeedConfirmation: If any of the webhooks are still in use and `force` is `False` + """ + webhooks = await get_all_webhooks_by_creds(credentials.id) + if credentials.provider not in WEBHOOK_MANAGERS_BY_NAME: + if webhooks: + logger.error( + f"Credentials #{credentials.id} for provider {credentials.provider} " + f"are attached to {len(webhooks)} webhooks, " + f"but there is no available WebhooksHandler for {credentials.provider}" + ) + return + if any(w.attached_nodes for w in webhooks) and not force: + raise NeedConfirmation( + "Some webhooks linked to these credentials are still in use by an agent" + ) + for webhook in webhooks: + # Unlink all nodes + for node in webhook.attached_nodes or []: + await set_node_webhook(node.id, None) + + # Prune the webhook + webhook_manager = WEBHOOK_MANAGERS_BY_NAME[credentials.provider]() + success = await webhook_manager.prune_webhook_if_dangling( + webhook.id, credentials + ) + if not success: + logger.warning(f"Webhook #{webhook.id} failed to prune") + + +def _get_provider_oauth_handler( + req: Request, provider_name: ProviderName +) -> "BaseOAuthHandler": + if provider_name not in HANDLERS_BY_NAME: + raise HTTPException( + status_code=404, + detail=f"Provider '{provider_name.value}' does not support OAuth", + ) + + client_id = getattr(settings.secrets, f"{provider_name.value}_client_id") + client_secret = getattr(settings.secrets, f"{provider_name.value}_client_secret") + if not (client_id and client_secret): + raise HTTPException( + status_code=501, + detail=( + f"Integration with provider '{provider_name.value}' is not configured" + ), + ) + + handler_class = HANDLERS_BY_NAME[provider_name] + frontend_base_url = ( + settings.config.frontend_base_url + or settings.config.platform_base_url + or str(req.base_url) + ) + return handler_class( + client_id=client_id, + client_secret=client_secret, + redirect_uri=f"{frontend_base_url}/auth/integrations/oauth_callback", + ) diff --git a/autogpt_platform/backend/backend/server/integrations/utils.py b/autogpt_platform/backend/backend/server/integrations/utils.py new file mode 100644 index 000000000000..0fa1052e5be0 --- /dev/null +++ b/autogpt_platform/backend/backend/server/integrations/utils.py @@ -0,0 +1,11 @@ +from supabase import Client, create_client + +from backend.util.settings import Settings + +settings = Settings() + + +def get_supabase() -> Client: + return create_client( + settings.secrets.supabase_url, settings.secrets.supabase_service_role_key + ) diff --git a/autogpt_platform/backend/backend/server/model.py b/autogpt_platform/backend/backend/server/model.py new file mode 100644 index 000000000000..14a7925c6b7f --- /dev/null +++ b/autogpt_platform/backend/backend/server/model.py @@ -0,0 +1,63 @@ +import enum 
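+# Models shared across the server API: the WebSocket message envelope (Methods,
+# WsMessage) plus request bodies (CreateGraph, CreateAPIKeyRequest, RequestTopUp,
+# SetGraphActiveVersion, UpdatePermissionsRequest) consumed by routers/v1.py.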
+from typing import Any, List, Optional, Union + +import pydantic + +import backend.data.graph +from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash + + +class Methods(enum.Enum): + SUBSCRIBE = "subscribe" + UNSUBSCRIBE = "unsubscribe" + EXECUTION_EVENT = "execution_event" + ERROR = "error" + HEARTBEAT = "heartbeat" + + +class WsMessage(pydantic.BaseModel): + method: Methods + data: Optional[Union[dict[str, Any], list[Any], str]] = None + success: bool | None = None + channel: str | None = None + error: str | None = None + + +class ExecutionSubscription(pydantic.BaseModel): + graph_id: str + + +class SubscriptionDetails(pydantic.BaseModel): + event_type: str + channel: str + graph_id: str + + +class CreateGraph(pydantic.BaseModel): + template_id: str | None = None + template_version: int | None = None + graph: backend.data.graph.Graph | None = None + + +class CreateAPIKeyRequest(pydantic.BaseModel): + name: str + permissions: List[APIKeyPermission] + description: Optional[str] = None + + +class CreateAPIKeyResponse(pydantic.BaseModel): + api_key: APIKeyWithoutHash + plain_text_key: str + + +class SetGraphActiveVersion(pydantic.BaseModel): + active_graph_version: int + + +class UpdatePermissionsRequest(pydantic.BaseModel): + permissions: List[APIKeyPermission] + + +class RequestTopUp(pydantic.BaseModel): + amount: int + """Amount of credits to top up.""" diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py new file mode 100644 index 000000000000..e247d2e8fd6b --- /dev/null +++ b/autogpt_platform/backend/backend/server/rest_api.py @@ -0,0 +1,189 @@ +import contextlib +import logging +from typing import Any, Optional + +import autogpt_libs.auth.models +import fastapi +import fastapi.responses +import starlette.middleware.cors +import uvicorn +from autogpt_libs.feature_flag.client import ( + initialize_launchdarkly, + shutdown_launchdarkly, +) + +import backend.data.block +import backend.data.db +import backend.data.graph +import backend.data.user +import backend.server.routers.v1 +import backend.server.v2.library.routes +import backend.server.v2.store.model +import backend.server.v2.store.routes +import backend.util.service +import backend.util.settings +from backend.server.external.api import external_app + +settings = backend.util.settings.Settings() +logger = logging.getLogger(__name__) + +logging.getLogger("autogpt_libs").setLevel(logging.INFO) + + +@contextlib.contextmanager +def launch_darkly_context(): + if settings.config.app_env != backend.util.settings.AppEnvironment.LOCAL: + initialize_launchdarkly() + try: + yield + finally: + shutdown_launchdarkly() + else: + yield + + +@contextlib.asynccontextmanager +async def lifespan_context(app: fastapi.FastAPI): + await backend.data.db.connect() + await backend.data.block.initialize_blocks() + await backend.data.user.migrate_and_encrypt_user_integrations() + await backend.data.graph.fix_llm_provider_credentials() + with launch_darkly_context(): + yield + await backend.data.db.disconnect() + + +docs_url = ( + "/docs" + if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL + else None +) + +app = fastapi.FastAPI( + title="AutoGPT Agent Server", + description=( + "This server is used to execute agents that are created by the " + "AutoGPT system." 
+ ), + summary="AutoGPT Agent Server", + version="0.1", + lifespan=lifespan_context, + docs_url=docs_url, +) + + +def handle_internal_http_error(status_code: int = 500, log_error: bool = True): + def handler(request: fastapi.Request, exc: Exception): + if log_error: + logger.exception(f"{request.method} {request.url.path} failed: {exc}") + return fastapi.responses.JSONResponse( + content={ + "message": f"{request.method} {request.url.path} failed", + "detail": str(exc), + }, + status_code=status_code, + ) + + return handler + + +app.add_exception_handler(ValueError, handle_internal_http_error(400)) +app.add_exception_handler(Exception, handle_internal_http_error(500)) +app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api") +app.include_router( + backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store" +) +app.include_router( + backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library" +) + +app.mount("/external-api", external_app) + + +@app.get(path="/health", tags=["health"], dependencies=[]) +async def health(): + return {"status": "healthy"} + + +class AgentServer(backend.util.service.AppProcess): + def run(self): + server_app = starlette.middleware.cors.CORSMiddleware( + app=app, + allow_origins=settings.config.backend_cors_allow_origins, + allow_credentials=True, + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers + ) + uvicorn.run( + server_app, + host=backend.util.settings.Config().agent_api_host, + port=backend.util.settings.Config().agent_api_port, + ) + + @staticmethod + async def test_execute_graph( + graph_id: str, + node_input: dict[str, Any], + user_id: str, + graph_version: Optional[int] = None, + ): + return backend.server.routers.v1.execute_graph( + user_id=user_id, + graph_id=graph_id, + graph_version=graph_version, + node_input=node_input, + ) + + @staticmethod + async def test_get_graph( + graph_id: str, + graph_version: int, + user_id: str, + ): + return await backend.server.routers.v1.get_graph( + graph_id, user_id, graph_version + ) + + @staticmethod + async def test_create_graph( + create_graph: backend.server.routers.v1.CreateGraph, + user_id: str, + ): + return await backend.server.routers.v1.create_new_graph(create_graph, user_id) + + @staticmethod + async def test_get_graph_run_status(graph_exec_id: str, user_id: str): + execution = await backend.data.graph.get_execution( + user_id=user_id, execution_id=graph_exec_id + ) + if not execution: + raise ValueError(f"Execution {graph_exec_id} not found") + return execution.status + + @staticmethod + async def test_get_graph_run_node_execution_results( + graph_id: str, graph_exec_id: str, user_id: str + ): + return await backend.server.routers.v1.get_graph_run_node_execution_results( + graph_id, graph_exec_id, user_id + ) + + @staticmethod + async def test_delete_graph(graph_id: str, user_id: str): + return await backend.server.routers.v1.delete_graph(graph_id, user_id) + + @staticmethod + async def test_create_store_listing( + request: backend.server.v2.store.model.StoreSubmissionRequest, user_id: str + ): + return await backend.server.v2.store.routes.create_submission(request, user_id) + + @staticmethod + async def test_review_store_listing( + request: backend.server.v2.store.model.ReviewSubmissionRequest, + user: autogpt_libs.auth.models.User, + ): + return await backend.server.v2.store.routes.review_submission(request, user) + + def set_test_dependency_overrides(self, overrides: dict): + 
app.dependency_overrides.update(overrides) diff --git a/autogpt_platform/backend/backend/server/routers/analytics.py b/autogpt_platform/backend/backend/server/routers/analytics.py new file mode 100644 index 000000000000..d7416c3e0e5a --- /dev/null +++ b/autogpt_platform/backend/backend/server/routers/analytics.py @@ -0,0 +1,49 @@ +"""Analytics API""" + +from typing import Annotated + +import fastapi + +import backend.data.analytics +from backend.server.utils import get_user_id + +router = fastapi.APIRouter() + + +@router.post(path="/log_raw_metric") +async def log_raw_metric( + user_id: Annotated[str, fastapi.Depends(get_user_id)], + metric_name: Annotated[str, fastapi.Body(..., embed=True)], + metric_value: Annotated[float, fastapi.Body(..., embed=True)], + data_string: Annotated[str, fastapi.Body(..., embed=True)], +): + result = await backend.data.analytics.log_raw_metric( + user_id=user_id, + metric_name=metric_name, + metric_value=metric_value, + data_string=data_string, + ) + return result.id + + +@router.post("/log_raw_analytics") +async def log_raw_analytics( + user_id: Annotated[str, fastapi.Depends(get_user_id)], + type: Annotated[str, fastapi.Body(..., embed=True)], + data: Annotated[ + dict, + fastapi.Body(..., embed=True, description="The data to log"), + ], + data_index: Annotated[ + str, + fastapi.Body( + ..., + embed=True, + description="Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc.", + ), + ], +): + result = await backend.data.analytics.log_raw_analytics( + user_id, type, data, data_index + ) + return result.id diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py new file mode 100644 index 000000000000..39d16632b8f0 --- /dev/null +++ b/autogpt_platform/backend/backend/server/routers/v1.py @@ -0,0 +1,775 @@ +import asyncio +import logging +from collections import defaultdict +from typing import TYPE_CHECKING, Annotated, Any, Sequence + +import pydantic +import stripe +from autogpt_libs.auth.middleware import auth_middleware +from autogpt_libs.feature_flag.client import feature_flag +from autogpt_libs.utils.cache import thread_cached +from fastapi import APIRouter, Depends, HTTPException, Request, Response +from typing_extensions import Optional, TypedDict + +import backend.data.block +import backend.server.integrations.router +import backend.server.routers.analytics +from backend.data import execution as execution_db +from backend.data import graph as graph_db +from backend.data.api_key import ( + APIKeyError, + APIKeyNotFoundError, + APIKeyPermissionError, + APIKeyWithoutHash, + generate_api_key, + get_api_key_by_id, + list_user_api_keys, + revoke_api_key, + suspend_api_key, + update_api_key_permissions, +) +from backend.data.block import BlockInput, CompletedBlockOutput +from backend.data.credit import ( + AutoTopUpConfig, + get_auto_top_up, + get_block_costs, + get_stripe_customer_id, + get_user_credit_model, + set_auto_top_up, +) +from backend.data.user import get_or_create_user +from backend.executor import ExecutionManager, ExecutionScheduler, scheduler +from backend.integrations.creds_manager import IntegrationCredentialsManager +from backend.integrations.webhooks.graph_lifecycle_hooks import ( + on_graph_activate, + on_graph_deactivate, +) +from backend.server.model import ( + CreateAPIKeyRequest, + CreateAPIKeyResponse, + CreateGraph, + RequestTopUp, + SetGraphActiveVersion, + UpdatePermissionsRequest, +) +from 
backend.server.utils import get_user_id +from backend.util.service import get_service_client +from backend.util.settings import Settings + +if TYPE_CHECKING: + from backend.data.model import Credentials + + +@thread_cached +def execution_manager_client() -> ExecutionManager: + return get_service_client(ExecutionManager) + + +@thread_cached +def execution_scheduler_client() -> ExecutionScheduler: + return get_service_client(ExecutionScheduler) + + +settings = Settings() +logger = logging.getLogger(__name__) +integration_creds_manager = IntegrationCredentialsManager() + +_user_credit_model = get_user_credit_model() + +# Define the API routes +v1_router = APIRouter() + +v1_router.include_router( + backend.server.integrations.router.router, + prefix="/integrations", + tags=["integrations"], +) + +v1_router.include_router( + backend.server.routers.analytics.router, + prefix="/analytics", + tags=["analytics"], + dependencies=[Depends(auth_middleware)], +) + + +######################################################## +##################### Auth ############################# +######################################################## + + +@v1_router.post("/auth/user", tags=["auth"], dependencies=[Depends(auth_middleware)]) +async def get_or_create_user_route(user_data: dict = Depends(auth_middleware)): + user = await get_or_create_user(user_data) + return user.model_dump() + + +######################################################## +##################### Blocks ########################### +######################################################## + + +@v1_router.get(path="/blocks", tags=["blocks"], dependencies=[Depends(auth_middleware)]) +def get_graph_blocks() -> Sequence[dict[Any, Any]]: + blocks = [block() for block in backend.data.block.get_blocks().values()] + costs = get_block_costs() + return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks] + + +@v1_router.post( + path="/blocks/{block_id}/execute", + tags=["blocks"], + dependencies=[Depends(auth_middleware)], +) +def execute_graph_block(block_id: str, data: BlockInput) -> CompletedBlockOutput: + obj = backend.data.block.get_block(block_id) + if not obj: + raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.") + + output = defaultdict(list) + for name, data in obj.execute(data): + output[name].append(data) + return output + + +######################################################## +##################### Credits ########################## +######################################################## + + +@v1_router.get(path="/credits", dependencies=[Depends(auth_middleware)]) +async def get_user_credits( + user_id: Annotated[str, Depends(get_user_id)], +) -> dict[str, int]: + # Credits can go negative, so ensure it's at least 0 for user to see. 
+ return {"credits": max(await _user_credit_model.get_credits(user_id), 0)} + + +@v1_router.post( + path="/credits", tags=["credits"], dependencies=[Depends(auth_middleware)] +) +async def request_top_up( + request: RequestTopUp, user_id: Annotated[str, Depends(get_user_id)] +): + checkout_url = await _user_credit_model.top_up_intent(user_id, request.amount) + return {"checkout_url": checkout_url} + + +@v1_router.patch( + path="/credits", tags=["credits"], dependencies=[Depends(auth_middleware)] +) +async def fulfill_checkout(user_id: Annotated[str, Depends(get_user_id)]): + await _user_credit_model.fulfill_checkout(user_id=user_id) + return Response(status_code=200) + + +@v1_router.post( + path="/credits/auto-top-up", + tags=["credits"], + dependencies=[Depends(auth_middleware)], +) +async def configure_user_auto_top_up( + request: AutoTopUpConfig, user_id: Annotated[str, Depends(get_user_id)] +) -> str: + if request.threshold < 0: + raise ValueError("Threshold must be greater than 0") + if request.amount < request.threshold: + raise ValueError("Amount must be greater than or equal to threshold") + + current_balance = await _user_credit_model.get_credits(user_id) + + if current_balance < request.threshold: + await _user_credit_model.top_up_credits(user_id, request.amount) + else: + await _user_credit_model.top_up_credits(user_id, 0) + + await set_auto_top_up( + user_id, AutoTopUpConfig(threshold=request.threshold, amount=request.amount) + ) + return "Auto top-up settings updated" + + +@v1_router.get( + path="/credits/auto-top-up", + tags=["credits"], + dependencies=[Depends(auth_middleware)], +) +async def get_user_auto_top_up( + user_id: Annotated[str, Depends(get_user_id)] +) -> AutoTopUpConfig: + return await get_auto_top_up(user_id) + + +@v1_router.post(path="/credits/stripe_webhook", tags=["credits"]) +async def stripe_webhook(request: Request): + # Get the raw request body + payload = await request.body() + # Get the signature header + sig_header = request.headers.get("stripe-signature") + + try: + event = stripe.Webhook.construct_event( + payload, sig_header, settings.secrets.stripe_webhook_secret + ) + except ValueError: + # Invalid payload + raise HTTPException(status_code=400) + except stripe.SignatureVerificationError: + # Invalid signature + raise HTTPException(status_code=400) + + if ( + event["type"] == "checkout.session.completed" + or event["type"] == "checkout.session.async_payment_succeeded" + ): + await _user_credit_model.fulfill_checkout( + session_id=event["data"]["object"]["id"] + ) + + return Response(status_code=200) + + +@v1_router.get(path="/credits/manage", dependencies=[Depends(auth_middleware)]) +async def manage_payment_method( + user_id: Annotated[str, Depends(get_user_id)], +) -> dict[str, str]: + session = stripe.billing_portal.Session.create( + customer=await get_stripe_customer_id(user_id), + return_url=settings.config.platform_base_url + "/marketplace/credits", + ) + if not session: + raise HTTPException( + status_code=400, detail="Failed to create billing portal session" + ) + return {"url": session.url} + + +######################################################## +##################### Graphs ########################### +######################################################## + + +class DeleteGraphResponse(TypedDict): + version_counts: int + + +@v1_router.get(path="/graphs", tags=["graphs"], dependencies=[Depends(auth_middleware)]) +async def get_graphs( + user_id: Annotated[str, Depends(get_user_id)] +) -> Sequence[graph_db.GraphModel]: + return 
await graph_db.get_graphs(filter_by="active", user_id=user_id) + + +@v1_router.get( + path="/graphs/{graph_id}", tags=["graphs"], dependencies=[Depends(auth_middleware)] +) +@v1_router.get( + path="/graphs/{graph_id}/versions/{version}", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def get_graph( + graph_id: str, + user_id: Annotated[str, Depends(get_user_id)], + version: int | None = None, + hide_credentials: bool = False, +) -> graph_db.GraphModel: + graph = await graph_db.get_graph( + graph_id, version, user_id=user_id, for_export=hide_credentials + ) + if not graph: + raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + return graph + + +@v1_router.get( + path="/graphs/{graph_id}/versions", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +@v1_router.get( + path="/templates/{graph_id}/versions", + tags=["templates", "graphs"], + dependencies=[Depends(auth_middleware)], +) +async def get_graph_all_versions( + graph_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> Sequence[graph_db.GraphModel]: + graphs = await graph_db.get_graph_all_versions(graph_id, user_id=user_id) + if not graphs: + raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + return graphs + + +@v1_router.post( + path="/graphs", tags=["graphs"], dependencies=[Depends(auth_middleware)] +) +async def create_new_graph( + create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)] +) -> graph_db.GraphModel: + return await do_create_graph(create_graph, is_template=False, user_id=user_id) + + +async def do_create_graph( + create_graph: CreateGraph, + is_template: bool, + # user_id doesn't have to be annotated like on other endpoints, + # because create_graph isn't used directly as an endpoint + user_id: str, +) -> graph_db.GraphModel: + if create_graph.graph: + graph = graph_db.make_graph_model(create_graph.graph, user_id) + elif create_graph.template_id: + # Create a new graph from a template + graph = await graph_db.get_graph( + create_graph.template_id, + create_graph.template_version, + template=True, + user_id=user_id, + ) + if not graph: + raise HTTPException( + 400, detail=f"Template #{create_graph.template_id} not found" + ) + graph.version = 1 + else: + raise HTTPException( + status_code=400, detail="Either graph or template_id must be provided." 
+ ) + + graph.is_template = is_template + graph.is_active = not is_template + graph.reassign_ids(user_id=user_id, reassign_graph_id=True) + + graph = await graph_db.create_graph(graph, user_id=user_id) + graph = await on_graph_activate( + graph, + get_credentials=lambda id: integration_creds_manager.get(user_id, id), + ) + return graph + + +@v1_router.delete( + path="/graphs/{graph_id}", tags=["graphs"], dependencies=[Depends(auth_middleware)] +) +async def delete_graph( + graph_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> DeleteGraphResponse: + if active_version := await graph_db.get_graph(graph_id, user_id=user_id): + + def get_credentials(credentials_id: str) -> "Credentials | None": + return integration_creds_manager.get(user_id, credentials_id) + + await on_graph_deactivate(active_version, get_credentials) + + return {"version_counts": await graph_db.delete_graph(graph_id, user_id=user_id)} + + +@v1_router.put( + path="/graphs/{graph_id}", tags=["graphs"], dependencies=[Depends(auth_middleware)] +) +@v1_router.put( + path="/templates/{graph_id}", + tags=["templates", "graphs"], + dependencies=[Depends(auth_middleware)], +) +async def update_graph( + graph_id: str, + graph: graph_db.Graph, + user_id: Annotated[str, Depends(get_user_id)], +) -> graph_db.GraphModel: + # Sanity check + if graph.id and graph.id != graph_id: + raise HTTPException(400, detail="Graph ID does not match ID in URI") + + # Determine new version + existing_versions = await graph_db.get_graph_all_versions(graph_id, user_id=user_id) + if not existing_versions: + raise HTTPException(404, detail=f"Graph #{graph_id} not found") + latest_version_number = max(g.version for g in existing_versions) + graph.version = latest_version_number + 1 + + latest_version_graph = next( + v for v in existing_versions if v.version == latest_version_number + ) + current_active_version = next((v for v in existing_versions if v.is_active), None) + if latest_version_graph.is_template != graph.is_template: + raise HTTPException( + 400, detail="Changing is_template on an existing graph is forbidden" + ) + graph.is_active = not graph.is_template + graph = graph_db.make_graph_model(graph, user_id) + graph.reassign_ids(user_id=user_id) + + new_graph_version = await graph_db.create_graph(graph, user_id=user_id) + + if new_graph_version.is_active: + + def get_credentials(credentials_id: str) -> "Credentials | None": + return integration_creds_manager.get(user_id, credentials_id) + + # Handle activation of the new graph first to ensure continuity + new_graph_version = await on_graph_activate( + new_graph_version, + get_credentials=get_credentials, + ) + # Ensure new version is the only active version + await graph_db.set_graph_active_version( + graph_id=graph_id, version=new_graph_version.version, user_id=user_id + ) + if current_active_version: + # Handle deactivation of the previously active version + await on_graph_deactivate( + current_active_version, + get_credentials=get_credentials, + ) + + return new_graph_version + + +@v1_router.put( + path="/graphs/{graph_id}/versions/active", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def set_graph_active_version( + graph_id: str, + request_body: SetGraphActiveVersion, + user_id: Annotated[str, Depends(get_user_id)], +): + new_active_version = request_body.active_graph_version + new_active_graph = await graph_db.get_graph( + graph_id, new_active_version, user_id=user_id + ) + if not new_active_graph: + raise HTTPException(404, f"Graph #{graph_id} 
v{new_active_version} not found") + + current_active_graph = await graph_db.get_graph(graph_id, user_id=user_id) + + def get_credentials(credentials_id: str) -> "Credentials | None": + return integration_creds_manager.get(user_id, credentials_id) + + # Handle activation of the new graph first to ensure continuity + await on_graph_activate( + new_active_graph, + get_credentials=get_credentials, + ) + # Ensure new version is the only active version + await graph_db.set_graph_active_version( + graph_id=graph_id, + version=new_active_version, + user_id=user_id, + ) + if current_active_graph and current_active_graph.version != new_active_version: + # Handle deactivation of the previously active version + await on_graph_deactivate( + current_active_graph, + get_credentials=get_credentials, + ) + + +@v1_router.post( + path="/graphs/{graph_id}/execute", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +def execute_graph( + graph_id: str, + node_input: dict[Any, Any], + user_id: Annotated[str, Depends(get_user_id)], + graph_version: Optional[int] = None, +) -> dict[str, Any]: # FIXME: add proper return type + try: + graph_exec = execution_manager_client().add_execution( + graph_id, node_input, user_id=user_id, graph_version=graph_version + ) + return {"id": graph_exec.graph_exec_id} + except Exception as e: + msg = e.__str__().encode().decode("unicode_escape") + raise HTTPException(status_code=400, detail=msg) + + +@v1_router.post( + path="/graphs/{graph_id}/executions/{graph_exec_id}/stop", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def stop_graph_run( + graph_exec_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> Sequence[execution_db.ExecutionResult]: + if not await graph_db.get_execution(user_id=user_id, execution_id=graph_exec_id): + raise HTTPException(404, detail=f"Agent execution #{graph_exec_id} not found") + + await asyncio.to_thread( + lambda: execution_manager_client().cancel_execution(graph_exec_id) + ) + + # Retrieve & return canceled graph execution in its final state + return await execution_db.get_execution_results(graph_exec_id) + + +@v1_router.get( + path="/executions", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def get_executions( + user_id: Annotated[str, Depends(get_user_id)], +) -> list[graph_db.GraphExecution]: + return await graph_db.get_executions(user_id=user_id) + + +@v1_router.get( + path="/graphs/{graph_id}/executions/{graph_exec_id}", + tags=["graphs"], + dependencies=[Depends(auth_middleware)], +) +async def get_graph_run_node_execution_results( + graph_id: str, + graph_exec_id: str, + user_id: Annotated[str, Depends(get_user_id)], +) -> Sequence[execution_db.ExecutionResult]: + graph = await graph_db.get_graph(graph_id, user_id=user_id) + if not graph: + raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.") + + return await execution_db.get_execution_results(graph_exec_id) + + +######################################################## +##################### Templates ######################## +######################################################## + + +@v1_router.get( + path="/templates", + tags=["graphs", "templates"], + dependencies=[Depends(auth_middleware)], +) +async def get_templates( + user_id: Annotated[str, Depends(get_user_id)] +) -> Sequence[graph_db.GraphModel]: + return await graph_db.get_graphs(filter_by="template", user_id=user_id) + + +@v1_router.get( + path="/templates/{graph_id}", + tags=["templates", "graphs"], + 
dependencies=[Depends(auth_middleware)], +) +async def get_template( + graph_id: str, version: int | None = None +) -> graph_db.GraphModel: + graph = await graph_db.get_graph(graph_id, version, template=True) + if not graph: + raise HTTPException(status_code=404, detail=f"Template #{graph_id} not found.") + return graph + + +@v1_router.post( + path="/templates", + tags=["templates", "graphs"], + dependencies=[Depends(auth_middleware)], +) +async def create_new_template( + create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)] +) -> graph_db.GraphModel: + return await do_create_graph(create_graph, is_template=True, user_id=user_id) + + +######################################################## +##################### Schedules ######################## +######################################################## + + +class ScheduleCreationRequest(pydantic.BaseModel): + cron: str + input_data: dict[Any, Any] + graph_id: str + + +@v1_router.post( + path="/schedules", + tags=["schedules"], + dependencies=[Depends(auth_middleware)], +) +async def create_schedule( + user_id: Annotated[str, Depends(get_user_id)], + schedule: ScheduleCreationRequest, +) -> scheduler.JobInfo: + graph = await graph_db.get_graph(schedule.graph_id, user_id=user_id) + if not graph: + raise HTTPException( + status_code=404, detail=f"Graph #{schedule.graph_id} not found." + ) + + return await asyncio.to_thread( + lambda: execution_scheduler_client().add_execution_schedule( + graph_id=schedule.graph_id, + graph_version=graph.version, + cron=schedule.cron, + input_data=schedule.input_data, + user_id=user_id, + ) + ) + + +@v1_router.delete( + path="/schedules/{schedule_id}", + tags=["schedules"], + dependencies=[Depends(auth_middleware)], +) +def delete_schedule( + schedule_id: str, + user_id: Annotated[str, Depends(get_user_id)], +) -> dict[Any, Any]: + execution_scheduler_client().delete_schedule(schedule_id, user_id=user_id) + return {"id": schedule_id} + + +@v1_router.get( + path="/schedules", + tags=["schedules"], + dependencies=[Depends(auth_middleware)], +) +def get_execution_schedules( + user_id: Annotated[str, Depends(get_user_id)], + graph_id: str | None = None, +) -> list[scheduler.JobInfo]: + return execution_scheduler_client().get_execution_schedules( + user_id=user_id, + graph_id=graph_id, + ) + + +######################################################## +##################### API KEY ############################## +######################################################## + + +@v1_router.post( + "/api-keys", + response_model=CreateAPIKeyResponse, + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +async def create_api_key( + request: CreateAPIKeyRequest, user_id: Annotated[str, Depends(get_user_id)] +) -> CreateAPIKeyResponse: + """Create a new API key""" + try: + api_key, plain_text = await generate_api_key( + name=request.name, + user_id=user_id, + permissions=request.permissions, + description=request.description, + ) + return CreateAPIKeyResponse(api_key=api_key, plain_text_key=plain_text) + except APIKeyError as e: + logger.error(f"Failed to create API key: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + + +@v1_router.get( + "/api-keys", + response_model=list[APIKeyWithoutHash] | dict[str, str], + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +async def get_api_keys( + user_id: Annotated[str, Depends(get_user_id)] +) -> list[APIKeyWithoutHash]: + """List all API keys for the user""" + try: + return await list_user_api_keys(user_id) + except 
APIKeyError as e: + logger.error(f"Failed to list API keys: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + + +@v1_router.get( + "/api-keys/{key_id}", + response_model=APIKeyWithoutHash, + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +async def get_api_key( + key_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> APIKeyWithoutHash: + """Get a specific API key""" + try: + api_key = await get_api_key_by_id(key_id, user_id) + if not api_key: + raise HTTPException(status_code=404, detail="API key not found") + return api_key + except APIKeyError as e: + logger.error(f"Failed to get API key: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + + +@v1_router.delete( + "/api-keys/{key_id}", + response_model=APIKeyWithoutHash, + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +@feature_flag("api-keys-enabled") +async def delete_api_key( + key_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> Optional[APIKeyWithoutHash]: + """Revoke an API key""" + try: + return await revoke_api_key(key_id, user_id) + except APIKeyNotFoundError: + raise HTTPException(status_code=404, detail="API key not found") + except APIKeyPermissionError: + raise HTTPException(status_code=403, detail="Permission denied") + except APIKeyError as e: + logger.error(f"Failed to revoke API key: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + + +@v1_router.post( + "/api-keys/{key_id}/suspend", + response_model=APIKeyWithoutHash, + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +@feature_flag("api-keys-enabled") +async def suspend_key( + key_id: str, user_id: Annotated[str, Depends(get_user_id)] +) -> Optional[APIKeyWithoutHash]: + """Suspend an API key""" + try: + return await suspend_api_key(key_id, user_id) + except APIKeyNotFoundError: + raise HTTPException(status_code=404, detail="API key not found") + except APIKeyPermissionError: + raise HTTPException(status_code=403, detail="Permission denied") + except APIKeyError as e: + logger.error(f"Failed to suspend API key: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) + + +@v1_router.put( + "/api-keys/{key_id}/permissions", + response_model=APIKeyWithoutHash, + tags=["api-keys"], + dependencies=[Depends(auth_middleware)], +) +@feature_flag("api-keys-enabled") +async def update_permissions( + key_id: str, + request: UpdatePermissionsRequest, + user_id: Annotated[str, Depends(get_user_id)], +) -> Optional[APIKeyWithoutHash]: + """Update API key permissions""" + try: + return await update_api_key_permissions(key_id, user_id, request.permissions) + except APIKeyNotFoundError: + raise HTTPException(status_code=404, detail="API key not found") + except APIKeyPermissionError: + raise HTTPException(status_code=403, detail="Permission denied") + except APIKeyError as e: + logger.error(f"Failed to update API key permissions: {str(e)}") + raise HTTPException(status_code=400, detail=str(e)) diff --git a/autogpt_platform/backend/backend/server/utils.py b/autogpt_platform/backend/backend/server/utils.py new file mode 100644 index 000000000000..c31cd0d4ee58 --- /dev/null +++ b/autogpt_platform/backend/backend/server/utils.py @@ -0,0 +1,11 @@ +from autogpt_libs.auth.depends import requires_user +from autogpt_libs.auth.models import User +from fastapi import Depends + +from backend.util.settings import Settings + +settings = Settings() + + +def get_user_id(user: User = Depends(requires_user)) -> str: + return user.user_id diff --git 
a/autogpts/autogpt/autogpt/models/__init__.py b/autogpt_platform/backend/backend/server/v2/library/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/models/__init__.py rename to autogpt_platform/backend/backend/server/v2/library/__init__.py diff --git a/autogpt_platform/backend/backend/server/v2/library/db.py b/autogpt_platform/backend/backend/server/v2/library/db.py new file mode 100644 index 000000000000..8d142ef40c76 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/db.py @@ -0,0 +1,165 @@ +import logging +from typing import List + +import prisma.errors +import prisma.models +import prisma.types + +import backend.data.graph +import backend.data.includes +import backend.server.v2.library.model +import backend.server.v2.store.exceptions + +logger = logging.getLogger(__name__) + + +async def get_library_agents( + user_id: str, +) -> List[backend.server.v2.library.model.LibraryAgent]: + """ + Returns all agents (AgentGraph) that belong to the user and all agents in their library (UserAgent table) + """ + logger.debug(f"Getting library agents for user {user_id}") + + try: + # Get agents created by user with nodes and links + user_created = await prisma.models.AgentGraph.prisma().find_many( + where=prisma.types.AgentGraphWhereInput(userId=user_id, isActive=True), + include=backend.data.includes.AGENT_GRAPH_INCLUDE, + ) + + # Get agents in user's library with nodes and links + library_agents = await prisma.models.UserAgent.prisma().find_many( + where=prisma.types.UserAgentWhereInput( + userId=user_id, isDeleted=False, isArchived=False + ), + include={ + "Agent": { + "include": { + "AgentNodes": { + "include": { + "Input": True, + "Output": True, + "Webhook": True, + "AgentBlock": True, + } + } + } + } + }, + ) + + # Convert to Graph models first + graphs = [] + + # Add user created agents + for agent in user_created: + try: + graphs.append(backend.data.graph.GraphModel.from_db(agent)) + except Exception as e: + logger.error(f"Error processing user created agent {agent.id}: {e}") + continue + + # Add library agents + for agent in library_agents: + if agent.Agent: + try: + graphs.append(backend.data.graph.GraphModel.from_db(agent.Agent)) + except Exception as e: + logger.error(f"Error processing library agent {agent.agentId}: {e}") + continue + + # Convert Graph models to LibraryAgent models + result = [] + for graph in graphs: + result.append( + backend.server.v2.library.model.LibraryAgent( + id=graph.id, + version=graph.version, + is_active=graph.is_active, + name=graph.name, + description=graph.description, + isCreatedByUser=any(a.id == graph.id for a in user_created), + input_schema=graph.input_schema, + output_schema=graph.output_schema, + ) + ) + + logger.debug(f"Found {len(result)} library agents") + return result + + except prisma.errors.PrismaError as e: + logger.error(f"Database error getting library agents: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch library agents" + ) from e + + +async def add_agent_to_library(store_listing_version_id: str, user_id: str) -> None: + """ + Finds the agent from the store listing version and adds it to the user's library (UserAgent table) + if they don't already have it + """ + logger.debug( + f"Adding agent from store listing version {store_listing_version_id} to library for user {user_id}" + ) + + try: + # Get store listing version to find agent + store_listing_version = ( + await prisma.models.StoreListingVersion.prisma().find_unique( + where={"id": 
store_listing_version_id}, include={"Agent": True} + ) + ) + + if not store_listing_version or not store_listing_version.Agent: + logger.warning( + f"Store listing version not found: {store_listing_version_id}" + ) + raise backend.server.v2.store.exceptions.AgentNotFoundError( + f"Store listing version {store_listing_version_id} not found" + ) + + agent = store_listing_version.Agent + + if agent.userId == user_id: + logger.warning( + f"User {user_id} cannot add their own agent to their library" + ) + raise backend.server.v2.store.exceptions.DatabaseError( + "Cannot add own agent to library" + ) + + # Check if user already has this agent + existing_user_agent = await prisma.models.UserAgent.prisma().find_first( + where={ + "userId": user_id, + "agentId": agent.id, + "agentVersion": agent.version, + } + ) + + if existing_user_agent: + logger.debug( + f"User {user_id} already has agent {agent.id} in their library" + ) + return + + # Create UserAgent entry + await prisma.models.UserAgent.prisma().create( + data=prisma.types.UserAgentCreateInput( + userId=user_id, + agentId=agent.id, + agentVersion=agent.version, + isCreatedByUser=False, + ) + ) + logger.debug(f"Added agent {agent.id} to library for user {user_id}") + + except backend.server.v2.store.exceptions.AgentNotFoundError: + raise + except prisma.errors.PrismaError as e: + logger.error(f"Database error adding agent to library: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to add agent to library" + ) from e diff --git a/autogpt_platform/backend/backend/server/v2/library/db_test.py b/autogpt_platform/backend/backend/server/v2/library/db_test.py new file mode 100644 index 000000000000..e06d4bfa9af3 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/db_test.py @@ -0,0 +1,197 @@ +from datetime import datetime + +import prisma.errors +import prisma.models +import pytest +from prisma import Prisma + +import backend.data.includes +import backend.server.v2.library.db as db +import backend.server.v2.store.exceptions + + +@pytest.fixture(autouse=True) +async def setup_prisma(): + # Don't register client if already registered + try: + Prisma() + except prisma.errors.ClientAlreadyRegisteredError: + pass + yield + + +@pytest.mark.asyncio +async def test_get_library_agents(mocker): + # Mock data + mock_user_created = [ + prisma.models.AgentGraph( + id="agent1", + version=1, + name="Test Agent 1", + description="Test Description 1", + userId="test-user", + isActive=True, + createdAt=datetime.now(), + isTemplate=False, + ) + ] + + mock_library_agents = [ + prisma.models.UserAgent( + id="ua1", + userId="test-user", + agentId="agent2", + agentVersion=1, + isCreatedByUser=False, + isDeleted=False, + isArchived=False, + createdAt=datetime.now(), + updatedAt=datetime.now(), + isFavorite=False, + Agent=prisma.models.AgentGraph( + id="agent2", + version=1, + name="Test Agent 2", + description="Test Description 2", + userId="other-user", + isActive=True, + createdAt=datetime.now(), + isTemplate=False, + ), + ) + ] + + # Mock prisma calls + mock_agent_graph = mocker.patch("prisma.models.AgentGraph.prisma") + mock_agent_graph.return_value.find_many = mocker.AsyncMock( + return_value=mock_user_created + ) + + mock_user_agent = mocker.patch("prisma.models.UserAgent.prisma") + mock_user_agent.return_value.find_many = mocker.AsyncMock( + return_value=mock_library_agents + ) + + # Call function + result = await db.get_library_agents("test-user") + + # Verify results + assert len(result) == 2 + assert result[0].id 
== "agent1" + assert result[0].name == "Test Agent 1" + assert result[0].description == "Test Description 1" + assert result[0].isCreatedByUser is True + assert result[1].id == "agent2" + assert result[1].name == "Test Agent 2" + assert result[1].description == "Test Description 2" + assert result[1].isCreatedByUser is False + + # Verify mocks called correctly + mock_agent_graph.return_value.find_many.assert_called_once_with( + where=prisma.types.AgentGraphWhereInput(userId="test-user", isActive=True), + include=backend.data.includes.AGENT_GRAPH_INCLUDE, + ) + mock_user_agent.return_value.find_many.assert_called_once_with( + where=prisma.types.UserAgentWhereInput( + userId="test-user", isDeleted=False, isArchived=False + ), + include={ + "Agent": { + "include": { + "AgentNodes": { + "include": { + "Input": True, + "Output": True, + "Webhook": True, + "AgentBlock": True, + } + } + } + } + }, + ) + + +@pytest.mark.asyncio +async def test_add_agent_to_library(mocker): + # Mock data + mock_store_listing = prisma.models.StoreListingVersion( + id="version123", + version=1, + createdAt=datetime.now(), + updatedAt=datetime.now(), + agentId="agent1", + agentVersion=1, + slug="test-agent", + name="Test Agent", + subHeading="Test Agent Subheading", + imageUrls=["https://example.com/image.jpg"], + description="Test Description", + categories=["test"], + isFeatured=False, + isDeleted=False, + isAvailable=True, + isApproved=True, + Agent=prisma.models.AgentGraph( + id="agent1", + version=1, + name="Test Agent", + description="Test Description", + userId="creator", + isActive=True, + createdAt=datetime.now(), + isTemplate=False, + ), + ) + + # Mock prisma calls + mock_store_listing_version = mocker.patch( + "prisma.models.StoreListingVersion.prisma" + ) + mock_store_listing_version.return_value.find_unique = mocker.AsyncMock( + return_value=mock_store_listing + ) + + mock_user_agent = mocker.patch("prisma.models.UserAgent.prisma") + mock_user_agent.return_value.find_first = mocker.AsyncMock(return_value=None) + mock_user_agent.return_value.create = mocker.AsyncMock() + + # Call function + await db.add_agent_to_library("version123", "test-user") + + # Verify mocks called correctly + mock_store_listing_version.return_value.find_unique.assert_called_once_with( + where={"id": "version123"}, include={"Agent": True} + ) + mock_user_agent.return_value.find_first.assert_called_once_with( + where={ + "userId": "test-user", + "agentId": "agent1", + "agentVersion": 1, + } + ) + mock_user_agent.return_value.create.assert_called_once_with( + data=prisma.types.UserAgentCreateInput( + userId="test-user", agentId="agent1", agentVersion=1, isCreatedByUser=False + ) + ) + + +@pytest.mark.asyncio +async def test_add_agent_to_library_not_found(mocker): + # Mock prisma calls + mock_store_listing_version = mocker.patch( + "prisma.models.StoreListingVersion.prisma" + ) + mock_store_listing_version.return_value.find_unique = mocker.AsyncMock( + return_value=None + ) + + # Call function and verify exception + with pytest.raises(backend.server.v2.store.exceptions.AgentNotFoundError): + await db.add_agent_to_library("version123", "test-user") + + # Verify mock called correctly + mock_store_listing_version.return_value.find_unique.assert_called_once_with( + where={"id": "version123"}, include={"Agent": True} + ) diff --git a/autogpt_platform/backend/backend/server/v2/library/model.py b/autogpt_platform/backend/backend/server/v2/library/model.py new file mode 100644 index 000000000000..88a81f6d77a0 --- /dev/null +++ 
b/autogpt_platform/backend/backend/server/v2/library/model.py @@ -0,0 +1,16 @@ +import typing + +import pydantic + + +class LibraryAgent(pydantic.BaseModel): + id: str # Changed from agent_id to match GraphMeta + version: int # Changed from agent_version to match GraphMeta + is_active: bool # Added to match GraphMeta + name: str + description: str + + isCreatedByUser: bool + # Made input_schema and output_schema match GraphMeta's type + input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend + output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend diff --git a/autogpt_platform/backend/backend/server/v2/library/model_test.py b/autogpt_platform/backend/backend/server/v2/library/model_test.py new file mode 100644 index 000000000000..81aa8fe07b23 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/model_test.py @@ -0,0 +1,43 @@ +import backend.server.v2.library.model + + +def test_library_agent(): + agent = backend.server.v2.library.model.LibraryAgent( + id="test-agent-123", + version=1, + is_active=True, + name="Test Agent", + description="Test description", + isCreatedByUser=False, + input_schema={"type": "object", "properties": {}}, + output_schema={"type": "object", "properties": {}}, + ) + assert agent.id == "test-agent-123" + assert agent.version == 1 + assert agent.is_active is True + assert agent.name == "Test Agent" + assert agent.description == "Test description" + assert agent.isCreatedByUser is False + assert agent.input_schema == {"type": "object", "properties": {}} + assert agent.output_schema == {"type": "object", "properties": {}} + + +def test_library_agent_with_user_created(): + agent = backend.server.v2.library.model.LibraryAgent( + id="user-agent-456", + version=2, + is_active=True, + name="User Created Agent", + description="An agent created by the user", + isCreatedByUser=True, + input_schema={"type": "object", "properties": {}}, + output_schema={"type": "object", "properties": {}}, + ) + assert agent.id == "user-agent-456" + assert agent.version == 2 + assert agent.is_active is True + assert agent.name == "User Created Agent" + assert agent.description == "An agent created by the user" + assert agent.isCreatedByUser is True + assert agent.input_schema == {"type": "object", "properties": {}} + assert agent.output_schema == {"type": "object", "properties": {}} diff --git a/autogpt_platform/backend/backend/server/v2/library/routes.py b/autogpt_platform/backend/backend/server/v2/library/routes.py new file mode 100644 index 000000000000..3ee8680254e2 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/routes.py @@ -0,0 +1,123 @@ +import logging +import typing + +import autogpt_libs.auth.depends +import autogpt_libs.auth.middleware +import fastapi +import prisma + +import backend.data.graph +import backend.integrations.creds_manager +import backend.integrations.webhooks.graph_lifecycle_hooks +import backend.server.v2.library.db +import backend.server.v2.library.model + +logger = logging.getLogger(__name__) + +router = fastapi.APIRouter() +integration_creds_manager = ( + backend.integrations.creds_manager.IntegrationCredentialsManager() +) + + +@router.get( + "/agents", + tags=["library", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], +) +async def get_library_agents( + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ] +) -> typing.Sequence[backend.server.v2.library.model.LibraryAgent]: + """ + Get 
all agents in the user's library, including both created and saved agents. + """ + try: + agents = await backend.server.v2.library.db.get_library_agents(user_id) + return agents + except Exception: + logger.exception("Exception occurred whilst getting library agents") + raise fastapi.HTTPException( + status_code=500, detail="Failed to get library agents" + ) + + +@router.post( + "/agents/{store_listing_version_id}", + tags=["library", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + status_code=201, +) +async def add_agent_to_library( + store_listing_version_id: str, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +) -> fastapi.Response: + """ + Add an agent from the store to the user's library. + + Args: + store_listing_version_id (str): ID of the store listing version to add + user_id (str): ID of the authenticated user + + Returns: + fastapi.Response: 201 status code on success + + Raises: + HTTPException: If there is an error adding the agent to the library + """ + try: + # Get the graph from the store listing + store_listing_version = ( + await prisma.models.StoreListingVersion.prisma().find_unique( + where={"id": store_listing_version_id}, include={"Agent": True} + ) + ) + + if not store_listing_version or not store_listing_version.Agent: + raise fastapi.HTTPException( + status_code=404, + detail=f"Store listing version {store_listing_version_id} not found", + ) + + agent = store_listing_version.Agent + + if agent.userId == user_id: + raise fastapi.HTTPException( + status_code=400, detail="Cannot add own agent to library" + ) + + # Create a new graph from the template + graph = await backend.data.graph.get_graph( + agent.id, agent.version, user_id=user_id + ) + + if not graph: + raise fastapi.HTTPException( + status_code=404, detail=f"Agent {agent.id} not found" + ) + + # Create a deep copy with new IDs + graph.version = 1 + graph.is_template = False + graph.is_active = True + graph.reassign_ids(user_id=user_id, reassign_graph_id=True) + + # Save the new graph + graph = await backend.data.graph.create_graph(graph, user_id=user_id) + graph = ( + await backend.integrations.webhooks.graph_lifecycle_hooks.on_graph_activate( + graph, + get_credentials=lambda id: integration_creds_manager.get(user_id, id), + ) + ) + + return fastapi.Response(status_code=201) + + except Exception: + logger.exception("Exception occurred whilst adding agent to library") + raise fastapi.HTTPException( + status_code=500, detail="Failed to add agent to library" + ) diff --git a/autogpt_platform/backend/backend/server/v2/library/routes_test.py b/autogpt_platform/backend/backend/server/v2/library/routes_test.py new file mode 100644 index 000000000000..d793ce13b6da --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/library/routes_test.py @@ -0,0 +1,106 @@ +import autogpt_libs.auth.depends +import autogpt_libs.auth.middleware +import fastapi +import fastapi.testclient +import pytest +import pytest_mock + +import backend.server.v2.library.db +import backend.server.v2.library.model +import backend.server.v2.library.routes + +app = fastapi.FastAPI() +app.include_router(backend.server.v2.library.routes.router) + +client = fastapi.testclient.TestClient(app) + + +def override_auth_middleware(): + """Override auth middleware for testing""" + return {"sub": "test-user-id"} + + +def override_get_user_id(): + """Override get_user_id for testing""" + return "test-user-id" + + 
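+# Wire the fake dependencies above into the app so the routes under test resolve
+# auth to the stubbed user instead of real authentication.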
+app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = ( + override_auth_middleware +) +app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id + + +def test_get_library_agents_success(mocker: pytest_mock.MockFixture): + mocked_value = [ + backend.server.v2.library.model.LibraryAgent( + id="test-agent-1", + version=1, + is_active=True, + name="Test Agent 1", + description="Test Description 1", + isCreatedByUser=True, + input_schema={"type": "object", "properties": {}}, + output_schema={"type": "object", "properties": {}}, + ), + backend.server.v2.library.model.LibraryAgent( + id="test-agent-2", + version=1, + is_active=True, + name="Test Agent 2", + description="Test Description 2", + isCreatedByUser=False, + input_schema={"type": "object", "properties": {}}, + output_schema={"type": "object", "properties": {}}, + ), + ] + mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents") + mock_db_call.return_value = mocked_value + + response = client.get("/agents") + assert response.status_code == 200 + + data = [ + backend.server.v2.library.model.LibraryAgent.model_validate(agent) + for agent in response.json() + ] + assert len(data) == 2 + assert data[0].id == "test-agent-1" + assert data[0].isCreatedByUser is True + assert data[1].id == "test-agent-2" + assert data[1].isCreatedByUser is False + mock_db_call.assert_called_once_with("test-user-id") + + +def test_get_library_agents_error(mocker: pytest_mock.MockFixture): + mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents") + mock_db_call.side_effect = Exception("Test error") + + response = client.get("/agents") + assert response.status_code == 500 + mock_db_call.assert_called_once_with("test-user-id") + + +@pytest.mark.skip(reason="Mocker Not implemented") +def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture): + mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library") + mock_db_call.return_value = None + + response = client.post("/agents/test-version-id") + assert response.status_code == 201 + mock_db_call.assert_called_once_with( + store_listing_version_id="test-version-id", user_id="test-user-id" + ) + + +@pytest.mark.skip(reason="Mocker Not implemented") +def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture): + mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library") + mock_db_call.side_effect = Exception("Test error") + + response = client.post("/agents/test-version-id") + assert response.status_code == 500 + assert response.json()["detail"] == "Failed to add agent to library" + mock_db_call.assert_called_once_with( + store_listing_version_id="test-version-id", user_id="test-user-id" + ) diff --git a/autogpt_platform/backend/backend/server/v2/store/README.md b/autogpt_platform/backend/backend/server/v2/store/README.md new file mode 100644 index 000000000000..90d41e8d7164 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/README.md @@ -0,0 +1,53 @@ +# Store Module + +This module implements the backend API for the AutoGPT Store, handling agents, creators, profiles, submissions and media uploads. 
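+
+A minimal sketch (illustrative, not part of this module's code) of paging through featured agents via the db layer; it assumes the database client is already connected, as the server lifespan in `rest_api.py` does:
+
+```python
+import asyncio
+
+import backend.server.v2.store.db as store_db
+
+
+async def main() -> None:
+    # First page of featured agents, most-run first
+    page = await store_db.get_store_agents(
+        featured=True, sorted_by="runs", page=1, page_size=20
+    )
+    for agent in page.agents:
+        print(agent.slug, agent.runs, agent.rating)
+    print(page.pagination)
+
+
+asyncio.run(main())
+```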
+ +## Files + +### routes.py +Contains the FastAPI route handlers for the store API endpoints: + +- Profile endpoints for managing user profiles +- Agent endpoints for browsing and retrieving store agents +- Creator endpoints for browsing and retrieving creator details +- Store submission endpoints for submitting agents to the store +- Media upload endpoints for submission images/videos + +### model.py +Contains Pydantic models for request/response validation and serialization: + +- Pagination model for paginated responses +- Models for agents, creators, profiles, submissions +- Request/response models for all API endpoints + +### db.py +Contains database access functions using Prisma ORM: + +- Functions to query and manipulate store data +- Handles database operations for all API endpoints +- Implements business logic and data validation + +### media.py +Handles media file uploads to Google Cloud Storage: + +- Validates file types and sizes +- Processes image and video uploads +- Stores files in GCS buckets +- Returns public URLs for uploaded media + +## Key Features + +- Paginated listings of store agents and creators +- Search and filtering of agents and creators +- Agent submission workflow +- Media file upload handling +- Profile management +- Reviews and ratings + +## Authentication + +Most endpoints require authentication via the AutoGPT auth middleware. Public endpoints are marked with the "public" tag. + +## Error Handling + +All database and storage operations include proper error handling and logging. Errors are mapped to appropriate HTTP status codes. diff --git a/autogpts/autogpt/autogpt/processing/__init__.py b/autogpt_platform/backend/backend/server/v2/store/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/processing/__init__.py rename to autogpt_platform/backend/backend/server/v2/store/__init__.py diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py new file mode 100644 index 000000000000..e23c09afc3f2 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/db.py @@ -0,0 +1,911 @@ +import logging +import random +from datetime import datetime +from typing import Optional + +import fastapi +import prisma.enums +import prisma.errors +import prisma.models +import prisma.types + +import backend.data.graph +import backend.server.v2.store.exceptions +import backend.server.v2.store.model +from backend.data.graph import GraphModel + +logger = logging.getLogger(__name__) + + +async def get_store_agents( + featured: bool = False, + creator: str | None = None, + sorted_by: str | None = None, + search_query: str | None = None, + category: str | None = None, + page: int = 1, + page_size: int = 20, +) -> backend.server.v2.store.model.StoreAgentsResponse: + logger.debug( + f"Getting store agents. 
featured={featured}, creator={creator}, sorted_by={sorted_by}, search={search_query}, category={category}, page={page}" + ) + sanitized_query = None + # Sanitize and validate search query by escaping special characters + if search_query is not None: + sanitized_query = search_query.strip() + if not sanitized_query or len(sanitized_query) > 100: # Reasonable length limit + raise backend.server.v2.store.exceptions.DatabaseError( + f"Invalid search query: len({len(sanitized_query)}) query: {search_query}" + ) + + # Escape special SQL characters + sanitized_query = ( + sanitized_query.replace("\\", "\\\\") + .replace("%", "\\%") + .replace("_", "\\_") + .replace("[", "\\[") + .replace("]", "\\]") + .replace("'", "\\'") + .replace('"', '\\"') + .replace(";", "\\;") + .replace("--", "\\--") + .replace("/*", "\\/*") + .replace("*/", "\\*/") + ) + + where_clause = {} + if featured: + where_clause["featured"] = featured + if creator: + where_clause["creator_username"] = creator + if category: + where_clause["categories"] = {"has": category} + + if sanitized_query: + where_clause["OR"] = [ + {"agent_name": {"contains": sanitized_query, "mode": "insensitive"}}, + {"description": {"contains": sanitized_query, "mode": "insensitive"}}, + ] + + order_by = [] + if sorted_by == "rating": + order_by.append({"rating": "desc"}) + elif sorted_by == "runs": + order_by.append({"runs": "desc"}) + elif sorted_by == "name": + order_by.append({"agent_name": "asc"}) + + try: + agents = await prisma.models.StoreAgent.prisma().find_many( + where=prisma.types.StoreAgentWhereInput(**where_clause), + order=order_by, + skip=(page - 1) * page_size, + take=page_size, + ) + + total = await prisma.models.StoreAgent.prisma().count( + where=prisma.types.StoreAgentWhereInput(**where_clause) + ) + total_pages = (total + page_size - 1) // page_size + + store_agents = [ + backend.server.v2.store.model.StoreAgent( + slug=agent.slug, + agent_name=agent.agent_name, + agent_image=agent.agent_image[0] if agent.agent_image else "", + creator=agent.creator_username, + creator_avatar=agent.creator_avatar, + sub_heading=agent.sub_heading, + description=agent.description, + runs=agent.runs, + rating=agent.rating, + ) + for agent in agents + ] + + logger.debug(f"Found {len(store_agents)} agents") + return backend.server.v2.store.model.StoreAgentsResponse( + agents=store_agents, + pagination=backend.server.v2.store.model.Pagination( + current_page=page, + total_items=total, + total_pages=total_pages, + page_size=page_size, + ), + ) + except Exception as e: + logger.error(f"Error getting store agents: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch store agents" + ) from e + + +async def get_store_agent_details( + username: str, agent_name: str +) -> backend.server.v2.store.model.StoreAgentDetails: + logger.debug(f"Getting store agent details for {username}/{agent_name}") + + try: + agent = await prisma.models.StoreAgent.prisma().find_first( + where={"creator_username": username, "slug": agent_name} + ) + + if not agent: + logger.warning(f"Agent not found: {username}/{agent_name}") + raise backend.server.v2.store.exceptions.AgentNotFoundError( + f"Agent {username}/{agent_name} not found" + ) + + logger.debug(f"Found agent details for {username}/{agent_name}") + return backend.server.v2.store.model.StoreAgentDetails( + store_listing_version_id=agent.storeListingVersionId, + slug=agent.slug, + agent_name=agent.agent_name, + agent_video=agent.agent_video or "", + agent_image=agent.agent_image, + 
creator=agent.creator_username, + creator_avatar=agent.creator_avatar, + sub_heading=agent.sub_heading, + description=agent.description, + categories=agent.categories, + runs=agent.runs, + rating=agent.rating, + versions=agent.versions, + last_updated=agent.updated_at, + ) + except backend.server.v2.store.exceptions.AgentNotFoundError: + raise + except Exception as e: + logger.error(f"Error getting store agent details: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch agent details" + ) from e + + +async def get_store_creators( + featured: bool = False, + search_query: str | None = None, + sorted_by: str | None = None, + page: int = 1, + page_size: int = 20, +) -> backend.server.v2.store.model.CreatorsResponse: + logger.debug( + f"Getting store creators. featured={featured}, search={search_query}, sorted_by={sorted_by}, page={page}" + ) + + # Build where clause with sanitized inputs + where = {} + + if featured: + where["is_featured"] = featured + + # Add search filter if provided, using parameterized queries + if search_query: + # Sanitize and validate search query by escaping special characters + sanitized_query = search_query.strip() + if not sanitized_query or len(sanitized_query) > 100: # Reasonable length limit + raise backend.server.v2.store.exceptions.DatabaseError( + "Invalid search query" + ) + + # Escape special SQL characters + sanitized_query = ( + sanitized_query.replace("\\", "\\\\") + .replace("%", "\\%") + .replace("_", "\\_") + .replace("[", "\\[") + .replace("]", "\\]") + .replace("'", "\\'") + .replace('"', '\\"') + .replace(";", "\\;") + .replace("--", "\\--") + .replace("/*", "\\/*") + .replace("*/", "\\*/") + ) + + where["OR"] = [ + {"username": {"contains": sanitized_query, "mode": "insensitive"}}, + {"name": {"contains": sanitized_query, "mode": "insensitive"}}, + {"description": {"contains": sanitized_query, "mode": "insensitive"}}, + ] + + try: + # Validate pagination parameters + if not isinstance(page, int) or page < 1: + raise backend.server.v2.store.exceptions.DatabaseError( + "Invalid page number" + ) + if not isinstance(page_size, int) or page_size < 1 or page_size > 100: + raise backend.server.v2.store.exceptions.DatabaseError("Invalid page size") + + # Get total count for pagination using sanitized where clause + total = await prisma.models.Creator.prisma().count( + where=prisma.types.CreatorWhereInput(**where) + ) + total_pages = (total + page_size - 1) // page_size + + # Add pagination with validated parameters + skip = (page - 1) * page_size + take = page_size + + # Add sorting with validated sort parameter + order = [] + valid_sort_fields = {"agent_rating", "agent_runs", "num_agents"} + if sorted_by in valid_sort_fields: + order.append({sorted_by: "desc"}) + else: + order.append({"username": "asc"}) + + # Execute query with sanitized parameters + creators = await prisma.models.Creator.prisma().find_many( + where=prisma.types.CreatorWhereInput(**where), + skip=skip, + take=take, + order=order, + ) + + # Convert to response model + creator_models = [ + backend.server.v2.store.model.Creator( + username=creator.username, + name=creator.name, + description=creator.description, + avatar_url=creator.avatar_url, + num_agents=creator.num_agents, + agent_rating=creator.agent_rating, + agent_runs=creator.agent_runs, + is_featured=creator.is_featured, + ) + for creator in creators + ] + + logger.debug(f"Found {len(creator_models)} creators") + return backend.server.v2.store.model.CreatorsResponse( + creators=creator_models, + 
pagination=backend.server.v2.store.model.Pagination( + current_page=page, + total_items=total, + total_pages=total_pages, + page_size=page_size, + ), + ) + except Exception as e: + logger.error(f"Error getting store creators: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch store creators" + ) from e + + +async def get_store_creator_details( + username: str, +) -> backend.server.v2.store.model.CreatorDetails: + logger.debug(f"Getting store creator details for {username}") + + try: + # Query creator details from database + creator = await prisma.models.Creator.prisma().find_unique( + where={"username": username} + ) + + if not creator: + logger.warning(f"Creator not found: {username}") + raise backend.server.v2.store.exceptions.CreatorNotFoundError( + f"Creator {username} not found" + ) + + logger.debug(f"Found creator details for {username}") + return backend.server.v2.store.model.CreatorDetails( + name=creator.name, + username=creator.username, + description=creator.description, + links=creator.links, + avatar_url=creator.avatar_url, + agent_rating=creator.agent_rating, + agent_runs=creator.agent_runs, + top_categories=creator.top_categories, + ) + except backend.server.v2.store.exceptions.CreatorNotFoundError: + raise + except Exception as e: + logger.error(f"Error getting store creator details: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch creator details" + ) from e + + +async def get_store_submissions( + user_id: str, page: int = 1, page_size: int = 20 +) -> backend.server.v2.store.model.StoreSubmissionsResponse: + logger.debug(f"Getting store submissions for user {user_id}, page={page}") + + try: + # Calculate pagination values + skip = (page - 1) * page_size + + where = prisma.types.StoreSubmissionWhereInput(user_id=user_id) + # Query submissions from database + submissions = await prisma.models.StoreSubmission.prisma().find_many( + where=where, + skip=skip, + take=page_size, + order=[{"date_submitted": "desc"}], + ) + + # Get total count for pagination + total = await prisma.models.StoreSubmission.prisma().count(where=where) + + total_pages = (total + page_size - 1) // page_size + + # Convert to response models + submission_models = [ + backend.server.v2.store.model.StoreSubmission( + agent_id=sub.agent_id, + agent_version=sub.agent_version, + name=sub.name, + sub_heading=sub.sub_heading, + slug=sub.slug, + description=sub.description, + image_urls=sub.image_urls or [], + date_submitted=sub.date_submitted or datetime.now(), + status=sub.status, + runs=sub.runs or 0, + rating=sub.rating or 0.0, + ) + for sub in submissions + ] + + logger.debug(f"Found {len(submission_models)} submissions") + return backend.server.v2.store.model.StoreSubmissionsResponse( + submissions=submission_models, + pagination=backend.server.v2.store.model.Pagination( + current_page=page, + total_items=total, + total_pages=total_pages, + page_size=page_size, + ), + ) + + except Exception as e: + logger.error(f"Error fetching store submissions: {str(e)}") + # Return empty response rather than exposing internal errors + return backend.server.v2.store.model.StoreSubmissionsResponse( + submissions=[], + pagination=backend.server.v2.store.model.Pagination( + current_page=page, + total_items=0, + total_pages=0, + page_size=page_size, + ), + ) + + +async def delete_store_submission( + user_id: str, + submission_id: str, +) -> bool: + """ + Delete a store listing submission. 
+ + Args: + user_id: ID of the authenticated user + submission_id: ID of the submission to be deleted + + Returns: + bool: True if the submission was successfully deleted, False otherwise + """ + logger.debug(f"Deleting store submission {submission_id} for user {user_id}") + + try: + # Verify the submission belongs to this user + submission = await prisma.models.StoreListing.prisma().find_first( + where={"agentId": submission_id, "owningUserId": user_id} + ) + + if not submission: + logger.warning(f"Submission not found for user {user_id}: {submission_id}") + raise backend.server.v2.store.exceptions.SubmissionNotFoundError( + f"Submission not found for this user. User ID: {user_id}, Submission ID: {submission_id}" + ) + + # Delete the submission + await prisma.models.StoreListing.prisma().delete(where={"id": submission.id}) + + logger.debug( + f"Successfully deleted submission {submission_id} for user {user_id}" + ) + return True + + except Exception as e: + logger.error(f"Error deleting store submission: {str(e)}") + return False + + +async def create_store_submission( + user_id: str, + agent_id: str, + agent_version: int, + slug: str, + name: str, + video_url: str | None = None, + image_urls: list[str] = [], + description: str = "", + sub_heading: str = "", + categories: list[str] = [], +) -> backend.server.v2.store.model.StoreSubmission: + """ + Create a new store listing submission. + + Args: + user_id: ID of the authenticated user submitting the listing + agent_id: ID of the agent being submitted + agent_version: Version of the agent being submitted + slug: URL slug for the listing + name: Name of the agent + video_url: Optional URL to video demo + image_urls: List of image URLs for the listing + description: Description of the agent + categories: List of categories for the agent + + Returns: + StoreSubmission: The created store submission + """ + logger.debug( + f"Creating store submission for user {user_id}, agent {agent_id} v{agent_version}" + ) + + try: + # Sanitize slug to only allow letters and hyphens + slug = "".join( + c if c.isalpha() or c == "-" or c.isnumeric() else "" for c in slug + ).lower() + + # First verify the agent belongs to this user + agent = await prisma.models.AgentGraph.prisma().find_first( + where=prisma.types.AgentGraphWhereInput( + id=agent_id, version=agent_version, userId=user_id + ) + ) + + if not agent: + logger.warning( + f"Agent not found for user {user_id}: {agent_id} v{agent_version}" + ) + raise backend.server.v2.store.exceptions.AgentNotFoundError( + f"Agent not found for this user. 
User ID: {user_id}, Agent ID: {agent_id}, Version: {agent_version}" + ) + + listing = await prisma.models.StoreListing.prisma().find_first( + where=prisma.types.StoreListingWhereInput( + agentId=agent_id, owningUserId=user_id + ) + ) + if listing is not None: + logger.warning(f"Listing already exists for agent {agent_id}") + raise backend.server.v2.store.exceptions.ListingExistsError( + "Listing already exists for this agent" + ) + + # Create the store listing + listing = await prisma.models.StoreListing.prisma().create( + data={ + "agentId": agent_id, + "agentVersion": agent_version, + "owningUserId": user_id, + "createdAt": datetime.now(), + "StoreListingVersions": { + "create": { + "agentId": agent_id, + "agentVersion": agent_version, + "slug": slug, + "name": name, + "videoUrl": video_url, + "imageUrls": image_urls, + "description": description, + "categories": categories, + "subHeading": sub_heading, + } + }, + }, + include={"StoreListingVersions": True}, + ) + + store_listing_version_id = ( + listing.StoreListingVersions[0].id + if listing.StoreListingVersions is not None + and len(listing.StoreListingVersions) > 0 + else None + ) + + logger.debug(f"Created store listing for agent {agent_id}") + # Return submission details + return backend.server.v2.store.model.StoreSubmission( + agent_id=agent_id, + agent_version=agent_version, + name=name, + slug=slug, + sub_heading=sub_heading, + description=description, + image_urls=image_urls, + date_submitted=listing.createdAt, + status=prisma.enums.SubmissionStatus.PENDING, + runs=0, + rating=0.0, + store_listing_version_id=store_listing_version_id, + ) + + except ( + backend.server.v2.store.exceptions.AgentNotFoundError, + backend.server.v2.store.exceptions.ListingExistsError, + ): + raise + except prisma.errors.PrismaError as e: + logger.error(f"Database error creating store submission: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to create store submission" + ) from e + + +async def create_store_review( + user_id: str, + store_listing_version_id: str, + score: int, + comments: str | None = None, +) -> backend.server.v2.store.model.StoreReview: + try: + review = await prisma.models.StoreListingReview.prisma().upsert( + where={ + "storeListingVersionId_reviewByUserId": { + "storeListingVersionId": store_listing_version_id, + "reviewByUserId": user_id, + } + }, + data={ + "create": { + "reviewByUserId": user_id, + "storeListingVersionId": store_listing_version_id, + "score": score, + "comments": comments, + }, + "update": { + "score": score, + "comments": comments, + }, + }, + ) + + return backend.server.v2.store.model.StoreReview( + score=review.score, + comments=review.comments, + ) + + except prisma.errors.PrismaError as e: + logger.error(f"Database error creating store review: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to create store review" + ) from e + + +async def get_user_profile( + user_id: str, +) -> backend.server.v2.store.model.ProfileDetails: + logger.debug(f"Getting user profile for {user_id}") + + try: + profile = await prisma.models.Profile.prisma().find_first( + where={"userId": user_id} # type: ignore + ) + + if not profile: + logger.warning(f"Profile not found for user {user_id}") + new_profile = await prisma.models.Profile.prisma().create( + data=prisma.types.ProfileCreateInput( + userId=user_id, + name="No Profile Data", + username=f"{random.choice(['happy', 'clever', 'swift', 'bright', 'wise'])}-{random.choice(['fox', 'wolf', 'bear', 'eagle', 
'owl'])}_{random.randint(1000,9999)}".lower(), + description="No Profile Data", + links=[], + avatarUrl="", + ) + ) + return backend.server.v2.store.model.ProfileDetails( + name=new_profile.name, + username=new_profile.username, + description=new_profile.description, + links=new_profile.links, + avatar_url=new_profile.avatarUrl, + ) + + return backend.server.v2.store.model.ProfileDetails( + name=profile.name, + username=profile.username, + description=profile.description, + links=profile.links, + avatar_url=profile.avatarUrl, + ) + except Exception as e: + logger.error(f"Error getting user profile: {str(e)}") + return backend.server.v2.store.model.ProfileDetails( + name="No Profile Data", + username="No Profile Data", + description="No Profile Data", + links=[], + avatar_url="", + ) + + +async def update_or_create_profile( + user_id: str, profile: backend.server.v2.store.model.Profile +) -> backend.server.v2.store.model.CreatorDetails: + """ + Update the store profile for a user. Creates a new profile if one doesn't exist. + Only allows updating if the user_id matches the owning user. + If a field is None, it will not overwrite the existing value in the case of an update. + + Args: + user_id: ID of the authenticated user + profile: Updated profile details + + Returns: + CreatorDetails: The updated profile + + Raises: + HTTPException: If user is not authorized to update this profile + DatabaseError: If profile cannot be updated due to database issues + """ + logger.info(f"Updating profile for user {user_id} data: {profile}") + + try: + # Sanitize username to only allow letters and hyphens + username = "".join( + c if c.isalpha() or c == "-" or c.isnumeric() else "" + for c in profile.username + ).lower() + + existing_profile = await prisma.models.Profile.prisma().find_first( + where={"userId": user_id} + ) + + # If no profile exists, create a new one + if not existing_profile: + logger.debug( + f"No existing profile found. 
Creating new profile for user {user_id}" + ) + # Create new profile since one doesn't exist + new_profile = await prisma.models.Profile.prisma().create( + data={ + "userId": user_id, + "name": profile.name, + "username": username, + "description": profile.description, + "links": profile.links or [], + "avatarUrl": profile.avatar_url, + "isFeatured": False, + } + ) + + return backend.server.v2.store.model.CreatorDetails( + name=new_profile.name, + username=new_profile.username, + description=new_profile.description, + links=new_profile.links, + avatar_url=new_profile.avatarUrl or "", + agent_rating=0.0, + agent_runs=0, + top_categories=[], + ) + else: + logger.debug(f"Updating existing profile for user {user_id}") + # Update only provided fields for the existing profile + update_data = {} + if profile.name is not None: + update_data["name"] = profile.name + if profile.username is not None: + update_data["username"] = username + if profile.description is not None: + update_data["description"] = profile.description + if profile.links is not None: + update_data["links"] = profile.links + if profile.avatar_url is not None: + update_data["avatarUrl"] = profile.avatar_url + + # Update the existing profile + updated_profile = await prisma.models.Profile.prisma().update( + where={"id": existing_profile.id}, + data=prisma.types.ProfileUpdateInput(**update_data), + ) + if updated_profile is None: + logger.error(f"Failed to update profile for user {user_id}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to update profile" + ) + + return backend.server.v2.store.model.CreatorDetails( + name=updated_profile.name, + username=updated_profile.username, + description=updated_profile.description, + links=updated_profile.links, + avatar_url=updated_profile.avatarUrl or "", + agent_rating=0.0, + agent_runs=0, + top_categories=[], + ) + + except prisma.errors.PrismaError as e: + logger.error(f"Database error updating profile: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to update profile" + ) from e + + +async def get_my_agents( + user_id: str, + page: int = 1, + page_size: int = 20, +) -> backend.server.v2.store.model.MyAgentsResponse: + logger.debug(f"Getting my agents for user {user_id}, page={page}") + + try: + agents_with_max_version = await prisma.models.AgentGraph.prisma().find_many( + where=prisma.types.AgentGraphWhereInput( + userId=user_id, StoreListing={"none": {"isDeleted": False}} + ), + order=[{"version": "desc"}], + distinct=["id"], + skip=(page - 1) * page_size, + take=page_size, + ) + + # store_listings = await prisma.models.StoreListing.prisma().find_many( + # where=prisma.types.StoreListingWhereInput( + # isDeleted=False, + # ), + # ) + + total = len( + await prisma.models.AgentGraph.prisma().find_many( + where=prisma.types.AgentGraphWhereInput( + userId=user_id, StoreListing={"none": {"isDeleted": False}} + ), + order=[{"version": "desc"}], + distinct=["id"], + ) + ) + + total_pages = (total + page_size - 1) // page_size + + agents = agents_with_max_version + + my_agents = [ + backend.server.v2.store.model.MyAgent( + agent_id=agent.id, + agent_version=agent.version, + agent_name=agent.name or "", + last_edited=agent.updatedAt or agent.createdAt, + description=agent.description or "", + ) + for agent in agents + ] + + return backend.server.v2.store.model.MyAgentsResponse( + agents=my_agents, + pagination=backend.server.v2.store.model.Pagination( + current_page=page, + total_items=total, + total_pages=total_pages, + page_size=page_size, + 
), + ) + except Exception as e: + logger.error(f"Error getting my agents: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch my agents" + ) from e + + +async def get_agent( + store_listing_version_id: str, version_id: Optional[int] +) -> GraphModel: + """Get agent using the version ID and store listing version ID.""" + try: + store_listing_version = ( + await prisma.models.StoreListingVersion.prisma().find_unique( + where={"id": store_listing_version_id} + ) + ) + + if not store_listing_version or not store_listing_version.Agent: + raise fastapi.HTTPException( + status_code=404, + detail=f"Store listing version {store_listing_version_id} not found", + ) + + graph_id = store_listing_version.agentId + graph_version = store_listing_version.agentVersion + graph = await backend.data.graph.get_graph(graph_id, graph_version) + + if not graph: + raise fastapi.HTTPException( + status_code=404, + detail=( + f"Agent #{graph_id} not found " + f"for store listing version #{store_listing_version_id}" + ), + ) + + graph.version = 1 + graph.is_template = False + graph.is_active = True + delattr(graph, "user_id") + + return graph + + except Exception as e: + logger.error(f"Error getting agent: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to fetch agent" + ) from e + + +async def review_store_submission( + store_listing_version_id: str, is_approved: bool, comments: str, reviewer_id: str +) -> prisma.models.StoreListingSubmission: + """Review a store listing submission.""" + try: + store_listing_version = ( + await prisma.models.StoreListingVersion.prisma().find_unique( + where={"id": store_listing_version_id}, + include={"StoreListing": True}, + ) + ) + + if not store_listing_version or not store_listing_version.StoreListing: + raise fastapi.HTTPException( + status_code=404, + detail=f"Store listing version {store_listing_version_id} not found", + ) + + if is_approved: + await prisma.models.StoreListing.prisma().update( + where={"id": store_listing_version.StoreListing.id}, + data={"isApproved": True}, + ) + + submission_status = ( + prisma.enums.SubmissionStatus.APPROVED + if is_approved + else prisma.enums.SubmissionStatus.REJECTED + ) + + update_data: prisma.types.StoreListingSubmissionUpdateInput = { + "Status": submission_status, + "reviewComments": comments, + "Reviewer": {"connect": {"id": reviewer_id}}, + "StoreListing": {"connect": {"id": store_listing_version.StoreListing.id}}, + } + + create_data: prisma.types.StoreListingSubmissionCreateInput = { + **update_data, + "StoreListingVersion": {"connect": {"id": store_listing_version_id}}, + } + + submission = await prisma.models.StoreListingSubmission.prisma().upsert( + where={"storeListingVersionId": store_listing_version_id}, + data={ + "create": create_data, + "update": update_data, + }, + ) + + if not submission: + raise fastapi.HTTPException( # FIXME: don't return HTTP exceptions here + status_code=404, + detail=f"Store listing submission {store_listing_version_id} not found", + ) + + return submission + + except Exception as e: + logger.error(f"Could not create store submission review: {str(e)}") + raise backend.server.v2.store.exceptions.DatabaseError( + "Failed to create store submission review" + ) from e diff --git a/autogpt_platform/backend/backend/server/v2/store/db_test.py b/autogpt_platform/backend/backend/server/v2/store/db_test.py new file mode 100644 index 000000000000..24a068017819 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/db_test.py @@ 
-0,0 +1,264 @@ +from datetime import datetime + +import prisma.errors +import prisma.models +import pytest +from prisma import Prisma + +import backend.server.v2.store.db as db +from backend.server.v2.store.model import Profile + + +@pytest.fixture(autouse=True) +async def setup_prisma(): + # Don't register client if already registered + try: + Prisma() + except prisma.errors.ClientAlreadyRegisteredError: + pass + yield + + +@pytest.mark.asyncio +async def test_get_store_agents(mocker): + # Mock data + mock_agents = [ + prisma.models.StoreAgent( + listing_id="test-id", + storeListingVersionId="version123", + slug="test-agent", + agent_name="Test Agent", + agent_video=None, + agent_image=["image.jpg"], + featured=False, + creator_username="creator", + creator_avatar="avatar.jpg", + sub_heading="Test heading", + description="Test description", + categories=[], + runs=10, + rating=4.5, + versions=["1.0"], + updated_at=datetime.now(), + ) + ] + + # Mock prisma calls + mock_store_agent = mocker.patch("prisma.models.StoreAgent.prisma") + mock_store_agent.return_value.find_many = mocker.AsyncMock(return_value=mock_agents) + mock_store_agent.return_value.count = mocker.AsyncMock(return_value=1) + + # Call function + result = await db.get_store_agents() + + # Verify results + assert len(result.agents) == 1 + assert result.agents[0].slug == "test-agent" + assert result.pagination.total_items == 1 + + # Verify mocks called correctly + mock_store_agent.return_value.find_many.assert_called_once() + mock_store_agent.return_value.count.assert_called_once() + + +@pytest.mark.asyncio +async def test_get_store_agent_details(mocker): + # Mock data + mock_agent = prisma.models.StoreAgent( + listing_id="test-id", + storeListingVersionId="version123", + slug="test-agent", + agent_name="Test Agent", + agent_video="video.mp4", + agent_image=["image.jpg"], + featured=False, + creator_username="creator", + creator_avatar="avatar.jpg", + sub_heading="Test heading", + description="Test description", + categories=["test"], + runs=10, + rating=4.5, + versions=["1.0"], + updated_at=datetime.now(), + ) + + # Mock prisma call + mock_store_agent = mocker.patch("prisma.models.StoreAgent.prisma") + mock_store_agent.return_value.find_first = mocker.AsyncMock(return_value=mock_agent) + + # Call function + result = await db.get_store_agent_details("creator", "test-agent") + + # Verify results + assert result.slug == "test-agent" + assert result.agent_name == "Test Agent" + + # Verify mock called correctly + mock_store_agent.return_value.find_first.assert_called_once_with( + where={"creator_username": "creator", "slug": "test-agent"} + ) + + +@pytest.mark.asyncio +async def test_get_store_creator_details(mocker): + # Mock data + mock_creator_data = prisma.models.Creator( + name="Test Creator", + username="creator", + description="Test description", + links=["link1"], + avatar_url="avatar.jpg", + num_agents=1, + agent_rating=4.5, + agent_runs=10, + top_categories=["test"], + is_featured=False, + ) + + # Mock prisma call + mock_creator = mocker.patch("prisma.models.Creator.prisma") + mock_creator.return_value.find_unique = mocker.AsyncMock() + # Configure the mock to return values that will pass validation + mock_creator.return_value.find_unique.return_value = mock_creator_data + + # Call function + result = await db.get_store_creator_details("creator") + + # Verify results + assert result.username == "creator" + assert result.name == "Test Creator" + assert result.description == "Test description" + assert result.avatar_url == 
"avatar.jpg" + + # Verify mock called correctly + mock_creator.return_value.find_unique.assert_called_once_with( + where={"username": "creator"} + ) + + +@pytest.mark.asyncio +async def test_create_store_submission(mocker): + # Mock data + mock_agent = prisma.models.AgentGraph( + id="agent-id", + version=1, + userId="user-id", + createdAt=datetime.now(), + isActive=True, + isTemplate=False, + ) + + mock_listing = prisma.models.StoreListing( + id="listing-id", + createdAt=datetime.now(), + updatedAt=datetime.now(), + isDeleted=False, + isApproved=False, + agentId="agent-id", + agentVersion=1, + owningUserId="user-id", + ) + + # Mock prisma calls + mock_agent_graph = mocker.patch("prisma.models.AgentGraph.prisma") + mock_agent_graph.return_value.find_first = mocker.AsyncMock(return_value=mock_agent) + + mock_store_listing = mocker.patch("prisma.models.StoreListing.prisma") + mock_store_listing.return_value.find_first = mocker.AsyncMock(return_value=None) + mock_store_listing.return_value.create = mocker.AsyncMock(return_value=mock_listing) + + # Call function + result = await db.create_store_submission( + user_id="user-id", + agent_id="agent-id", + agent_version=1, + slug="test-agent", + name="Test Agent", + description="Test description", + ) + + # Verify results + assert result.name == "Test Agent" + assert result.description == "Test description" + + # Verify mocks called correctly + mock_agent_graph.return_value.find_first.assert_called_once() + mock_store_listing.return_value.find_first.assert_called_once() + mock_store_listing.return_value.create.assert_called_once() + + +@pytest.mark.asyncio +async def test_update_profile(mocker): + # Mock data + mock_profile = prisma.models.Profile( + id="profile-id", + name="Test Creator", + username="creator", + description="Test description", + links=["link1"], + avatarUrl="avatar.jpg", + isFeatured=False, + createdAt=datetime.now(), + updatedAt=datetime.now(), + ) + + # Mock prisma calls + mock_profile_db = mocker.patch("prisma.models.Profile.prisma") + mock_profile_db.return_value.find_first = mocker.AsyncMock( + return_value=mock_profile + ) + mock_profile_db.return_value.update = mocker.AsyncMock(return_value=mock_profile) + + # Test data + profile = Profile( + name="Test Creator", + username="creator", + description="Test description", + links=["link1"], + avatar_url="avatar.jpg", + is_featured=False, + ) + + # Call function + result = await db.update_or_create_profile("user-id", profile) + + # Verify results + assert result.username == "creator" + assert result.name == "Test Creator" + + # Verify mocks called correctly + mock_profile_db.return_value.find_first.assert_called_once() + mock_profile_db.return_value.update.assert_called_once() + + +@pytest.mark.asyncio +async def test_get_user_profile(mocker): + # Mock data + mock_profile = prisma.models.Profile( + id="profile-id", + name="No Profile Data", + username="testuser", + description="Test description", + links=["link1", "link2"], + avatarUrl="avatar.jpg", + isFeatured=False, + createdAt=datetime.now(), + updatedAt=datetime.now(), + ) + + # Mock prisma calls + mock_profile_db = mocker.patch("prisma.models.Profile.prisma") + mock_profile_db.return_value.find_unique = mocker.AsyncMock( + return_value=mock_profile + ) + + # Call function + result = await db.get_user_profile("user-id") + + # Verify results + assert result.name == "No Profile Data" + assert result.username == "No Profile Data" + assert result.description == "No Profile Data" + assert result.links == [] + assert 
result.avatar_url == "" diff --git a/autogpt_platform/backend/backend/server/v2/store/exceptions.py b/autogpt_platform/backend/backend/server/v2/store/exceptions.py new file mode 100644 index 000000000000..f63264be4d94 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/exceptions.py @@ -0,0 +1,76 @@ +class MediaUploadError(Exception): + """Base exception for media upload errors""" + + pass + + +class InvalidFileTypeError(MediaUploadError): + """Raised when file type is not supported""" + + pass + + +class FileSizeTooLargeError(MediaUploadError): + """Raised when file size exceeds maximum limit""" + + pass + + +class FileReadError(MediaUploadError): + """Raised when there's an error reading the file""" + + pass + + +class StorageConfigError(MediaUploadError): + """Raised when storage configuration is invalid""" + + pass + + +class StorageUploadError(MediaUploadError): + """Raised when upload to storage fails""" + + pass + + +class StoreError(Exception): + """Base exception for store-related errors""" + + pass + + +class AgentNotFoundError(StoreError): + """Raised when an agent is not found""" + + pass + + +class CreatorNotFoundError(StoreError): + """Raised when a creator is not found""" + + pass + + +class ListingExistsError(StoreError): + """Raised when trying to create a listing that already exists""" + + pass + + +class DatabaseError(StoreError): + """Raised when there is an error interacting with the database""" + + pass + + +class ProfileNotFoundError(StoreError): + """Raised when a profile is not found""" + + pass + + +class SubmissionNotFoundError(StoreError): + """Raised when a submission is not found""" + + pass diff --git a/autogpt_platform/backend/backend/server/v2/store/image_gen.py b/autogpt_platform/backend/backend/server/v2/store/image_gen.py new file mode 100644 index 000000000000..a1a22d159248 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/image_gen.py @@ -0,0 +1,94 @@ +import io +import logging +from enum import Enum + +import replicate +import replicate.exceptions +import requests +from replicate.helpers import FileOutput + +from backend.data.graph import Graph +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) + + +class ImageSize(str, Enum): + LANDSCAPE = "1024x768" + + +class ImageStyle(str, Enum): + DIGITAL_ART = "digital art" + + +async def generate_agent_image(agent: Graph) -> io.BytesIO: + """ + Generate an image for an agent using Flux model via Replicate API. + + Args: + agent (Graph): The agent to generate an image for + + Returns: + io.BytesIO: The generated image as bytes + """ + try: + settings = Settings() + + if not settings.secrets.replicate_api_key: + raise ValueError("Missing Replicate API key in settings") + + # Construct prompt from agent details + prompt = f"Create a visually engaging app store thumbnail for the AI agent that highlights what it does in a clear and captivating way:\n- **Name**: {agent.name}\n- **Description**: {agent.description}\nFocus on showcasing its core functionality with an appealing design." 
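+
+        # Descriptive note (added): the Replicate call below targets the
+        # "black-forest-labs/flux-1.1-pro" model; the 1024x768 / 4:3 settings match the
+        # store's landscape thumbnail format (ImageSize.LANDSCAPE above).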
+ + # Set up Replicate client + client = replicate.Client(api_token=settings.secrets.replicate_api_key) + + # Model parameters + input_data = { + "prompt": prompt, + "width": 1024, + "height": 768, + "aspect_ratio": "4:3", + "output_format": "jpg", + "output_quality": 90, + "num_inference_steps": 30, + "guidance": 3.5, + "negative_prompt": "blurry, low quality, distorted, deformed", + "disable_safety_checker": True, + } + + try: + # Run model + output = client.run("black-forest-labs/flux-1.1-pro", input=input_data) + + # Depending on the model output, extract the image URL or bytes + # If the output is a list of FileOutput or URLs + if isinstance(output, list) and output: + if isinstance(output[0], FileOutput): + image_bytes = output[0].read() + else: + # If it's a URL string, fetch the image bytes + result_url = output[0] + response = requests.get(result_url) + response.raise_for_status() + image_bytes = response.content + elif isinstance(output, FileOutput): + image_bytes = output.read() + elif isinstance(output, str): + # Output is a URL + response = requests.get(output) + response.raise_for_status() + image_bytes = response.content + else: + raise RuntimeError("Unexpected output format from the model.") + + return io.BytesIO(image_bytes) + + except replicate.exceptions.ReplicateError as e: + if e.status == 401: + raise RuntimeError("Invalid Replicate API token") from e + raise RuntimeError(f"Replicate API error: {str(e)}") from e + + except Exception as e: + logger.exception("Failed to generate agent image") + raise RuntimeError(f"Image generation failed: {str(e)}") diff --git a/autogpt_platform/backend/backend/server/v2/store/media.py b/autogpt_platform/backend/backend/server/v2/store/media.py new file mode 100644 index 000000000000..3ae9f0f79911 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/media.py @@ -0,0 +1,198 @@ +import logging +import os +import uuid + +import fastapi +from google.cloud import storage + +import backend.server.v2.store.exceptions +from backend.util.settings import Settings + +logger = logging.getLogger(__name__) + +ALLOWED_IMAGE_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"} +ALLOWED_VIDEO_TYPES = {"video/mp4", "video/webm"} +MAX_FILE_SIZE = 50 * 1024 * 1024 # 50MB + + +async def check_media_exists(user_id: str, filename: str) -> str | None: + """ + Check if a media file exists in storage for the given user. + Tries both images and videos directories. 
+ + Args: + user_id (str): ID of the user who uploaded the file + filename (str): Name of the file to check + + Returns: + str | None: URL of the blob if it exists, None otherwise + """ + try: + settings = Settings() + storage_client = storage.Client() + bucket = storage_client.bucket(settings.config.media_gcs_bucket_name) + + # Check images + image_path = f"users/{user_id}/images/{filename}" + image_blob = bucket.blob(image_path) + if image_blob.exists(): + return image_blob.public_url + + # Check videos + video_path = f"users/{user_id}/videos/{filename}" + + video_blob = bucket.blob(video_path) + if video_blob.exists(): + return video_blob.public_url + + return None + except Exception as e: + logger.error(f"Error checking if media file exists: {str(e)}") + return None + + +async def upload_media( + user_id: str, file: fastapi.UploadFile, use_file_name: bool = False +) -> str: + + # Get file content for deeper validation + try: + content = await file.read(1024) # Read first 1KB for validation + await file.seek(0) # Reset file pointer + except Exception as e: + logger.error(f"Error reading file content: {str(e)}") + raise backend.server.v2.store.exceptions.FileReadError( + "Failed to read file content" + ) from e + + # Validate file signature/magic bytes + if file.content_type in ALLOWED_IMAGE_TYPES: + # Check image file signatures + if content.startswith(b"\xFF\xD8\xFF"): # JPEG + if file.content_type != "image/jpeg": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + elif content.startswith(b"\x89PNG\r\n\x1a\n"): # PNG + if file.content_type != "image/png": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + elif content.startswith(b"GIF87a") or content.startswith(b"GIF89a"): # GIF + if file.content_type != "image/gif": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + elif content.startswith(b"RIFF") and content[8:12] == b"WEBP": # WebP + if file.content_type != "image/webp": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + else: + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "Invalid image file signature" + ) + + elif file.content_type in ALLOWED_VIDEO_TYPES: + # Check video file signatures + if content.startswith(b"\x00\x00\x00") and (content[4:8] == b"ftyp"): # MP4 + if file.content_type != "video/mp4": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + elif content.startswith(b"\x1a\x45\xdf\xa3"): # WebM + if file.content_type != "video/webm": + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "File signature does not match content type" + ) + else: + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + "Invalid video file signature" + ) + + settings = Settings() + + # Check required settings first before doing any file processing + if not settings.config.media_gcs_bucket_name: + logger.error("Missing GCS bucket name setting") + raise backend.server.v2.store.exceptions.StorageConfigError( + "Missing storage bucket configuration" + ) + + try: + # Validate file type + content_type = file.content_type + if content_type is None: + content_type = "image/jpeg" + + if ( + content_type not in ALLOWED_IMAGE_TYPES + and content_type not in ALLOWED_VIDEO_TYPES + ): + logger.warning(f"Invalid file type attempted: 
{content_type}") + raise backend.server.v2.store.exceptions.InvalidFileTypeError( + f"File type not supported. Must be jpeg, png, gif, webp, mp4 or webm. Content type: {content_type}" + ) + + # Validate file size + file_size = 0 + chunk_size = 8192 # 8KB chunks + + try: + while chunk := await file.read(chunk_size): + file_size += len(chunk) + if file_size > MAX_FILE_SIZE: + logger.warning(f"File size too large: {file_size} bytes") + raise backend.server.v2.store.exceptions.FileSizeTooLargeError( + "File too large. Maximum size is 50MB" + ) + except backend.server.v2.store.exceptions.FileSizeTooLargeError: + raise + except Exception as e: + logger.error(f"Error reading file chunks: {str(e)}") + raise backend.server.v2.store.exceptions.FileReadError( + "Failed to read uploaded file" + ) from e + + # Reset file pointer + await file.seek(0) + + # Generate unique filename + filename = file.filename or "" + file_ext = os.path.splitext(filename)[1].lower() + if use_file_name: + unique_filename = filename + else: + unique_filename = f"{uuid.uuid4()}{file_ext}" + + # Construct storage path + media_type = "images" if content_type in ALLOWED_IMAGE_TYPES else "videos" + storage_path = f"users/{user_id}/{media_type}/{unique_filename}" + + try: + storage_client = storage.Client() + bucket = storage_client.bucket(settings.config.media_gcs_bucket_name) + blob = bucket.blob(storage_path) + blob.content_type = content_type + + file_bytes = await file.read() + blob.upload_from_string(file_bytes, content_type=content_type) + + public_url = blob.public_url + + logger.info(f"Successfully uploaded file to: {storage_path}") + return public_url + + except Exception as e: + logger.error(f"GCS storage error: {str(e)}") + raise backend.server.v2.store.exceptions.StorageUploadError( + "Failed to upload file to storage" + ) from e + + except backend.server.v2.store.exceptions.MediaUploadError: + raise + except Exception as e: + logger.exception("Unexpected error in upload_media") + raise backend.server.v2.store.exceptions.MediaUploadError( + "Unexpected error during media upload" + ) from e diff --git a/autogpt_platform/backend/backend/server/v2/store/media_test.py b/autogpt_platform/backend/backend/server/v2/store/media_test.py new file mode 100644 index 000000000000..bd5222e4f749 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/media_test.py @@ -0,0 +1,190 @@ +import io +import unittest.mock + +import fastapi +import pytest +import starlette.datastructures + +import backend.server.v2.store.exceptions +import backend.server.v2.store.media +from backend.util.settings import Settings + + +@pytest.fixture +def mock_settings(monkeypatch): + settings = Settings() + settings.config.media_gcs_bucket_name = "test-bucket" + settings.config.google_application_credentials = "test-credentials" + monkeypatch.setattr("backend.server.v2.store.media.Settings", lambda: settings) + return settings + + +@pytest.fixture +def mock_storage_client(mocker): + mock_client = unittest.mock.MagicMock() + mock_bucket = unittest.mock.MagicMock() + mock_blob = unittest.mock.MagicMock() + + mock_client.bucket.return_value = mock_bucket + mock_bucket.blob.return_value = mock_blob + mock_blob.public_url = "http://test-url/media/laptop.jpeg" + + mocker.patch("google.cloud.storage.Client", return_value=mock_client) + + return mock_client + + +async def test_upload_media_success(mock_settings, mock_storage_client): + # Create test JPEG data with valid signature + test_data = b"\xFF\xD8\xFF" + b"test data" + + test_file = 
fastapi.UploadFile( + filename="laptop.jpeg", + file=io.BytesIO(test_data), + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + + assert result == "http://test-url/media/laptop.jpeg" + mock_bucket = mock_storage_client.bucket.return_value + mock_blob = mock_bucket.blob.return_value + mock_blob.upload_from_string.assert_called_once() + + +async def test_upload_media_invalid_type(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.txt", + file=io.BytesIO(b"test data"), + headers=starlette.datastructures.Headers({"content-type": "text/plain"}), + ) + + with pytest.raises(backend.server.v2.store.exceptions.InvalidFileTypeError): + await backend.server.v2.store.media.upload_media("test-user", test_file) + + mock_bucket = mock_storage_client.bucket.return_value + mock_blob = mock_bucket.blob.return_value + mock_blob.upload_from_string.assert_not_called() + + +async def test_upload_media_missing_credentials(monkeypatch): + settings = Settings() + settings.config.media_gcs_bucket_name = "" + settings.config.google_application_credentials = "" + monkeypatch.setattr("backend.server.v2.store.media.Settings", lambda: settings) + + test_file = fastapi.UploadFile( + filename="laptop.jpeg", + file=io.BytesIO(b"\xFF\xD8\xFF" + b"test data"), # Valid JPEG signature + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + + with pytest.raises(backend.server.v2.store.exceptions.StorageConfigError): + await backend.server.v2.store.media.upload_media("test-user", test_file) + + +async def test_upload_media_video_type(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.mp4", + file=io.BytesIO(b"\x00\x00\x00\x18ftypmp42"), # Valid MP4 signature + headers=starlette.datastructures.Headers({"content-type": "video/mp4"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + + assert result == "http://test-url/media/laptop.jpeg" + mock_bucket = mock_storage_client.bucket.return_value + mock_blob = mock_bucket.blob.return_value + mock_blob.upload_from_string.assert_called_once() + + +async def test_upload_media_file_too_large(mock_settings, mock_storage_client): + large_data = b"\xFF\xD8\xFF" + b"x" * ( + 50 * 1024 * 1024 + 1 + ) # 50MB + 1 byte with valid JPEG signature + test_file = fastapi.UploadFile( + filename="laptop.jpeg", + file=io.BytesIO(large_data), + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + + with pytest.raises(backend.server.v2.store.exceptions.FileSizeTooLargeError): + await backend.server.v2.store.media.upload_media("test-user", test_file) + + +async def test_upload_media_file_read_error(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="laptop.jpeg", + file=io.BytesIO(b""), # Empty file that will raise error on read + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + test_file.read = unittest.mock.AsyncMock(side_effect=Exception("Read error")) + + with pytest.raises(backend.server.v2.store.exceptions.FileReadError): + await backend.server.v2.store.media.upload_media("test-user", test_file) + + +async def test_upload_media_png_success(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.png", + file=io.BytesIO(b"\x89PNG\r\n\x1a\n"), # Valid PNG signature + headers=starlette.datastructures.Headers({"content-type": 
"image/png"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + assert result == "http://test-url/media/laptop.jpeg" + + +async def test_upload_media_gif_success(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.gif", + file=io.BytesIO(b"GIF89a"), # Valid GIF signature + headers=starlette.datastructures.Headers({"content-type": "image/gif"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + assert result == "http://test-url/media/laptop.jpeg" + + +async def test_upload_media_webp_success(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.webp", + file=io.BytesIO(b"RIFF\x00\x00\x00\x00WEBP"), # Valid WebP signature + headers=starlette.datastructures.Headers({"content-type": "image/webp"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + assert result == "http://test-url/media/laptop.jpeg" + + +async def test_upload_media_webm_success(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.webm", + file=io.BytesIO(b"\x1a\x45\xdf\xa3"), # Valid WebM signature + headers=starlette.datastructures.Headers({"content-type": "video/webm"}), + ) + + result = await backend.server.v2.store.media.upload_media("test-user", test_file) + assert result == "http://test-url/media/laptop.jpeg" + + +async def test_upload_media_mismatched_signature(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.jpeg", + file=io.BytesIO(b"\x89PNG\r\n\x1a\n"), # PNG signature with JPEG content type + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + + with pytest.raises(backend.server.v2.store.exceptions.InvalidFileTypeError): + await backend.server.v2.store.media.upload_media("test-user", test_file) + + +async def test_upload_media_invalid_signature(mock_settings, mock_storage_client): + test_file = fastapi.UploadFile( + filename="test.jpeg", + file=io.BytesIO(b"invalid signature"), + headers=starlette.datastructures.Headers({"content-type": "image/jpeg"}), + ) + + with pytest.raises(backend.server.v2.store.exceptions.InvalidFileTypeError): + await backend.server.v2.store.media.upload_media("test-user", test_file) diff --git a/autogpt_platform/backend/backend/server/v2/store/model.py b/autogpt_platform/backend/backend/server/v2/store/model.py new file mode 100644 index 000000000000..7f09e853ae39 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/model.py @@ -0,0 +1,160 @@ +import datetime +from typing import List + +import prisma.enums +import pydantic + + +class Pagination(pydantic.BaseModel): + total_items: int = pydantic.Field( + description="Total number of items.", examples=[42] + ) + total_pages: int = pydantic.Field( + description="Total number of pages.", examples=[97] + ) + current_page: int = pydantic.Field( + description="Current_page page number.", examples=[1] + ) + page_size: int = pydantic.Field( + description="Number of items per page.", examples=[25] + ) + + +class MyAgent(pydantic.BaseModel): + agent_id: str + agent_version: int + agent_name: str + description: str + last_edited: datetime.datetime + + +class MyAgentsResponse(pydantic.BaseModel): + agents: list[MyAgent] + pagination: Pagination + + +class StoreAgent(pydantic.BaseModel): + slug: str + agent_name: str + agent_image: str + creator: str + creator_avatar: str + sub_heading: str + description: str + runs: int + rating: float + + 
+class StoreAgentsResponse(pydantic.BaseModel): + agents: list[StoreAgent] + pagination: Pagination + + +class StoreAgentDetails(pydantic.BaseModel): + store_listing_version_id: str + slug: str + agent_name: str + agent_video: str + agent_image: list[str] + creator: str + creator_avatar: str + sub_heading: str + description: str + categories: list[str] + runs: int + rating: float + versions: list[str] + last_updated: datetime.datetime + + +class Creator(pydantic.BaseModel): + name: str + username: str + description: str + avatar_url: str + num_agents: int + agent_rating: float + agent_runs: int + is_featured: bool + + +class CreatorsResponse(pydantic.BaseModel): + creators: List[Creator] + pagination: Pagination + + +class CreatorDetails(pydantic.BaseModel): + name: str + username: str + description: str + links: list[str] + avatar_url: str + agent_rating: float + agent_runs: int + top_categories: list[str] + + +class Profile(pydantic.BaseModel): + name: str + username: str + description: str + links: list[str] + avatar_url: str + is_featured: bool = False + + +class StoreSubmission(pydantic.BaseModel): + agent_id: str + agent_version: int + name: str + sub_heading: str + slug: str + description: str + image_urls: list[str] + date_submitted: datetime.datetime + status: prisma.enums.SubmissionStatus + runs: int + rating: float + store_listing_version_id: str | None = None + + +class StoreSubmissionsResponse(pydantic.BaseModel): + submissions: list[StoreSubmission] + pagination: Pagination + + +class StoreSubmissionRequest(pydantic.BaseModel): + agent_id: str + agent_version: int + slug: str + name: str + sub_heading: str + video_url: str | None = None + image_urls: list[str] = [] + description: str = "" + categories: list[str] = [] + + +class ProfileDetails(pydantic.BaseModel): + name: str + username: str + description: str + links: list[str] + avatar_url: str | None = None + + +class StoreReview(pydantic.BaseModel): + score: int + comments: str | None = None + + +class StoreReviewCreate(pydantic.BaseModel): + store_listing_version_id: str + score: int + comments: str | None = None + + +class ReviewSubmissionRequest(pydantic.BaseModel): + store_listing_version_id: str + is_approved: bool + comments: str diff --git a/autogpt_platform/backend/backend/server/v2/store/model_test.py b/autogpt_platform/backend/backend/server/v2/store/model_test.py new file mode 100644 index 000000000000..ec90fe685426 --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/model_test.py @@ -0,0 +1,195 @@ +import datetime + +import prisma.enums + +import backend.server.v2.store.model + + +def test_pagination(): + pagination = backend.server.v2.store.model.Pagination( + total_items=100, total_pages=5, current_page=2, page_size=20 + ) + assert pagination.total_items == 100 + assert pagination.total_pages == 5 + assert pagination.current_page == 2 + assert pagination.page_size == 20 + + +def test_store_agent(): + agent = backend.server.v2.store.model.StoreAgent( + slug="test-agent", + agent_name="Test Agent", + agent_image="test.jpg", + creator="creator1", + creator_avatar="avatar.jpg", + sub_heading="Test subheading", + description="Test description", + runs=50, + rating=4.5, + ) + assert agent.slug == "test-agent" + assert agent.agent_name == "Test Agent" + assert agent.runs == 50 + assert agent.rating == 4.5 + + +def test_store_agents_response(): + response = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="test-agent", + agent_name="Test 
Agent", + agent_image="test.jpg", + creator="creator1", + creator_avatar="avatar.jpg", + sub_heading="Test subheading", + description="Test description", + runs=50, + rating=4.5, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + total_items=1, total_pages=1, current_page=1, page_size=20 + ), + ) + assert len(response.agents) == 1 + assert response.pagination.total_items == 1 + + +def test_store_agent_details(): + details = backend.server.v2.store.model.StoreAgentDetails( + store_listing_version_id="version123", + slug="test-agent", + agent_name="Test Agent", + agent_video="video.mp4", + agent_image=["image1.jpg", "image2.jpg"], + creator="creator1", + creator_avatar="avatar.jpg", + sub_heading="Test subheading", + description="Test description", + categories=["cat1", "cat2"], + runs=50, + rating=4.5, + versions=["1.0", "2.0"], + last_updated=datetime.datetime.now(), + ) + assert details.slug == "test-agent" + assert len(details.agent_image) == 2 + assert len(details.categories) == 2 + assert len(details.versions) == 2 + + +def test_creator(): + creator = backend.server.v2.store.model.Creator( + agent_rating=4.8, + agent_runs=1000, + name="Test Creator", + username="creator1", + description="Test description", + avatar_url="avatar.jpg", + num_agents=5, + is_featured=False, + ) + assert creator.name == "Test Creator" + assert creator.num_agents == 5 + + +def test_creators_response(): + response = backend.server.v2.store.model.CreatorsResponse( + creators=[ + backend.server.v2.store.model.Creator( + agent_rating=4.8, + agent_runs=1000, + name="Test Creator", + username="creator1", + description="Test description", + avatar_url="avatar.jpg", + num_agents=5, + is_featured=False, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + total_items=1, total_pages=1, current_page=1, page_size=20 + ), + ) + assert len(response.creators) == 1 + assert response.pagination.total_items == 1 + + +def test_creator_details(): + details = backend.server.v2.store.model.CreatorDetails( + name="Test Creator", + username="creator1", + description="Test description", + links=["link1.com", "link2.com"], + avatar_url="avatar.jpg", + agent_rating=4.8, + agent_runs=1000, + top_categories=["cat1", "cat2"], + ) + assert details.name == "Test Creator" + assert len(details.links) == 2 + assert details.agent_rating == 4.8 + assert len(details.top_categories) == 2 + + +def test_store_submission(): + submission = backend.server.v2.store.model.StoreSubmission( + agent_id="agent123", + agent_version=1, + sub_heading="Test subheading", + name="Test Agent", + slug="test-agent", + description="Test description", + image_urls=["image1.jpg", "image2.jpg"], + date_submitted=datetime.datetime(2023, 1, 1), + status=prisma.enums.SubmissionStatus.PENDING, + runs=50, + rating=4.5, + ) + assert submission.name == "Test Agent" + assert len(submission.image_urls) == 2 + assert submission.status == prisma.enums.SubmissionStatus.PENDING + + +def test_store_submissions_response(): + response = backend.server.v2.store.model.StoreSubmissionsResponse( + submissions=[ + backend.server.v2.store.model.StoreSubmission( + agent_id="agent123", + agent_version=1, + sub_heading="Test subheading", + name="Test Agent", + slug="test-agent", + description="Test description", + image_urls=["image1.jpg"], + date_submitted=datetime.datetime(2023, 1, 1), + status=prisma.enums.SubmissionStatus.PENDING, + runs=50, + rating=4.5, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + total_items=1, total_pages=1, 
current_page=1, page_size=20 + ), + ) + assert len(response.submissions) == 1 + assert response.pagination.total_items == 1 + + +def test_store_submission_request(): + request = backend.server.v2.store.model.StoreSubmissionRequest( + agent_id="agent123", + agent_version=1, + slug="test-agent", + name="Test Agent", + sub_heading="Test subheading", + video_url="video.mp4", + image_urls=["image1.jpg", "image2.jpg"], + description="Test description", + categories=["cat1", "cat2"], + ) + assert request.agent_id == "agent123" + assert request.agent_version == 1 + assert len(request.image_urls) == 2 + assert len(request.categories) == 2 diff --git a/autogpt_platform/backend/backend/server/v2/store/routes.py b/autogpt_platform/backend/backend/server/v2/store/routes.py new file mode 100644 index 000000000000..50b71fad0c6d --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/routes.py @@ -0,0 +1,671 @@ +import json +import logging +import tempfile +import typing +import urllib.parse + +import autogpt_libs.auth.depends +import autogpt_libs.auth.middleware +import fastapi +import fastapi.responses +from fastapi.encoders import jsonable_encoder + +import backend.data.block +import backend.data.graph +import backend.server.v2.store.db +import backend.server.v2.store.image_gen +import backend.server.v2.store.media +import backend.server.v2.store.model + +logger = logging.getLogger(__name__) + +router = fastapi.APIRouter() + + +############################################## +############### Profile Endpoints ############ +############################################## + + +@router.get( + "/profile", + tags=["store", "private"], + response_model=backend.server.v2.store.model.ProfileDetails, +) +async def get_profile( + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ] +): + """ + Get the profile details for the authenticated user. + """ + try: + profile = await backend.server.v2.store.db.get_user_profile(user_id) + return profile + except Exception: + logger.exception("Exception occurred whilst getting user profile") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while retrieving the user profile"}, + ) + + +@router.post( + "/profile", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=backend.server.v2.store.model.CreatorDetails, +) +async def update_or_create_profile( + profile: backend.server.v2.store.model.Profile, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +): + """ + Update the store profile for the authenticated user. 
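+ Creates a new store profile for the user if one does not already exist.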
+ + Args: + profile (Profile): The updated profile details + user_id (str): ID of the authenticated user + + Returns: + CreatorDetails: The updated profile + + Raises: + HTTPException: If there is an error updating the profile + """ + try: + updated_profile = await backend.server.v2.store.db.update_or_create_profile( + user_id=user_id, profile=profile + ) + return updated_profile + except Exception: + logger.exception("Exception occurred whilst updating profile") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while updating the user profile"}, + ) + + +############################################## +############### Agent Endpoints ############## +############################################## + + +@router.get( + "/agents", + tags=["store", "public"], + response_model=backend.server.v2.store.model.StoreAgentsResponse, +) +async def get_agents( + featured: bool = False, + creator: str | None = None, + sorted_by: str | None = None, + search_query: str | None = None, + category: str | None = None, + page: int = 1, + page_size: int = 20, +): + """ + Get a paginated list of agents from the store with optional filtering and sorting. + + Args: + featured (bool, optional): Filter to only show featured agents. Defaults to False. + creator (str | None, optional): Filter agents by creator username. Defaults to None. + sorted_by (str | None, optional): Sort agents by "runs" or "rating". Defaults to None. + search_query (str | None, optional): Search agents by name, subheading and description. Defaults to None. + category (str | None, optional): Filter agents by category. Defaults to None. + page (int, optional): Page number for pagination. Defaults to 1. + page_size (int, optional): Number of agents per page. Defaults to 20. + + Returns: + StoreAgentsResponse: Paginated list of agents matching the filters + + Raises: + HTTPException: If page or page_size are less than 1 + + Used for: + - Home Page Featured Agents + - Home Page Top Agents + - Search Results + - Agent Details - Other Agents By Creator + - Agent Details - Similar Agents + - Creator Details - Agents By Creator + """ + if page < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page must be greater than 0" + ) + + if page_size < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page size must be greater than 0" + ) + + try: + agents = await backend.server.v2.store.db.get_store_agents( + featured=featured, + creator=creator, + sorted_by=sorted_by, + search_query=search_query, + category=category, + page=page, + page_size=page_size, + ) + return agents + except Exception: + logger.exception("Exception occurred whilst getting store agents") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while retrieving the store agents"}, + ) + + +@router.get( + "/agents/{username}/{agent_name}", + tags=["store", "public"], + response_model=backend.server.v2.store.model.StoreAgentDetails, +) +async def get_agent(username: str, agent_name: str): + """ + This is only used on the AgentDetails Page. + + It returns the store listing agent's details.
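+ Both path parameters are URL-decoded and lowercased before the listing is looked up.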
+ """ + try: + username = urllib.parse.unquote(username).lower() + # URL decode the agent name since it comes from the URL path + agent_name = urllib.parse.unquote(agent_name).lower() + agent = await backend.server.v2.store.db.get_store_agent_details( + username=username, agent_name=agent_name + ) + return agent + except Exception: + logger.exception("Exception occurred whilst getting store agent details") + return fastapi.responses.JSONResponse( + status_code=500, + content={ + "detail": "An error occurred while retrieving the store agent details" + }, + ) + + +@router.post( + "/agents/{username}/{agent_name}/review", + tags=["store"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=backend.server.v2.store.model.StoreReview, +) +async def create_review( + username: str, + agent_name: str, + review: backend.server.v2.store.model.StoreReviewCreate, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +): + """ + Create a review for a store agent. + + Args: + username: Creator's username + agent_name: Name/slug of the agent + review: Review details including score and optional comments + user_id: ID of authenticated user creating the review + + Returns: + The created review + """ + try: + username = urllib.parse.unquote(username).lower() + agent_name = urllib.parse.unquote(agent_name) + # Create the review + created_review = await backend.server.v2.store.db.create_store_review( + user_id=user_id, + store_listing_version_id=review.store_listing_version_id, + score=review.score, + comments=review.comments, + ) + + return created_review + except Exception: + logger.exception("Exception occurred whilst creating store review") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while creating the store review"}, + ) + + +############################################## +############# Creator Endpoints ############# +############################################## + + +@router.get( + "/creators", + tags=["store", "public"], + response_model=backend.server.v2.store.model.CreatorsResponse, +) +async def get_creators( + featured: bool = False, + search_query: str | None = None, + sorted_by: str | None = None, + page: int = 1, + page_size: int = 20, +): + """ + This is needed for: + - Home Page Featured Creators + - Search Results Page + + --- + + To support this functionality we need: + - featured: bool - to limit the list to just featured agents + - search_query: str - vector search based on the creators profile description. 
- sorted_by: [agent_rating, agent_runs] - to sort the creators by rating or by number of runs + """ + if page < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page must be greater than 0" + ) + + if page_size < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page size must be greater than 0" + ) + + try: + creators = await backend.server.v2.store.db.get_store_creators( + featured=featured, + search_query=search_query, + sorted_by=sorted_by, + page=page, + page_size=page_size, + ) + return creators + except Exception: + logger.exception("Exception occurred whilst getting store creators") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while retrieving the store creators"}, + ) + + +@router.get( + "/creator/{username}", + tags=["store", "public"], + response_model=backend.server.v2.store.model.CreatorDetails, +) +async def get_creator( + username: str, +): + """ + Get the details of a creator + - Creator Details Page + """ + try: + username = urllib.parse.unquote(username).lower() + creator = await backend.server.v2.store.db.get_store_creator_details( + username=username.lower() + ) + return creator + except Exception: + logger.exception("Exception occurred whilst getting creator details") + return fastapi.responses.JSONResponse( + status_code=500, + content={ + "detail": "An error occurred while retrieving the creator details" + }, + ) + + +############################################ +############# Store Submissions ############### +############################################ +@router.get( + "/myagents", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=backend.server.v2.store.model.MyAgentsResponse, +) +async def get_my_agents( + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ] +): + try: + agents = await backend.server.v2.store.db.get_my_agents(user_id) + return agents + except Exception: + logger.exception("Exception occurred whilst getting my agents") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while retrieving your agents"}, + ) + + +@router.delete( + "/submissions/{submission_id}", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=bool, +) +async def delete_submission( + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], + submission_id: str, +): + """ + Delete a store listing submission.
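+ The store DB layer performs the deletion using both the authenticated user's ID and the submission ID.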
+ + Args: + user_id (str): ID of the authenticated user + submission_id (str): ID of the submission to be deleted + + Returns: + bool: True if the submission was successfully deleted, False otherwise + """ + try: + result = await backend.server.v2.store.db.delete_store_submission( + user_id=user_id, + submission_id=submission_id, + ) + return result + except Exception: + logger.exception("Exception occurred whilst deleting store submission") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while deleting the store submission"}, + ) + + +@router.get( + "/submissions", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=backend.server.v2.store.model.StoreSubmissionsResponse, +) +async def get_submissions( + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], + page: int = 1, + page_size: int = 20, +): + """ + Get a paginated list of store submissions for the authenticated user. + + Args: + user_id (str): ID of the authenticated user + page (int, optional): Page number for pagination. Defaults to 1. + page_size (int, optional): Number of submissions per page. Defaults to 20. + + Returns: + StoreSubmissionsResponse: Paginated list of store submissions + + Raises: + HTTPException: If page or page_size are less than 1 + """ + if page < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page must be greater than 0" + ) + + if page_size < 1: + raise fastapi.HTTPException( + status_code=422, detail="Page size must be greater than 0" + ) + try: + listings = await backend.server.v2.store.db.get_store_submissions( + user_id=user_id, + page=page, + page_size=page_size, + ) + return listings + except Exception: + logger.exception("Exception occurred whilst getting store submissions") + return fastapi.responses.JSONResponse( + status_code=500, + content={ + "detail": "An error occurred while retrieving the store submissions" + }, + ) + + +@router.post( + "/submissions", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], + response_model=backend.server.v2.store.model.StoreSubmission, +) +async def create_submission( + submission_request: backend.server.v2.store.model.StoreSubmissionRequest, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +): + """ + Create a new store listing submission.
+ + Args: + submission_request (StoreSubmissionRequest): The submission details + user_id (str): ID of the authenticated user submitting the listing + + Returns: + StoreSubmission: The created store submission + + Raises: + HTTPException: If there is an error creating the submission + """ + try: + submission = await backend.server.v2.store.db.create_store_submission( + user_id=user_id, + agent_id=submission_request.agent_id, + agent_version=submission_request.agent_version, + slug=submission_request.slug, + name=submission_request.name, + video_url=submission_request.video_url, + image_urls=submission_request.image_urls, + description=submission_request.description, + sub_heading=submission_request.sub_heading, + categories=submission_request.categories, + ) + return submission + except Exception: + logger.exception("Exception occurred whilst creating store submission") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while creating the store submission"}, + ) + + +@router.post( + "/submissions/media", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], +) +async def upload_submission_media( + file: fastapi.UploadFile, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +): + """ + Upload media (images/videos) for a store listing submission. + + Args: + file (UploadFile): The media file to upload + user_id (str): ID of the authenticated user uploading the media + + Returns: + str: URL of the uploaded media file + + Raises: + HTTPException: If there is an error uploading the media + """ + try: + media_url = await backend.server.v2.store.media.upload_media( + user_id=user_id, file=file + ) + return media_url + except Exception: + logger.exception("Exception occurred whilst uploading submission media") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while uploading the media file"}, + ) + + +@router.post( + "/submissions/generate_image", + tags=["store", "private"], + dependencies=[fastapi.Depends(autogpt_libs.auth.middleware.auth_middleware)], +) +async def generate_image( + agent_id: str, + user_id: typing.Annotated[ + str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id) + ], +) -> fastapi.responses.Response: + """ + Generate an image for a store listing submission. 
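+ If an image has already been generated for this agent, the URL of the existing image is returned instead of generating a new one.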
+ + Args: + agent_id (str): ID of the agent to generate an image for + user_id (str): ID of the authenticated user + + Returns: + JSONResponse: JSON containing the URL of the generated image + """ + try: + agent = await backend.data.graph.get_graph(agent_id, user_id=user_id) + + if not agent: + raise fastapi.HTTPException( + status_code=404, detail=f"Agent with ID {agent_id} not found" + ) + # Use .jpeg here since we are generating JPEG images + filename = f"agent_{agent_id}.jpeg" + + existing_url = await backend.server.v2.store.media.check_media_exists( + user_id, filename + ) + if existing_url: + logger.info(f"Using existing image for agent {agent_id}") + return fastapi.responses.JSONResponse(content={"image_url": existing_url}) + # Generate agent image as JPEG + image = await backend.server.v2.store.image_gen.generate_agent_image( + agent=agent + ) + + # Create UploadFile with the correct filename and content_type + image_file = fastapi.UploadFile( + file=image, + filename=filename, + ) + + image_url = await backend.server.v2.store.media.upload_media( + user_id=user_id, file=image_file, use_file_name=True + ) + + return fastapi.responses.JSONResponse(content={"image_url": image_url}) + except Exception: + logger.exception("Exception occurred whilst generating submission image") + return fastapi.responses.JSONResponse( + status_code=500, + content={"detail": "An error occurred while generating the image"}, + ) + + +@router.get( + "/download/agents/{store_listing_version_id}", + tags=["store", "public"], +) +async def download_agent_file( + store_listing_version_id: str = fastapi.Path( + ..., description="The ID of the agent to download" + ), + version: typing.Optional[int] = fastapi.Query( + None, description="Specific version of the agent" + ), +) -> fastapi.responses.FileResponse: + """ + Download the agent file by streaming its content. + + Args: + store_listing_version_id (str): The ID of the store listing version to download. + version (Optional[int]): Specific version of the agent to download. + + Returns: + FileResponse: A JSON file containing the agent's graph data. + + Raises: + HTTPException: If the agent is not found or an unexpected error occurs.
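+ + Note: + Credential fields ("credentials" and "creds") are stripped from the graph data before the file is returned.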
+ """ + + graph_data = await backend.server.v2.store.db.get_agent( + store_listing_version_id=store_listing_version_id, version_id=version + ) + + graph_data.clean_graph() + graph_date_dict = jsonable_encoder(graph_data) + + def remove_credentials(obj): + if obj and isinstance(obj, dict): + if "credentials" in obj: + del obj["credentials"] + if "creds" in obj: + del obj["creds"] + + for value in obj.values(): + remove_credentials(value) + elif isinstance(obj, list): + for item in obj: + remove_credentials(item) + return obj + + graph_date_dict = remove_credentials(graph_date_dict) + + file_name = f"agent_{store_listing_version_id}_v{version or 'latest'}.json" + + # Sending graph as a stream (similar to marketplace v1) + with tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) as tmp_file: + tmp_file.write(json.dumps(graph_date_dict)) + tmp_file.flush() + + return fastapi.responses.FileResponse( + tmp_file.name, filename=file_name, media_type="application/json" + ) + + +@router.post( + "/submissions/review/{store_listing_version_id}", + tags=["store", "private"], +) +async def review_submission( + request: backend.server.v2.store.model.ReviewSubmissionRequest, + user: typing.Annotated[ + autogpt_libs.auth.models.User, + fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user), + ], +): + # Proceed with the review submission logic + try: + submission = await backend.server.v2.store.db.review_store_submission( + store_listing_version_id=request.store_listing_version_id, + is_approved=request.is_approved, + comments=request.comments, + reviewer_id=user.user_id, + ) + return submission + except Exception: + raise fastapi.HTTPException( + status_code=500, + detail="An error occurred while creating the store submission review", + ) diff --git a/autogpt_platform/backend/backend/server/v2/store/routes_test.py b/autogpt_platform/backend/backend/server/v2/store/routes_test.py new file mode 100644 index 000000000000..ae4496f9eb2f --- /dev/null +++ b/autogpt_platform/backend/backend/server/v2/store/routes_test.py @@ -0,0 +1,552 @@ +import datetime + +import autogpt_libs.auth.depends +import autogpt_libs.auth.middleware +import fastapi +import fastapi.testclient +import prisma.enums +import pytest_mock + +import backend.server.v2.store.model +import backend.server.v2.store.routes + +app = fastapi.FastAPI() +app.include_router(backend.server.v2.store.routes.router) + +client = fastapi.testclient.TestClient(app) + + +def override_auth_middleware(): + """Override auth middleware for testing""" + return {"sub": "test-user-id"} + + +def override_get_user_id(): + """Override get_user_id for testing""" + return "test-user-id" + + +app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = ( + override_auth_middleware +) +app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id + + +def test_get_agents_defaults(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[], + pagination=backend.server.v2.store.model.Pagination( + current_page=0, + total_items=0, + total_pages=0, + page_size=10, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents") + assert response.status_code == 200 + + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert data.pagination.total_pages == 0 + assert data.agents == [] + 
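# The route should forward its default query parameters unchanged to the DB layer. +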
mock_db_call.assert_called_once_with( + featured=False, + creator=None, + sorted_by=None, + search_query=None, + category=None, + page=1, + page_size=20, + ) + + +def test_get_agents_featured(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="featured-agent", + agent_name="Featured Agent", + agent_image="featured.jpg", + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading="Featured agent subheading", + description="Featured agent description", + runs=100, + rating=4.5, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?featured=true") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 1 + assert data.agents[0].slug == "featured-agent" + mock_db_call.assert_called_once_with( + featured=True, + creator=None, + sorted_by=None, + search_query=None, + category=None, + page=1, + page_size=20, + ) + + +def test_get_agents_by_creator(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="creator-agent", + agent_name="Creator Agent", + agent_image="agent.jpg", + creator="specific-creator", + creator_avatar="avatar.jpg", + sub_heading="Creator agent subheading", + description="Creator agent description", + runs=50, + rating=4.0, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?creator=specific-creator") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 1 + assert data.agents[0].creator == "specific-creator" + mock_db_call.assert_called_once_with( + featured=False, + creator="specific-creator", + sorted_by=None, + search_query=None, + category=None, + page=1, + page_size=20, + ) + + +def test_get_agents_sorted(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="top-agent", + agent_name="Top Agent", + agent_image="top.jpg", + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading="Top agent subheading", + description="Top agent description", + runs=1000, + rating=5.0, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?sorted_by=runs") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 1 + assert data.agents[0].runs == 1000 + mock_db_call.assert_called_once_with( + featured=False, + creator=None, + sorted_by="runs", + search_query=None, + category=None, + page=1, + page_size=20, + ) + + +def 
test_get_agents_search(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="search-agent", + agent_name="Search Agent", + agent_image="search.jpg", + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading="Search agent subheading", + description="Specific search term description", + runs=75, + rating=4.2, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?search_query=specific") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 1 + assert "specific" in data.agents[0].description.lower() + mock_db_call.assert_called_once_with( + featured=False, + creator=None, + sorted_by=None, + search_query="specific", + category=None, + page=1, + page_size=20, + ) + + +def test_get_agents_category(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug="category-agent", + agent_name="Category Agent", + agent_image="category.jpg", + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading="Category agent subheading", + description="Category agent description", + runs=60, + rating=4.1, + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?category=test-category") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 1 + mock_db_call.assert_called_once_with( + featured=False, + creator=None, + sorted_by=None, + search_query=None, + category="test-category", + page=1, + page_size=20, + ) + + +def test_get_agents_pagination(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentsResponse( + agents=[ + backend.server.v2.store.model.StoreAgent( + slug=f"agent-{i}", + agent_name=f"Agent {i}", + agent_image=f"agent{i}.jpg", + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading=f"Agent {i} subheading", + description=f"Agent {i} description", + runs=i * 10, + rating=4.0, + ) + for i in range(5) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=2, + total_items=15, + total_pages=3, + page_size=5, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.return_value = mocked_value + response = client.get("/agents?page=2&page_size=5") + assert response.status_code == 200 + data = backend.server.v2.store.model.StoreAgentsResponse.model_validate( + response.json() + ) + assert len(data.agents) == 5 + assert data.pagination.current_page == 2 + assert data.pagination.page_size == 5 + mock_db_call.assert_called_once_with( + featured=False, + creator=None, + sorted_by=None, + search_query=None, + category=None, + page=2, + page_size=5, + ) + + +def test_get_agents_malformed_request(mocker: pytest_mock.MockFixture): + # Test with invalid page number + response = 
client.get("/agents?page=-1") + assert response.status_code == 422 + + # Test with invalid page size + response = client.get("/agents?page_size=0") + assert response.status_code == 422 + + # Test with non-numeric values + response = client.get("/agents?page=abc&page_size=def") + assert response.status_code == 422 + + # Verify no DB calls were made + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agents") + mock_db_call.assert_not_called() + + +def test_get_agent_details(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreAgentDetails( + store_listing_version_id="test-version-id", + slug="test-agent", + agent_name="Test Agent", + agent_video="video.mp4", + agent_image=["image1.jpg", "image2.jpg"], + creator="creator1", + creator_avatar="avatar1.jpg", + sub_heading="Test agent subheading", + description="Test agent description", + categories=["category1", "category2"], + runs=100, + rating=4.5, + versions=["1.0.0", "1.1.0"], + last_updated=datetime.datetime.now(), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_agent_details") + mock_db_call.return_value = mocked_value + + response = client.get("/agents/creator1/test-agent") + assert response.status_code == 200 + + data = backend.server.v2.store.model.StoreAgentDetails.model_validate( + response.json() + ) + assert data.agent_name == "Test Agent" + assert data.creator == "creator1" + mock_db_call.assert_called_once_with(username="creator1", agent_name="test-agent") + + +def test_get_creators_defaults(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.CreatorsResponse( + creators=[], + pagination=backend.server.v2.store.model.Pagination( + current_page=0, + total_items=0, + total_pages=0, + page_size=10, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_creators") + mock_db_call.return_value = mocked_value + + response = client.get("/creators") + assert response.status_code == 200 + + data = backend.server.v2.store.model.CreatorsResponse.model_validate( + response.json() + ) + assert data.pagination.total_pages == 0 + assert data.creators == [] + mock_db_call.assert_called_once_with( + featured=False, search_query=None, sorted_by=None, page=1, page_size=20 + ) + + +def test_get_creators_pagination(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.CreatorsResponse( + creators=[ + backend.server.v2.store.model.Creator( + name=f"Creator {i}", + username=f"creator{i}", + description=f"Creator {i} description", + avatar_url=f"avatar{i}.jpg", + num_agents=1, + agent_rating=4.5, + agent_runs=100, + is_featured=False, + ) + for i in range(5) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=2, + total_items=15, + total_pages=3, + page_size=5, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_creators") + mock_db_call.return_value = mocked_value + + response = client.get("/creators?page=2&page_size=5") + assert response.status_code == 200 + + data = backend.server.v2.store.model.CreatorsResponse.model_validate( + response.json() + ) + assert len(data.creators) == 5 + assert data.pagination.current_page == 2 + assert data.pagination.page_size == 5 + mock_db_call.assert_called_once_with( + featured=False, search_query=None, sorted_by=None, page=2, page_size=5 + ) + + +def test_get_creators_malformed_request(mocker: pytest_mock.MockFixture): + # Test with invalid page number + response = client.get("/creators?page=-1") + assert 
response.status_code == 422 + + # Test with invalid page size + response = client.get("/creators?page_size=0") + assert response.status_code == 422 + + # Test with non-numeric values + response = client.get("/creators?page=abc&page_size=def") + assert response.status_code == 422 + + # Verify no DB calls were made + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_creators") + mock_db_call.assert_not_called() + + +def test_get_creator_details(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.CreatorDetails( + name="Test User", + username="creator1", + description="Test creator description", + links=["link1.com", "link2.com"], + avatar_url="avatar.jpg", + agent_rating=4.8, + agent_runs=1000, + top_categories=["category1", "category2"], + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_creator_details") + mock_db_call.return_value = mocked_value + + response = client.get("/creator/creator1") + assert response.status_code == 200 + + data = backend.server.v2.store.model.CreatorDetails.model_validate(response.json()) + assert data.username == "creator1" + assert data.name == "Test User" + mock_db_call.assert_called_once_with(username="creator1") + + +def test_get_submissions_success(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse( + submissions=[ + backend.server.v2.store.model.StoreSubmission( + name="Test Agent", + description="Test agent description", + image_urls=["test.jpg"], + date_submitted=datetime.datetime.now(), + status=prisma.enums.SubmissionStatus.APPROVED, + runs=50, + rating=4.2, + agent_id="test-agent-id", + agent_version=1, + sub_heading="Test agent subheading", + slug="test-agent", + ) + ], + pagination=backend.server.v2.store.model.Pagination( + current_page=1, + total_items=1, + total_pages=1, + page_size=20, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_submissions") + mock_db_call.return_value = mocked_value + + response = client.get("/submissions") + assert response.status_code == 200 + + data = backend.server.v2.store.model.StoreSubmissionsResponse.model_validate( + response.json() + ) + assert len(data.submissions) == 1 + assert data.submissions[0].name == "Test Agent" + assert data.pagination.current_page == 1 + mock_db_call.assert_called_once_with(user_id="test-user-id", page=1, page_size=20) + + +def test_get_submissions_pagination(mocker: pytest_mock.MockFixture): + mocked_value = backend.server.v2.store.model.StoreSubmissionsResponse( + submissions=[], + pagination=backend.server.v2.store.model.Pagination( + current_page=2, + total_items=10, + total_pages=2, + page_size=5, + ), + ) + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_submissions") + mock_db_call.return_value = mocked_value + + response = client.get("/submissions?page=2&page_size=5") + assert response.status_code == 200 + + data = backend.server.v2.store.model.StoreSubmissionsResponse.model_validate( + response.json() + ) + assert data.pagination.current_page == 2 + assert data.pagination.page_size == 5 + mock_db_call.assert_called_once_with(user_id="test-user-id", page=2, page_size=5) + + +def test_get_submissions_malformed_request(mocker: pytest_mock.MockFixture): + # Test with invalid page number + response = client.get("/submissions?page=-1") + assert response.status_code == 422 + + # Test with invalid page size + response = client.get("/submissions?page_size=0") + assert response.status_code == 422 + + # Test with 
non-numeric values + response = client.get("/submissions?page=abc&page_size=def") + assert response.status_code == 422 + + # Verify no DB calls were made + mock_db_call = mocker.patch("backend.server.v2.store.db.get_store_submissions") + mock_db_call.assert_not_called() diff --git a/autogpt_platform/backend/backend/server/ws_api.py b/autogpt_platform/backend/backend/server/ws_api.py new file mode 100644 index 000000000000..a6da64b8e53c --- /dev/null +++ b/autogpt_platform/backend/backend/server/ws_api.py @@ -0,0 +1,189 @@ +import asyncio +import logging +from contextlib import asynccontextmanager + +import uvicorn +from autogpt_libs.auth import parse_jwt_token +from fastapi import Depends, FastAPI, WebSocket, WebSocketDisconnect +from starlette.middleware.cors import CORSMiddleware + +from backend.data import redis +from backend.data.execution import AsyncRedisExecutionEventBus +from backend.data.user import DEFAULT_USER_ID +from backend.server.conn_manager import ConnectionManager +from backend.server.model import ExecutionSubscription, Methods, WsMessage +from backend.util.service import AppProcess +from backend.util.settings import AppEnvironment, Config, Settings + +logger = logging.getLogger(__name__) +settings = Settings() + + +@asynccontextmanager +async def lifespan(app: FastAPI): + manager = get_connection_manager() + fut = asyncio.create_task(event_broadcaster(manager)) + fut.add_done_callback(lambda _: logger.info("Event broadcaster stopped")) + yield + + +docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None +app = FastAPI(lifespan=lifespan, docs_url=docs_url) +_connection_manager = None + + +def get_connection_manager(): + global _connection_manager + if _connection_manager is None: + _connection_manager = ConnectionManager() + return _connection_manager + + +async def event_broadcaster(manager: ConnectionManager): + try: + redis.connect() + event_queue = AsyncRedisExecutionEventBus() + async for event in event_queue.listen(): + await manager.send_execution_result(event) + except Exception as e: + logger.exception(f"Event broadcaster error: {e}") + raise + finally: + redis.disconnect() + + +async def authenticate_websocket(websocket: WebSocket) -> str: + if not settings.config.enable_auth: + return DEFAULT_USER_ID + + token = websocket.query_params.get("token") + if not token: + await websocket.close(code=4001, reason="Missing authentication token") + return "" + + try: + payload = parse_jwt_token(token) + user_id = payload.get("sub") + if not user_id: + await websocket.close(code=4002, reason="Invalid token") + return "" + return user_id + except ValueError: + await websocket.close(code=4003, reason="Invalid token") + return "" + + +async def handle_subscribe( + websocket: WebSocket, manager: ConnectionManager, message: WsMessage +): + if not message.data: + await websocket.send_text( + WsMessage( + method=Methods.ERROR, + success=False, + error="Subscription data missing", + ).model_dump_json() + ) + else: + ex_sub = ExecutionSubscription.model_validate(message.data) + await manager.subscribe(ex_sub.graph_id, websocket) + logger.debug(f"New execution subscription for graph {ex_sub.graph_id}") + await websocket.send_text( + WsMessage( + method=Methods.SUBSCRIBE, + success=True, + channel=ex_sub.graph_id, + ).model_dump_json() + ) + + +async def handle_unsubscribe( + websocket: WebSocket, manager: ConnectionManager, message: WsMessage +): + if not message.data: + await websocket.send_text( + WsMessage( + method=Methods.ERROR, + success=False, + 
error="Subscription data missing", + ).model_dump_json() + ) + else: + ex_sub = ExecutionSubscription.model_validate(message.data) + await manager.unsubscribe(ex_sub.graph_id, websocket) + logger.debug(f"Removed execution subscription for graph {ex_sub.graph_id}") + await websocket.send_text( + WsMessage( + method=Methods.UNSUBSCRIBE, + success=True, + channel=ex_sub.graph_id, + ).model_dump_json() + ) + + +@app.get("/") +async def health(): + return {"status": "healthy"} + + +@app.websocket("/ws") +async def websocket_router( + websocket: WebSocket, manager: ConnectionManager = Depends(get_connection_manager) +): + user_id = await authenticate_websocket(websocket) + if not user_id: + return + await manager.connect(websocket) + try: + while True: + data = await websocket.receive_text() + message = WsMessage.model_validate_json(data) + + if message.method == Methods.HEARTBEAT: + await websocket.send_json( + {"method": Methods.HEARTBEAT.value, "data": "pong", "success": True} + ) + continue + + if message.method == Methods.SUBSCRIBE: + await handle_subscribe(websocket, manager, message) + + elif message.method == Methods.UNSUBSCRIBE: + await handle_unsubscribe(websocket, manager, message) + + elif message.method == Methods.ERROR: + logger.error(f"WebSocket Error message received: {message.data}") + + else: + logger.warning( + f"Unknown WebSocket message type {message.method} received: " + f"{message.data}" + ) + await websocket.send_text( + WsMessage( + method=Methods.ERROR, + success=False, + error="Message type is not processed by the server", + ).model_dump_json() + ) + + except WebSocketDisconnect: + manager.disconnect(websocket) + logger.debug("WebSocket client disconnected") + + +class WebsocketServer(AppProcess): + def run(self): + logger.info(f"CORS allow origins: {settings.config.backend_cors_allow_origins}") + server_app = CORSMiddleware( + app=app, + allow_origins=settings.config.backend_cors_allow_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + uvicorn.run( + server_app, + host=Config().websocket_server_host, + port=Config().websocket_server_port, + ) diff --git a/autogpt_platform/backend/backend/usecases/block_autogen.py b/autogpt_platform/backend/backend/usecases/block_autogen.py new file mode 100644 index 000000000000..dc34b79f3278 --- /dev/null +++ b/autogpt_platform/backend/backend/usecases/block_autogen.py @@ -0,0 +1,271 @@ +from pathlib import Path + +from prisma.models import User + +from backend.blocks.basic import StoreValueBlock +from backend.blocks.block import BlockInstallationBlock +from backend.blocks.http import SendWebRequestBlock +from backend.blocks.llm import AITextGeneratorBlock +from backend.blocks.text import ExtractTextInformationBlock, FillTextTemplateBlock +from backend.data.graph import Graph, Link, Node, create_graph +from backend.data.user import get_or_create_user +from backend.util.test import SpinTestServer, wait_execution + +sample_block_modules = { + "llm": "Block that calls the AI model to generate text.", + "basic": "Block that does basic operations.", + "text": "Blocks that do text operations.", + "reddit": "Blocks that interacts with Reddit.", +} +sample_block_codes = {} +for module, description in sample_block_modules.items(): + current_dir = Path(__file__).parent + file_path = current_dir.parent / "blocks" / f"{module}.py" + with open(file_path, "r") as f: + code = "\n".join(["```python", f.read(), "```"]) + sample_block_codes[module] = f"[Example: {description}]\n{code}" + + +async def 
create_test_user() -> User: + test_user_data = { + "sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1", + "email": "testuser@example.com", + "name": "Test User", + } + user = await get_or_create_user(test_user_data) + return user + + +def create_test_graph() -> Graph: + """ + StoreValueBlock (input) + || + v + FillTextTemplateBlock (input query) + || + v + SendWebRequestBlock (browse) + || + v + ------> StoreValueBlock=============== + | | | || + | -- || + | || + | || + | v + | AITextGeneratorBlock <===== FillTextTemplateBlock (query) + | || ^ + | v || + | ExtractTextInformationBlock || + | || || + | v || + ------ BlockInstallationBlock ====== + """ + # ======= Nodes ========= # + input_data = Node(block_id=StoreValueBlock().id) + input_query_constant = Node( + block_id=StoreValueBlock().id, + input_default={"data": None}, + ) + input_text_formatter = Node( + block_id=FillTextTemplateBlock().id, + input_default={ + "format": "Show me how to make a python code for this query: `{query}`", + }, + ) + search_http_request = Node( + block_id=SendWebRequestBlock().id, + input_default={ + "url": "https://osit-v2.bentlybro.com/search", + }, + ) + search_result_constant = Node( + block_id=StoreValueBlock().id, + input_default={ + "data": None, + }, + ) + prompt_text_formatter = Node( + block_id=FillTextTemplateBlock().id, + input_default={ + "format": """ +Write me a full Block implementation for this query: `{query}` + +Here is the information I get to write a Python code for that: +{search_result} + +Here is your previous attempt: +{previous_attempt} +""", + "values_#_previous_attempt": "No previous attempt found.", + }, + ) + code_gen_llm_call = Node( + block_id=AITextGeneratorBlock().id, + input_default={ + "sys_prompt": f""" +You are a software engineer and you are asked to write the full class implementation. +The class that you are implementing is extending a class called `Block`. +This class will be used as a node in a graph of other blocks to build a complex system. +This class has a method called `run` that takes an input and returns an output. +It also has an `id` attribute that is a UUID, input_schema, and output_schema. +For UUID, you have to hardcode it, like `d2e2ecd2-9ae6-422d-8dfe-ceca500ce6a6`, +don't use any automatic UUID generation, because it needs to be consistent. +To validate the correctness of your implementation, you can also define a test. +There is `test_input` and `test_output` you can use to validate your implementation. +There is also `test_mock` to mock a helper function on your block class for testing. + +Feel free to start your answer by explaining your plan what's required how to test, etc. 
+But make sure to produce the fully working implementation at the end, +and it should be enclosed within this block format: +```python + +``` + +Here are a couple of sample of the Block class implementation: + +{"--------------".join([sample_block_codes[v] for v in sample_block_modules])} +""", + }, + ) + code_text_parser = Node( + block_id=ExtractTextInformationBlock().id, + input_default={ + "pattern": "```python\n(.+?)\n```", + "group": 1, + }, + ) + block_installation = Node( + block_id=BlockInstallationBlock().id, + ) + nodes = [ + input_data, + input_query_constant, + input_text_formatter, + search_http_request, + search_result_constant, + prompt_text_formatter, + code_gen_llm_call, + code_text_parser, + block_installation, + ] + + # ======= Links ========= # + links = [ + Link( + source_id=input_data.id, + sink_id=input_query_constant.id, + source_name="output", + sink_name="input", + ), + Link( + source_id=input_data.id, + sink_id=input_text_formatter.id, + source_name="output", + sink_name="values_#_query", + ), + Link( + source_id=input_query_constant.id, + sink_id=input_query_constant.id, + source_name="output", + sink_name="data", + ), + Link( + source_id=input_text_formatter.id, + sink_id=search_http_request.id, + source_name="output", + sink_name="body_#_query", + ), + Link( + source_id=search_http_request.id, + sink_id=search_result_constant.id, + source_name="response_#_reply", + sink_name="input", + ), + Link( # Loopback for constant block + source_id=search_result_constant.id, + sink_id=search_result_constant.id, + source_name="output", + sink_name="data", + ), + Link( + source_id=search_result_constant.id, + sink_id=prompt_text_formatter.id, + source_name="output", + sink_name="values_#_search_result", + ), + Link( + source_id=input_query_constant.id, + sink_id=prompt_text_formatter.id, + source_name="output", + sink_name="values_#_query", + ), + Link( + source_id=prompt_text_formatter.id, + sink_id=code_gen_llm_call.id, + source_name="output", + sink_name="prompt", + ), + Link( + source_id=code_gen_llm_call.id, + sink_id=code_text_parser.id, + source_name="response", + sink_name="text", + ), + Link( + source_id=code_text_parser.id, + sink_id=block_installation.id, + source_name="positive", + sink_name="code", + ), + Link( + source_id=block_installation.id, + sink_id=prompt_text_formatter.id, + source_name="error", + sink_name="values_#_previous_attempt", + ), + Link( # Re-trigger search result. + source_id=block_installation.id, + sink_id=search_result_constant.id, + source_name="error", + sink_name="input", + ), + Link( # Re-trigger search result. 
+ source_id=block_installation.id, + sink_id=input_query_constant.id, + source_name="error", + sink_name="input", + ), + ] + + # ======= Graph ========= # + return Graph( + name="BlockAutoGen", + description="Block auto generation agent", + nodes=nodes, + links=links, + ) + + +async def block_autogen_agent(): + async with SpinTestServer() as server: + test_user = await create_test_user() + test_graph = await create_graph(create_test_graph(), user_id=test_user.id) + input_data = {"input": "Write me a block that writes a string into a file."} + response = await server.agent_server.test_execute_graph( + test_graph.id, input_data, test_user.id + ) + print(response) + result = await wait_execution( + graph_id=test_graph.id, + graph_exec_id=response["id"], + timeout=1200, + user_id=test_user.id, + ) + print(result) + + +if __name__ == "__main__": + import asyncio + + asyncio.run(block_autogen_agent()) diff --git a/autogpt_platform/backend/backend/usecases/reddit_marketing.py b/autogpt_platform/backend/backend/usecases/reddit_marketing.py new file mode 100644 index 000000000000..8ea2f651f30e --- /dev/null +++ b/autogpt_platform/backend/backend/usecases/reddit_marketing.py @@ -0,0 +1,170 @@ +from prisma.models import User + +from backend.blocks.llm import AIStructuredResponseGeneratorBlock +from backend.blocks.reddit import GetRedditPostsBlock, PostRedditCommentBlock +from backend.blocks.text import FillTextTemplateBlock, MatchTextPatternBlock +from backend.data.graph import Graph, Link, Node, create_graph +from backend.data.user import get_or_create_user +from backend.util.test import SpinTestServer, wait_execution + + +def create_test_graph() -> Graph: + """ + subreddit + || + v + GetRedditPostsBlock (post_id, post_title, post_body) + // || \\ + post_id post_title post_body + || || || + v v v + FillTextTemplateBlock (format) + || + v + AIStructuredResponseBlock / TextRelevancy + || || || + post_id is_relevant marketing_text + || || || + v v v + MatchTextPatternBlock + || || + positive negative + || + v + PostRedditCommentBlock + """ + # Hardcoded inputs + reddit_get_post_input = { + "post_limit": 3, + } + text_formatter_input = { + "format": """ +Based on the following post, write your marketing comment: +* Post ID: {id} +* Post Subreddit: {subreddit} +* Post Title: {title} +* Post Body: {body}""".strip(), + } + llm_call_input = { + "sys_prompt": """ +You are an expert at marketing. +You have been tasked with picking Reddit posts that are relevant to your product. +The product you are marketing is: Auto-GPT an autonomous AI agent utilizing GPT model. +You reply the post that you find it relevant to be replied with marketing text. +Make sure to only comment on a relevant post. 
+""", + "expected_format": { + "post_id": "str, the reddit post id", + "is_relevant": "bool, whether the post is relevant for marketing", + "marketing_text": "str, marketing text, this is empty on irrelevant posts", + }, + } + text_matcher_input = {"match": "true", "case_sensitive": False} + reddit_comment_input = {} + + # Nodes + reddit_get_post_node = Node( + block_id=GetRedditPostsBlock().id, + input_default=reddit_get_post_input, + ) + text_formatter_node = Node( + block_id=FillTextTemplateBlock().id, + input_default=text_formatter_input, + ) + llm_call_node = Node( + block_id=AIStructuredResponseGeneratorBlock().id, input_default=llm_call_input + ) + text_matcher_node = Node( + block_id=MatchTextPatternBlock().id, + input_default=text_matcher_input, + ) + reddit_comment_node = Node( + block_id=PostRedditCommentBlock().id, + input_default=reddit_comment_input, + ) + + nodes = [ + reddit_get_post_node, + text_formatter_node, + llm_call_node, + text_matcher_node, + reddit_comment_node, + ] + + # Links + links = [ + Link( + source_id=reddit_get_post_node.id, + sink_id=text_formatter_node.id, + source_name="post", + sink_name="values", + ), + Link( + source_id=text_formatter_node.id, + sink_id=llm_call_node.id, + source_name="output", + sink_name="prompt", + ), + Link( + source_id=llm_call_node.id, + sink_id=text_matcher_node.id, + source_name="response", + sink_name="data", + ), + Link( + source_id=llm_call_node.id, + sink_id=text_matcher_node.id, + source_name="response_#_is_relevant", + sink_name="text", + ), + Link( + source_id=text_matcher_node.id, + sink_id=reddit_comment_node.id, + source_name="positive_#_post_id", + sink_name="data_#_post_id", + ), + Link( + source_id=text_matcher_node.id, + sink_id=reddit_comment_node.id, + source_name="positive_#_marketing_text", + sink_name="data_#_comment", + ), + ] + + # Create graph + test_graph = Graph( + name="RedditMarketingAgent", + description="Reddit marketing agent", + nodes=nodes, + links=links, + ) + return test_graph + + +async def create_test_user() -> User: + test_user_data = { + "sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1", + "email": "testuser@example.com", + "name": "Test User", + } + user = await get_or_create_user(test_user_data) + return user + + +async def reddit_marketing_agent(): + async with SpinTestServer() as server: + test_user = await create_test_user() + test_graph = await create_graph(create_test_graph(), user_id=test_user.id) + input_data = {"subreddit": "AutoGPT"} + response = await server.agent_server.test_execute_graph( + test_graph.id, input_data, test_user.id + ) + print(response) + result = await wait_execution(test_user.id, test_graph.id, response["id"], 120) + print(result) + + +if __name__ == "__main__": + import asyncio + + asyncio.run(reddit_marketing_agent()) diff --git a/autogpt_platform/backend/backend/usecases/sample.py b/autogpt_platform/backend/backend/usecases/sample.py new file mode 100644 index 000000000000..2332f55bb4ab --- /dev/null +++ b/autogpt_platform/backend/backend/usecases/sample.py @@ -0,0 +1,99 @@ +from prisma.models import User + +from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock +from backend.blocks.text import FillTextTemplateBlock +from backend.data import graph +from backend.data.graph import create_graph +from backend.data.user import get_or_create_user +from backend.util.test import SpinTestServer, wait_execution + + +async def create_test_user(alt_user: bool = False) -> User: + if alt_user: + test_user_data = { + "sub": 
"3e53486c-cf57-477e-ba2a-cb02dc828e1b", + "email": "testuser2@example.com", + "name": "Test User 2", + } + else: + test_user_data = { + "sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1", + "email": "testuser@example.com", + "name": "Test User", + } + user = await get_or_create_user(test_user_data) + return user + + +def create_test_graph() -> graph.Graph: + """ + InputBlock + \ + ---- FillTextTemplateBlock ---- PrintToConsoleBlock + / + InputBlock + """ + nodes = [ + graph.Node( + block_id=AgentInputBlock().id, + input_default={"name": "input_1"}, + ), + graph.Node( + block_id=AgentInputBlock().id, + input_default={"name": "input_2"}, + ), + graph.Node( + block_id=FillTextTemplateBlock().id, + input_default={ + "format": "{{a}}, {{b}}{{c}}", + "values_#_c": "!!!", + }, + ), + graph.Node(block_id=PrintToConsoleBlock().id), + ] + links = [ + graph.Link( + source_id=nodes[0].id, + sink_id=nodes[2].id, + source_name="result", + sink_name="values_#_a", + ), + graph.Link( + source_id=nodes[1].id, + sink_id=nodes[2].id, + source_name="result", + sink_name="values_#_b", + ), + graph.Link( + source_id=nodes[2].id, + sink_id=nodes[3].id, + source_name="output", + sink_name="text", + ), + ] + + return graph.Graph( + name="TestGraph", + description="Test graph", + nodes=nodes, + links=links, + ) + + +async def sample_agent(): + async with SpinTestServer() as server: + test_user = await create_test_user() + test_graph = await create_graph(create_test_graph(), test_user.id) + input_data = {"input_1": "Hello", "input_2": "World"} + response = await server.agent_server.test_execute_graph( + test_graph.id, input_data, test_user.id + ) + print(response) + result = await wait_execution(test_user.id, test_graph.id, response["id"], 10) + print(result) + + +if __name__ == "__main__": + import asyncio + + asyncio.run(sample_agent()) diff --git a/autogpts/autogpt/autogpt/prompts/__init__.py b/autogpt_platform/backend/backend/util/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/prompts/__init__.py rename to autogpt_platform/backend/backend/util/__init__.py diff --git a/autogpt_platform/backend/backend/util/data.py b/autogpt_platform/backend/backend/util/data.py new file mode 100644 index 000000000000..d77d4cc7899d --- /dev/null +++ b/autogpt_platform/backend/backend/util/data.py @@ -0,0 +1,27 @@ +import os +import pathlib +import sys + + +def get_frontend_path() -> pathlib.Path: + if getattr(sys, "frozen", False): + # The application is frozen + datadir = pathlib.Path(os.path.dirname(sys.executable)) / "example_files" + else: + # The application is not frozen + # Change this bit to match where you store your data files: + filedir = os.path.dirname(__file__) + datadir = pathlib.Path(filedir).parent.parent.parent / "example_files" + return pathlib.Path(datadir) + + +def get_data_path() -> pathlib.Path: + if getattr(sys, "frozen", False): + # The application is frozen + datadir = os.path.dirname(sys.executable) + else: + # The application is not frozen + # Change this bit to match where you store your data files: + filedir = os.path.dirname(__file__) + datadir = pathlib.Path(filedir).parent.parent + return pathlib.Path(datadir) diff --git a/autogpt_platform/backend/backend/util/decorator.py b/autogpt_platform/backend/backend/util/decorator.py new file mode 100644 index 000000000000..9047ea0b77ec --- /dev/null +++ b/autogpt_platform/backend/backend/util/decorator.py @@ -0,0 +1,67 @@ +import functools +import logging +import os +import time +from typing import Callable, ParamSpec, Tuple, TypeVar + 
+from pydantic import BaseModel + + +class TimingInfo(BaseModel): + cpu_time: float + wall_time: float + + +def _start_measurement() -> Tuple[float, float]: + return time.time(), os.times()[0] + os.times()[1] + + +def _end_measurement( + start_wall_time: float, start_cpu_time: float +) -> Tuple[float, float]: + end_wall_time = time.time() + end_cpu_time = os.times()[0] + os.times()[1] + return end_wall_time - start_wall_time, end_cpu_time - start_cpu_time + + +P = ParamSpec("P") +T = TypeVar("T") + +logger = logging.getLogger(__name__) + + +def time_measured(func: Callable[P, T]) -> Callable[P, Tuple[TimingInfo, T]]: + """ + Decorator to measure the time taken by a function to execute. + """ + + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> Tuple[TimingInfo, T]: + start_wall_time, start_cpu_time = _start_measurement() + try: + result = func(*args, **kwargs) + finally: + wall_duration, cpu_duration = _end_measurement( + start_wall_time, start_cpu_time + ) + timing_info = TimingInfo(cpu_time=cpu_duration, wall_time=wall_duration) + return timing_info, result + + return wrapper + + +def error_logged(func: Callable[P, T]) -> Callable[P, T | None]: + """ + Decorator to suppress and log any exceptions raised by a function. + """ + + @functools.wraps(func) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None: + try: + return func(*args, **kwargs) + except Exception as e: + logger.exception( + f"Error when calling function {func.__name__} with arguments {args} {kwargs}: {e}" + ) + + return wrapper diff --git a/autogpt_platform/backend/backend/util/encryption.py b/autogpt_platform/backend/backend/util/encryption.py new file mode 100644 index 000000000000..c8ba8b5edc50 --- /dev/null +++ b/autogpt_platform/backend/backend/util/encryption.py @@ -0,0 +1,34 @@ +import json +from typing import Optional + +from cryptography.fernet import Fernet + +from backend.util.settings import Settings + +ENCRYPTION_KEY = Settings().secrets.encryption_key + + +class JSONCryptor: + def __init__(self, key: Optional[str] = None): + # Use provided key or get from environment + self.key = key or ENCRYPTION_KEY + if not self.key: + raise ValueError( + "Encryption key must be provided or set in ENCRYPTION_KEY environment variable" + ) + self.fernet = Fernet( + self.key.encode() if isinstance(self.key, str) else self.key + ) + + def encrypt(self, data: dict) -> str: + """Encrypt dictionary data to string""" + json_str = json.dumps(data) + encrypted = self.fernet.encrypt(json_str.encode()) + return encrypted.decode() + + def decrypt(self, encrypted_str: str) -> dict: + """Decrypt string to dictionary""" + if not encrypted_str: + return {} + decrypted = self.fernet.decrypt(encrypted_str.encode()) + return json.loads(decrypted.decode()) diff --git a/autogpt_platform/backend/backend/util/exceptions.py b/autogpt_platform/backend/backend/util/exceptions.py new file mode 100644 index 000000000000..4bb3a08d9548 --- /dev/null +++ b/autogpt_platform/backend/backend/util/exceptions.py @@ -0,0 +1,6 @@ +class MissingConfigError(Exception): + """The attempted operation requires configuration which is not available""" + + +class NeedConfirmation(Exception): + """The user must explicitly confirm that they want to proceed""" diff --git a/autogpt_platform/backend/backend/util/file.py b/autogpt_platform/backend/backend/util/file.py new file mode 100644 index 000000000000..6b3d976abcf3 --- /dev/null +++ b/autogpt_platform/backend/backend/util/file.py @@ -0,0 +1,143 @@ +import base64 +import mimetypes +import 
re
+import shutil
+import tempfile
+import uuid
+from pathlib import Path
+from urllib.parse import urlparse
+
+# This "requests" presumably has additional checks against internal networks for SSRF.
+from backend.util.request import requests
+
+TEMP_DIR = Path(tempfile.gettempdir()).resolve()
+
+
+def get_exec_file_path(graph_exec_id: str, path: str) -> str:
+    """
+    Utility to build an absolute path in the {temp}/exec_file/{exec_id}/... folder.
+    """
+    return str(TEMP_DIR / "exec_file" / graph_exec_id / path)
+
+
+def clean_exec_files(graph_exec_id: str, file: str = "") -> None:
+    """
+    Utility to remove the {temp}/exec_file/{exec_id} folder and its contents.
+    """
+    exec_path = Path(get_exec_file_path(graph_exec_id, file))
+    if exec_path.exists() and exec_path.is_dir():
+        shutil.rmtree(exec_path)
+
+
+"""
+MediaFile is a string that represents a file. It can be one of the following:
+  - Data URI: base64 encoded media file. See https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data/
+  - URL: Media file hosted on the internet, it starts with http:// or https://.
+  - Local path (anything else): A temporary file path living within graph execution time.
+
+Note: Replace this type alias with a proper class when more information is needed.
+"""
+MediaFile = str
+
+
+def store_media_file(
+    graph_exec_id: str, file: MediaFile, return_content: bool = False
+) -> MediaFile:
+    """
+    Safely handle 'file' (a data URI, a URL, or a local path relative to {temp}/exec_file/{exec_id}),
+    placing or verifying it under:
+        {tempdir}/exec_file/{exec_id}/...
+
+    If 'return_content=True', return a data URI (data:<mime_type>;base64,<content>).
+    Otherwise, returns the file media path relative to the exec_id folder.
+
+    For each MediaFile type:
+      - Data URI:
+          -> decode and store in a new random file in that folder
+      - URL:
+          -> download and store in that folder
+      - Local path:
+          -> interpret as relative to that folder; verify it exists
+             (no copying, as it's presumably already there).
+    We realpath-check so no symlink or '..' can escape the folder.
+
+
+    :param graph_exec_id: The unique ID of the graph execution.
+    :param file: Data URI, URL, or local (relative) path.
+    :param return_content: If True, return a data URI of the file content.
+                           If False, return the *relative* path inside the exec_id folder.
+    :return: The requested result: data URI or relative path of the media.
+    """
+    # Build base path
+    base_path = Path(get_exec_file_path(graph_exec_id, ""))
+    base_path.mkdir(parents=True, exist_ok=True)
+
+    # Helper functions
+    def _extension_from_mime(mime: str) -> str:
+        ext = mimetypes.guess_extension(mime, strict=False)
+        return ext if ext else ".bin"
+
+    def _file_to_data_uri(path: Path) -> str:
+        mime_type, _ = mimetypes.guess_type(path)
+        mime_type = mime_type or "application/octet-stream"
+        b64 = base64.b64encode(path.read_bytes()).decode("utf-8")
+        return f"data:{mime_type};base64,{b64}"
+
+    def _ensure_inside_base(path_candidate: Path, base: Path) -> Path:
+        """
+        Resolve symlinks via resolve() and ensure the result is still under base.
+        """
+        real_candidate = path_candidate.resolve()
+        real_base = base.resolve()
+
+        if not real_candidate.is_relative_to(real_base):
+            raise ValueError(
+                "Local file path is outside the temp_base directory. Access denied."
+            )
+        return real_candidate
+
+    def _strip_base_prefix(absolute_path: Path, base: Path) -> str:
+        """
+        Strip base prefix and normalize path.
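+
+        Illustrative example: for base {temp}/exec_file/{exec_id} and absolute path
+        {temp}/exec_file/{exec_id}/img.png, this returns "img.png".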
+        """
+        return str(absolute_path.relative_to(base))
+
+    # Process file
+    if file.startswith("data:"):
+        # Data URI
+        match = re.match(r"^data:([^;]+);base64,(.*)$", file, re.DOTALL)
+        if not match:
+            raise ValueError(
+                "Invalid data URI format. Expected data:<mime_type>;base64,<data>"
+            )
+        mime_type = match.group(1).strip().lower()
+        b64_content = match.group(2).strip()
+
+        # Generate filename and decode
+        extension = _extension_from_mime(mime_type)
+        filename = f"{uuid.uuid4()}{extension}"
+        target_path = _ensure_inside_base(base_path / filename, base_path)
+        target_path.write_bytes(base64.b64decode(b64_content))
+
+    elif file.startswith(("http://", "https://")):
+        # URL
+        parsed_url = urlparse(file)
+        filename = Path(parsed_url.path).name or f"{uuid.uuid4()}"
+        target_path = _ensure_inside_base(base_path / filename, base_path)
+
+        # Download and save
+        resp = requests.get(file)
+        resp.raise_for_status()
+        target_path.write_bytes(resp.content)
+
+    else:
+        # Local path
+        target_path = _ensure_inside_base(base_path / file, base_path)
+        if not target_path.is_file():
+            raise ValueError(f"Local file does not exist: {target_path}")
+
+    # Return result
+    if return_content:
+        return MediaFile(_file_to_data_uri(target_path))
+    else:
+        return MediaFile(_strip_base_prefix(target_path, base_path))
diff --git a/autogpt_platform/backend/backend/util/json.py b/autogpt_platform/backend/backend/util/json.py
new file mode 100644
index 000000000000..7f88917414c4
--- /dev/null
+++ b/autogpt_platform/backend/backend/util/json.py
@@ -0,0 +1,47 @@
+import json
+from typing import Any, Type, TypeVar, overload
+
+import jsonschema
+from fastapi.encoders import jsonable_encoder
+
+from .type import type_match
+
+
+def to_dict(data) -> dict:
+    return jsonable_encoder(data)
+
+
+def dumps(data) -> str:
+    return json.dumps(jsonable_encoder(data))
+
+
+T = TypeVar("T")
+
+
+@overload
+def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
+
+
+@overload
+def loads(data: str, *args, **kwargs) -> Any: ...
+
+
+def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
+    parsed = json.loads(data, *args, **kwargs)
+    if target_type:
+        return type_match(parsed, target_type)
+    return parsed
+
+
+def validate_with_jsonschema(
+    schema: dict[str, Any], data: dict[str, Any]
+) -> str | None:
+    """
+    Validate the data against the schema.
+    Returns the validation error message if the data does not match the schema.
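+
+    Illustrative example (the exact message text comes from jsonschema):
+        validate_with_jsonschema({"type": "object", "required": ["name"]}, {"name": "x"})
+        returns None, while passing {} as the data instead returns an error message
+        such as "'name' is a required property".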
+ """ + try: + jsonschema.validate(data, schema) + return None + except jsonschema.ValidationError as e: + return str(e) diff --git a/autogpt_platform/backend/backend/util/logging.py b/autogpt_platform/backend/backend/util/logging.py new file mode 100644 index 000000000000..63166a84c23b --- /dev/null +++ b/autogpt_platform/backend/backend/util/logging.py @@ -0,0 +1,20 @@ +from backend.util.settings import AppEnvironment, BehaveAs, Settings + +settings = Settings() + + +def configure_logging(): + import logging + + import autogpt_libs.logging.config + + if ( + settings.config.behave_as == BehaveAs.LOCAL + or settings.config.app_env == AppEnvironment.LOCAL + ): + autogpt_libs.logging.config.configure_logging(force_cloud_logging=False) + else: + autogpt_libs.logging.config.configure_logging(force_cloud_logging=True) + + # Silence httpx logger + logging.getLogger("httpx").setLevel(logging.WARNING) diff --git a/autogpt_platform/backend/backend/util/metrics.py b/autogpt_platform/backend/backend/util/metrics.py new file mode 100644 index 000000000000..6381028fe05c --- /dev/null +++ b/autogpt_platform/backend/backend/util/metrics.py @@ -0,0 +1,8 @@ +import sentry_sdk + +from backend.util.settings import Settings + + +def sentry_init(): + sentry_dsn = Settings().secrets.sentry_dsn + sentry_sdk.init(dsn=sentry_dsn, traces_sample_rate=1.0, profiles_sample_rate=1.0) diff --git a/autogpt_platform/backend/backend/util/mock.py b/autogpt_platform/backend/backend/util/mock.py new file mode 100644 index 000000000000..70befa04b6b1 --- /dev/null +++ b/autogpt_platform/backend/backend/util/mock.py @@ -0,0 +1,12 @@ +class MockObject: + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __getattr__(self, name): + return self.__dict__.get(name) + + def __call__(self, *args, **kwargs): + return self + + def __setattr__(self, name, value): + self.__dict__[name] = value diff --git a/autogpt_platform/backend/backend/util/process.py b/autogpt_platform/backend/backend/util/process.py new file mode 100644 index 000000000000..218ac749d0aa --- /dev/null +++ b/autogpt_platform/backend/backend/util/process.py @@ -0,0 +1,123 @@ +import logging +import os +import signal +import sys +from abc import ABC, abstractmethod +from multiprocessing import Process, set_start_method +from typing import Optional + +from backend.util.logging import configure_logging +from backend.util.metrics import sentry_init + +logger = logging.getLogger(__name__) +_SERVICE_NAME = "MainProcess" + + +def get_service_name(): + return _SERVICE_NAME + + +def set_service_name(name: str): + global _SERVICE_NAME + _SERVICE_NAME = name + + +class AppProcess(ABC): + """ + A class to represent an object that can be executed in a background process. + """ + + process: Optional[Process] = None + + set_start_method("spawn", force=True) + configure_logging() + sentry_init() + + # Methods that are executed INSIDE the process # + + @abstractmethod + def run(self): + """ + The method that will be executed in the process. + """ + pass + + @classmethod + @property + def service_name(cls) -> str: + return cls.__name__ + + def cleanup(self): + """ + Implement this method on a subclass to do post-execution cleanup, + e.g. disconnecting from a database or terminating child processes. + """ + pass + + def health_check(self): + """ + A method to check the health of the process. 
+ """ + pass + + def execute_run_command(self, silent): + signal.signal(signal.SIGTERM, self._self_terminate) + + try: + if silent: + sys.stdout = open(os.devnull, "w") + sys.stderr = open(os.devnull, "w") + + set_service_name(self.service_name) + logger.info(f"[{self.service_name}] Starting...") + self.run() + except (KeyboardInterrupt, SystemExit) as e: + logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...") + + def _self_terminate(self, signum: int, frame): + self.cleanup() + sys.exit(0) + + # Methods that are executed OUTSIDE the process # + + def __enter__(self): + self.start(background=True) + return self + + def __exit__(self, *args, **kwargs): + self.stop() + + def start(self, background: bool = False, silent: bool = False, **proc_args) -> int: + """ + Start the background process. + Args: + background: Whether to run the process in the background. + silent: Whether to disable stdout and stderr. + proc_args: Additional arguments to pass to the process. + Returns: + the process id or 0 if the process is not running in the background. + """ + if not background: + self.execute_run_command(silent) + return 0 + + self.process = Process( + name=self.__class__.__name__, + target=self.execute_run_command, + args=(silent,), + **proc_args, + ) + self.process.start() + self.health_check() + return self.process.pid or 0 + + def stop(self): + """ + Stop the background process. + """ + if not self.process: + return + + self.process.terminate() + self.process.join() + self.process = None diff --git a/autogpt_platform/backend/backend/util/request.py b/autogpt_platform/backend/backend/util/request.py new file mode 100644 index 000000000000..9a3fa0c5e3d7 --- /dev/null +++ b/autogpt_platform/backend/backend/util/request.py @@ -0,0 +1,178 @@ +import ipaddress +import re +import socket +from typing import Callable +from urllib.parse import urlparse, urlunparse + +import idna +import requests as req + +from backend.util.settings import Config + +# List of IP networks to block +BLOCKED_IP_NETWORKS = [ + # --8<-- [start:BLOCKED_IP_NETWORKS] + # IPv4 Ranges + ipaddress.ip_network("0.0.0.0/8"), # "This" Network + ipaddress.ip_network("10.0.0.0/8"), # Private-Use + ipaddress.ip_network("127.0.0.0/8"), # Loopback + ipaddress.ip_network("169.254.0.0/16"), # Link Local + ipaddress.ip_network("172.16.0.0/12"), # Private-Use + ipaddress.ip_network("192.168.0.0/16"), # Private-Use + ipaddress.ip_network("224.0.0.0/4"), # Multicast + ipaddress.ip_network("240.0.0.0/4"), # Reserved for Future Use + # IPv6 Ranges + ipaddress.ip_network("::1/128"), # Loopback + ipaddress.ip_network("fc00::/7"), # Unique local addresses (ULA) + ipaddress.ip_network("fe80::/10"), # Link-local + ipaddress.ip_network("ff00::/8"), # Multicast + # --8<-- [end:BLOCKED_IP_NETWORKS] +] + +ALLOWED_SCHEMES = ["http", "https"] +HOSTNAME_REGEX = re.compile(r"^[A-Za-z0-9.-]+$") # Basic DNS-safe hostname pattern + + +def _is_ip_blocked(ip: str) -> bool: + """ + Checks if the IP address is in a blocked network. + """ + ip_addr = ipaddress.ip_address(ip) + return any(ip_addr in network for network in BLOCKED_IP_NETWORKS) + + +def validate_url(url: str, trusted_origins: list[str]) -> str: + """ + Validates the URL to prevent SSRF attacks by ensuring it does not point + to a private, link-local, or otherwise blocked IP address — unless + the hostname is explicitly trusted. 
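+
+    Illustrative example (assumes these hostnames resolve as expected):
+        validate_url("http://localhost:8080/admin", trusted_origins=[]) raises
+        ValueError because localhost resolves to a blocked loopback address, while
+        validate_url("https://example.com", trusted_origins=[]) returns the
+        canonicalized URL as long as example.com resolves only to public addresses.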
+ """ + # Canonicalize URL + url = url.strip("/ ").replace("\\", "/") + parsed = urlparse(url) + if not parsed.scheme: + url = f"http://{url}" + parsed = urlparse(url) + + # Check scheme + if parsed.scheme not in ALLOWED_SCHEMES: + raise ValueError( + f"Scheme '{parsed.scheme}' is not allowed. Only HTTP/HTTPS are supported." + ) + + # Validate and IDNA encode hostname + if not parsed.hostname: + raise ValueError("Invalid URL: No hostname found.") + + # IDNA encode to prevent Unicode domain attacks + try: + ascii_hostname = idna.encode(parsed.hostname).decode("ascii") + except idna.IDNAError: + raise ValueError("Invalid hostname with unsupported characters.") + + # Check hostname characters + if not HOSTNAME_REGEX.match(ascii_hostname): + raise ValueError("Hostname contains invalid characters.") + + # Rebuild URL with IDNA-encoded hostname + parsed = parsed._replace(netloc=ascii_hostname) + url = str(urlunparse(parsed)) + + # If hostname is trusted, skip IP-based checks + if ascii_hostname in trusted_origins: + return url + + # Resolve all IP addresses for the hostname + try: + ip_addresses = {res[4][0] for res in socket.getaddrinfo(ascii_hostname, None)} + except socket.gaierror: + raise ValueError(f"Unable to resolve IP address for hostname {ascii_hostname}") + + if not ip_addresses: + raise ValueError(f"No IP addresses found for {ascii_hostname}") + + # Block any IP address that belongs to a blocked range + for ip_str in ip_addresses: + if _is_ip_blocked(ip_str): + raise ValueError( + f"Access to blocked or private IP address {ip_str} " + f"for hostname {ascii_hostname} is not allowed." + ) + + return url + + +class Requests: + """ + A wrapper around the requests library that validates URLs before + making requests, preventing SSRF by blocking private networks and + other disallowed address spaces. 
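+
+    Illustrative usage sketch (the hostname below is a placeholder):
+
+        client = Requests(trusted_origins=["https://api.example.com"])
+        response = client.get("https://api.example.com/health")
+
+    Every URL is validated first, redirects are disabled by default, and
+    raise_for_status() is called on the response unless raise_for_status=False
+    is passed to the constructor.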
+ """ + + def __init__( + self, + trusted_origins: list[str] | None = None, + raise_for_status: bool = True, + extra_url_validator: Callable[[str], str] | None = None, + extra_headers: dict[str, str] | None = None, + ): + self.trusted_origins = [] + for url in trusted_origins or []: + hostname = urlparse(url).hostname + if not hostname: + raise ValueError(f"Invalid URL: Unable to determine hostname of {url}") + self.trusted_origins.append(hostname) + + self.raise_for_status = raise_for_status + self.extra_url_validator = extra_url_validator + self.extra_headers = extra_headers + + def request( + self, method, url, headers=None, allow_redirects=False, *args, **kwargs + ) -> req.Response: + # Merge any extra headers + if self.extra_headers is not None: + headers = {**(headers or {}), **self.extra_headers} + + # Validate the URL (with optional extra validator) + url = validate_url(url, self.trusted_origins) + if self.extra_url_validator is not None: + url = self.extra_url_validator(url) + + # Perform the request + response = req.request( + method, + url, + headers=headers, + allow_redirects=allow_redirects, + *args, + **kwargs, + ) + if self.raise_for_status: + response.raise_for_status() + + return response + + def get(self, url, *args, **kwargs) -> req.Response: + return self.request("GET", url, *args, **kwargs) + + def post(self, url, *args, **kwargs) -> req.Response: + return self.request("POST", url, *args, **kwargs) + + def put(self, url, *args, **kwargs) -> req.Response: + return self.request("PUT", url, *args, **kwargs) + + def delete(self, url, *args, **kwargs) -> req.Response: + return self.request("DELETE", url, *args, **kwargs) + + def head(self, url, *args, **kwargs) -> req.Response: + return self.request("HEAD", url, *args, **kwargs) + + def options(self, url, *args, **kwargs) -> req.Response: + return self.request("OPTIONS", url, *args, **kwargs) + + def patch(self, url, *args, **kwargs) -> req.Response: + return self.request("PATCH", url, *args, **kwargs) + + +requests = Requests(trusted_origins=Config().trust_endpoints_for_requests) diff --git a/autogpt_platform/backend/backend/util/retry.py b/autogpt_platform/backend/backend/util/retry.py new file mode 100644 index 000000000000..c1adab5caf26 --- /dev/null +++ b/autogpt_platform/backend/backend/util/retry.py @@ -0,0 +1,75 @@ +import asyncio +import logging +import os +import threading +from functools import wraps +from uuid import uuid4 + +from tenacity import retry, stop_after_attempt, wait_exponential + +from backend.util.process import get_service_name + +logger = logging.getLogger(__name__) + + +def _log_prefix(resource_name: str, conn_id: str): + """ + Returns a prefix string for logging purposes. + This needs to be called on the fly to get the current process ID & service name, + not the parent process ID & service name. + """ + return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]" + + +def conn_retry( + resource_name: str, + action_name: str, + max_retry: int = 5, + multiplier: int = 1, + min_wait: float = 1, + max_wait: float = 30, +): + conn_id = str(uuid4()) + + def on_retry(retry_state): + prefix = _log_prefix(resource_name, conn_id) + exception = retry_state.outcome.exception() + logger.error(f"{prefix} {action_name} failed: {exception}. 
Retrying now...") + + def decorator(func): + is_coroutine = asyncio.iscoroutinefunction(func) + retry_decorator = retry( + stop=stop_after_attempt(max_retry + 1), + wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait), + before_sleep=on_retry, + reraise=True, + ) + wrapped_func = retry_decorator(func) + + @wraps(func) + def sync_wrapper(*args, **kwargs): + prefix = _log_prefix(resource_name, conn_id) + logger.info(f"{prefix} {action_name} started...") + try: + result = wrapped_func(*args, **kwargs) + logger.info(f"{prefix} {action_name} completed successfully.") + return result + except Exception as e: + logger.error(f"{prefix} {action_name} failed after retries: {e}") + raise + + @wraps(func) + async def async_wrapper(*args, **kwargs): + prefix = _log_prefix(resource_name, conn_id) + logger.info(f"{prefix} {action_name} started...") + try: + result = await wrapped_func(*args, **kwargs) + logger.info(f"{prefix} {action_name} completed successfully.") + return result + except Exception as e: + logger.error(f"{prefix} {action_name} failed after retries: {e}") + raise + + return async_wrapper if is_coroutine else sync_wrapper + + return decorator diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py new file mode 100644 index 000000000000..ff81ade0b3a1 --- /dev/null +++ b/autogpt_platform/backend/backend/util/service.py @@ -0,0 +1,279 @@ +import asyncio +import builtins +import logging +import os +import threading +import time +import typing +from abc import ABC, abstractmethod +from enum import Enum +from types import NoneType, UnionType +from typing import ( + Annotated, + Any, + Awaitable, + Callable, + Coroutine, + Dict, + FrozenSet, + Iterator, + List, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, + get_args, + get_origin, +) + +import Pyro5.api +from pydantic import BaseModel +from Pyro5 import api as pyro +from Pyro5 import config as pyro_config + +from backend.data import db, redis +from backend.util.process import AppProcess +from backend.util.retry import conn_retry +from backend.util.settings import Config, Secrets + +logger = logging.getLogger(__name__) +T = TypeVar("T") +C = TypeVar("C", bound=Callable) + +config = Config() +pyro_host = config.pyro_host +pyro_config.MAX_RETRIES = config.pyro_client_comm_retry # type: ignore +pyro_config.COMMTIMEOUT = config.pyro_client_comm_timeout # type: ignore + + +def expose(func: C) -> C: + """ + Decorator to mark a method or class to be exposed for remote calls. + + ## ⚠️ Gotcha + Aside from "simple" types, only Pydantic models are passed unscathed *if annotated*. + Any other passed or returned class objects are converted to dictionaries by Pyro. 
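+
+    Illustrative sketch (the names below are placeholders, not part of this module):
+
+        class MyService(AppService):
+            @expose
+            def get_graph(self, graph_id: str) -> GraphModel:
+                ...
+
+    Annotating the return type with a Pydantic model (here GraphModel) lets
+    register_pydantic_serializers round-trip it over Pyro; without such an
+    annotation the caller receives a plain dict.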
+ """ + + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + msg = f"Error in {func.__name__}: {e.__str__()}" + if isinstance(e, ValueError): + logger.warning(msg) + else: + logger.exception(msg) + raise + + register_pydantic_serializers(func) + + return pyro.expose(wrapper) # type: ignore + + +def register_pydantic_serializers(func: Callable): + """Register custom serializers and deserializers for annotated Pydantic models""" + for name, annotation in func.__annotations__.items(): + try: + pydantic_types = _pydantic_models_from_type_annotation(annotation) + except Exception as e: + raise TypeError(f"Error while exposing {func.__name__}: {e.__str__()}") + + for model in pydantic_types: + logger.debug( + f"Registering Pyro (de)serializers for {func.__name__} annotation " + f"'{name}': {model.__qualname__}" + ) + pyro.register_class_to_dict(model, _make_custom_serializer(model)) + pyro.register_dict_to_class( + model.__qualname__, _make_custom_deserializer(model) + ) + + +def _make_custom_serializer(model: Type[BaseModel]): + def custom_class_to_dict(obj): + data = { + "__class__": obj.__class__.__qualname__, + **obj.model_dump(), + } + logger.debug(f"Serializing {obj.__class__.__qualname__} with data: {data}") + return data + + return custom_class_to_dict + + +def _make_custom_deserializer(model: Type[BaseModel]): + def custom_dict_to_class(qualname, data: dict): + logger.debug(f"Deserializing {model.__qualname__} from data: {data}") + return model(**data) + + return custom_dict_to_class + + +class AppService(AppProcess, ABC): + shared_event_loop: asyncio.AbstractEventLoop + use_db: bool = False + use_redis: bool = False + use_supabase: bool = False + + def __init__(self): + self.uri = None + + @classmethod + @abstractmethod + def get_port(cls) -> int: + pass + + @classmethod + def get_host(cls) -> str: + return os.environ.get(f"{cls.service_name.upper()}_HOST", config.pyro_host) + + def run_service(self) -> None: + while True: + time.sleep(10) + + def __run_async(self, coro: Coroutine[Any, Any, T]): + return asyncio.run_coroutine_threadsafe(coro, self.shared_event_loop) + + def run_and_wait(self, coro: Coroutine[Any, Any, T]) -> T: + future = self.__run_async(coro) + return future.result() + + def run(self): + self.shared_event_loop = asyncio.get_event_loop() + if self.use_db: + self.shared_event_loop.run_until_complete(db.connect()) + if self.use_redis: + redis.connect() + if self.use_supabase: + from supabase import create_client + + secrets = Secrets() + self.supabase = create_client( + secrets.supabase_url, secrets.supabase_service_role_key + ) + + # Initialize the async loop. + async_thread = threading.Thread(target=self.__start_async_loop) + async_thread.daemon = True + async_thread.start() + + # Initialize pyro service + daemon_thread = threading.Thread(target=self.__start_pyro) + daemon_thread.daemon = True + daemon_thread.start() + + # Run the main service (if it's not implemented, just sleep). 
+ self.run_service() + + def cleanup(self): + if self.use_db: + logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting DB...") + self.run_and_wait(db.disconnect()) + if self.use_redis: + logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting Redis...") + redis.disconnect() + + @conn_retry("Pyro", "Starting Pyro Service") + def __start_pyro(self): + maximum_connection_thread_count = max( + Pyro5.config.THREADPOOL_SIZE, + config.num_node_workers * config.num_graph_workers, + ) + + Pyro5.config.THREADPOOL_SIZE = maximum_connection_thread_count # type: ignore + daemon = Pyro5.api.Daemon(host=config.pyro_host, port=self.get_port()) + self.uri = daemon.register(self, objectId=self.service_name) + logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}") + daemon.requestLoop() + + def __start_async_loop(self): + self.shared_event_loop.run_forever() + + +# --------- UTILITIES --------- # + + +AS = TypeVar("AS", bound=AppService) + + +class PyroClient: + proxy: Pyro5.api.Proxy + + +def close_service_client(client: AppService) -> None: + if isinstance(client, PyroClient): + client.proxy._pyroRelease() + else: + raise RuntimeError(f"Client {client.__class__} is not a Pyro client.") + + +def get_service_client(service_type: Type[AS]) -> AS: + service_name = service_type.service_name + + class DynamicClient(PyroClient): + @conn_retry("Pyro", f"Connecting to [{service_name}]") + def __init__(self): + host = os.environ.get(f"{service_name.upper()}_HOST", pyro_host) + uri = f"PYRO:{service_type.service_name}@{host}:{service_type.get_port()}" + logger.debug(f"Connecting to service [{service_name}]. URI = {uri}") + self.proxy = Pyro5.api.Proxy(uri) + # Attempt to bind to ensure the connection is established + self.proxy._pyroBind() + logger.debug(f"Successfully connected to service [{service_name}]") + + def __getattr__(self, name: str) -> Callable[..., Any]: + res = getattr(self.proxy, name) + return res + + return cast(AS, DynamicClient()) + + +builtin_types = [*vars(builtins).values(), NoneType, Enum] + + +def _pydantic_models_from_type_annotation(annotation) -> Iterator[type[BaseModel]]: + # Peel Annotated parameters + if (origin := get_origin(annotation)) and origin is Annotated: + annotation = get_args(annotation)[0] + + origin = get_origin(annotation) + args = get_args(annotation) + + if origin in ( + Union, + UnionType, + list, + List, + tuple, + Tuple, + set, + Set, + frozenset, + FrozenSet, + ): + for arg in args: + yield from _pydantic_models_from_type_annotation(arg) + elif origin in (dict, Dict): + key_type, value_type = args + yield from _pydantic_models_from_type_annotation(key_type) + yield from _pydantic_models_from_type_annotation(value_type) + elif origin in (Awaitable, Coroutine): + # For coroutines and awaitables, check the return type + return_type = args[-1] + yield from _pydantic_models_from_type_annotation(return_type) + else: + annotype = annotation if origin is None else origin + + # Exclude generic types and aliases + if ( + annotype is not None + and not hasattr(typing, getattr(annotype, "__name__", "")) + and isinstance(annotype, type) + ): + if issubclass(annotype, BaseModel): + yield annotype + elif annotype not in builtin_types and not issubclass(annotype, Enum): + raise TypeError(f"Unsupported type encountered: {annotype}") diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py new file mode 100644 index 000000000000..153700dbcda1 --- /dev/null +++ 
b/autogpt_platform/backend/backend/util/settings.py @@ -0,0 +1,354 @@ +import json +import os +from enum import Enum +from typing import Any, Dict, Generic, List, Set, Tuple, Type, TypeVar + +from pydantic import BaseModel, Field, PrivateAttr, ValidationInfo, field_validator +from pydantic_settings import ( + BaseSettings, + JsonConfigSettingsSource, + PydanticBaseSettingsSource, + SettingsConfigDict, +) + +from backend.util.data import get_data_path + +T = TypeVar("T", bound=BaseSettings) + + +class AppEnvironment(str, Enum): + LOCAL = "local" + DEVELOPMENT = "dev" + PRODUCTION = "prod" + + +class BehaveAs(str, Enum): + LOCAL = "local" + CLOUD = "cloud" + + +class UpdateTrackingModel(BaseModel, Generic[T]): + _updated_fields: Set[str] = PrivateAttr(default_factory=set) + + def __setattr__(self, name: str, value) -> None: + if name in self.model_fields: + self._updated_fields.add(name) + super().__setattr__(name, value) + + def mark_updated(self, field_name: str) -> None: + if field_name in self.model_fields: + self._updated_fields.add(field_name) + + def clear_updates(self) -> None: + self._updated_fields.clear() + + def get_updates(self) -> Dict[str, Any]: + return {field: getattr(self, field) for field in self._updated_fields} + + @property + def updated_fields(self): + return self._updated_fields + + +class Config(UpdateTrackingModel["Config"], BaseSettings): + """Config for the server.""" + + num_graph_workers: int = Field( + default=10, + ge=1, + le=1000, + description="Maximum number of workers to use for graph execution.", + ) + num_node_workers: int = Field( + default=5, + ge=1, + le=1000, + description="Maximum number of workers to use for node execution within a single graph.", + ) + pyro_host: str = Field( + default="localhost", + description="The default hostname of the Pyro server.", + ) + pyro_client_comm_timeout: float = Field( + default=15, + description="The default timeout in seconds, for Pyro client connections.", + ) + pyro_client_comm_retry: int = Field( + default=3, + description="The default number of retries for Pyro client connections.", + ) + enable_auth: bool = Field( + default=True, + description="If authentication is enabled or not", + ) + enable_credit: bool = Field( + default=False, + description="If user credit system is enabled or not", + ) + enable_beta_monthly_credit: bool = Field( + default=True, + description="If beta monthly credits accounting is enabled or not", + ) + num_user_credits_refill: int = Field( + default=1500, + description="Number of credits to refill for each user", + ) + # Add more configuration fields as needed + + model_config = SettingsConfigDict( + env_file=".env", + extra="allow", + ) + + websocket_server_host: str = Field( + default="0.0.0.0", + description="The host for the websocket server to run on", + ) + + websocket_server_port: int = Field( + default=8001, + description="The port for the websocket server to run on", + ) + + execution_manager_port: int = Field( + default=8002, + description="The port for execution manager daemon to run on", + ) + + execution_scheduler_port: int = Field( + default=8003, + description="The port for execution scheduler daemon to run on", + ) + + agent_server_port: int = Field( + default=8004, + description="The port for agent server daemon to run on", + ) + + database_api_port: int = Field( + default=8005, + description="The port for database server API to run on", + ) + + agent_api_host: str = Field( + default="0.0.0.0", + description="The host for agent server API to run on", + ) + + 
agent_api_port: int = Field( + default=8006, + description="The port for agent server API to run on", + ) + + platform_base_url: str = Field( + default="", + description="Must be set so the application knows where it's hosted at. " + "This is necessary to make sure webhooks find their way.", + ) + + frontend_base_url: str = Field( + default="", + description="Can be used to explicitly set the base URL for the frontend. " + "This value is then used to generate redirect URLs for OAuth flows.", + ) + + media_gcs_bucket_name: str = Field( + default="", + description="The name of the Google Cloud Storage bucket for media files", + ) + + reddit_user_agent: str = Field( + default="AutoGPT:1.0 (by /u/autogpt)", + description="The user agent for the Reddit API", + ) + + scheduler_db_pool_size: int = Field( + default=3, + description="The pool size for the scheduler database connection pool", + ) + + @field_validator("platform_base_url", "frontend_base_url") + @classmethod + def validate_platform_base_url(cls, v: str, info: ValidationInfo) -> str: + if not v: + return v + if not v.startswith(("http://", "https://")): + raise ValueError( + f"{info.field_name} must be a full URL " + "including a http:// or https:// schema" + ) + if v.endswith("/"): + return v.rstrip("/") # Remove trailing slash + return v + + app_env: AppEnvironment = Field( + default=AppEnvironment.LOCAL, + description="The name of the app environment: local or dev or prod", + ) + + behave_as: BehaveAs = Field( + default=BehaveAs.LOCAL, + description="What environment to behave as: local or cloud", + ) + + execution_event_bus_name: str = Field( + default="execution_event", + description="Name of the event bus", + ) + + trust_endpoints_for_requests: List[str] = Field( + default_factory=list, + description="A whitelist of trusted internal endpoints for the backend to make requests to.", + ) + + backend_cors_allow_origins: List[str] = Field(default_factory=list) + + @field_validator("backend_cors_allow_origins") + @classmethod + def validate_cors_allow_origins(cls, v: List[str]) -> List[str]: + out = [] + port = None + has_localhost = False + has_127_0_0_1 = False + for url in v: + url = url.strip() + if url.startswith(("http://", "https://")): + if "localhost" in url: + port = url.split(":")[2] + has_localhost = True + if "127.0.0.1" in url: + port = url.split(":")[2] + has_127_0_0_1 = True + out.append(url) + else: + raise ValueError(f"Invalid URL: {url}") + + if has_127_0_0_1 and not has_localhost: + out.append(f"http://localhost:{port}") + if has_localhost and not has_127_0_0_1: + out.append(f"http://127.0.0.1:{port}") + + return out + + @classmethod + def settings_customise_sources( + cls, + settings_cls: Type[BaseSettings], + init_settings: PydanticBaseSettingsSource, + env_settings: PydanticBaseSettingsSource, + dotenv_settings: PydanticBaseSettingsSource, + file_secret_settings: PydanticBaseSettingsSource, + ) -> Tuple[PydanticBaseSettingsSource, ...]: + return ( + env_settings, + file_secret_settings, + dotenv_settings, + JsonConfigSettingsSource(settings_cls), + init_settings, + ) + + +class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): + """Secrets for the server.""" + + supabase_url: str = Field(default="", description="Supabase URL") + supabase_service_role_key: str = Field( + default="", description="Supabase service role key" + ) + + encryption_key: str = Field(default="", description="Encryption key") + + # OAuth server credentials for integrations + # --8<-- [start:OAuthServerCredentialsExample] + 
github_client_id: str = Field(default="", description="GitHub OAuth client ID") + github_client_secret: str = Field( + default="", description="GitHub OAuth client secret" + ) + # --8<-- [end:OAuthServerCredentialsExample] + google_client_id: str = Field(default="", description="Google OAuth client ID") + google_client_secret: str = Field( + default="", description="Google OAuth client secret" + ) + notion_client_id: str = Field(default="", description="Notion OAuth client ID") + notion_client_secret: str = Field( + default="", description="Notion OAuth client secret" + ) + twitter_client_id: str = Field(default="", description="Twitter/X OAuth client ID") + twitter_client_secret: str = Field( + default="", description="Twitter/X OAuth client secret" + ) + + openai_api_key: str = Field(default="", description="OpenAI API key") + anthropic_api_key: str = Field(default="", description="Anthropic API key") + groq_api_key: str = Field(default="", description="Groq API key") + open_router_api_key: str = Field(default="", description="Open Router API Key") + + reddit_client_id: str = Field(default="", description="Reddit client ID") + reddit_client_secret: str = Field(default="", description="Reddit client secret") + + openweathermap_api_key: str = Field( + default="", description="OpenWeatherMap API key" + ) + + medium_api_key: str = Field(default="", description="Medium API key") + medium_author_id: str = Field(default="", description="Medium author ID") + did_api_key: str = Field(default="", description="D-ID API Key") + revid_api_key: str = Field(default="", description="revid.ai API key") + discord_bot_token: str = Field(default="", description="Discord bot token") + + smtp_server: str = Field(default="", description="SMTP server IP") + smtp_port: str = Field(default="", description="SMTP server port") + smtp_username: str = Field(default="", description="SMTP username") + smtp_password: str = Field(default="", description="SMTP password") + + sentry_dsn: str = Field(default="", description="Sentry DSN") + + google_maps_api_key: str = Field(default="", description="Google Maps API Key") + + replicate_api_key: str = Field(default="", description="Replicate API Key") + unreal_speech_api_key: str = Field(default="", description="Unreal Speech API Key") + ideogram_api_key: str = Field(default="", description="Ideogram API Key") + jina_api_key: str = Field(default="", description="Jina API Key") + unreal_speech_api_key: str = Field(default="", description="Unreal Speech API Key") + + fal_api_key: str = Field(default="", description="FAL API key") + exa_api_key: str = Field(default="", description="Exa API key") + e2b_api_key: str = Field(default="", description="E2B API key") + nvidia_api_key: str = Field(default="", description="Nvidia API key") + mem0_api_key: str = Field(default="", description="Mem0 API key") + + linear_client_id: str = Field(default="", description="Linear client ID") + linear_client_secret: str = Field(default="", description="Linear client secret") + + stripe_api_key: str = Field(default="", description="Stripe API Key") + stripe_webhook_secret: str = Field(default="", description="Stripe Webhook Secret") + + # Add more secret fields as needed + + model_config = SettingsConfigDict( + env_file=".env", + env_file_encoding="utf-8", + extra="allow", + ) + + +class Settings(BaseModel): + config: Config = Config() + secrets: Secrets = Secrets() + + def save(self) -> None: + # Save updated config to JSON file + if self.config.updated_fields: + config_to_save = 
self.config.get_updates() + config_path = os.path.join(get_data_path(), "config.json") + if os.path.exists(config_path): + with open(config_path, "r+") as f: + existing_config: Dict[str, Any] = json.load(f) + existing_config.update(config_to_save) + f.seek(0) + json.dump(existing_config, f, indent=2) + f.truncate() + else: + with open(config_path, "w") as f: + json.dump(config_to_save, f, indent=2) + self.config.clear_updates() diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py new file mode 100644 index 000000000000..91718f5da367 --- /dev/null +++ b/autogpt_platform/backend/backend/util/test.py @@ -0,0 +1,166 @@ +import logging +import time +import uuid +from typing import Sequence, cast + +from backend.data import db +from backend.data.block import Block, BlockSchema, initialize_blocks +from backend.data.execution import ExecutionResult, ExecutionStatus +from backend.data.model import _BaseCredentials +from backend.data.user import create_default_user +from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler +from backend.server.rest_api import AgentServer +from backend.server.utils import get_user_id + +log = logging.getLogger(__name__) + + +class SpinTestServer: + def __init__(self): + self.db_api = DatabaseManager() + self.exec_manager = ExecutionManager() + self.agent_server = AgentServer() + self.scheduler = ExecutionScheduler() + + @staticmethod + def test_get_user_id(): + return "3e53486c-cf57-477e-ba2a-cb02dc828e1a" + + async def __aenter__(self): + self.setup_dependency_overrides() + self.db_api.__enter__() + self.agent_server.__enter__() + self.exec_manager.__enter__() + self.scheduler.__enter__() + + await db.connect() + await initialize_blocks() + await create_default_user() + + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await db.disconnect() + + self.scheduler.__exit__(exc_type, exc_val, exc_tb) + self.exec_manager.__exit__(exc_type, exc_val, exc_tb) + self.agent_server.__exit__(exc_type, exc_val, exc_tb) + self.db_api.__exit__(exc_type, exc_val, exc_tb) + + def setup_dependency_overrides(self): + # Override get_user_id for testing + self.agent_server.set_test_dependency_overrides( + {get_user_id: self.test_get_user_id} + ) + + +async def wait_execution( + user_id: str, + graph_id: str, + graph_exec_id: str, + timeout: int = 30, +) -> Sequence[ExecutionResult]: + async def is_execution_completed(): + status = await AgentServer().test_get_graph_run_status(graph_exec_id, user_id) + log.info(f"Execution status: {status}") + if status == ExecutionStatus.FAILED: + log.info("Execution failed") + raise Exception("Execution failed") + if status == ExecutionStatus.TERMINATED: + log.info("Execution terminated") + raise Exception("Execution terminated") + return status == ExecutionStatus.COMPLETED + + # Wait for the executions to complete + for i in range(timeout): + if await is_execution_completed(): + return await AgentServer().test_get_graph_run_node_execution_results( + graph_id, graph_exec_id, user_id + ) + time.sleep(1) + + assert False, "Execution did not complete in time." 
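+
+# Illustrative usage sketch (not part of this module's API): spin up the test server,
+# execute a graph, and wait for the run to finish. The graph and user are assumed to
+# have been created beforehand, e.g. via the usecases modules.
+#
+#     async with SpinTestServer() as server:
+#         response = await server.agent_server.test_execute_graph(
+#             graph_id, {"input_1": "Hello"}, user_id
+#         )
+#         results = await wait_execution(user_id, graph_id, response["id"], timeout=30)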
+ + +def execute_block_test(block: Block): + prefix = f"[Test-{block.name}]" + + if not block.test_input or not block.test_output: + log.info(f"{prefix} No test data provided") + return + if not isinstance(block.test_input, list): + block.test_input = [block.test_input] + if not isinstance(block.test_output, list): + block.test_output = [block.test_output] + + output_index = 0 + log.info(f"{prefix} Executing {len(block.test_input)} tests...") + prefix = " " * 4 + prefix + + for mock_name, mock_obj in (block.test_mock or {}).items(): + log.info(f"{prefix} mocking {mock_name}...") + if hasattr(block, mock_name): + setattr(block, mock_name, mock_obj) + else: + log.info(f"{prefix} mock {mock_name} not found in block") + + # Populate credentials argument(s) + extra_exec_kwargs: dict = { + "graph_id": uuid.uuid4(), + "node_id": uuid.uuid4(), + "graph_exec_id": uuid.uuid4(), + "node_exec_id": uuid.uuid4(), + "user_id": uuid.uuid4(), + } + input_model = cast(type[BlockSchema], block.input_schema) + credentials_input_fields = input_model.get_credentials_fields() + if len(credentials_input_fields) == 1 and isinstance( + block.test_credentials, _BaseCredentials + ): + field_name = next(iter(credentials_input_fields)) + extra_exec_kwargs[field_name] = block.test_credentials + elif credentials_input_fields and block.test_credentials: + if not isinstance(block.test_credentials, dict): + raise TypeError(f"Block {block.name} has no usable test credentials") + else: + for field_name in credentials_input_fields: + if field_name in block.test_credentials: + extra_exec_kwargs[field_name] = block.test_credentials[field_name] + + for input_data in block.test_input: + log.info(f"{prefix} in: {input_data}") + + for output_name, output_data in block.execute(input_data, **extra_exec_kwargs): + if output_index >= len(block.test_output): + raise ValueError( + f"{prefix} produced output more than expected {output_index} >= {len(block.test_output)}:\nOutput Expected:\t\t{block.test_output}\nFailed Output Produced:\t('{output_name}', {output_data})\nNote that this may not be the one that was unexpected, but it is the first that triggered the extra output warning" + ) + ex_output_name, ex_output_data = block.test_output[output_index] + + def compare(data, expected_data): + if data == expected_data: + is_matching = True + elif isinstance(expected_data, type): + is_matching = isinstance(data, expected_data) + elif callable(expected_data): + is_matching = expected_data(data) + else: + is_matching = False + + mark = "✅" if is_matching else "❌" + log.info(f"{prefix} {mark} comparing `{data}` vs `{expected_data}`") + if not is_matching: + raise ValueError( + f"{prefix}: wrong output {data} vs {expected_data}\n" + f"Output Expected:\t\t{block.test_output}\n" + f"Failed Output Produced:\t('{output_name}', {output_data})" + ) + + compare(output_data, ex_output_data) + compare(output_name, ex_output_name) + output_index += 1 + + if output_index < len(block.test_output): + raise ValueError( + f"{prefix} produced output less than expected. 
output_index={output_index}, len(block.test_output)={len(block.test_output)}" + ) diff --git a/autogpt_platform/backend/backend/util/text.py b/autogpt_platform/backend/backend/util/text.py new file mode 100644 index 000000000000..c867b7a40f79 --- /dev/null +++ b/autogpt_platform/backend/backend/util/text.py @@ -0,0 +1,17 @@ +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment + + +class TextFormatter: + def __init__(self): + # Create a sandboxed environment + self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True) + + # Clear any registered filters, tests, and globals to minimize attack surface + self.env.filters.clear() + self.env.tests.clear() + self.env.globals.clear() + + def format_string(self, template_str: str, values=None, **kwargs) -> str: + template = self.env.from_string(template_str) + return template.render(values or {}, **kwargs) diff --git a/autogpt_platform/backend/backend/util/type.py b/autogpt_platform/backend/backend/util/type.py new file mode 100644 index 000000000000..2c480b674d8f --- /dev/null +++ b/autogpt_platform/backend/backend/util/type.py @@ -0,0 +1,193 @@ +import json +from typing import Any, Type, TypeVar, cast, get_args, get_origin + + +class ConversionError(ValueError): + pass + + +def __convert_list(value: Any) -> list: + if isinstance(value, (list, tuple, set)): + return list(value) + elif isinstance(value, dict): + return list(value.items()) + elif isinstance(value, str): + value = value.strip() + if value.startswith("[") and value.endswith("]"): + try: + return json.loads(value) + except json.JSONDecodeError: + return [value] + else: + return [value] + else: + return [value] + + +def __convert_dict(value: Any) -> dict: + if isinstance(value, str): + try: + result = json.loads(value) + if isinstance(result, dict): + return result + else: + return {"value": result} + except json.JSONDecodeError: + return {"value": value} # Fallback conversion + elif isinstance(value, list): + return {i: value[i] for i in range(len(value))} + elif isinstance(value, tuple): + return {i: value[i] for i in range(len(value))} + elif isinstance(value, dict): + return value + else: + return {"value": value} + + +def __convert_tuple(value: Any) -> tuple: + if isinstance(value, (str, list, set)): + return tuple(value) + elif isinstance(value, dict): + return tuple(value.items()) + elif isinstance(value, (int, float, bool)): + return (value,) + elif isinstance(value, tuple): + return value + else: + return (value,) + + +def __convert_set(value: Any) -> set: + if isinstance(value, (str, list, tuple)): + return set(value) + elif isinstance(value, dict): + return set(value.items()) + elif isinstance(value, set): + return value + else: + return {value} + + +def __convert_str(value: Any) -> str: + if isinstance(value, str): + return value + else: + return json.dumps(value) + + +NUM = TypeVar("NUM", int, float) + + +def __convert_num(value: Any, num_type: Type[NUM]) -> NUM: + if isinstance(value, (list, dict, tuple, set)): + return num_type(len(value)) + elif isinstance(value, num_type): + return value + else: + try: + return num_type(float(value)) + except (ValueError, TypeError): + return num_type(0) # Fallback conversion + + +def __convert_bool(value: Any) -> bool: + if isinstance(value, bool): + return value + elif isinstance(value, str): + if value.lower() in ["true", "1"]: + return True + else: + return False + else: + return bool(value) + + +def _try_convert(value: Any, target_type: Type, raise_on_mismatch: bool) -> Any: + origin = 
get_origin(target_type) + args = get_args(target_type) + if origin is None: + origin = target_type + if origin not in [list, dict, tuple, str, set, int, float, bool]: + return value + + # Handle the case when value is already of the target type + if isinstance(value, origin): + if not args: + return value + else: + # Need to convert elements + if origin is list: + return [convert(v, args[0]) for v in value] + elif origin is tuple: + # Tuples can have multiple types + if len(args) == 1: + return tuple(convert(v, args[0]) for v in value) + else: + return tuple(convert(v, t) for v, t in zip(value, args)) + elif origin is dict: + key_type, val_type = args + return { + convert(k, key_type): convert(v, val_type) for k, v in value.items() + } + elif origin is set: + return {convert(v, args[0]) for v in value} + else: + return value + elif raise_on_mismatch: + raise TypeError(f"Value {value} is not of expected type {target_type}") + else: + # Need to convert value to the origin type + if origin is list: + value = __convert_list(value) + if args: + return [convert(v, args[0]) for v in value] + else: + return value + elif origin is dict: + value = __convert_dict(value) + if args: + key_type, val_type = args + return { + convert(k, key_type): convert(v, val_type) for k, v in value.items() + } + else: + return value + elif origin is tuple: + value = __convert_tuple(value) + if args: + if len(args) == 1: + return tuple(convert(v, args[0]) for v in value) + else: + return tuple(convert(v, t) for v, t in zip(value, args)) + else: + return value + elif origin is str: + return __convert_str(value) + elif origin is set: + value = __convert_set(value) + if args: + return {convert(v, args[0]) for v in value} + else: + return value + elif origin is int: + return __convert_num(value, int) + elif origin is float: + return __convert_num(value, float) + elif origin is bool: + return __convert_bool(value) + else: + return value + + +T = TypeVar("T") + + +def type_match(value: Any, target_type: Type[T]) -> T: + return cast(T, _try_convert(value, target_type, raise_on_mismatch=True)) + + +def convert(value: Any, target_type: Type[T]) -> T: + try: + return cast(T, _try_convert(value, target_type, raise_on_mismatch=False)) + except Exception as e: + raise ConversionError(f"Failed to convert {value} to {target_type}") from e diff --git a/autogpt_platform/backend/backend/ws.py b/autogpt_platform/backend/backend/ws.py new file mode 100644 index 000000000000..3b15a60eb03a --- /dev/null +++ b/autogpt_platform/backend/backend/ws.py @@ -0,0 +1,13 @@ +from backend.app import run_processes +from backend.server.ws_api import WebsocketServer + + +def main(): + """ + Run all the processes required for the AutoGPT-server WebSocket API. 
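+
+    Typically launched directly, e.g. (assuming the backend package is importable):
+        poetry run python -m backend.ws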
+ """ + run_processes(WebsocketServer()) + + +if __name__ == "__main__": + main() diff --git a/autogpt_platform/backend/docker-compose.test.yaml b/autogpt_platform/backend/docker-compose.test.yaml new file mode 100644 index 000000000000..f2a5f1bc2a84 --- /dev/null +++ b/autogpt_platform/backend/docker-compose.test.yaml @@ -0,0 +1,32 @@ +services: + postgres-test: + image: ankane/pgvector:latest + environment: + - POSTGRES_USER=${DB_USER} + - POSTGRES_PASSWORD=${DB_PASS} + - POSTGRES_DB=${DB_NAME} + healthcheck: + test: pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB + interval: 10s + timeout: 5s + retries: 5 + ports: + - "${DB_PORT}:5432" + networks: + - app-network-test + redis-test: + image: redis:latest + command: redis-server --requirepass password + ports: + - "6379:6379" + networks: + - app-network-test + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + +networks: + app-network-test: + driver: bridge diff --git a/autogpt_platform/backend/linter.py b/autogpt_platform/backend/linter.py new file mode 100644 index 000000000000..a85fd6e9dc23 --- /dev/null +++ b/autogpt_platform/backend/linter.py @@ -0,0 +1,33 @@ +import os +import subprocess + +directory = os.path.dirname(os.path.realpath(__file__)) + +BACKEND_DIR = "." +LIBS_DIR = "../autogpt_libs" +TARGET_DIRS = [BACKEND_DIR, LIBS_DIR] + + +def run(*command: str) -> None: + print(f">>>>> Running poetry run {' '.join(command)}") + subprocess.run(["poetry", "run"] + list(command), cwd=directory, check=True) + + +def lint(): + try: + run("ruff", "check", *TARGET_DIRS, "--exit-zero") + run("ruff", "format", "--diff", "--check", LIBS_DIR) + run("isort", "--diff", "--check", "--profile", "black", BACKEND_DIR) + run("black", "--diff", "--check", BACKEND_DIR) + run("pyright", *TARGET_DIRS) + except subprocess.CalledProcessError as e: + print("Lint failed, try running `poetry run format` to fix the issues: ", e) + raise e + + +def format(): + run("ruff", "check", "--fix", *TARGET_DIRS) + run("ruff", "format", LIBS_DIR) + run("isort", "--profile", "black", BACKEND_DIR) + run("black", BACKEND_DIR) + run("pyright", *TARGET_DIRS) diff --git a/autogpt_platform/backend/migrations/20240722143307_migrations/migration.sql b/autogpt_platform/backend/migrations/20240722143307_migrations/migration.sql new file mode 100644 index 000000000000..c23105bc0a0f --- /dev/null +++ b/autogpt_platform/backend/migrations/20240722143307_migrations/migration.sql @@ -0,0 +1,128 @@ +-- CreateTable +CREATE TABLE "AgentGraph" ( + "id" TEXT NOT NULL, + "version" INTEGER NOT NULL DEFAULT 1, + "name" TEXT, + "description" TEXT, + "isActive" BOOLEAN NOT NULL DEFAULT true, + "isTemplate" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "AgentGraph_pkey" PRIMARY KEY ("id","version") +); + +-- CreateTable +CREATE TABLE "AgentNode" ( + "id" TEXT NOT NULL, + "agentBlockId" TEXT NOT NULL, + "agentGraphId" TEXT NOT NULL, + "agentGraphVersion" INTEGER NOT NULL DEFAULT 1, + "constantInput" TEXT NOT NULL DEFAULT '{}', + "metadata" TEXT NOT NULL DEFAULT '{}', + + CONSTRAINT "AgentNode_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentNodeLink" ( + "id" TEXT NOT NULL, + "agentNodeSourceId" TEXT NOT NULL, + "sourceName" TEXT NOT NULL, + "agentNodeSinkId" TEXT NOT NULL, + "sinkName" TEXT NOT NULL, + + CONSTRAINT "AgentNodeLink_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentBlock" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "inputSchema" TEXT NOT NULL, + "outputSchema" TEXT NOT NULL, + + CONSTRAINT 
"AgentBlock_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentGraphExecution" ( + "id" TEXT NOT NULL, + "agentGraphId" TEXT NOT NULL, + "agentGraphVersion" INTEGER NOT NULL DEFAULT 1, + + CONSTRAINT "AgentGraphExecution_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentNodeExecution" ( + "id" TEXT NOT NULL, + "agentGraphExecutionId" TEXT NOT NULL, + "agentNodeId" TEXT NOT NULL, + "executionStatus" TEXT NOT NULL, + "addedTime" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "queuedTime" TIMESTAMP(3), + "startedTime" TIMESTAMP(3), + "endedTime" TIMESTAMP(3), + + CONSTRAINT "AgentNodeExecution_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentNodeExecutionInputOutput" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "data" TEXT NOT NULL, + "time" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "referencedByInputExecId" TEXT, + "referencedByOutputExecId" TEXT, + + CONSTRAINT "AgentNodeExecutionInputOutput_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AgentGraphExecutionSchedule" ( + "id" TEXT NOT NULL, + "agentGraphId" TEXT NOT NULL, + "agentGraphVersion" INTEGER NOT NULL DEFAULT 1, + "schedule" TEXT NOT NULL, + "isEnabled" BOOLEAN NOT NULL DEFAULT true, + "inputData" TEXT NOT NULL, + "lastUpdated" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "AgentGraphExecutionSchedule_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "AgentBlock_name_key" ON "AgentBlock"("name"); + +-- CreateIndex +CREATE INDEX "AgentGraphExecutionSchedule_isEnabled_idx" ON "AgentGraphExecutionSchedule"("isEnabled"); + +-- AddForeignKey +ALTER TABLE "AgentNode" ADD CONSTRAINT "AgentNode_agentBlockId_fkey" FOREIGN KEY ("agentBlockId") REFERENCES "AgentBlock"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNode" ADD CONSTRAINT "AgentNode_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSourceId_fkey" FOREIGN KEY ("agentNodeSourceId") REFERENCES "AgentNode"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSinkId_fkey" FOREIGN KEY ("agentNodeSinkId") REFERENCES "AgentNode"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentGraphExecutionId_fkey" FOREIGN KEY ("agentGraphExecutionId") REFERENCES "AgentGraphExecution"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentNodeId_fkey" FOREIGN KEY ("agentNodeId") REFERENCES "AgentNode"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByInputExecId_fkey" FOREIGN KEY ("referencedByInputExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByOutputExecId_fkey" FOREIGN KEY ("referencedByOutputExecId") 
REFERENCES "AgentNodeExecution"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240726131311_node_input_unique_constraint/migration.sql b/autogpt_platform/backend/migrations/20240726131311_node_input_unique_constraint/migration.sql new file mode 100644 index 000000000000..284457cd5246 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240726131311_node_input_unique_constraint/migration.sql @@ -0,0 +1,8 @@ +/* + Warnings: + + - A unique constraint covering the columns `[referencedByInputExecId,referencedByOutputExecId,name]` on the table `AgentNodeExecutionInputOutput` will be added. If there are existing duplicate values, this will fail. + +*/ +-- CreateIndex +CREATE UNIQUE INDEX "AgentNodeExecutionInputOutput_referencedByInputExecId_refer_key" ON "AgentNodeExecutionInputOutput"("referencedByInputExecId", "referencedByOutputExecId", "name"); diff --git a/autogpt_platform/backend/migrations/20240729061216_static_input_link/migration.sql b/autogpt_platform/backend/migrations/20240729061216_static_input_link/migration.sql new file mode 100644 index 000000000000..5e756d5215e5 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240729061216_static_input_link/migration.sql @@ -0,0 +1,5 @@ +-- AlterTable +ALTER TABLE "AgentNodeExecution" ADD COLUMN "executionData" TEXT; + +-- AlterTable +ALTER TABLE "AgentNodeLink" ADD COLUMN "isStatic" BOOLEAN NOT NULL DEFAULT false; diff --git a/autogpt_platform/backend/migrations/20240804040801_add_subgraph/migration.sql b/autogpt_platform/backend/migrations/20240804040801_add_subgraph/migration.sql new file mode 100644 index 000000000000..5bcc4c310fcb --- /dev/null +++ b/autogpt_platform/backend/migrations/20240804040801_add_subgraph/migration.sql @@ -0,0 +1,5 @@ +-- AlterTable +ALTER TABLE "AgentGraph" ADD COLUMN "agentGraphParentId" TEXT; + +-- AddForeignKey +ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey" FOREIGN KEY ("agentGraphParentId", "version") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240805115810_add_user_management/migration.sql b/autogpt_platform/backend/migrations/20240805115810_add_user_management/migration.sql new file mode 100644 index 000000000000..f446f2153469 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240805115810_add_user_management/migration.sql @@ -0,0 +1,31 @@ +-- AlterTable +ALTER TABLE "AgentGraph" ADD COLUMN "userId" TEXT; + +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "userId" TEXT; + +-- AlterTable +ALTER TABLE "AgentGraphExecutionSchedule" ADD COLUMN "userId" TEXT; + +-- CreateTable +CREATE TABLE "User" ( + "id" TEXT NOT NULL, + "email" TEXT NOT NULL, + "name" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "User_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "User_email_key" ON "User"("email"); + +-- AddForeignKey +ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT 
"AgentGraphExecution_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240807123738_add_index_users/migration.sql b/autogpt_platform/backend/migrations/20240807123738_add_index_users/migration.sql new file mode 100644 index 000000000000..54a70dca5aad --- /dev/null +++ b/autogpt_platform/backend/migrations/20240807123738_add_index_users/migration.sql @@ -0,0 +1,5 @@ +-- CreateIndex +CREATE INDEX "User_id_idx" ON "User"("id"); + +-- CreateIndex +CREATE INDEX "User_email_idx" ON "User"("email"); diff --git a/autogpt_platform/backend/migrations/20240808095419_add_required_user_ids/migration.sql b/autogpt_platform/backend/migrations/20240808095419_add_required_user_ids/migration.sql new file mode 100644 index 000000000000..8d4fb9ef5c48 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240808095419_add_required_user_ids/migration.sql @@ -0,0 +1,25 @@ +-- Update existing entries with NULL userId +UPDATE "AgentGraph" SET "userId" = '3e53486c-cf57-477e-ba2a-cb02dc828e1a' WHERE "userId" IS NULL; +UPDATE "AgentGraphExecution" SET "userId" = '3e53486c-cf57-477e-ba2a-cb02dc828e1a' WHERE "userId" IS NULL; +UPDATE "AgentGraphExecutionSchedule" SET "userId" = '3e53486c-cf57-477e-ba2a-cb02dc828e1a' WHERE "userId" IS NULL; + +-- AlterTable +ALTER TABLE "AgentGraph" ALTER COLUMN "userId" SET NOT NULL; + +-- AlterTable +ALTER TABLE "AgentGraphExecution" ALTER COLUMN "userId" SET NOT NULL; + +-- AlterTable +ALTER TABLE "AgentGraphExecutionSchedule" ALTER COLUMN "userId" SET NOT NULL; + +-- AlterForeignKey +ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_userId_fkey"; +ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AlterForeignKey +ALTER TABLE "AgentGraphExecution" DROP CONSTRAINT "AgentGraphExecution_userId_fkey"; +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AlterForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey"; +ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240902223334_add_stats_column/migration.sql b/autogpt_platform/backend/migrations/20240902223334_add_stats_column/migration.sql new file mode 100644 index 000000000000..5a7f7387e89a --- /dev/null +++ b/autogpt_platform/backend/migrations/20240902223334_add_stats_column/migration.sql @@ -0,0 +1,5 @@ +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "stats" TEXT; + +-- AlterTable +ALTER TABLE "AgentNodeExecution" ADD COLUMN "stats" TEXT; diff --git a/autogpt_platform/backend/migrations/20240906155206_add_created_at_updated_at/migration.sql b/autogpt_platform/backend/migrations/20240906155206_add_created_at_updated_at/migration.sql new file mode 100644 index 000000000000..cb3962035b14 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240906155206_add_created_at_updated_at/migration.sql @@ -0,0 +1,11 @@ +-- AlterTable +ALTER TABLE "AgentGraph" ADD 
COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, +ADD COLUMN "updatedAt" TIMESTAMP(3); + +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, +ADD COLUMN "updatedAt" TIMESTAMP(3); + +-- AlterTable +ALTER TABLE "AgentGraphExecutionSchedule" ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, +ADD COLUMN "updatedAt" TIMESTAMP(3); diff --git a/autogpt_platform/backend/migrations/20240914033334_user_credit/migration.sql b/autogpt_platform/backend/migrations/20240914033334_user_credit/migration.sql new file mode 100644 index 000000000000..0713f9f920f3 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240914033334_user_credit/migration.sql @@ -0,0 +1,39 @@ +/* + Warnings: + + - The `executionStatus` column on the `AgentNodeExecution` table would be dropped and recreated. This will lead to data loss if there is data in the column. + +*/ +-- CreateEnum +CREATE TYPE "AgentExecutionStatus" AS ENUM ('INCOMPLETE', 'QUEUED', 'RUNNING', 'COMPLETED', 'FAILED'); + +-- CreateEnum +CREATE TYPE "UserBlockCreditType" AS ENUM ('TOP_UP', 'USAGE'); + +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "executionStatus" "AgentExecutionStatus" NOT NULL DEFAULT 'COMPLETED', +ADD COLUMN "startedAt" TIMESTAMP(3); + +-- AlterTable +ALTER TABLE "AgentNodeExecution" DROP COLUMN "executionStatus", +ADD COLUMN "executionStatus" "AgentExecutionStatus" NOT NULL DEFAULT 'COMPLETED'; + +-- CreateTable +CREATE TABLE "UserBlockCredit" ( + "transactionKey" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "userId" TEXT NOT NULL, + "blockId" TEXT, + "amount" INTEGER NOT NULL, + "type" "UserBlockCreditType" NOT NULL, + "isActive" BOOLEAN NOT NULL DEFAULT true, + "metadata" JSONB, + + CONSTRAINT "UserBlockCredit_pkey" PRIMARY KEY ("transactionKey","userId") +); + +-- AddForeignKey +ALTER TABLE "UserBlockCredit" ADD CONSTRAINT "UserBlockCredit_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserBlockCredit" ADD CONSTRAINT "UserBlockCredit_blockId_fkey" FOREIGN KEY ("blockId") REFERENCES "AgentBlock"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240918163611_add_analytics_tables/migration.sql b/autogpt_platform/backend/migrations/20240918163611_add_analytics_tables/migration.sql new file mode 100644 index 000000000000..fba508819073 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240918163611_add_analytics_tables/migration.sql @@ -0,0 +1,37 @@ +-- CreateTable +CREATE TABLE "AnalyticsDetails" ( + "id" TEXT NOT NULL DEFAULT gen_random_uuid(), + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "userId" TEXT NOT NULL, + "type" TEXT NOT NULL, + "data" JSONB, + "dataIndex" TEXT, + + CONSTRAINT "AnalyticsDetails_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "AnalyticsMetrics" ( + "id" TEXT NOT NULL DEFAULT gen_random_uuid(), + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + "analyticMetric" TEXT NOT NULL, + "value" DOUBLE PRECISION NOT NULL, + "dataString" TEXT, + "userId" TEXT NOT NULL, + + CONSTRAINT "AnalyticsMetrics_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE INDEX "analyticsDetails" ON "AnalyticsDetails"("userId", "type"); + +-- CreateIndex +CREATE INDEX "AnalyticsDetails_type_idx" ON 
"AnalyticsDetails"("type"); + +-- AddForeignKey +ALTER TABLE "AnalyticsDetails" ADD CONSTRAINT "AnalyticsDetails_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AnalyticsMetrics" ADD CONSTRAINT "AnalyticsMetrics_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql b/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql new file mode 100644 index 000000000000..c6f7ae6f3376 --- /dev/null +++ b/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql @@ -0,0 +1,18 @@ +-- Update AgentBlock IDs: this should cascade to the AgentNode and UserBlockCredit tables +UPDATE "AgentBlock" +SET "id" = CASE + WHEN "id" = 'a1b2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6' THEN '436c3984-57fd-4b85-8e9a-459b356883bd' + WHEN "id" = 'b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6' THEN '0e50422c-6dee-4145-83d6-3a5a392f65de' + WHEN "id" = 'c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8' THEN 'a0a69be1-4528-491c-a85a-a4ab6873e3f0' + WHEN "id" = 'c3d4e5f6-g7h8-i9j0-k1l2-m3n4o5p6q7r8' THEN '32a87eab-381e-4dd4-bdb8-4c47151be35a' + WHEN "id" = 'b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7' THEN '87840993-2053-44b7-8da4-187ad4ee518c' + WHEN "id" = 'h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6' THEN 'd0822ab5-9f8a-44a3-8971-531dd0178b6b' + WHEN "id" = 'd3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t' THEN 'df06086a-d5ac-4abb-9996-2ad0acb2eff7' + WHEN "id" = 'h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m' THEN 'f5b0f5d0-1862-4d61-94be-3ad0fa772760' + WHEN "id" = 'a1234567-89ab-cdef-0123-456789abcdef' THEN '4335878a-394e-4e67-adf2-919877ff49ae' + WHEN "id" = 'f8e7d6c5-b4a3-2c1d-0e9f-8g7h6i5j4k3l' THEN 'f66a3543-28d3-4ab5-8945-9b336371e2ce' + WHEN "id" = 'b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0h2' THEN '716a67b3-6760-42e7-86dc-18645c6e00fc' + WHEN "id" = '31d1064e-7446-4693-o7d4-65e5ca9110d1' THEN 'cc10ff7b-7753-4ff2-9af6-9399b1a7eddc' + WHEN "id" = 'c6731acb-4105-4zp1-bc9b-03d0036h370g' THEN '5ebe6768-8e5d-41e3-9134-1c7bd89a8d52' + ELSE "id" +END; diff --git a/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql b/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql new file mode 100644 index 000000000000..821450272e35 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql @@ -0,0 +1,89 @@ +-- DropForeignKey +ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_userId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentGraphExecution" DROP CONSTRAINT "AgentGraphExecution_agentGraphId_agentGraphVersion_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentGraphExecution" DROP CONSTRAINT "AgentGraphExecution_userId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNode" DROP CONSTRAINT "AgentNode_agentGraphId_agentGraphVersion_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeExecution" DROP CONSTRAINT "AgentNodeExecution_agentGraphExecutionId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeExecution" DROP CONSTRAINT "AgentNodeExecution_agentNodeId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" DROP CONSTRAINT 
"AgentNodeExecutionInputOutput_referencedByInputExecId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" DROP CONSTRAINT "AgentNodeExecutionInputOutput_referencedByOutputExecId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeLink" DROP CONSTRAINT "AgentNodeLink_agentNodeSinkId_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentNodeLink" DROP CONSTRAINT "AgentNodeLink_agentNodeSourceId_fkey"; + +-- DropForeignKey +ALTER TABLE "AnalyticsDetails" DROP CONSTRAINT "AnalyticsDetails_userId_fkey"; + +-- DropForeignKey +ALTER TABLE "AnalyticsMetrics" DROP CONSTRAINT "AnalyticsMetrics_userId_fkey"; + +-- DropForeignKey +ALTER TABLE "UserBlockCredit" DROP CONSTRAINT "UserBlockCredit_userId_fkey"; + +-- AddForeignKey +ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNode" ADD CONSTRAINT "AgentNode_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSourceId_fkey" FOREIGN KEY ("agentNodeSourceId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSinkId_fkey" FOREIGN KEY ("agentNodeSinkId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentGraphExecutionId_fkey" FOREIGN KEY ("agentGraphExecutionId") REFERENCES "AgentGraphExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentNodeId_fkey" FOREIGN KEY ("agentNodeId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByInputExecId_fkey" FOREIGN KEY ("referencedByInputExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByOutputExecId_fkey" FOREIGN KEY ("referencedByOutputExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AnalyticsDetails" ADD CONSTRAINT "AnalyticsDetails_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE 
ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AnalyticsMetrics" ADD CONSTRAINT "AnalyticsMetrics_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserBlockCredit" ADD CONSTRAINT "UserBlockCredit_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql b/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql new file mode 100644 index 000000000000..3b783a6d9212 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql @@ -0,0 +1,5 @@ +-- DropForeignKey +ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey"; + +-- AddForeignKey +ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey" FOREIGN KEY ("agentGraphParentId", "version") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql new file mode 100644 index 000000000000..b3886efa030a --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "User" ADD COLUMN "metadata" JSONB; diff --git a/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql new file mode 100644 index 000000000000..aa577c90e938 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql @@ -0,0 +1,27 @@ +--CreateFunction +CREATE OR REPLACE FUNCTION add_user_to_platform() RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO platform."User" (id, email, "updatedAt") + VALUES (NEW.id, NEW.email, now()); + RETURN NEW; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +DO $$ +BEGIN + -- Check if the auth schema and users table exist + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'auth' + AND table_name = 'users' + ) THEN + -- Drop the trigger if it exists + DROP TRIGGER IF EXISTS user_added_to_platform ON auth.users; + + -- Create the trigger + CREATE TRIGGER user_added_to_platform + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE FUNCTION add_user_to_platform(); + END IF; +END $$; diff --git a/autogpt_platform/backend/migrations/20241017180251_add_webhooks_and_their_relation_to_nodes/migration.sql b/autogpt_platform/backend/migrations/20241017180251_add_webhooks_and_their_relation_to_nodes/migration.sql new file mode 100644 index 000000000000..011a017c8f20 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241017180251_add_webhooks_and_their_relation_to_nodes/migration.sql @@ -0,0 +1,26 @@ +-- AlterTable +ALTER TABLE "AgentNode" ADD COLUMN "webhookId" TEXT; + +-- CreateTable +CREATE TABLE "IntegrationWebhook" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3), + "userId" TEXT NOT NULL, + "provider" TEXT NOT NULL, + "credentialsId" TEXT NOT NULL, + "webhookType" TEXT NOT NULL, + "resource" TEXT NOT NULL, + "events" TEXT[], + "config" JSONB NOT NULL, + "secret" TEXT NOT NULL, + "providerWebhookId" TEXT NOT NULL, + + 
CONSTRAINT "IntegrationWebhook_pkey" PRIMARY KEY ("id") +); + +-- AddForeignKey +ALTER TABLE "AgentNode" ADD CONSTRAINT "AgentNode_webhookId_fkey" FOREIGN KEY ("webhookId") REFERENCES "IntegrationWebhook"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "IntegrationWebhook" ADD CONSTRAINT "IntegrationWebhook_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20241030014950_move_integration_creds_to_platform.User/migration.sql b/autogpt_platform/backend/migrations/20241030014950_move_integration_creds_to_platform.User/migration.sql new file mode 100644 index 000000000000..f2b2269b181e --- /dev/null +++ b/autogpt_platform/backend/migrations/20241030014950_move_integration_creds_to_platform.User/migration.sql @@ -0,0 +1,40 @@ +-- Migrate integration credentials from auth.user.raw_user_meta_data to platform.User.metadata +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'auth' + AND table_name = 'users' + ) THEN + -- First update User metadata for users that have integration_credentials + WITH users_with_creds AS ( + SELECT + id, + raw_user_meta_data->'integration_credentials' as integration_credentials, + raw_user_meta_data + FROM auth.users + WHERE raw_user_meta_data ? 'integration_credentials' + ) + UPDATE "User" u + SET metadata = COALESCE( + CASE + -- If User.metadata already has .integration_credentials, leave it + WHEN u.metadata ? 'integration_credentials' THEN u.metadata + -- If User.metadata exists but has no .integration_credentials, add it + WHEN u.metadata IS NOT NULL AND u.metadata::text != '' THEN + (u.metadata || jsonb_build_object('integration_credentials', uwc.integration_credentials)) + -- If User.metadata is NULL, set it + ELSE jsonb_build_object('integration_credentials', uwc.integration_credentials) + END, + '{}'::jsonb + ) + FROM users_with_creds uwc + WHERE u.id = uwc.id::text; + + -- Finally remove integration_credentials from auth.users + UPDATE auth.users + SET raw_user_meta_data = raw_user_meta_data - 'integration_credentials' + WHERE raw_user_meta_data ? 'integration_credentials'; + END IF; +END $$; diff --git a/autogpt_platform/backend/migrations/20241030061705_encrypt_user_metadata/migration.sql b/autogpt_platform/backend/migrations/20241030061705_encrypt_user_metadata/migration.sql new file mode 100644 index 000000000000..977f5db33e91 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241030061705_encrypt_user_metadata/migration.sql @@ -0,0 +1,16 @@ +-- Make User.metadata column consistent and add integrations column for encrypted credentials + +-- First update all records to have empty JSON object +UPDATE "User" +SET "metadata" = '{}'::jsonb +WHERE "metadata" IS NULL; + +-- Then make it required +ALTER TABLE "User" +ALTER COLUMN "metadata" SET DEFAULT '{}'::jsonb, +ALTER COLUMN "metadata" SET NOT NULL, +-- and add integrations column (which will be encrypted JSON) +ADD COLUMN "integrations" TEXT NOT NULL DEFAULT ''; + +-- Encrypting the credentials and moving them from metadata to integrations +-- will be handled in the backend. 
diff --git a/autogpt_platform/backend/migrations/20241030063332_drop_all_credentials_from_constant_input/migration.sql b/autogpt_platform/backend/migrations/20241030063332_drop_all_credentials_from_constant_input/migration.sql new file mode 100644 index 000000000000..4880f8aab0cb --- /dev/null +++ b/autogpt_platform/backend/migrations/20241030063332_drop_all_credentials_from_constant_input/migration.sql @@ -0,0 +1,59 @@ +-- Function to clean sensitive data from JSON +CREATE OR REPLACE FUNCTION clean_sensitive_json(data jsonb) +RETURNS jsonb AS $$ +DECLARE + result jsonb := data; +BEGIN + -- If the JSON contains api_key directly + IF result ? 'api_key' THEN + result = result - 'api_key'; + END IF; + + -- If the JSON contains discord_bot_token + IF result ? 'discord_bot_token' THEN + result = result - 'discord_bot_token'; + END IF; + + -- If the JSON contains creds + IF result ? 'creds' THEN + result = result - 'creds'; + END IF; + + -- If the JSON contains smtp credentials + IF result ? 'smtp_username' THEN + result = result - 'smtp_username'; + END IF; + + IF result ? 'smtp_password' THEN + result = result - 'smtp_password'; + END IF; + + -- If the JSON contains OAuth credentials + IF result ? 'client_id' THEN + result = result - 'client_id'; + END IF; + + IF result ? 'client_secret' THEN + result = result - 'client_secret'; + END IF; + + -- If the JSON contains username/password + IF result ? 'username' THEN + result = result - 'username'; + END IF; + + IF result ? 'password' THEN + result = result - 'password'; + END IF; + + RETURN result; +END; +$$ LANGUAGE plpgsql; + +-- Update the table using the function +UPDATE "AgentNode" +SET "constantInput" = clean_sensitive_json("constantInput"::jsonb)::json +WHERE "constantInput"::jsonb ?| array['api_key', 'discord_bot_token', 'creds', 'smtp_username', 'smtp_password', 'client_id', 'client_secret', 'username', 'password']; + +-- Drop the function after use +DROP FUNCTION clean_sensitive_json; diff --git a/autogpt_platform/backend/migrations/20241103133307_remove_subgraph/migration.sql b/autogpt_platform/backend/migrations/20241103133307_remove_subgraph/migration.sql new file mode 100644 index 000000000000..58caf9b2b1b9 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241103133307_remove_subgraph/migration.sql @@ -0,0 +1,11 @@ +/* + Warnings: + + - You are about to drop the column `agentGraphParentId` on the `AgentGraph` table. All the data in the column will be lost. + +*/ +-- DropForeignKey +ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey"; + +-- AlterTable +ALTER TABLE "AgentGraph" DROP COLUMN "agentGraphParentId"; diff --git a/autogpt_platform/backend/migrations/20241103144418_graph_exec_stats_list_to_obj/migration.sql b/autogpt_platform/backend/migrations/20241103144418_graph_exec_stats_list_to_obj/migration.sql new file mode 100644 index 000000000000..ad9f45c1422e --- /dev/null +++ b/autogpt_platform/backend/migrations/20241103144418_graph_exec_stats_list_to_obj/migration.sql @@ -0,0 +1,4 @@ +-- This migration converts the stats column from a list to an object. 
+UPDATE "AgentGraphExecution" +SET "stats" = (stats::jsonb -> 0)::text +WHERE stats IS NOT NULL AND jsonb_typeof(stats::jsonb) = 'array'; diff --git a/autogpt_platform/backend/migrations/20241108170448_add_api_key_support/migration.sql b/autogpt_platform/backend/migrations/20241108170448_add_api_key_support/migration.sql new file mode 100644 index 000000000000..6c4bbfd4ca7c --- /dev/null +++ b/autogpt_platform/backend/migrations/20241108170448_add_api_key_support/migration.sql @@ -0,0 +1,44 @@ +-- CreateEnum +CREATE TYPE "APIKeyPermission" AS ENUM ('EXECUTE_GRAPH', 'READ_GRAPH', 'EXECUTE_BLOCK', 'READ_BLOCK'); + +-- CreateEnum +CREATE TYPE "APIKeyStatus" AS ENUM ('ACTIVE', 'REVOKED', 'SUSPENDED'); + +-- CreateTable +CREATE TABLE "APIKey" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "prefix" TEXT NOT NULL, + "postfix" TEXT NOT NULL, + "key" TEXT NOT NULL, + "status" "APIKeyStatus" NOT NULL DEFAULT 'ACTIVE', + "permissions" "APIKeyPermission"[], + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "lastUsedAt" TIMESTAMP(3), + "revokedAt" TIMESTAMP(3), + "description" TEXT, + "userId" TEXT NOT NULL, + + CONSTRAINT "APIKey_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "APIKey_key_key" ON "APIKey"("key"); + +-- CreateIndex +CREATE INDEX "APIKey_key_idx" ON "APIKey"("key"); + +-- CreateIndex +CREATE INDEX "APIKey_prefix_idx" ON "APIKey"("prefix"); + +-- CreateIndex +CREATE INDEX "APIKey_userId_idx" ON "APIKey"("userId"); + +-- CreateIndex +CREATE INDEX "APIKey_status_idx" ON "APIKey"("status"); + +-- CreateIndex +CREATE INDEX "APIKey_userId_status_idx" ON "APIKey"("userId", "status"); + +-- AddForeignKey +ALTER TABLE "APIKey" ADD CONSTRAINT "APIKey_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/autogpt_platform/backend/migrations/20241113104933_remove_scheduler/migration.sql b/autogpt_platform/backend/migrations/20241113104933_remove_scheduler/migration.sql new file mode 100644 index 000000000000..fa5d22ef30c4 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241113104933_remove_scheduler/migration.sql @@ -0,0 +1,14 @@ +/* + Warnings: + + - You are about to drop the `AgentGraphExecutionSchedule` table. If the table is not empty, all the data it contains will be lost. 
+ +*/ +-- DropForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey"; + +-- DropForeignKey +ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey"; + +-- DropTable +DROP TABLE "AgentGraphExecutionSchedule"; diff --git a/autogpt_platform/backend/migrations/20241115170707_fix_llm_provider_credentials/migration.sql b/autogpt_platform/backend/migrations/20241115170707_fix_llm_provider_credentials/migration.sql new file mode 100644 index 000000000000..59b1d0b0595b --- /dev/null +++ b/autogpt_platform/backend/migrations/20241115170707_fix_llm_provider_credentials/migration.sql @@ -0,0 +1,13 @@ +-- Correct credentials.provider field on all nodes with 'llm' provider credentials +UPDATE "AgentNode" +SET "constantInput" = JSONB_SET( + "constantInput"::jsonb, + '{credentials,provider}', + CASE + WHEN "constantInput"::jsonb->'credentials'->>'id' = '53c25cb8-e3ee-465c-a4d1-e75a4c899c2a' THEN '"openai"'::jsonb + WHEN "constantInput"::jsonb->'credentials'->>'id' = '24e5d942-d9e3-4798-8151-90143ee55629' THEN '"anthropic"'::jsonb + WHEN "constantInput"::jsonb->'credentials'->>'id' = '4ec22295-8f97-4dd1-b42b-2c6957a02545' THEN '"groq"'::jsonb + ELSE "constantInput"::jsonb->'credentials'->'provider' + END + )::text +WHERE "constantInput"::jsonb->'credentials'->>'provider' = 'llm'; diff --git a/autogpt_platform/backend/migrations/20241210013740_add_indexes/migration.sql b/autogpt_platform/backend/migrations/20241210013740_add_indexes/migration.sql new file mode 100644 index 000000000000..4898dc83ef66 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241210013740_add_indexes/migration.sql @@ -0,0 +1,41 @@ +-- CreateIndex +CREATE INDEX "AgentGraph_userId_isActive_idx" ON "AgentGraph"("userId", "isActive"); + +-- CreateIndex +CREATE INDEX "AgentGraphExecution_agentGraphId_agentGraphVersion_idx" ON "AgentGraphExecution"("agentGraphId", "agentGraphVersion"); + +-- CreateIndex +CREATE INDEX "AgentGraphExecution_userId_idx" ON "AgentGraphExecution"("userId"); + +-- CreateIndex +CREATE INDEX "AgentNode_agentGraphId_agentGraphVersion_idx" ON "AgentNode"("agentGraphId", "agentGraphVersion"); + +-- CreateIndex +CREATE INDEX "AgentNode_agentBlockId_idx" ON "AgentNode"("agentBlockId"); + +-- CreateIndex +CREATE INDEX "AgentNode_webhookId_idx" ON "AgentNode"("webhookId"); + +-- CreateIndex +CREATE INDEX "AgentNodeExecution_agentGraphExecutionId_idx" ON "AgentNodeExecution"("agentGraphExecutionId"); + +-- CreateIndex +CREATE INDEX "AgentNodeExecution_agentNodeId_idx" ON "AgentNodeExecution"("agentNodeId"); + +-- CreateIndex +CREATE INDEX "AgentNodeExecutionInputOutput_referencedByOutputExecId_idx" ON "AgentNodeExecutionInputOutput"("referencedByOutputExecId"); + +-- CreateIndex +CREATE INDEX "AgentNodeLink_agentNodeSourceId_idx" ON "AgentNodeLink"("agentNodeSourceId"); + +-- CreateIndex +CREATE INDEX "AgentNodeLink_agentNodeSinkId_idx" ON "AgentNodeLink"("agentNodeSinkId"); + +-- CreateIndex +CREATE INDEX "AnalyticsMetrics_userId_idx" ON "AnalyticsMetrics"("userId"); + +-- CreateIndex +CREATE INDEX "IntegrationWebhook_userId_idx" ON "IntegrationWebhook"("userId"); + +-- CreateIndex +CREATE INDEX "UserBlockCredit_userId_createdAt_idx" ON "UserBlockCredit"("userId", "createdAt"); diff --git a/autogpt_platform/backend/migrations/20241211160646_rename_credit_model_and_add_stripe_customer/migration.sql 
b/autogpt_platform/backend/migrations/20241211160646_rename_credit_model_and_add_stripe_customer/migration.sql new file mode 100644 index 000000000000..53ead7259a99 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241211160646_rename_credit_model_and_add_stripe_customer/migration.sql @@ -0,0 +1,8 @@ +-- AlterTable +ALTER TABLE "User" ADD COLUMN "stripeCustomerId" TEXT; + +-- AlterEnum +ALTER TYPE "UserBlockCreditType" RENAME TO "CreditTransactionType"; + +-- AlterTable +ALTER TABLE "UserBlockCredit" RENAME TO "CreditTransaction"; diff --git a/autogpt_platform/backend/migrations/20241212141024_agent_store_v2/migration.sql b/autogpt_platform/backend/migrations/20241212141024_agent_store_v2/migration.sql new file mode 100644 index 000000000000..2f903245d371 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241212141024_agent_store_v2/migration.sql @@ -0,0 +1,228 @@ +-- CreateEnum +CREATE TYPE "SubmissionStatus" AS ENUM ('DAFT', 'PENDING', 'APPROVED', 'REJECTED'); + +-- AlterTable +ALTER TABLE "AgentGraphExecution" ADD COLUMN "agentPresetId" TEXT; + +-- AlterTable +ALTER TABLE "AgentNodeExecutionInputOutput" ADD COLUMN "agentPresetId" TEXT; + +-- AlterTable +ALTER TABLE "AnalyticsMetrics" ALTER COLUMN "id" DROP DEFAULT; + +-- AlterTable +ALTER TABLE "CreditTransaction" RENAME CONSTRAINT "UserBlockCredit_pkey" TO "CreditTransaction_pkey"; + +-- CreateTable +CREATE TABLE "AgentPreset" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "name" TEXT NOT NULL, + "description" TEXT NOT NULL, + "isActive" BOOLEAN NOT NULL DEFAULT true, + "userId" TEXT NOT NULL, + "agentId" TEXT NOT NULL, + "agentVersion" INTEGER NOT NULL, + + CONSTRAINT "AgentPreset_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "UserAgent" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "userId" TEXT NOT NULL, + "agentId" TEXT NOT NULL, + "agentVersion" INTEGER NOT NULL, + "agentPresetId" TEXT, + "isFavorite" BOOLEAN NOT NULL DEFAULT false, + "isCreatedByUser" BOOLEAN NOT NULL DEFAULT false, + "isArchived" BOOLEAN NOT NULL DEFAULT false, + "isDeleted" BOOLEAN NOT NULL DEFAULT false, + + CONSTRAINT "UserAgent_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "Profile" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "userId" TEXT, + "name" TEXT NOT NULL, + "username" TEXT NOT NULL, + "description" TEXT NOT NULL, + "links" TEXT[], + "avatarUrl" TEXT, + + CONSTRAINT "Profile_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "StoreListing" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "isDeleted" BOOLEAN NOT NULL DEFAULT false, + "isApproved" BOOLEAN NOT NULL DEFAULT false, + "agentId" TEXT NOT NULL, + "agentVersion" INTEGER NOT NULL, + "owningUserId" TEXT NOT NULL, + + CONSTRAINT "StoreListing_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "StoreListingVersion" ( + "id" TEXT NOT NULL, + "version" INTEGER NOT NULL DEFAULT 1, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "agentId" TEXT NOT NULL, + "agentVersion" INTEGER NOT NULL, + "slug" TEXT NOT NULL, + "name" TEXT NOT NULL, + 
"subHeading" TEXT NOT NULL, + "videoUrl" TEXT, + "imageUrls" TEXT[], + "description" TEXT NOT NULL, + "categories" TEXT[], + "isFeatured" BOOLEAN NOT NULL DEFAULT false, + "isDeleted" BOOLEAN NOT NULL DEFAULT false, + "isAvailable" BOOLEAN NOT NULL DEFAULT true, + "isApproved" BOOLEAN NOT NULL DEFAULT false, + "storeListingId" TEXT, + + CONSTRAINT "StoreListingVersion_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "StoreListingReview" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "storeListingVersionId" TEXT NOT NULL, + "reviewByUserId" TEXT NOT NULL, + "score" INTEGER NOT NULL, + "comments" TEXT, + + CONSTRAINT "StoreListingReview_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "StoreListingSubmission" ( + "id" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "storeListingId" TEXT NOT NULL, + "storeListingVersionId" TEXT NOT NULL, + "reviewerId" TEXT NOT NULL, + "Status" "SubmissionStatus" NOT NULL DEFAULT 'PENDING', + "reviewComments" TEXT, + + CONSTRAINT "StoreListingSubmission_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE INDEX "AgentPreset_userId_idx" ON "AgentPreset"("userId"); + +-- CreateIndex +CREATE INDEX "UserAgent_userId_idx" ON "UserAgent"("userId"); + +-- CreateIndex +CREATE UNIQUE INDEX "Profile_username_key" ON "Profile"("username"); + +-- CreateIndex +CREATE INDEX "Profile_username_idx" ON "Profile"("username"); + +-- CreateIndex +CREATE INDEX "Profile_userId_idx" ON "Profile"("userId"); + +-- CreateIndex +CREATE INDEX "StoreListing_isApproved_idx" ON "StoreListing"("isApproved"); + +-- CreateIndex +CREATE INDEX "StoreListing_agentId_idx" ON "StoreListing"("agentId"); + +-- CreateIndex +CREATE INDEX "StoreListing_owningUserId_idx" ON "StoreListing"("owningUserId"); + +-- CreateIndex +CREATE INDEX "StoreListingVersion_agentId_agentVersion_isApproved_idx" ON "StoreListingVersion"("agentId", "agentVersion", "isApproved"); + +-- CreateIndex +CREATE UNIQUE INDEX "StoreListingVersion_agentId_agentVersion_key" ON "StoreListingVersion"("agentId", "agentVersion"); + +-- CreateIndex +CREATE INDEX "StoreListingReview_storeListingVersionId_idx" ON "StoreListingReview"("storeListingVersionId"); + +-- CreateIndex +CREATE UNIQUE INDEX "StoreListingReview_storeListingVersionId_reviewByUserId_key" ON "StoreListingReview"("storeListingVersionId", "reviewByUserId"); + +-- CreateIndex +CREATE INDEX "StoreListingSubmission_storeListingId_idx" ON "StoreListingSubmission"("storeListingId"); + +-- CreateIndex +CREATE INDEX "StoreListingSubmission_Status_idx" ON "StoreListingSubmission"("Status"); + +-- RenameForeignKey +ALTER TABLE "CreditTransaction" RENAME CONSTRAINT "UserBlockCredit_blockId_fkey" TO "CreditTransaction_blockId_fkey"; + +-- RenameForeignKey +ALTER TABLE "CreditTransaction" RENAME CONSTRAINT "UserBlockCredit_userId_fkey" TO "CreditTransaction_userId_fkey"; + +-- AddForeignKey +ALTER TABLE "AgentPreset" ADD CONSTRAINT "AgentPreset_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentPreset" ADD CONSTRAINT "AgentPreset_agentId_agentVersion_fkey" FOREIGN KEY ("agentId", "agentVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserAgent" ADD CONSTRAINT "UserAgent_userId_fkey" FOREIGN KEY ("userId") 
REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserAgent" ADD CONSTRAINT "UserAgent_agentId_agentVersion_fkey" FOREIGN KEY ("agentId", "agentVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "UserAgent" ADD CONSTRAINT "UserAgent_agentPresetId_fkey" FOREIGN KEY ("agentPresetId") REFERENCES "AgentPreset"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_agentPresetId_fkey" FOREIGN KEY ("agentPresetId") REFERENCES "AgentPreset"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_agentPresetId_fkey" FOREIGN KEY ("agentPresetId") REFERENCES "AgentPreset"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "Profile" ADD CONSTRAINT "Profile_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListing" ADD CONSTRAINT "StoreListing_agentId_agentVersion_fkey" FOREIGN KEY ("agentId", "agentVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListing" ADD CONSTRAINT "StoreListing_owningUserId_fkey" FOREIGN KEY ("owningUserId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingVersion" ADD CONSTRAINT "StoreListingVersion_agentId_agentVersion_fkey" FOREIGN KEY ("agentId", "agentVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingVersion" ADD CONSTRAINT "StoreListingVersion_storeListingId_fkey" FOREIGN KEY ("storeListingId") REFERENCES "StoreListing"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingReview" ADD CONSTRAINT "StoreListingReview_storeListingVersionId_fkey" FOREIGN KEY ("storeListingVersionId") REFERENCES "StoreListingVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingReview" ADD CONSTRAINT "StoreListingReview_reviewByUserId_fkey" FOREIGN KEY ("reviewByUserId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingSubmission" ADD CONSTRAINT "StoreListingSubmission_storeListingId_fkey" FOREIGN KEY ("storeListingId") REFERENCES "StoreListing"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingSubmission" ADD CONSTRAINT "StoreListingSubmission_storeListingVersionId_fkey" FOREIGN KEY ("storeListingVersionId") REFERENCES "StoreListingVersion"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "StoreListingSubmission" ADD CONSTRAINT "StoreListingSubmission_reviewerId_fkey" FOREIGN KEY ("reviewerId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- RenameIndex +ALTER INDEX "UserBlockCredit_userId_createdAt_idx" RENAME TO "CreditTransaction_userId_createdAt_idx"; diff --git a/autogpt_platform/backend/migrations/20241212142024_creator_featured_flag/migration.sql b/autogpt_platform/backend/migrations/20241212142024_creator_featured_flag/migration.sql new file mode 100644 index 000000000000..0dc9db59832e --- /dev/null +++ b/autogpt_platform/backend/migrations/20241212142024_creator_featured_flag/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "Profile" ADD 
COLUMN "isFeatured" BOOLEAN NOT NULL DEFAULT false; diff --git a/autogpt_platform/backend/migrations/20241212150828_agent_store_v2_views/migration.sql b/autogpt_platform/backend/migrations/20241212150828_agent_store_v2_views/migration.sql new file mode 100644 index 000000000000..18c488f2942d --- /dev/null +++ b/autogpt_platform/backend/migrations/20241212150828_agent_store_v2_views/migration.sql @@ -0,0 +1,119 @@ +BEGIN; + +CREATE VIEW "StoreAgent" AS +WITH ReviewStats AS ( + SELECT sl."id" AS "storeListingId", + COUNT(sr.id) AS review_count, + AVG(CAST(sr.score AS DECIMAL)) AS avg_rating + FROM "StoreListing" sl + JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl."id" + JOIN "StoreListingReview" sr ON sr."storeListingVersionId" = slv.id + WHERE sl."isDeleted" = FALSE + GROUP BY sl."id" +), +AgentRuns AS ( + SELECT "agentGraphId", COUNT(*) AS run_count + FROM "AgentGraphExecution" + GROUP BY "agentGraphId" +) +SELECT + sl.id AS listing_id, + slv.id AS "storeListingVersionId", + slv."createdAt" AS updated_at, + slv.slug, + a.name AS agent_name, + slv."videoUrl" AS agent_video, + COALESCE(slv."imageUrls", ARRAY[]::TEXT[]) AS agent_image, + slv."isFeatured" AS featured, + p.username AS creator_username, + p."avatarUrl" AS creator_avatar, + slv."subHeading" AS sub_heading, + slv.description, + slv.categories, + COALESCE(ar.run_count, 0) AS runs, + CAST(COALESCE(rs.avg_rating, 0.0) AS DOUBLE PRECISION) AS rating, + ARRAY_AGG(DISTINCT CAST(slv.version AS TEXT)) AS versions +FROM "StoreListing" sl +JOIN "AgentGraph" a ON sl."agentId" = a.id AND sl."agentVersion" = a."version" +LEFT JOIN "Profile" p ON sl."owningUserId" = p."userId" +LEFT JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id +LEFT JOIN ReviewStats rs ON sl.id = rs."storeListingId" +LEFT JOIN AgentRuns ar ON a.id = ar."agentGraphId" +WHERE sl."isDeleted" = FALSE + AND sl."isApproved" = TRUE +GROUP BY sl.id, slv.id, slv.slug, slv."createdAt", a.name, slv."videoUrl", slv."imageUrls", slv."isFeatured", + p.username, p."avatarUrl", slv."subHeading", slv.description, slv.categories, + ar.run_count, rs.avg_rating; + +CREATE VIEW "Creator" AS +WITH AgentStats AS ( + SELECT + p.username, + COUNT(DISTINCT sl.id) as num_agents, + AVG(CAST(COALESCE(sr.score, 0) AS DECIMAL)) as agent_rating, + SUM(COALESCE(age.run_count, 0)) as agent_runs + FROM "Profile" p + LEFT JOIN "StoreListing" sl ON sl."owningUserId" = p."userId" + LEFT JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id + LEFT JOIN "StoreListingReview" sr ON sr."storeListingVersionId" = slv.id + LEFT JOIN ( + SELECT "agentGraphId", COUNT(*) as run_count + FROM "AgentGraphExecution" + GROUP BY "agentGraphId" + ) age ON age."agentGraphId" = sl."agentId" + WHERE sl."isDeleted" = FALSE AND sl."isApproved" = TRUE + GROUP BY p.username +) +SELECT + p.username, + p.name, + p."avatarUrl" as avatar_url, + p.description, + ARRAY_AGG(DISTINCT c) FILTER (WHERE c IS NOT NULL) as top_categories, + p.links, + p."isFeatured" as is_featured, + COALESCE(ast.num_agents, 0) as num_agents, + COALESCE(ast.agent_rating, 0.0) as agent_rating, + COALESCE(ast.agent_runs, 0) as agent_runs +FROM "Profile" p +LEFT JOIN AgentStats ast ON ast.username = p.username +LEFT JOIN LATERAL ( + SELECT UNNEST(slv.categories) as c + FROM "StoreListing" sl + JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id + WHERE sl."owningUserId" = p."userId" + AND sl."isDeleted" = FALSE + AND sl."isApproved" = TRUE +) cats ON true +GROUP BY p.username, p.name, p."avatarUrl", p.description, 
p.links, p."isFeatured", + ast.num_agents, ast.agent_rating, ast.agent_runs; + +CREATE VIEW "StoreSubmission" AS +SELECT + sl.id as listing_id, + sl."owningUserId" as user_id, + slv."agentId" as agent_id, + slv."version" as agent_version, + slv.slug, + slv.name, + slv."subHeading" as sub_heading, + slv.description, + slv."imageUrls" as image_urls, + slv."createdAt" as date_submitted, + COALESCE(sls."Status", 'PENDING') as status, + COALESCE(ar.run_count, 0) as runs, + CAST(COALESCE(AVG(CAST(sr.score AS DECIMAL)), 0.0) AS DOUBLE PRECISION) as rating +FROM "StoreListing" sl +JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id +LEFT JOIN "StoreListingSubmission" sls ON sls."storeListingId" = sl.id +LEFT JOIN "StoreListingReview" sr ON sr."storeListingVersionId" = slv.id +LEFT JOIN ( + SELECT "agentGraphId", COUNT(*) as run_count + FROM "AgentGraphExecution" + GROUP BY "agentGraphId" +) ar ON ar."agentGraphId" = slv."agentId" +WHERE sl."isDeleted" = FALSE +GROUP BY sl.id, sl."owningUserId", slv."agentId", slv."version", slv.slug, slv.name, slv."subHeading", + slv.description, slv."imageUrls", slv."createdAt", sls."Status", ar.run_count; + +COMMIT; \ No newline at end of file diff --git a/autogpt_platform/backend/migrations/20241230102007_update_store_agent_view/migration.sql b/autogpt_platform/backend/migrations/20241230102007_update_store_agent_view/migration.sql new file mode 100644 index 000000000000..76cabcb57426 --- /dev/null +++ b/autogpt_platform/backend/migrations/20241230102007_update_store_agent_view/migration.sql @@ -0,0 +1,50 @@ +BEGIN; + +DROP VIEW IF EXISTS "StoreAgent"; + +CREATE VIEW "StoreAgent" AS +WITH ReviewStats AS ( + SELECT sl."id" AS "storeListingId", + COUNT(sr.id) AS review_count, + AVG(CAST(sr.score AS DECIMAL)) AS avg_rating + FROM "StoreListing" sl + JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl."id" + JOIN "StoreListingReview" sr ON sr."storeListingVersionId" = slv.id + WHERE sl."isDeleted" = FALSE + GROUP BY sl."id" +), +AgentRuns AS ( + SELECT "agentGraphId", COUNT(*) AS run_count + FROM "AgentGraphExecution" + GROUP BY "agentGraphId" +) +SELECT + sl.id AS listing_id, + slv.id AS "storeListingVersionId", + slv."createdAt" AS updated_at, + slv.slug, + slv.name AS agent_name, + slv."videoUrl" AS agent_video, + COALESCE(slv."imageUrls", ARRAY[]::TEXT[]) AS agent_image, + slv."isFeatured" AS featured, + p.username AS creator_username, + p."avatarUrl" AS creator_avatar, + slv."subHeading" AS sub_heading, + slv.description, + slv.categories, + COALESCE(ar.run_count, 0) AS runs, + CAST(COALESCE(rs.avg_rating, 0.0) AS DOUBLE PRECISION) AS rating, + ARRAY_AGG(DISTINCT CAST(slv.version AS TEXT)) AS versions +FROM "StoreListing" sl +JOIN "AgentGraph" a ON sl."agentId" = a.id AND sl."agentVersion" = a."version" +LEFT JOIN "Profile" p ON sl."owningUserId" = p."userId" +LEFT JOIN "StoreListingVersion" slv ON slv."storeListingId" = sl.id +LEFT JOIN ReviewStats rs ON sl.id = rs."storeListingId" +LEFT JOIN AgentRuns ar ON a.id = ar."agentGraphId" +WHERE sl."isDeleted" = FALSE + AND sl."isApproved" = TRUE +GROUP BY sl.id, slv.id, slv.slug, slv."createdAt", slv.name, slv."videoUrl", slv."imageUrls", slv."isFeatured", + p.username, p."avatarUrl", slv."subHeading", slv.description, slv.categories, + ar.run_count, rs.avg_rating; + +COMMIT; diff --git a/autogpt_platform/backend/migrations/20250103143207_add_terminated_execution_status/migration.sql b/autogpt_platform/backend/migrations/20250103143207_add_terminated_execution_status/migration.sql new file mode 
100644 index 000000000000..0b3a2a27b9be --- /dev/null +++ b/autogpt_platform/backend/migrations/20250103143207_add_terminated_execution_status/migration.sql @@ -0,0 +1,2 @@ +-- Add "TERMINATED" to execution status enum type +ALTER TYPE "AgentExecutionStatus" ADD VALUE 'TERMINATED'; diff --git a/autogpt_platform/backend/migrations/20250105254106_migrate_brace_to_double_brace_string_format/migration.sql b/autogpt_platform/backend/migrations/20250105254106_migrate_brace_to_double_brace_string_format/migration.sql new file mode 100644 index 000000000000..2a7141b0f9c8 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250105254106_migrate_brace_to_double_brace_string_format/migration.sql @@ -0,0 +1,86 @@ +/* + Warnings: + - You are about to replace a single brace string input format for the following blocks: + - AgentOutputBlock + - FillTextTemplateBlock + - AITextGeneratorBlock + - AIStructuredResponseGeneratorBlock + with a double brace format. + - This migration can be slow for a large AgentNode table with many rows to update. +*/ +BEGIN; +SET LOCAL statement_timeout = '10min'; + +WITH to_update AS ( + SELECT + "id", + "agentBlockId", + "constantInput"::jsonb AS j + FROM "AgentNode" + WHERE + "agentBlockId" IN ( + '363ae599-353e-4804-937e-b2ee3cef3da4', -- AgentOutputBlock + 'db7d8f02-2f44-4c55-ab7a-eae0941f0c30', -- FillTextTemplateBlock + '1f292d4a-41a4-4977-9684-7c8d560b9f91', -- AITextGeneratorBlock + 'ed55ac19-356e-4243-a6cb-bc599e9b716f' -- AIStructuredResponseGeneratorBlock + ) + AND ( + "constantInput"::jsonb->>'format' ~ '(?<!\{)\{[^{}]+\}(?!\})' + OR "constantInput"::jsonb->>'prompt' ~ '(?<!\{)\{[^{}]+\}(?!\})' + OR "constantInput"::jsonb->>'sys_prompt' ~ '(?<!\{)\{[^{}]+\}(?!\})' + ) +) [... remainder of this migration: an UPDATE on "AgentNode" that applies regexp_replace with the same pattern to the 'format', 'prompt', and 'sys_prompt' values, rewriting single-brace {var} to double-brace {{var}}, followed by COMMIT ...] [... diff header for the migration that drops CreditTransaction."blockId" ...] +/* + Warnings: + + - You are about to drop the column `blockId` on the `CreditTransaction` table. The data in the column is migrated into metadata->block_id. + +*/ +BEGIN; + +-- DropForeignKey blockId +ALTER TABLE "CreditTransaction" DROP CONSTRAINT "CreditTransaction_blockId_fkey"; + +-- Migrate blockId into metadata->"block_id" +UPDATE "CreditTransaction" +SET "metadata" = jsonb_set( + COALESCE("metadata"::jsonb, '{}'), + '{block_id}', + to_jsonb("blockId") +) +WHERE "blockId" IS NOT NULL; + +-- AlterTable drop blockId +ALTER TABLE "CreditTransaction" DROP COLUMN "blockId"; + +COMMIT; + +/* + The indexes dropped below were part of the cleanup during the schema change applied above. + They were not useful and their removal will not impact anything. +*/ + +-- DropIndex +DROP INDEX "StoreListingReview_storeListingVersionId_idx"; + +-- DropIndex +DROP INDEX "StoreListingSubmission_Status_idx"; diff --git a/autogpt_platform/backend/migrations/20250115432618_add_auto_top_up_config/migration.sql b/autogpt_platform/backend/migrations/20250115432618_add_auto_top_up_config/migration.sql new file mode 100644 index 000000000000..c336b3509c96 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250115432618_add_auto_top_up_config/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "User" ADD COLUMN "topUpConfig" JSONB; diff --git a/autogpt_platform/backend/migrations/20250124211747_make_store_listing_version_id_unique/migration.sql b/autogpt_platform/backend/migrations/20250124211747_make_store_listing_version_id_unique/migration.sql new file mode 100644 index 000000000000..a1841b917c90 --- /dev/null +++ b/autogpt_platform/backend/migrations/20250124211747_make_store_listing_version_id_unique/migration.sql @@ -0,0 +1,8 @@ +/* + Warnings: + + - A unique constraint covering the columns `[storeListingVersionId]` on the table `StoreListingSubmission` will be added. If there are existing duplicate values, this will fail.
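-- Illustrative sketch only, not part of the migration: before applying the
-- unique index created just below, any duplicates that would make it fail can
-- be listed with a query along these lines (table and column names are taken
-- from that CREATE UNIQUE INDEX statement; the alias duplicate_count is arbitrary).
SELECT "storeListingVersionId", COUNT(*) AS duplicate_count
FROM "StoreListingSubmission"
GROUP BY "storeListingVersionId"
HAVING COUNT(*) > 1;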
+ +*/ +-- CreateIndex +CREATE UNIQUE INDEX "StoreListingSubmission_storeListingVersionId_key" ON "StoreListingSubmission"("storeListingVersionId"); diff --git a/autogpt_platform/backend/migrations/migration_lock.toml b/autogpt_platform/backend/migrations/migration_lock.toml new file mode 100644 index 000000000000..fbffa92c2bb7 --- /dev/null +++ b/autogpt_platform/backend/migrations/migration_lock.toml @@ -0,0 +1,3 @@ +# Please do not edit this file manually +# It should be added in your version-control system (i.e. Git) +provider = "postgresql" \ No newline at end of file diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock new file mode 100644 index 000000000000..8a195bc1b40c --- /dev/null +++ b/autogpt_platform/backend/poetry.lock @@ -0,0 +1,5037 @@ +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. + +[[package]] +name = "aio-pika" +version = "9.5.4" +description = "Wrapper around the aiormq for asyncio and humans" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "aio_pika-9.5.4-py3-none-any.whl", hash = "sha256:a308f904cd4f97e2705662fe23cde37c6c7eddde0e1ea17467028fac6c474e15"}, + {file = "aio_pika-9.5.4.tar.gz", hash = "sha256:5a1bad96a75fa5ac3aa5b2bbd3eca971ea9abda70693e4334e6e629639f8a8fc"}, +] + +[package.dependencies] +aiormq = ">=6.8,<6.9" +exceptiongroup = ">=1,<2" +yarl = "*" + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.4" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, +] + +[[package]] +name = "aiohttp" +version = "3.11.11" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, + 
{file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, + {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, + {file = "aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, + {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, + {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, + {file = 
"aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, + {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, + {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, + {file = 
"aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, + {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, + {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, + {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, + {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +async-timeout = {version = ">=4.0,<6.0", markers = "python_version < \"3.11\""} +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl 
= ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] + +[[package]] +name = "aiormq" +version = "6.8.1" +description = "Pure python AMQP asynchronous client library" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "aiormq-6.8.1-py3-none-any.whl", hash = "sha256:5da896c8624193708f9409ffad0b20395010e2747f22aa4150593837f40aa017"}, + {file = "aiormq-6.8.1.tar.gz", hash = "sha256:a964ab09634be1da1f9298ce225b310859763d5cf83ef3a7eae1a6dc6bd1da1a"}, +] + +[package.dependencies] +pamqp = "3.3.0" +yarl = "*" + +[[package]] +name = "aiosignal" +version = "1.3.2" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anthropic" +version = "0.40.0" +description = "The official Python library for the anthropic API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "anthropic-0.40.0-py3-none-any.whl", hash = "sha256:442028ae8790ff9e3b6f8912043918755af1230d193904ae2ef78cc22995280c"}, + {file = "anthropic-0.40.0.tar.gz", hash = "sha256:3efeca6d9e97813f93ed34322c6c7ea2279bf0824cd0aa71b59ce222665e2b87"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + +[[package]] +name = "anyio" +version = "4.8.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "apscheduler" +version = "3.11.0" +description = "In-process task scheduler with Cron-like capabilities" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + 
{file = "APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da"}, + {file = "apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133"}, +] + +[package.dependencies] +tzlocal = ">=3.0" + +[package.extras] +doc = ["packaging", "sphinx", "sphinx-rtd-theme (>=1.3.0)"] +etcd = ["etcd3", "protobuf (<=3.21.0)"] +gevent = ["gevent"] +mongodb = ["pymongo (>=3.0)"] +redis = ["redis (>=3.0)"] +rethinkdb = ["rethinkdb (>=2.4.0)"] +sqlalchemy = ["sqlalchemy (>=1.4)"] +test = ["APScheduler[etcd,mongodb,redis,rethinkdb,sqlalchemy,tornado,zookeeper]", "PySide6", "anyio (>=4.5.2)", "gevent", "pytest", "pytz", "twisted"] +tornado = ["tornado (>=4.3)"] +twisted = ["twisted"] +zookeeper = ["kazoo"] + +[[package]] +name = "async-timeout" +version = "5.0.1" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_full_version < \"3.11.3\"" +files = [ + {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, + {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, +] + +[[package]] +name = "attrs" +version = "24.3.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, + {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, +] + +[package.extras] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] + +[[package]] +name = "autogpt-libs" +version = "0.2.0" +description = "Shared libraries across NextGen AutoGPT" +optional = false +python-versions = ">=3.10,<4.0" +groups = ["main"] +files = [] +develop = true + +[package.dependencies] +colorama = "^0.4.6" +expiringdict = "^1.2.2" +google-cloud-logging = "^3.11.3" +pydantic = "^2.10.5" +pydantic-settings = "^2.7.1" +pyjwt = "^2.10.1" +pytest-asyncio = "^0.25.2" +pytest-mock = "^3.14.0" +python-dotenv = "^1.0.1" +supabase = "^2.11.0" + +[package.source] +type = "directory" +url = "../autogpt_libs" + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +groups = ["main"] +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "black" 
+version = "24.10.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = 
">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.10)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "cachetools" +version = "5.5.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cachetools-5.5.1-py3-none-any.whl", hash = "sha256:b76651fdc3b24ead3c648bbdeeb940c1b04d365b38b4af66788f9ec4a81d42bb"}, + {file = "cachetools-5.5.1.tar.gz", hash = "sha256:70f238fbba50383ef62e55c6aff6d9673175fe59f7c6782c7a0b9e38f4a9df95"}, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, + {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = 
"cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = 
"charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = 
"charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {dev = "sys_platform == \"win32\" or platform_system == \"Windows\""} + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = 
"cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "deprecated" +version = "1.2.16" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] +files = [ + {file = "Deprecated-1.2.16-py2.py3-none-any.whl", hash = "sha256:4c8e429ada6573698ba723f9c4ea53006e990042a45db938e412f1f9d4b9ffd9"}, + {file = "deprecated-1.2.16.tar.gz", hash = "sha256:6ec624c168290715d56c6af7c1c1066ea5c732bd56427b1651db75ede923aa38"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "jinja2 (>=3.0.3,<3.1.0)", "setuptools", "sphinx (<2)", "tox"] + +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + +[[package]] +name = "discord-py" +version = "2.4.0" +description = "A Python wrapper for the Discord API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "discord.py-2.4.0-py3-none-any.whl", hash = "sha256:b8af6711c70f7e62160bfbecb55be699b5cb69d007426759ab8ab06b1bd77d1d"}, + {file = "discord_py-2.4.0.tar.gz", hash = "sha256:d07cb2a223a185873a1d0ee78b9faa9597e45b3f6186df21a95cec1e9bcdc9a5"}, +] + +[package.dependencies] +aiohttp = ">=3.7.4,<4" + +[package.extras] +docs = ["sphinx (==4.4.0)", "sphinx-inline-tabs (==2023.4.21)", "sphinxcontrib-applehelp (==1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (==2.0.1)", "sphinxcontrib-jsmath (==1.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)", "sphinxcontrib-trio (==1.1.2)", "sphinxcontrib-websupport (==1.2.4)", "typing-extensions (>=4.3,<5)"] +speed = ["Brotli", "aiodns (>=1.1)", "cchardet (==2.1.7)", "orjson (>=3.5.4)"] +test = ["coverage[toml]", "pytest", "pytest-asyncio", "pytest-cov", "pytest-mock", "typing-extensions (>=4.3,<5)", "tzdata"] +voice = ["PyNaCl (>=1.3.0,<1.6)"] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "e2b" +version = "1.0.5" +description = "E2B SDK that give agents cloud environments" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "e2b-1.0.5-py3-none-any.whl", hash = "sha256:a71bdec46f33d3e38e87d475d7fd2939bd7b6b753b819c9639ca211cd375b79e"}, + {file = "e2b-1.0.5.tar.gz", hash = "sha256:43c82705af7b7d4415c2510ff77dab4dc075351e0b769d6adf8e0d7bb4868d13"}, +] + +[package.dependencies] +attrs = ">=23.2.0" +httpcore = ">=1.0.5,<2.0.0" +httpx = ">=0.27.0,<1.0.0" +packaging = ">=24.1" +protobuf = ">=3.20.0,<6.0.0" +python-dateutil = ">=2.8.2" +typing-extensions = ">=4.1.0" + +[[package]] +name = "e2b-code-interpreter" +version = "1.0.4" +description = "E2B Code Interpreter - Stateful code execution" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "e2b_code_interpreter-1.0.4-py3-none-any.whl", hash = 
"sha256:e8cea4946b3457072a524250aee712f7f8d44834b91cd9c13da3bdf96eda1a6e"}, + {file = "e2b_code_interpreter-1.0.4.tar.gz", hash = "sha256:fec5651d98ca0d03dd038c5df943a0beaeb59c6d422112356f55f2b662d8dea1"}, +] + +[package.dependencies] +attrs = ">=21.3.0" +e2b = ">=1.0.4,<2.0.0" +httpx = ">=0.20.0,<1.0.0" + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] +markers = {dev = "python_version < \"3.11\""} + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "expiringdict" +version = "1.2.2" +description = "Dictionary with auto-expiring values for caching purposes" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"}, + {file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"}, +] + +[package.extras] +tests = ["coverage", "coveralls", "dill", "mock", "nose"] + +[[package]] +name = "faker" +version = "33.3.1" +description = "Faker is a Python package that generates fake data for you." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "Faker-33.3.1-py3-none-any.whl", hash = "sha256:ac4cf2f967ce02c898efa50651c43180bd658a7707cfd676fcc5410ad1482c03"}, + {file = "faker-33.3.1.tar.gz", hash = "sha256:49dde3b06a5602177bc2ad013149b6f60a290b7154539180d37b6f876ae79b20"}, +] + +[package.dependencies] +python-dateutil = ">=2.4" +typing-extensions = "*" + +[[package]] +name = "fastapi" +version = "0.115.7" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fastapi-0.115.7-py3-none-any.whl", hash = "sha256:eb6a8c8bf7f26009e8147111ff15b5177a0e19bb4a45bc3486ab14804539d21e"}, + {file = "fastapi-0.115.7.tar.gz", hash = "sha256:0f106da6c01d88a6786b3248fb4d7a940d071f6f488488898ad5d354b25ed015"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.46.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "feedparser" +version = "6.0.11" +description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "feedparser-6.0.11-py3-none-any.whl", hash = "sha256:0be7ee7b395572b19ebeb1d6aafb0028dee11169f1c934e0ed67d54992f4ad45"}, + 
{file = "feedparser-6.0.11.tar.gz", hash = "sha256:c9d0407b64c6f2a065d0ebb292c2b35c01050cc0dc33757461aaabdc4c4184d5"}, +] + +[package.dependencies] +sgmllib3k = "*" + +[[package]] +name = "flake8" +version = "7.1.1" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +groups = ["main"] +files = [ + {file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"}, + {file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.12.0,<2.13.0" +pyflakes = ">=3.2.0,<3.3.0" + +[[package]] +name = "frozenlist" +version = "1.5.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = 
"frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = 
"frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = 
"frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, +] + +[[package]] +name = "google-api-core" +version = "2.24.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_api_core-2.24.0-py3-none-any.whl", hash = "sha256:10d82ac0fca69c82a25b3efdeefccf6f28e02ebb97925a8cce8edbfe379929d9"}, + {file = "google_api_core-2.24.0.tar.gz", hash = "sha256:e255640547a597a4da010876d333208ddac417d60add22b6851a0c66a831fcaf"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +grpcio = [ + {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +grpcio-status = [ + {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, +] +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +async-rest = ["google-auth[aiohttp] (>=2.35.0,<3.0.dev0)"] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + +[[package]] +name = "google-api-python-client" +version = "2.159.0" +description = "Google API Client Library for Python" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_api_python_client-2.159.0-py2.py3-none-any.whl", hash = "sha256:baef0bb631a60a0bd7c0bf12a5499e3a40cd4388484de7ee55c1950bf820a0cf"}, + {file = "google_api_python_client-2.159.0.tar.gz", hash = 
"sha256:55197f430f25c907394b44fa078545ffef89d33fd4dca501b7db9f0d8e224bd6"}, +] + +[package.dependencies] +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0.dev0" +google-auth = ">=1.32.0,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0.dev0" +google-auth-httplib2 = ">=0.2.0,<1.0.0" +httplib2 = ">=0.19.0,<1.dev0" +uritemplate = ">=3.0.1,<5" + +[[package]] +name = "google-auth" +version = "2.38.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, + {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "google-auth-httplib2" +version = "0.2.0" +description = "Google Authentication Library: httplib2 transport" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "google-auth-httplib2-0.2.0.tar.gz", hash = "sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05"}, + {file = "google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d"}, +] + +[package.dependencies] +google-auth = "*" +httplib2 = ">=0.19.0" + +[[package]] +name = "google-auth-oauthlib" +version = "1.2.1" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "google_auth_oauthlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:2d58a27262d55aa1b87678c3ba7142a080098cbc2024f903c62355deb235d91f"}, + {file = "google_auth_oauthlib-1.2.1.tar.gz", hash = "sha256:afd0cad092a2eaa53cd8e8298557d6de1034c6cb4a740500b5357b648af97263"}, +] + +[package.dependencies] +google-auth = ">=2.15.0" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click (>=6.0.0)"] + +[[package]] +name = "google-cloud-appengine-logging" +version = "1.5.0" +description = "Google Cloud Appengine Logging API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_appengine_logging-1.5.0-py2.py3-none-any.whl", hash = "sha256:81e36606e13c377c4898c918542888abb7a6896837ac5f559011c7729fc63d8a"}, + {file = "google_cloud_appengine_logging-1.5.0.tar.gz", hash = "sha256:39a2df694d97981ed00ef5df541f7cfcca920a92496707557f2b07bb7ba9d67a"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-audit-log" +version = "0.3.0" +description = "Google Cloud Audit Protos" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = 
"sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"}, + {file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.56.2,<2.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-logging" +version = "3.11.3" +description = "Stackdriver Logging API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"}, + {file = "google_cloud_logging-3.11.3.tar.gz", hash = "sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +google-cloud-appengine-logging = ">=0.1.3,<2.0.0dev" +google-cloud-audit-log = ">=0.2.4,<1.0.0dev" +google-cloud-core = ">=2.0.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +opentelemetry-api = ">=1.9.0" +proto-plus = [ + {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""}, + {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""}, +] +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-storage" +version = "2.19.0" +description = "Google Cloud Storage API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_cloud_storage-2.19.0-py2.py3-none-any.whl", hash = "sha256:aeb971b5c29cf8ab98445082cbfe7b161a1f48ed275822f59ed3f1524ea54fba"}, + {file = "google_cloud_storage-2.19.0.tar.gz", hash = "sha256:cd05e9e7191ba6cb68934d8eb76054d9be4562aa89dbc4236feee4d7d51342b2"}, +] + +[package.dependencies] +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.7.2" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<6.0.0dev)"] +tracing = ["opentelemetry-api (>=1.1.0)"] + +[[package]] +name = "google-crc32c" +version = "1.6.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa"}, + {file = "google_crc32c-1.6.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = 
"sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e"}, + {file = "google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc"}, + {file = "google_crc32c-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4"}, + {file = "google_crc32c-1.6.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d"}, + {file = "google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f"}, + {file = "google_crc32c-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d"}, + {file = "google_crc32c-1.6.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00"}, + {file = "google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3"}, + {file = "google_crc32c-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205"}, + {file = "google_crc32c-1.6.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871"}, + {file = "google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57"}, + {file = "google_crc32c-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c"}, + {file = "google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc"}, + {file = 
"google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24"}, + {file = "google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d"}, + {file = "google_crc32c-1.6.0.tar.gz", hash = "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc"}, +] + +[package.extras] +testing = ["pytest"] + +[[package]] +name = "google-resumable-media" +version = "2.7.2" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa"}, + {file = "google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + +[[package]] +name = "googleapis-common-protos" +version = "1.66.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, + {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, +] + +[package.dependencies] +grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""} +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "googlemaps" +version = "4.10.0" +description = "Python client library for Google Maps Platform" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "googlemaps-4.10.0.tar.gz", hash = "sha256:3055fcbb1aa262a9159b589b5e6af762b10e80634ae11c59495bd44867e47d88"}, +] + +[package.dependencies] +requests = ">=2.20.0,<3.0" + +[[package]] +name = "gotrue" +version = "2.11.1" +description = "Python Client Library for Supabase Auth" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "gotrue-2.11.1-py3-none-any.whl", hash = "sha256:1b2d915bdc65fd0ad608532759ce9c72fa2e910145c1e6901f2188519e7bcd2d"}, + {file = "gotrue-2.11.1.tar.gz", hash = "sha256:5594ceee60bd873e5f4fdd028b08dece3906f6013b6ed08e7786b71c0092fed0"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +pydantic = ">=1.10,<3" + +[[package]] +name = "greenlet" +version = "3.1.1" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +files = [ + 
{file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = 
"greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = 
"greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "groq" +version = "0.13.1" +description = "The official Python library for the groq API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "groq-0.13.1-py3-none-any.whl", hash = "sha256:0c5d1d6df93de55de705fe73729b79baaa0c871f7575d6aa64b2962b56101b3e"}, + {file = "groq-0.13.1.tar.gz", hash = "sha256:588fd5bee984f4eb46ec89552778d5698b9e9614435defef868645c19463cbcc"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +typing-extensions = ">=4.10,<5" + +[[package]] +name = "grpc-google-iam-v1" +version = "0.14.0" +description = "IAM API client library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "grpc_google_iam_v1-0.14.0-py2.py3-none-any.whl", hash = "sha256:fb4a084b30099ba3ab07d61d620a0d4429570b13ff53bd37bac75235f98b7da4"}, + {file = "grpc_google_iam_v1-0.14.0.tar.gz", hash = "sha256:c66e07aa642e39bb37950f9e7f491f70dad150ac9801263b42b2814307c2df99"}, +] + +[package.dependencies] +googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]} +grpcio = ">=1.44.0,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + +[[package]] +name = "grpcio" +version = "1.70.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"}, + {file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:374d014f29f9dfdb40510b041792e0e2828a1389281eb590df066e1cc2b404e5"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2af68a6f5c8f78d56c145161544ad0febbd7479524a59c16b3e25053f39c87f"}, + {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7df14b2dcd1102a2ec32f621cc9fab6695effef516efbc6b063ad749867295"}, + {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c78b339869f4dbf89881e0b6fbf376313e4f845a42840a7bdf42ee6caed4b11f"}, + {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58ad9ba575b39edef71f4798fdb5c7b6d02ad36d47949cd381d4392a5c9cbcd3"}, + 
{file = "grpcio-1.70.0-cp310-cp310-win32.whl", hash = "sha256:2b0d02e4b25a5c1f9b6c7745d4fa06efc9fd6a611af0fb38d3ba956786b95199"}, + {file = "grpcio-1.70.0-cp310-cp310-win_amd64.whl", hash = "sha256:0de706c0a5bb9d841e353f6343a9defc9fc35ec61d6eb6111802f3aa9fef29e1"}, + {file = "grpcio-1.70.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:17325b0be0c068f35770f944124e8839ea3185d6d54862800fc28cc2ffad205a"}, + {file = "grpcio-1.70.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:dbe41ad140df911e796d4463168e33ef80a24f5d21ef4d1e310553fcd2c4a386"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5ea67c72101d687d44d9c56068328da39c9ccba634cabb336075fae2eab0d04b"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb5277db254ab7586769e490b7b22f4ddab3876c490da0a1a9d7c695ccf0bf77"}, + {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7831a0fc1beeeb7759f737f5acd9fdcda520e955049512d68fda03d91186eea"}, + {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27cc75e22c5dba1fbaf5a66c778e36ca9b8ce850bf58a9db887754593080d839"}, + {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63764963412e22f0491d0d32833d71087288f4e24cbcddbae82476bfa1d81fd"}, + {file = "grpcio-1.70.0-cp311-cp311-win32.whl", hash = "sha256:bb491125103c800ec209d84c9b51f1c60ea456038e4734688004f377cfacc113"}, + {file = "grpcio-1.70.0-cp311-cp311-win_amd64.whl", hash = "sha256:d24035d49e026353eb042bf7b058fb831db3e06d52bee75c5f2f3ab453e71aca"}, + {file = "grpcio-1.70.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:ef4c14508299b1406c32bdbb9fb7b47612ab979b04cf2b27686ea31882387cff"}, + {file = "grpcio-1.70.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:aa47688a65643afd8b166928a1da6247d3f46a2784d301e48ca1cc394d2ffb40"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:880bfb43b1bb8905701b926274eafce5c70a105bc6b99e25f62e98ad59cb278e"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e654c4b17d07eab259d392e12b149c3a134ec52b11ecdc6a515b39aceeec898"}, + {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2394e3381071045a706ee2eeb6e08962dd87e8999b90ac15c55f56fa5a8c9597"}, + {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b3c76701428d2df01964bc6479422f20e62fcbc0a37d82ebd58050b86926ef8c"}, + {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac073fe1c4cd856ebcf49e9ed6240f4f84d7a4e6ee95baa5d66ea05d3dd0df7f"}, + {file = "grpcio-1.70.0-cp312-cp312-win32.whl", hash = "sha256:cd24d2d9d380fbbee7a5ac86afe9787813f285e684b0271599f95a51bce33528"}, + {file = "grpcio-1.70.0-cp312-cp312-win_amd64.whl", hash = "sha256:0495c86a55a04a874c7627fd33e5beaee771917d92c0e6d9d797628ac40e7655"}, + {file = "grpcio-1.70.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa573896aeb7d7ce10b1fa425ba263e8dddd83d71530d1322fd3a16f31257b4a"}, + {file = "grpcio-1.70.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:d405b005018fd516c9ac529f4b4122342f60ec1cee181788249372524e6db429"}, + {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f32090238b720eb585248654db8e3afc87b48d26ac423c8dde8334a232ff53c9"}, + {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa089a734f24ee5f6880c83d043e4f46bf812fcea5181dcb3a572db1e79e01c"}, + {file = 
"grpcio-1.70.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19375f0300b96c0117aca118d400e76fede6db6e91f3c34b7b035822e06c35f"}, + {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7c73c42102e4a5ec76608d9b60227d917cea46dff4d11d372f64cbeb56d259d0"}, + {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0a5c78d5198a1f0aa60006cd6eb1c912b4a1520b6a3968e677dbcba215fabb40"}, + {file = "grpcio-1.70.0-cp313-cp313-win32.whl", hash = "sha256:fe9dbd916df3b60e865258a8c72ac98f3ac9e2a9542dcb72b7a34d236242a5ce"}, + {file = "grpcio-1.70.0-cp313-cp313-win_amd64.whl", hash = "sha256:4119fed8abb7ff6c32e3d2255301e59c316c22d31ab812b3fbcbaf3d0d87cc68"}, + {file = "grpcio-1.70.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:8058667a755f97407fca257c844018b80004ae8035565ebc2812cc550110718d"}, + {file = "grpcio-1.70.0-cp38-cp38-macosx_10_14_universal2.whl", hash = "sha256:879a61bf52ff8ccacbedf534665bb5478ec8e86ad483e76fe4f729aaef867cab"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ba0a173f4feacf90ee618fbc1a27956bfd21260cd31ced9bc707ef551ff7dc7"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558c386ecb0148f4f99b1a65160f9d4b790ed3163e8610d11db47838d452512d"}, + {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:412faabcc787bbc826f51be261ae5fa996b21263de5368a55dc2cf824dc5090e"}, + {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3b0f01f6ed9994d7a0b27eeddea43ceac1b7e6f3f9d86aeec0f0064b8cf50fdb"}, + {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7385b1cb064734005204bc8994eed7dcb801ed6c2eda283f613ad8c6c75cf873"}, + {file = "grpcio-1.70.0-cp38-cp38-win32.whl", hash = "sha256:07269ff4940f6fb6710951116a04cd70284da86d0a4368fd5a3b552744511f5a"}, + {file = "grpcio-1.70.0-cp38-cp38-win_amd64.whl", hash = "sha256:aba19419aef9b254e15011b230a180e26e0f6864c90406fdbc255f01d83bc83c"}, + {file = "grpcio-1.70.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4f1937f47c77392ccd555728f564a49128b6a197a05a5cd527b796d36f3387d0"}, + {file = "grpcio-1.70.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:0cd430b9215a15c10b0e7d78f51e8a39d6cf2ea819fd635a7214fae600b1da27"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:e27585831aa6b57b9250abaf147003e126cd3a6c6ca0c531a01996f31709bed1"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1af8e15b0f0fe0eac75195992a63df17579553b0c4af9f8362cc7cc99ccddf4"}, + {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbce24409beaee911c574a3d75d12ffb8c3e3dd1b813321b1d7a96bbcac46bf4"}, + {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ff4a8112a79464919bb21c18e956c54add43ec9a4850e3949da54f61c241a4a6"}, + {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5413549fdf0b14046c545e19cfc4eb1e37e9e1ebba0ca390a8d4e9963cab44d2"}, + {file = "grpcio-1.70.0-cp39-cp39-win32.whl", hash = "sha256:b745d2c41b27650095e81dea7091668c040457483c9bdb5d0d9de8f8eb25e59f"}, + {file = "grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c"}, + {file = "grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.70.0)"] + +[[package]] +name = 
"grpcio-status" +version = "1.70.0" +description = "Status proto mapping for gRPC" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "grpcio_status-1.70.0-py3-none-any.whl", hash = "sha256:fc5a2ae2b9b1c1969cc49f3262676e6854aa2398ec69cb5bd6c47cd501904a85"}, + {file = "grpcio_status-1.70.0.tar.gz", hash = "sha256:0e7b42816512433b18b9d764285ff029bde059e9d41f8fe10a60631bd8348101"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.5.5" +grpcio = ">=1.70.0" +protobuf = ">=5.26.1,<6.0dev" + +[[package]] +name = "grpcio-tools" +version = "1.68.0" +description = "Protobuf code generator for gRPC" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "grpcio_tools-1.68.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:9509a5c3ed3d54fa7ac20748d501cb86668f764605a0a68f275339ee0f1dc1a6"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:59a885091bf29700ba0e14a954d156a18714caaa2006a7f328b18e1ac4b1e721"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:d3e678162e1d7a8720dc05fdd537fc8df082a50831791f7bb1c6f90095f8368b"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10d03e3ad4af6284fd27cb14f5a3d52045913c1253e3e24a384ed91bc8adbfcd"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1769d7f529de1cc102f7fb900611e3c0b69bdb244fca1075b24d6e5b49024586"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:88640d95ee41921ac7352fa5fadca52a06d7e21fbe53e6a706a9a494f756be7d"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e903d07bc65232aa9e7704c829aec263e1e139442608e473d7912417a9908e29"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-win32.whl", hash = "sha256:66b70b37184d40806844f51c2757c6b852511d4ea46a3bf2c7e931a47b455bc6"}, + {file = "grpcio_tools-1.68.0-cp310-cp310-win_amd64.whl", hash = "sha256:b47ae076ffb29a68e517bc03552bef0d9c973f8e18adadff180b123e973a26ea"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:f65942fab440e99113ce14436deace7554d5aa554ea18358e3a5f3fc47efe322"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8fefc6d000e169a97336feded23ce614df3fb9926fc48c7a9ff8ea459d93b5b0"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:6dd69c9f3ff85eee8d1f71adf7023c638ca8d465633244ac1b7f19bc3668612d"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7dc5195dc02057668cc22da1ff1aea1811f6fa0deb801b3194dec1fe0bab1cf0"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849b12bec2320e49e988df104c92217d533e01febac172a4495caab36d9f0edc"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:766c2cd2e365e0fc0e559af56f2c2d144d95fd7cb8668a34d533e66d6435eb34"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2ec3a2e0afa4866ccc5ba33c071aebaa619245dfdd840cbb74f2b0591868d085"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-win32.whl", hash = "sha256:80b733014eb40d920d836d782e5cdea0dcc90d251a2ffb35ab378ef4f8a42c14"}, + {file = "grpcio_tools-1.68.0-cp311-cp311-win_amd64.whl", hash = "sha256:f95103e3e4e7fee7c6123bc9e4e925e07ad24d8d09d7c1c916fb6c8d1cb9e726"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-linux_armv7l.whl", hash 
= "sha256:dd9a654af8536b3de8525bff72a245fef62d572eabf96ac946fe850e707cb27d"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0f77957e3a0916a0dd18d57ce6b49d95fc9a5cfed92310f226339c0fda5394f6"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:92a09afe64fe26696595de2036e10967876d26b12c894cc9160f00152cacebe7"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28ebdbad2ef16699d07400b65260240851049a75502eff69a59b127d3ab960f1"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d3150d784d8050b10dcf5eb06e04fb90747a1547fed3a062a608d940fe57066"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:261d98fd635595de42aadee848f9af46da6654d63791c888891e94f66c5d0682"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:061345c0079b9471f32230186ab01acb908ea0e577bc1699a8cf47acef8be4af"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-win32.whl", hash = "sha256:533ce6791a5ba21e35d74c6c25caf4776f5692785a170c01ea1153783ad5af31"}, + {file = "grpcio_tools-1.68.0-cp312-cp312-win_amd64.whl", hash = "sha256:56842a0ce74b4b92eb62cd5ee00181b2d3acc58ba0c4fd20d15a5db51f891ba6"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:1117a81592542f0c36575082daa6413c57ca39188b18a4c50ec7332616f4b97e"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:51e5a090849b30c99a2396d42140b8a3e558eff6cdfa12603f9582e2cd07724e"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:4fe611d89a1836df8936f066d39c7eb03d4241806449ec45d4b8e1c843ae8011"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c10f3faa0cc4d89eb546f53b623837af23e86dc495d3b89510bcc0e0a6c0b8b2"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46b537480b8fd2195d988120a28467601a2a3de2e504043b89fb90318e1eb754"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:17d0c9004ea82b4213955a585401e80c30d4b37a1d4ace32ccdea8db4d3b7d43"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2919faae04fe47bad57fc9b578aeaab527da260e851f321a253b6b11862254a8"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-win32.whl", hash = "sha256:ee86157ef899f58ba2fe1055cce0d33bd703e99aa6d5a0895581ac3969f06bfa"}, + {file = "grpcio_tools-1.68.0-cp313-cp313-win_amd64.whl", hash = "sha256:d0470ffc6a93c86cdda48edd428d22e2fef17d854788d60d0d5f291038873157"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:795f2cd76f68a12b0b5541b98187ba367dd69b49d359cf98b781ead742961370"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:57e29e78c33fb1b1d557fbe7650d722d1f2b0a9f53ea73beb8ea47e627b6000b"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:700f171cd3293ee8d50cd43171562ff07b14fa8e49ee471cd91c6924c7da8644"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:196cd8a3a5963a4c9e424314df9eb573b305e6f958fe6508d26580ce01e7aa56"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cad40c3164ee9cef62524dea509449ea581b17ea493178beef051bf79b5103ca"}, + {file = 
"grpcio_tools-1.68.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ab93fab49fa1e699e577ff5fbb99aba660164d710d4c33cfe0aa9d06f585539f"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:511224a99726eb84db9ddb84dc8a75377c3eae797d835f99e80128ec618376d5"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-win32.whl", hash = "sha256:b4ca81770cd729a9ea536d871aacedbde2b732bb9bb83c9d993d63f58502153d"}, + {file = "grpcio_tools-1.68.0-cp38-cp38-win_amd64.whl", hash = "sha256:6950725bf7a496f81d3ec3324334ffc9dbec743b510dd0e897f51f8627eeb6ac"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:01ace351a51d7ee120963a4612b1f00e964462ec548db20d17f8902e238592c8"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5afd2f3f7257b52228a7808a2b4a765893d4d802d7a2377d9284853e67d045c6"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:453ee3193d59c974c678d91f08786f43c25ef753651b0825dc3d008c31baf68d"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094b22919b786ad73c20372ef5e546330e7cd2c6dc12293b7ed586975f35d38"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26335eea976dfc1ff5d90b19c309a9425bd53868112a0507ad20f297f2c21d3e"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c77ecc5164bb413a613bdac9091dcc29d26834a2ac42fcd1afdfcda9e3003e68"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e31be6dc61496a59c1079b0a669f93dfcc2cdc4b1dbdc4374247cd09cee1329b"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-win32.whl", hash = "sha256:3aa40958355920ae2846c6fb5cadac4f2c8e33234a2982fef8101da0990e3968"}, + {file = "grpcio_tools-1.68.0-cp39-cp39-win_amd64.whl", hash = "sha256:19bafb80948eda979b1b3a63c1567162d06249f43068a0e46a028a448e6f72d4"}, + {file = "grpcio_tools-1.68.0.tar.gz", hash = "sha256:737804ec2225dd4cc27e633b4ca0e963b0795161bf678285fab6586e917fd867"}, +] + +[package.dependencies] +grpcio = ">=1.68.0" +protobuf = ">=5.26.1,<6.0dev" +setuptools = "*" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.1.0" +description = "HTTP/2 State-Machine based protocol implementation" +optional = false +python-versions = ">=3.6.1" +groups = ["main"] +files = [ + {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, + {file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"}, +] + +[package.dependencies] +hpack = ">=4.0,<5" +hyperframe = ">=6.0,<7" + +[[package]] +name = "hpack" +version = "4.1.0" +description = "Pure-Python HPACK header encoding" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496"}, + {file = "hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca"}, +] + +[[package]] +name = 
"httpcore" +version = "1.0.7" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httplib2" +version = "0.22.0" +description = "A comprehensive HTTP client library." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] +files = [ + {file = "httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc"}, + {file = "httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81"}, +] + +[package.dependencies] +pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} + +[[package]] +name = "httptools" +version = "0.6.4" +description = "A collection of framework independent HTTP protocol utils." +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, + {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deee0e3343f98ee8047e9f4c5bc7cedbf69f5734454a94c38ee829fb2d5fa3c1"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca80b7485c76f768a3bc83ea58373f8db7b015551117375e4918e2aa77ea9b50"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:90d96a385fa941283ebd231464045187a31ad932ebfa541be8edf5b3c2328959"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:59e724f8b332319e2875efd360e61ac07f33b492889284a3e05e6d13746876f4"}, + {file = "httptools-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:c26f313951f6e26147833fc923f78f95604bbec812a43e5ee37f26dc9e5a686c"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988"}, + {file = "httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f"}, + {file = "httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0"}, + {file = "httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d3f0d369e7ffbe59c4b6116a44d6a8eb4783aae027f2c0b366cf0aa964185dba"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:94978a49b8f4569ad607cd4946b759d90b285e39c0d4640c6b36ca7a3ddf2efc"}, + {file = "httptools-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40dc6a8e399e15ea525305a2ddba998b0af5caa2566bcd79dcbe8948181eeaff"}, + {file = "httptools-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab9ba8dcf59de5181f6be44a77458e45a578fc99c31510b8c65b7d5acc3cf490"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc411e1c0a7dcd2f902c7c48cf079947a7e65b5485dea9decb82b9105ca71a43"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d54efd20338ac52ba31e7da78e4a72570cf729fac82bc31ff9199bedf1dc7440"}, + {file = "httptools-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:df959752a0c2748a65ab5387d08287abf6779ae9165916fe053e68ae1fbdc47f"}, + 
{file = "httptools-0.6.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85797e37e8eeaa5439d33e556662cc370e474445d5fab24dcadc65a8ffb04003"}, + {file = "httptools-0.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:db353d22843cf1028f43c3651581e4bb49374d85692a85f95f7b9a130e1b2cab"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ffd262a73d7c28424252381a5b854c19d9de5f56f075445d33919a637e3547"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c346571fa50d2e9856a37d7cd9435a25e7fd15e236c397bf224afaa355fe9"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aafe0f1918ed07b67c1e838f950b1c1fabc683030477e60b335649b8020e1076"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0e563e54979e97b6d13f1bbc05a96109923e76b901f786a5eae36e99c01237bd"}, + {file = "httptools-0.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:b799de31416ecc589ad79dd85a0b2657a8fe39327944998dea368c1d4c9e55e6"}, + {file = "httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c"}, +] + +[package.extras] +test = ["Cython (>=0.29.24)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "hyperframe" +version = "6.1.0" +description = "Pure-Python HTTP/2 framing" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5"}, + {file = "hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08"}, +] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "imageio" +version = "2.37.0" +description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "imageio-2.37.0-py3-none-any.whl", hash = "sha256:11efa15b87bc7871b61590326b2d635439acc321cf7f8ce996f812543ce10eed"}, + {file = "imageio-2.37.0.tar.gz", hash = "sha256:71b57b3669666272c818497aebba2b4c5f20d5b37c81720e5e1a56d59c492996"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=8.3.2" + +[package.extras] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +build = ["wheel"] +dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] +docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] +ffmpeg = ["imageio-ffmpeg", "psutil"] +fits = ["astropy"] +full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpy (>2)", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "rawpy", "sphinx (<6)", "tifffile", "wheel"] +gdal = ["gdal"] +itk = ["itk"] +linting = ["black", "flake8"] +pillow-heif = ["pillow-heif"] +pyav = ["av"] +rawpy = ["numpy (>2)", "rawpy"] +test = ["fsspec[github]", "pytest", "pytest-cov"] +tifffile = ["tifffile"] + +[[package]] +name = "imageio-ffmpeg" +version = "0.6.0" +description = "FFMPEG wrapper for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:9d2baaf867088508d4a3458e61eeb30e945c4ad8016025545f66c4b5aaef0a61"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b1ae3173414b5fc5f538a726c4e48ea97edc0d2cdc11f103afee655c463fa742"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1d47bebd83d2c5fc770720d211855f208af8a596c82d17730aa51e815cdee6dc"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c7e46fcec401dd990405049d2e2f475e2b397779df2519b544b8aab515195282"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-win32.whl", hash = "sha256:196faa79366b4a82f95c0f4053191d2013f4714a715780f0ad2a68ff37483cc2"}, + {file = "imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a"}, + {file = "imageio_ffmpeg-0.6.0.tar.gz", hash = "sha256:e2556bed8e005564a9f925bb7afa4002d82770d6b08825078b7697ab88ba1755"}, +] + +[[package]] +name = "importlib-metadata" +version = "8.5.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false 
+python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jinja2" +version = "3.1.5" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, + {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jiter" +version = "0.8.2" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"}, + {file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c826a221851a8dc028eb6d7d6429ba03184fa3c7e83ae01cd6d3bd1d4bd17d"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d35c864c2dff13dfd79fb070fc4fc6235d7b9b359efe340e1261deb21b9fcb66"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f557c55bc2b7676e74d39d19bcb8775ca295c7a028246175d6a8b431e70835e5"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:580ccf358539153db147e40751a0b41688a5ceb275e6f3e93d91c9467f42b2e3"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af102d3372e917cffce49b521e4c32c497515119dc7bd8a75665e90a718bbf08"}, + {file = "jiter-0.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cadcc978f82397d515bb2683fc0d50103acff2a180552654bb92d6045dec2c49"}, + {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ba5bdf56969cad2019d4e8ffd3f879b5fdc792624129741d3d83fc832fef8c7d"}, + {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b94a33a241bee9e34b8481cdcaa3d5c2116f575e0226e421bed3f7a6ea71cff"}, + {file = "jiter-0.8.2-cp310-cp310-win32.whl", hash = "sha256:6e5337bf454abddd91bd048ce0dca5134056fc99ca0205258766db35d0a2ea43"}, + {file = "jiter-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:4a9220497ca0cb1fe94e3f334f65b9b5102a0b8147646118f020d8ce1de70105"}, + {file = "jiter-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2dd61c5afc88a4fda7d8b2cf03ae5947c6ac7516d32b7a15bf4b49569a5c076b"}, + {file = "jiter-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:a6c710d657c8d1d2adbbb5c0b0c6bfcec28fd35bd6b5f016395f9ac43e878a15"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9584de0cd306072635fe4b89742bf26feae858a0683b399ad0c2509011b9dc0"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a90a923338531b7970abb063cfc087eebae6ef8ec8139762007188f6bc69a9f"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21974d246ed0181558087cd9f76e84e8321091ebfb3a93d4c341479a736f099"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32475a42b2ea7b344069dc1e81445cfc00b9d0e3ca837f0523072432332e9f74"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9931fd36ee513c26b5bf08c940b0ac875de175341cbdd4fa3be109f0492586"}, + {file = "jiter-0.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0820f4a3a59ddced7fce696d86a096d5cc48d32a4183483a17671a61edfddc"}, + {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ffc86ae5e3e6a93765d49d1ab47b6075a9c978a2b3b80f0f32628f39caa0c88"}, + {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5127dc1abd809431172bc3fbe8168d6b90556a30bb10acd5ded41c3cfd6f43b6"}, + {file = "jiter-0.8.2-cp311-cp311-win32.whl", hash = "sha256:66227a2c7b575720c1871c8800d3a0122bb8ee94edb43a5685aa9aceb2782d44"}, + {file = "jiter-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:cde031d8413842a1e7501e9129b8e676e62a657f8ec8166e18a70d94d4682855"}, + {file = "jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f"}, + {file = "jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887"}, + {file = "jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d"}, + {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152"}, + {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29"}, + {file = "jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e"}, + {file = "jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c"}, + {file = "jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = 
"sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84"}, + {file = "jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef"}, + {file = "jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1"}, + {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9"}, + {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05"}, + {file = "jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a"}, + {file = "jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865"}, + {file = "jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca"}, + {file = "jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0"}, + {file = "jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566"}, + {file = "jiter-0.8.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9e1fa156ee9454642adb7e7234a383884452532bc9d53d5af2d18d98ada1d79c"}, + {file = "jiter-0.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cf5dfa9956d96ff2efb0f8e9c7d055904012c952539a774305aaaf3abdf3d6c"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e52bf98c7e727dd44f7c4acb980cb988448faeafed8433c867888268899b298b"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a2ecaa3c23e7a7cf86d00eda3390c232f4d533cd9ddea4b04f5d0644faf642c5"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08d4c92bf480e19fc3f2717c9ce2aa31dceaa9163839a311424b6862252c943e"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d9a1eded738299ba8e106c6779ce5c3893cffa0e32e4485d680588adae6db8"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20be8b7f606df096e08b0b1b4a3c6f0515e8dac296881fe7461dfa0fb5ec817"}, + {file = "jiter-0.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d33f94615fcaf872f7fd8cd98ac3b429e435c77619777e8a449d9d27e01134d1"}, + {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:317b25e98a35ffec5c67efe56a4e9970852632c810d35b34ecdd70cc0e47b3b6"}, + {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc9043259ee430ecd71d178fccabd8c332a3bf1e81e50cae43cc2b28d19e4cb7"}, + {file = "jiter-0.8.2-cp38-cp38-win32.whl", hash = "sha256:fc5adda618205bd4678b146612ce44c3cbfdee9697951f2c0ffdef1f26d72b63"}, + {file = "jiter-0.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:cd646c827b4f85ef4a78e4e58f4f5854fae0caf3db91b59f0d73731448a970c6"}, + {file = "jiter-0.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e41e75344acef3fc59ba4765df29f107f309ca9e8eace5baacabd9217e52a5ee"}, + {file = "jiter-0.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f22b16b35d5c1df9dfd58843ab2cd25e6bf15191f5a236bed177afade507bfc"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7200b8f7619d36aa51c803fd52020a2dfbea36ffec1b5e22cab11fd34d95a6d"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70bf4c43652cc294040dbb62256c83c8718370c8b93dd93d934b9a7bf6c4f53c"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9d471356dc16f84ed48768b8ee79f29514295c7295cb41e1133ec0b2b8d637d"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:859e8eb3507894093d01929e12e267f83b1d5f6221099d3ec976f0c995cb6bd9"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa58399c01db555346647a907b4ef6d4f584b123943be6ed5588c3f2359c9f4"}, + {file = "jiter-0.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8f2d5ed877f089862f4c7aacf3a542627c1496f972a34d0474ce85ee7d939c27"}, + {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03c9df035d4f8d647f8c210ddc2ae0728387275340668fb30d2421e17d9a0841"}, + {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8bd2a824d08d8977bb2794ea2682f898ad3d8837932e3a74937e93d62ecbb637"}, + {file = "jiter-0.8.2-cp39-cp39-win32.whl", hash = "sha256:ca29b6371ebc40e496995c94b988a101b9fbbed48a51190a4461fcb0a68b4a36"}, + {file = "jiter-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1c0dfbd1be3cbefc7510102370d86e35d1d53e5a93d48519688b1bf0f761160a"}, + {file = "jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d"}, +] + +[[package]] +name = "jsonref" +version = "1.1.0" +description = "jsonref is a library for automatic dereferencing of JSON Reference objects for Python." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9"}, + {file = "jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552"}, +] + +[[package]] +name = "jsonschema" +version = "4.23.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2024.10.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "launchdarkly-eventsource" +version = "1.2.1" +description = "LaunchDarkly SSE Client" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "launchdarkly_eventsource-1.2.1-py3-none-any.whl", hash = "sha256:0fa935b7692555455ac8b44b845cdc16738bd9b2e9ce89ee19b3f8b4adafe3f1"}, + {file = "launchdarkly_eventsource-1.2.1.tar.gz", hash = "sha256:99c29fa9a570aa8d49c9804bcc401028cab8a8954ccbf4a68c3116933301ec33"}, +] + +[package.dependencies] +urllib3 = ">=1.26.0,<3" + +[[package]] +name = "launchdarkly-server-sdk" +version = "9.9.0" +description = "LaunchDarkly SDK for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "launchdarkly_server_sdk-9.9.0-py3-none-any.whl", hash = "sha256:91a016e9e41a7eccdcb1e00e760b408d7bda0041a34083b994236ac19c69c2bf"}, + {file = "launchdarkly_server_sdk-9.9.0.tar.gz", hash = "sha256:10a39a22e1d9feb31c8586c19d03167c3d8e1ac277c6adc0864032a0c2e1c62e"}, +] + +[package.dependencies] +certifi = ">=2018.4.16" +expiringdict = ">=1.1.4" +launchdarkly-eventsource = ">=1.1.0,<2.0.0" +pyRFC3339 = ">=1.0" +semver = ">=2.10.2" +urllib3 = ">=1.26.0,<3" + +[package.extras] +consul = ["python-consul (>=1.0.1)"] +dynamodb = ["boto3 (>=1.9.71)"] +redis = ["redis (>=2.10.5)"] +test-filesource = ["pyyaml (>=5.3.1)", "watchdog (>=3.0.0)"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mem0ai" +version = "0.1.44" +description = "Long-term memory for AI Agents" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "mem0ai-0.1.44-py3-none-any.whl", hash = "sha256:32260a2cd935035a1b16ce04ad2e4510a5bd97618709466e2d06303e0eb8d9d4"}, + {file = "mem0ai-0.1.44.tar.gz", hash = "sha256:93214272915d94f673d370bb8fe7a8bfc21806267e65700b471bec454dcdfa5c"}, +] + +[package.dependencies] +openai = ">=1.33.0,<2.0.0" +posthog = ">=3.5.0,<4.0.0" +pydantic = 
">=2.7.3,<3.0.0" +pytz = ">=2024.1,<2025.0" +qdrant-client = ">=1.9.1,<2.0.0" +sqlalchemy = ">=2.0.31,<3.0.0" + +[package.extras] +graph = ["langchain-community (>=0.3.1,<0.4.0)", "neo4j (>=5.23.1,<6.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)"] + +[[package]] +name = "monotonic" +version = "1.6" +description = "An implementation of time.monotonic() for Python 2 & < 3.3" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, + {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, +] + +[[package]] +name = "moviepy" +version = "2.1.2" +description = "Video editing with Python" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "moviepy-2.1.2-py3-none-any.whl", hash = "sha256:6cdc0d739110c8f347a224d72bd59eebaec010720d01eff290d37111bf545a73"}, + {file = "moviepy-2.1.2.tar.gz", hash = "sha256:22c57a7472f607eaad9fe80791df67c05082e1060fb74817c4eaac68e138ee77"}, +] + +[package.dependencies] +decorator = ">=4.0.2,<6.0" +imageio = ">=2.5,<3.0" +imageio_ffmpeg = ">=0.2.0" +numpy = ">=1.25.0" +pillow = ">=9.2.0,<11.0" +proglog = "<=1.0.0" +python-dotenv = ">=0.10" + +[package.extras] +doc = ["Sphinx (==6.*)", "numpydoc (<2.0)", "pydata-sphinx-theme (==0.13)", "sphinx_design"] +lint = ["black (>=23.7.0)", "flake8 (>=6.0.0)", "flake8-absolute-import (>=1.0)", "flake8-docstrings (>=1.7.0)", "flake8-implicit-str-concat (==0.4.0)", "flake8-rst-docstrings (>=0.3)", "isort (>=5.12)", "pre-commit (>=3.3)"] +test = ["coveralls (>=3.0,<4.0)", "pytest (>=3.0.0,<7.0.0)", "pytest-cov (>=2.5.1,<3.0)"] + +[[package]] +name = "multidict" +version = "6.1.0" +description = "multidict implementation" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, + {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, + {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, + {file = 
"multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, + {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, + {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, + {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, + {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"}, + {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"}, + {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"}, + {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"}, + {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = 
"multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"}, + {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"}, + {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"}, + {file = 
"multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, + {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, + {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, + {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, + {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, + {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, + {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, + {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, + {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, + {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, + {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, + {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, + {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, + {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "numpy" +version = "2.2.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "numpy-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7079129b64cb78bdc8d611d1fd7e8002c0a2565da6a47c4df8062349fee90e3e"}, + {file = "numpy-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ec6c689c61df613b783aeb21f945c4cbe6c51c28cb70aae8430577ab39f163e"}, + {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:40c7ff5da22cd391944a28c6a9c638a5eef77fcf71d6e3a79e1d9d9e82752715"}, + {file = "numpy-2.2.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:995f9e8181723852ca458e22de5d9b7d3ba4da3f11cc1cb113f093b271d7965a"}, + {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78ea78450fd96a498f50ee096f69c75379af5138f7881a51355ab0e11286c97"}, + {file = "numpy-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fbe72d347fbc59f94124125e73fc4976a06927ebc503ec5afbfb35f193cd957"}, + {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8e6da5cffbbe571f93588f562ed130ea63ee206d12851b60819512dd3e1ba50d"}, + {file = "numpy-2.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09d6a2032faf25e8d0cadde7fd6145118ac55d2740132c1d845f98721b5ebcfd"}, + {file = "numpy-2.2.2-cp310-cp310-win32.whl", hash = "sha256:159ff6ee4c4a36a23fe01b7c3d07bd8c14cc433d9720f977fcd52c13c0098160"}, + {file = "numpy-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:64bd6e1762cd7f0986a740fee4dff927b9ec2c5e4d9a28d056eb17d332158014"}, + {file = "numpy-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:642199e98af1bd2b6aeb8ecf726972d238c9877b0f6e8221ee5ab945ec8a2189"}, + {file = "numpy-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6d9fc9d812c81e6168b6d405bf00b8d6739a7f72ef22a9214c4241e0dc70b323"}, + {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c7d1fd447e33ee20c1f33f2c8e6634211124a9aabde3c617687d8b739aa69eac"}, + {file = "numpy-2.2.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:451e854cfae0febe723077bd0cf0a4302a5d84ff25f0bfece8f29206c7bed02e"}, + {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd249bc894af67cbd8bad2c22e7cbcd46cf87ddfca1f1289d1e7e54868cc785c"}, + {file = "numpy-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02935e2c3c0c6cbe9c7955a8efa8908dd4221d7755644c59d1bba28b94fd334f"}, + {file = "numpy-2.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a972cec723e0563aa0823ee2ab1df0cb196ed0778f173b381c871a03719d4826"}, + {file = 
"numpy-2.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d6d6a0910c3b4368d89dde073e630882cdb266755565155bc33520283b2d9df8"}, + {file = "numpy-2.2.2-cp311-cp311-win32.whl", hash = "sha256:860fd59990c37c3ef913c3ae390b3929d005243acca1a86facb0773e2d8d9e50"}, + {file = "numpy-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:da1eeb460ecce8d5b8608826595c777728cdf28ce7b5a5a8c8ac8d949beadcf2"}, + {file = "numpy-2.2.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ac9bea18d6d58a995fac1b2cb4488e17eceeac413af014b1dd26170b766d8467"}, + {file = "numpy-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23ae9f0c2d889b7b2d88a3791f6c09e2ef827c2446f1c4a3e3e76328ee4afd9a"}, + {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:3074634ea4d6df66be04f6728ee1d173cfded75d002c75fac79503a880bf3825"}, + {file = "numpy-2.2.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ec0636d3f7d68520afc6ac2dc4b8341ddb725039de042faf0e311599f54eb37"}, + {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ffbb1acd69fdf8e89dd60ef6182ca90a743620957afb7066385a7bbe88dc748"}, + {file = "numpy-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0349b025e15ea9d05c3d63f9657707a4e1d471128a3b1d876c095f328f8ff7f0"}, + {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:463247edcee4a5537841d5350bc87fe8e92d7dd0e8c71c995d2c6eecb8208278"}, + {file = "numpy-2.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9dd47ff0cb2a656ad69c38da850df3454da88ee9a6fde0ba79acceee0e79daba"}, + {file = "numpy-2.2.2-cp312-cp312-win32.whl", hash = "sha256:4525b88c11906d5ab1b0ec1f290996c0020dd318af8b49acaa46f198b1ffc283"}, + {file = "numpy-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:5acea83b801e98541619af398cc0109ff48016955cc0818f478ee9ef1c5c3dcb"}, + {file = "numpy-2.2.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b208cfd4f5fe34e1535c08983a1a6803fdbc7a1e86cf13dd0c61de0b51a0aadc"}, + {file = "numpy-2.2.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0bbe7dd86dca64854f4b6ce2ea5c60b51e36dfd597300057cf473d3615f2369"}, + {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:22ea3bb552ade325530e72a0c557cdf2dea8914d3a5e1fecf58fa5dbcc6f43cd"}, + {file = "numpy-2.2.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:128c41c085cab8a85dc29e66ed88c05613dccf6bc28b3866cd16050a2f5448be"}, + {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:250c16b277e3b809ac20d1f590716597481061b514223c7badb7a0f9993c7f84"}, + {file = "numpy-2.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0c8854b09bc4de7b041148d8550d3bd712b5c21ff6a8ed308085f190235d7ff"}, + {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b6fb9c32a91ec32a689ec6410def76443e3c750e7cfc3fb2206b985ffb2b85f0"}, + {file = "numpy-2.2.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:57b4012e04cc12b78590a334907e01b3a85efb2107df2b8733ff1ed05fce71de"}, + {file = "numpy-2.2.2-cp313-cp313-win32.whl", hash = "sha256:4dbd80e453bd34bd003b16bd802fac70ad76bd463f81f0c518d1245b1c55e3d9"}, + {file = "numpy-2.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:5a8c863ceacae696aff37d1fd636121f1a512117652e5dfb86031c8d84836369"}, + {file = "numpy-2.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:b3482cb7b3325faa5f6bc179649406058253d91ceda359c104dac0ad320e1391"}, + {file = "numpy-2.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:9491100aba630910489c1d0158034e1c9a6546f0b1340f716d522dc103788e39"}, + {file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:41184c416143defa34cc8eb9d070b0a5ba4f13a0fa96a709e20584638254b317"}, + {file = "numpy-2.2.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:7dca87ca328f5ea7dafc907c5ec100d187911f94825f8700caac0b3f4c384b49"}, + {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bc61b307655d1a7f9f4b043628b9f2b721e80839914ede634e3d485913e1fb2"}, + {file = "numpy-2.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fad446ad0bc886855ddf5909cbf8cb5d0faa637aaa6277fb4b19ade134ab3c7"}, + {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:149d1113ac15005652e8d0d3f6fd599360e1a708a4f98e43c9c77834a28238cb"}, + {file = "numpy-2.2.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:106397dbbb1896f99e044efc90360d098b3335060375c26aa89c0d8a97c5f648"}, + {file = "numpy-2.2.2-cp313-cp313t-win32.whl", hash = "sha256:0eec19f8af947a61e968d5429f0bd92fec46d92b0008d0a6685b40d6adf8a4f4"}, + {file = "numpy-2.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:97b974d3ba0fb4612b77ed35d7627490e8e3dff56ab41454d9e8b23448940576"}, + {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b0531f0b0e07643eb089df4c509d30d72c9ef40defa53e41363eca8a8cc61495"}, + {file = "numpy-2.2.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:e9e82dcb3f2ebbc8cb5ce1102d5f1c5ed236bf8a11730fb45ba82e2841ec21df"}, + {file = "numpy-2.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d4142eb40ca6f94539e4db929410f2a46052a0fe7a2c1c59f6179c39938d2a"}, + {file = "numpy-2.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:356ca982c188acbfa6af0d694284d8cf20e95b1c3d0aefa8929376fea9146f60"}, + {file = "numpy-2.2.2.tar.gz", hash = "sha256:ed6906f61834d687738d25988ae117683705636936cc605be0bb208b23df4d8f"}, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + +[[package]] +name = "ollama" +version = "0.4.7" +description = "The official Python client for Ollama." 
+optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "ollama-0.4.7-py3-none-any.whl", hash = "sha256:85505663cca67a83707be5fb3aeff0ea72e67846cea5985529d8eca4366564a1"}, + {file = "ollama-0.4.7.tar.gz", hash = "sha256:891dcbe54f55397d82d289c459de0ea897e103b86a3f1fad0fdb1895922a75ff"}, +] + +[package.dependencies] +httpx = ">=0.27,<0.29" +pydantic = ">=2.9.0,<3.0.0" + +[[package]] +name = "openai" +version = "1.60.0" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "openai-1.60.0-py3-none-any.whl", hash = "sha256:df06c43be8018274980ac363da07d4b417bd835ead1c66e14396f6f15a0d5dda"}, + {file = "openai-1.60.0.tar.gz", hash = "sha256:7fa536cd4b644718645b874d2706e36dbbef38b327e42ca0623275da347ee1a9"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.11,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +realtime = ["websockets (>=13,<15)"] + +[[package]] +name = "opentelemetry-api" +version = "1.29.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "opentelemetry_api-1.29.0-py3-none-any.whl", hash = "sha256:5fcd94c4141cc49c736271f3e1efb777bebe9cc535759c54c936cca4f1b312b8"}, + {file = "opentelemetry_api-1.29.0.tar.gz", hash = "sha256:d04a6cf78aad09614f52964ecb38021e248f5714dc32c2e0d8fd99517b4d69cf"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0,<=8.5.0" + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pamqp" +version = "3.3.0" +description = "RabbitMQ Focused AMQP low-level library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "pamqp-3.3.0-py2.py3-none-any.whl", hash = "sha256:c901a684794157ae39b52cbf700db8c9aae7a470f13528b9d7b4e5f7202f8eb0"}, + {file = "pamqp-3.3.0.tar.gz", hash = "sha256:40b8795bd4efcf2b0f8821c1de83d12ca16d5760f4507836267fd7a02b06763b"}, +] + +[package.extras] +codegen = ["lxml", "requests", "yapf"] +testing = ["coverage", "flake8", "flake8-comprehensions", "flake8-deprecated", "flake8-import-order", "flake8-print", "flake8-quotes", "flake8-rst-docstrings", "flake8-tuple", "yapf"] + +[[package]] +name = "pastel" +version = "0.2.1" +description = "Bring colors to your terminal." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] +files = [ + {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"}, + {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pillow" +version = "10.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = 
"sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = 
"pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "pinecone" +version = "5.4.2" +description = "Pinecone client and SDK" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "pinecone-5.4.2-py3-none-any.whl", hash = "sha256:1fad082c66a50a229b58cda0c3a1fa0083532dc9de8303015fe4071cb25c19a8"}, + {file = "pinecone-5.4.2.tar.gz", hash = "sha256:23e8aaa73b400bb11a3b626c4129284fb170f19025b82f65bd89cbb0dab2b873"}, +] + +[package.dependencies] +certifi = ">=2019.11.17" +pinecone-plugin-inference = ">=2.0.0,<4.0.0" +pinecone-plugin-interface = ">=0.0.7,<0.0.8" +python-dateutil = ">=2.5.3" +tqdm = ">=4.64.1" +typing-extensions = ">=3.7.4" +urllib3 = [ + {version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""}, + {version = ">=1.26.5", markers = "python_version >= \"3.12\" and python_version < \"4.0\""}, +] + +[package.extras] +grpc = ["googleapis-common-protos (>=1.53.0)", "grpcio (>=1.44.0)", "grpcio (>=1.59.0)", "lz4 (>=3.1.3)", "protobuf (>=4.25,<5.0)", "protoc-gen-openapiv2 (>=0.0.1,<0.0.2)"] + +[[package]] +name = "pinecone-plugin-inference" +version = "3.1.0" +description = "Embeddings plugin for Pinecone SDK" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "pinecone_plugin_inference-3.1.0-py3-none-any.whl", hash = 
"sha256:96e861527bd41e90d58b7e76abd4e713d9af28f63e76a51864dfb9cf7180e3df"}, + {file = "pinecone_plugin_inference-3.1.0.tar.gz", hash = "sha256:eff826178e1fe448577be2ff3d8dbb072befbbdc2d888e214624523a1c37cd8d"}, +] + +[package.dependencies] +pinecone-plugin-interface = ">=0.0.7,<0.0.8" + +[[package]] +name = "pinecone-plugin-interface" +version = "0.0.7" +description = "Plugin interface for the Pinecone python client" +optional = false +python-versions = "<4.0,>=3.8" +groups = ["main"] +files = [ + {file = "pinecone_plugin_interface-0.0.7-py3-none-any.whl", hash = "sha256:875857ad9c9fc8bbc074dbe780d187a2afd21f5bfe0f3b08601924a61ef1bba8"}, + {file = "pinecone_plugin_interface-0.0.7.tar.gz", hash = "sha256:b8e6675e41847333aa13923cc44daa3f85676d7157324682dc1640588a982846"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "poethepoet" +version = "0.32.1" +description = "A task runner that works well with poetry." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "poethepoet-0.32.1-py3-none-any.whl", hash = "sha256:d1e0a52a2f677870fac17dfb26bfe4910242756ac821443ef31f90ad26227c2d"}, + {file = "poethepoet-0.32.1.tar.gz", hash = "sha256:471e1a025812dcd3d2997e30989681be5ab0a49232ee5fba94859629671c9584"}, +] + +[package.dependencies] +pastel = ">=0.2.1,<0.3.0" +pyyaml = ">=6.0.2,<7.0.0" +tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""} + +[package.extras] +poetry-plugin = ["poetry (>=1.0,<3.0)"] + +[[package]] +name = "portalocker" +version = "2.10.1" +description = "Wraps the portalocker recipe for easy usage" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"}, + {file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"}, +] + +[package.dependencies] +pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""} + +[package.extras] +docs = ["sphinx (>=1.7.1)"] +redis = ["redis"] +tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"] + +[[package]] +name = "postgrest" +version = "0.19.1" +description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "postgrest-0.19.1-py3-none-any.whl", hash = "sha256:a8e7be4e1abc69fd8eee5a49d7dc3a76dfbffbd778beed0b2bd7accb3f4f3a2a"}, + {file = "postgrest-0.19.1.tar.gz", hash = "sha256:d8fa88953cced4f45efa0f412056c364f64ece8a35b5b35f458a7e58c133fbca"}, +] + +[package.dependencies] +deprecation = ">=2.1.0,<3.0.0" +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +pydantic = ">=1.9,<3.0" +strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "posthog" +version = "3.8.3" +description = "Integrate PostHog into any python application." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "posthog-3.8.3-py2.py3-none-any.whl", hash = "sha256:7215c4d7649b0c87905b42f460403311564996d776ab48d39852f46539a50f22"}, + {file = "posthog-3.8.3.tar.gz", hash = "sha256:263df03ea312d4b47a3d5ea393fdb22ff2ed78140d5ce9af9dd0618ae245a44b"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +monotonic = ">=1.5" +python-dateutil = ">2.1" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] +langchain = ["langchain (>=0.2.0)"] +sentry = ["django", "sentry-sdk"] +test = ["coverage", "django", "flake8", "freezegun (==0.3.15)", "langchain-community (>=0.2.0)", "langchain-openai (>=0.2.0)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-asyncio", "pytest-timeout"] + +[[package]] +name = "praw" +version = "7.8.1" +description = "Python Reddit API Wrapper." 
+optional = false +python-versions = "~=3.8" +groups = ["main"] +files = [ + {file = "praw-7.8.1-py3-none-any.whl", hash = "sha256:15917a81a06e20ff0aaaf1358481f4588449fa2421233040cb25e5c8202a3e2f"}, + {file = "praw-7.8.1.tar.gz", hash = "sha256:3c5767909f71e48853eb6335fef7b50a43cbe3da728cdfb16d3be92904b0a4d8"}, +] + +[package.dependencies] +prawcore = ">=2.4,<3" +update_checker = ">=0.18" +websocket-client = ">=0.54.0" + +[package.extras] +ci = ["coveralls"] +dev = ["packaging", "praw[lint]", "praw[test]"] +lint = ["praw[readthedocs]", "pre-commit", "ruff (>=0.0.291)"] +readthedocs = ["furo", "sphinx"] +test = ["betamax (>=0.8,<0.9)", "betamax-matchers (>=0.3.0,<0.5)", "pytest (>=2.7.3)", "requests (>=2.20.1,<3)", "urllib3 (==1.26.*)"] + +[[package]] +name = "prawcore" +version = "2.4.0" +description = "\"Low-level communication layer for PRAW 4+." +optional = false +python-versions = "~=3.8" +groups = ["main"] +files = [ + {file = "prawcore-2.4.0-py3-none-any.whl", hash = "sha256:29af5da58d85704b439ad3c820873ad541f4535e00bb98c66f0fbcc8c603065a"}, + {file = "prawcore-2.4.0.tar.gz", hash = "sha256:b7b2b5a1d04406e086ab4e79988dc794df16059862f329f4c6a43ed09986c335"}, +] + +[package.dependencies] +requests = ">=2.6.0,<3.0" + +[package.extras] +ci = ["coveralls"] +dev = ["packaging", "prawcore[lint]", "prawcore[test]"] +lint = ["pre-commit", "ruff (>=0.0.291)"] +test = ["betamax (>=0.8,<0.9)", "pytest (>=2.7.3)", "urllib3 (==1.26.*)"] + +[[package]] +name = "prisma" +version = "0.15.0" +description = "Prisma Client Python is an auto-generated and fully type-safe database client" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "prisma-0.15.0-py3-none-any.whl", hash = "sha256:de949cc94d3d91243615f22ff64490aa6e2d7cb81aabffce53d92bd3977c09a4"}, + {file = "prisma-0.15.0.tar.gz", hash = "sha256:5cd6402aa8322625db3fc1152040404e7fc471fe7f8fa3a314fa8a99529ca107"}, +] + +[package.dependencies] +click = ">=7.1.2" +httpx = ">=0.19.0" +jinja2 = ">=2.11.2" +nodeenv = "*" +pydantic = ">=1.10.0,<3" +python-dotenv = ">=0.12.0" +StrEnum = {version = "*", markers = "python_version < \"3.11\""} +tomlkit = "*" +typing-extensions = ">=4.5.0" + +[package.extras] +all = ["nodejs-bin"] +node = ["nodejs-bin"] + +[[package]] +name = "proglog" +version = "0.1.10" +description = "Log and progress bar manager for console, notebooks, web..." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "proglog-0.1.10-py3-none-any.whl", hash = "sha256:19d5da037e8c813da480b741e3fa71fb1ac0a5b02bf21c41577c7f327485ec50"}, + {file = "proglog-0.1.10.tar.gz", hash = "sha256:658c28c9c82e4caeb2f25f488fff9ceace22f8d69b15d0c1c86d64275e4ddab4"}, +] + +[package.dependencies] +tqdm = "*" + +[[package]] +name = "propcache" +version = "0.2.1" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b"}, + {file = "propcache-0.2.1-cp310-cp310-win32.whl", hash = "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4"}, + {file = "propcache-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3"}, + {file = 
"propcache-0.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e"}, + {file = "propcache-0.2.1-cp311-cp311-win32.whl", hash = "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034"}, + {file = "propcache-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f"}, + {file = 
"propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518"}, + {file = "propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246"}, + {file = "propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30"}, + {file = "propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6"}, + {file = "propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1"}, + {file = 
"propcache-0.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587"}, + {file = "propcache-0.2.1-cp39-cp39-win32.whl", hash = "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb"}, + {file = "propcache-0.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1"}, + {file = "propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54"}, + {file = "propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64"}, +] + +[[package]] +name = "proto-plus" +version = "1.25.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "proto_plus-1.25.0-py3-none-any.whl", hash = "sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961"}, + {file = "proto_plus-1.25.0.tar.gz", hash = "sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + +[[package]] +name = "protobuf" +version = "5.29.3" +description = "" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, + {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, + {file = "protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f"}, + {file = "protobuf-5.29.3-cp38-cp38-win32.whl", hash = "sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252"}, + {file = "protobuf-5.29.3-cp38-cp38-win_amd64.whl", hash = "sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107"}, + {file = "protobuf-5.29.3-cp39-cp39-win32.whl", hash = "sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7"}, + {file = "protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da"}, + {file = "protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f"}, + {file = "protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620"}, +] + +[[package]] +name = "psutil" +version = "6.1.1" +description = "Cross-platform lib for process and system monitoring in Python." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["main"] +files = [ + {file = "psutil-6.1.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9ccc4316f24409159897799b83004cb1e24f9819b0dcf9c0b68bdcb6cefee6a8"}, + {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ca9609c77ea3b8481ab005da74ed894035936223422dc591d6772b147421f777"}, + {file = "psutil-6.1.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8df0178ba8a9e5bc84fed9cfa61d54601b371fbec5c8eebad27575f1e105c0d4"}, + {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:1924e659d6c19c647e763e78670a05dbb7feaf44a0e9c94bf9e14dfc6ba50468"}, + {file = "psutil-6.1.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:018aeae2af92d943fdf1da6b58665124897cfc94faa2ca92098838f83e1b1bca"}, + {file = "psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac"}, + {file = "psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3"}, + {file = "psutil-6.1.1-cp36-cp36m-win32.whl", hash = "sha256:384636b1a64b47814437d1173be1427a7c83681b17a450bfc309a1953e329603"}, + {file = "psutil-6.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:8be07491f6ebe1a693f17d4f11e69d0dc1811fa082736500f649f79df7735303"}, + {file = "psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53"}, + {file = "psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649"}, + {file = "psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5"}, +] + +[package.extras] +dev = ["abi3audit", "black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + +[[package]] +name = "psycopg2-binary" +version = "2.9.10" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906"}, + 
{file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4"}, + 
{file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:056470c3dc57904bbf63d6f534988bafc4e970ffd50f6271fc4ee7daad9498a5"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aa0e31fa4bb82578f3a6c74a73c273367727de397a7a0f07bd83cbea696baa"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8de718c0e1c4b982a54b41779667242bc630b2197948405b7bd8ce16bcecac92"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5c370b1e4975df846b0277b4deba86419ca77dbc25047f535b0bb03d1a544d44"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffe8ed017e4ed70f68b7b371d84b7d4a790368db9203dfc2d222febd3a9c8863"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8aecc5e80c63f7459a1a2ab2c64df952051df196294d9f739933a9f6687e86b3"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:7a813c8bdbaaaab1f078014b9b0b13f5de757e2b5d9be6403639b298a04d218b"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00924255d7fc916ef66e4bf22f354a940c67179ad3fd7067d7a0a9c84d2fbfc"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7559bce4b505762d737172556a4e6ea8a9998ecac1e39b5233465093e8cee697"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8b58f0a96e7a1e341fc894f62c1177a7c83febebb5ff9123b579418fdc8a481"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b269105e59ac96aba877c1707c600ae55711d9dcd3fc4b5012e4af68e30c648"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79625966e176dc97ddabc142351e0409e28acf4660b88d1cf6adb876d20c490d"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8aabf1c1a04584c168984ac678a668094d831f152859d06e055288fa515e4d30"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:19721ac03892001ee8fdd11507e6a2e01f4e37014def96379411ca99d78aeb2c"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7f5d859928e635fa3ce3477704acee0f667b3a3d3e4bb109f2b18d4005f38287"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win32.whl", hash = "sha256:3216ccf953b3f267691c90c6fe742e45d890d8272326b4a8b20850a03d05b7b8"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:30e34c4e97964805f715206c7b789d54a78b70f3ff19fbe590104b71c45600e5"}, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +description = "Pure-Python implementation of ASN.1 
types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pycodestyle" +version = "2.12.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"}, + {file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"}, +] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = 
"pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = 
"pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.7.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_settings-2.7.1-py3-none-any.whl", hash = "sha256:590be9e6e24d06db33a4262829edef682500ef008565a969c73d39d5f8bfb3fd"}, + {file = "pydantic_settings-2.7.1.tar.gz", hash = "sha256:10c9caad35e64bfb3c2fbf70a078c0e25cc92499782e5200747f942a065dec93"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" + +[package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pyflakes" +version = "3.2.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +description = "JSON Web Token implementation in Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"}, + {file = "pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + +[[package]] +name = "pyparsing" +version = "3.2.1" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, + {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, +] + +[package.extras] 
+diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyrfc3339" +version = "2.0.1" +description = "Generate and parse RFC 3339 timestamps" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pyRFC3339-2.0.1-py3-none-any.whl", hash = "sha256:30b70a366acac3df7386b558c21af871522560ed7f3f73cf344b8c2cbb8b0c9d"}, + {file = "pyrfc3339-2.0.1.tar.gz", hash = "sha256:e47843379ea35c1296c3b6c67a948a1a490ae0584edfcbdea0eaffb5dd29960b"}, +] + +[[package]] +name = "pyright" +version = "1.1.392.post0" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2"}, + {file = "pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + +[[package]] +name = "pyro5" +version = "5.15" +description = "Remote object communication library, fifth major version" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "Pyro5-5.15-py3-none-any.whl", hash = "sha256:4d85428ed75985e63f159d2486ad5680743ea76f766340fd30b65dd20f83d471"}, + {file = "Pyro5-5.15.tar.gz", hash = "sha256:82c3dfc9860b49f897b28ff24fe6716c841672c600af8fe40d0e3a7fac9a3f5e"}, +] + +[package.dependencies] +serpent = ">=1.41" + +[[package]] +name = "pytest" +version = "8.3.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.25.2" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pytest_asyncio-0.25.2-py3-none-any.whl", hash = "sha256:0d0bb693f7b99da304a0634afc0a4b19e49d5e0de2d670f38dc4bfa5727c5075"}, + {file = "pytest_asyncio-0.25.2.tar.gz", hash = "sha256:3f8ef9a98f45948ea91a0ed3dc4268b5326c0e7bce73892acc654df4262ad45f"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = 
"sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-watcher" +version = "0.4.3" +description = "Automatically rerun your tests on file modifications" +optional = false +python-versions = "<4.0.0,>=3.7.0" +groups = ["dev"] +files = [ + {file = "pytest_watcher-0.4.3-py3-none-any.whl", hash = "sha256:d59b1e1396f33a65ea4949b713d6884637755d641646960056a90b267c3460f9"}, + {file = "pytest_watcher-0.4.3.tar.gz", hash = "sha256:0cb0e4661648c8c0ff2b2d25efa5a8e421784b9e4c60fcecbf9b7c30b2d731b3"}, +] + +[package.dependencies] +tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""} +watchdog = ">=2.0.0" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.20" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, + {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, +] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pywin32" +version = "308" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Windows\"" +files = [ + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = 
"pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "qdrant-client" +version = "1.12.2" +description = "Client library for the Qdrant vector search engine" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "qdrant_client-1.12.2-py3-none-any.whl", hash = "sha256:a0ae500a46a679ff3521ba3f1f1cf3d72b57090a768cec65fc317066bcbac1e6"}, + {file = "qdrant_client-1.12.2.tar.gz", hash = "sha256:2777e09b3e89bb22bb490384d8b1fa8140f3915287884f18984f7031a346aba5"}, +] + +[package.dependencies] +grpcio = ">=1.41.0" +grpcio-tools = ">=1.41.0" +httpx = {version = ">=0.20.0", extras = ["http2"]} +numpy = [ + {version = ">=1.21", markers = "python_version >= \"3.10\" and python_version < \"3.12\""}, + {version = 
">=1.26", markers = "python_version >= \"3.12\" and python_version < \"3.13\""}, +] +portalocker = ">=2.7.0,<3.0.0" +pydantic = ">=1.10.8" +urllib3 = ">=1.26.14,<3" + +[package.extras] +fastembed = ["fastembed (==0.5.0)"] +fastembed-gpu = ["fastembed-gpu (==0.5.0)"] + +[[package]] +name = "realtime" +version = "2.2.0" +description = "" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "realtime-2.2.0-py3-none-any.whl", hash = "sha256:26dbaa58d143345318344bd7a7d4dc67154d6e0e9c98524327053a78bb3cc6b6"}, + {file = "realtime-2.2.0.tar.gz", hash = "sha256:f87a51b6b8dd8c72c30af6c841e0161132dcb32bf8b96178f3fe3866d575ef33"}, +] + +[package.dependencies] +aiohttp = ">=3.11.11,<4.0.0" +python-dateutil = ">=2.8.1,<3.0.0" +typing-extensions = ">=4.12.2,<5.0.0" +websockets = ">=11,<14" + +[[package]] +name = "redis" +version = "5.2.1" +description = "Python client for Redis database and key-value store" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"}, + {file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} + +[package.extras] +hiredis = ["hiredis (>=3.0.0)"] +ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"] + +[[package]] +name = "referencing" +version = "0.36.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "referencing-0.36.1-py3-none-any.whl", hash = "sha256:363d9c65f080d0d70bc41c721dce3c7f3e77fc09f269cd5c8813da18069a6794"}, + {file = "referencing-0.36.1.tar.gz", hash = "sha256:ca2e6492769e3602957e9b831b94211599d2aade9477f5d44110d2530cf9aade"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" +typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} + +[[package]] +name = "replicate" +version = "1.0.4" +description = "Python client for Replicate" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"}, + {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"}, +] + +[package.dependencies] +httpx = ">=0.21.0,<1" +packaging = "*" +pydantic = ">1.10.7" +typing_extensions = ">=4.5.0" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +groups = ["main"] +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + +[[package]] +name = "rpds-py" +version = "0.22.3" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, + {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf"}, + {file = "rpds_py-0.22.3-cp310-cp310-win32.whl", hash = "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652"}, + {file = "rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2"}, + {file = 
"rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a"}, + {file = "rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64"}, + {file = "rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7"}, + {file = "rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627"}, + {file = "rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25"}, + {file = 
"rpds_py-0.22.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f"}, + {file = "rpds_py-0.22.3-cp313-cp313-win32.whl", hash = "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de"}, + {file = "rpds_py-0.22.3-cp313-cp313-win_amd64.whl", hash = "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win32.whl", hash = 
"sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win_amd64.whl", hash = "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520"}, + {file = "rpds_py-0.22.3-cp39-cp39-win32.whl", hash = "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9"}, + {file = "rpds_py-0.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6"}, + {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, +] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +groups = ["main"] +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "ruff" +version = "0.9.3" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624"}, + {file = "ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c"}, + {file = "ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4"}, + {file = "ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519"}, + {file = "ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b"}, + {file = "ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c"}, + {file = "ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4"}, + {file = "ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b"}, + {file = "ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a"}, +] + +[[package]] +name = "semver" +version = "3.0.4" +description = "Python helper for Semantic Versioning (https://semver.org)" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "semver-3.0.4-py3-none-any.whl", hash = "sha256:9c824d87ba7f7ab4a1890799cec8596f15c1241cb473404ea1cb0c55e4b04746"}, + {file = "semver-3.0.4.tar.gz", hash = "sha256:afc7d8c584a5ed0a11033af086e8af226a9c0b206f313e0301f8dd7b6b589602"}, +] + +[[package]] +name = "sentry-sdk" +version = "2.19.2" +description = "Python client for Sentry (https://sentry.io)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "sentry_sdk-2.19.2-py2.py3-none-any.whl", hash = "sha256:ebdc08228b4d131128e568d696c210d846e5b9d70aa0327dec6b1272d9d40b84"}, + {file = "sentry_sdk-2.19.2.tar.gz", hash = 
"sha256:467df6e126ba242d39952375dd816fbee0f217d119bf454a8ce74cf1e7909e8d"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = ">=1.26.11" + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +anthropic = ["anthropic (>=0.16)"] +arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] +beam = ["apache-beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +celery-redbeat = ["celery-redbeat (>=2)"] +chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] +grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] +http2 = ["httpcore[http2] (==1.*)"] +httpx = ["httpx (>=0.16.0)"] +huey = ["huey (>=2)"] +huggingface-hub = ["huggingface_hub (>=0.22)"] +langchain = ["langchain (>=0.0.210)"] +launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"] +litestar = ["litestar (>=2.0.0)"] +loguru = ["loguru (>=0.5)"] +openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] +openfeature = ["openfeature-sdk (>=0.7.1)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro"] +pure-eval = ["asttokens", "executing", "pure_eval"] +pymongo = ["pymongo (>=3.1)"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] +tornado = ["tornado (>=6)"] + +[[package]] +name = "serpent" +version = "1.41" +description = "Serialization based on ast.literal_eval" +optional = false +python-versions = ">=3.2" +groups = ["main"] +files = [ + {file = "serpent-1.41-py3-none-any.whl", hash = "sha256:5fd776b3420441985bc10679564c2c9b4a19f77bea59f018e473441d98ae5dd7"}, + {file = "serpent-1.41.tar.gz", hash = "sha256:0407035fe3c6644387d48cff1467d5aa9feff814d07372b78677ed0ee3ed7095"}, +] + +[[package]] +name = "setuptools" +version = "75.8.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"}, + {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] +core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", 
"pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "sgmllib3k" +version = "1.0.0" +description = "Py3k port of sgmllib." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9"}, +] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.37" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da36c3b0e891808a7542c5c89f224520b9a16c7f5e4d6a1156955605e54aef0e"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e7402ff96e2b073a98ef6d6142796426d705addd27b9d26c3b32dbaa06d7d069"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6f5d254a22394847245f411a2956976401e84da4288aa70cbcd5190744062c1"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41296bbcaa55ef5fdd32389a35c710133b097f7b2609d8218c0eabded43a1d84"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bedee60385c1c0411378cbd4dc486362f5ee88deceea50002772912d798bb00f"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6c67415258f9f3c69867ec02fea1bf6508153709ecbd731a982442a590f2b7e4"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win32.whl", hash = "sha256:650dcb70739957a492ad8acff65d099a9586b9b8920e3507ca61ec3ce650bb72"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win_amd64.whl", hash = "sha256:93d1543cd8359040c02b6614421c8e10cd7a788c40047dbc507ed46c29ae5636"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:78361be6dc9073ed17ab380985d1e45e48a642313ab68ab6afa2457354ff692c"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b661b49d0cb0ab311a189b31e25576b7ac3e20783beb1e1817d72d9d02508bf5"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d57bafbab289e147d064ffbd5cca2d7b1394b63417c0636cea1f2e93d16eb9e8"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2c0913f02341d25fb858e4fb2031e6b0813494cca1ba07d417674128ce11b"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:9df21b8d9e5c136ea6cde1c50d2b1c29a2b5ff2b1d610165c23ff250e0704087"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db18ff6b8c0f1917f8b20f8eca35c28bbccb9f83afa94743e03d40203ed83de9"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win32.whl", hash = "sha256:46954173612617a99a64aee103bcd3f078901b9a8dcfc6ae80cbf34ba23df989"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win_amd64.whl", hash = "sha256:7b7e772dc4bc507fdec4ee20182f15bd60d2a84f1e087a8accf5b5b7a0dcf2ba"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2952748ecd67ed3b56773c185e85fc084f6bdcdec10e5032a7c25a6bc7d682ef"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3151822aa1db0eb5afd65ccfafebe0ef5cda3a7701a279c8d0bf17781a793bb4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaa8039b6d20137a4e02603aba37d12cd2dde7887500b8855356682fc33933f4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cdba1f73b64530c47b27118b7053b8447e6d6f3c8104e3ac59f3d40c33aa9fd"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1b2690456528a87234a75d1a1644cdb330a6926f455403c8e4f6cad6921f9098"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf5ae8a9dcf657fd72144a7fd01f243236ea39e7344e579a121c4205aedf07bb"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win32.whl", hash = "sha256:ea308cec940905ba008291d93619d92edaf83232ec85fbd514dcb329f3192761"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win_amd64.whl", hash = "sha256:635d8a21577341dfe4f7fa59ec394b346da12420b86624a69e466d446de16aff"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8c4096727193762e72ce9437e2a86a110cf081241919ce3fab8e89c02f6b6658"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e4fb5ac86d8fe8151966814f6720996430462e633d225497566b3996966b9bdb"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e56a139bfe136a22c438478a86f8204c1eb5eed36f4e15c4224e4b9db01cb3e4"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f95fc8e3f34b5f6b3effb49d10ac97c569ec8e32f985612d9b25dd12d0d2e94"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c505edd429abdfe3643fa3b2e83efb3445a34a9dc49d5f692dd087be966020e0"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:12b0f1ec623cccf058cf21cb544f0e74656618165b083d78145cafde156ea7b6"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win32.whl", hash = "sha256:293f9ade06b2e68dd03cfb14d49202fac47b7bb94bffcff174568c951fbc7af2"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win_amd64.whl", hash = "sha256:d70f53a0646cc418ca4853da57cf3ddddbccb8c98406791f24426f2dd77fd0e2"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:44f569d0b1eb82301b92b72085583277316e7367e038d97c3a1a899d9a05e342"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2eae3423e538c10d93ae3e87788c6a84658c3ed6db62e6a61bb9495b0ad16bb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfff7be361048244c3aa0f60b5e63221c5e0f0e509f4e47b8910e22b57d10ae7"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = 
"sha256:5bc3339db84c5fb9130ac0e2f20347ee77b5dd2596ba327ce0d399752f4fce39"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:84b9f23b0fa98a6a4b99d73989350a94e4a4ec476b9a7dfe9b79ba5939f5e80b"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win32.whl", hash = "sha256:51bc9cfef83e0ac84f86bf2b10eaccb27c5a3e66a1212bef676f5bee6ef33ebb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win_amd64.whl", hash = "sha256:8e47f1af09444f87c67b4f1bb6231e12ba6d4d9f03050d7fc88df6d075231a49"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6b788f14c5bb91db7f468dcf76f8b64423660a05e57fe277d3f4fad7b9dcb7ce"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521ef85c04c33009166777c77e76c8a676e2d8528dc83a57836b63ca9c69dcd1"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75311559f5c9881a9808eadbeb20ed8d8ba3f7225bef3afed2000c2a9f4d49b9"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cce918ada64c956b62ca2c2af59b125767097ec1dca89650a6221e887521bfd7"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9d087663b7e1feabea8c578d6887d59bb00388158e8bff3a76be11aa3f748ca2"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cf95a60b36997dad99692314c4713f141b61c5b0b4cc5c3426faad570b31ca01"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win32.whl", hash = "sha256:d75ead7dd4d255068ea0f21492ee67937bd7c90964c8f3c2bea83c7b7f81b95f"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win_amd64.whl", hash = "sha256:74bbd1d0a9bacf34266a7907d43260c8d65d31d691bb2356f41b17c2dca5b1d0"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:648ec5acf95ad59255452ef759054f2176849662af4521db6cb245263ae4aa33"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:35bd2df269de082065d4b23ae08502a47255832cc3f17619a5cea92ce478b02b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f581d365af9373a738c49e0c51e8b18e08d8a6b1b15cc556773bcd8a192fa8b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82df02816c14f8dc9f4d74aea4cb84a92f4b0620235daa76dde002409a3fbb5a"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94b564e38b344d3e67d2e224f0aec6ba09a77e4582ced41e7bfd0f757d926ec9"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:955a2a765aa1bd81aafa69ffda179d4fe3e2a3ad462a736ae5b6f387f78bfeb8"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-win32.whl", hash = "sha256:03f0528c53ca0b67094c4764523c1451ea15959bbf0a8a8a3096900014db0278"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-win_amd64.whl", hash = "sha256:4b12885dc85a2ab2b7d00995bac6d967bffa8594123b02ed21e8eb2205a7584b"}, + {file = "SQLAlchemy-2.0.37-py3-none-any.whl", hash = "sha256:a8998bf9f8658bd3839cbc44ddbe982955641863da0c1efe5b00c1ab4f5c16b1"}, + {file = "sqlalchemy-2.0.37.tar.gz", hash = "sha256:12b28d99a9c14eaf4055810df1001557176716de0167b91026e648e65229bffb"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +typing-extensions = ">=4.6.0" + 
+[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "starlette" +version = "0.45.3" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d"}, + {file = "starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + +[[package]] +name = "storage3" +version = "0.11.1" +description = "Supabase Storage client for Python." +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "storage3-0.11.1-py3-none-any.whl", hash = "sha256:a8dcfd1472ff1238c0f4a6a725d7a579f132762539c5395dc1e91806b4e20e45"}, + {file = "storage3-0.11.1.tar.gz", hash = "sha256:b3bca07108f7077d406d49ef0ddd6805fe22f94fafc186c56bf3a1e2761291f3"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +python-dateutil = ">=2.8.2,<3.0.0" + +[[package]] +name = "strenum" +version = "0.4.15" +description = "An Enum that inherits from str." +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"}, + {file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"}, +] + +[package.extras] +docs = ["myst-parser[linkify]", "sphinx", "sphinx-rtd-theme"] +release = ["twine"] +test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"] + +[[package]] +name = "stripe" +version = "11.4.1" +description = "Python bindings for the Stripe API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "stripe-11.4.1-py2.py3-none-any.whl", hash = "sha256:8aa47a241de0355c383c916c4ef7273ab666f096a44ee7081e357db4a36f0cce"}, + {file = "stripe-11.4.1.tar.gz", hash = "sha256:7ddd251b622d490fe57d78487855dc9f4d95b1bb113607e81fd377037a133d5a"}, +] + +[package.dependencies] +requests = {version = ">=2.20", markers = "python_version >= \"3.0\""} +typing-extensions = {version = ">=4.5.0", markers = "python_version >= \"3.7\""} + +[[package]] +name = "supabase" +version = "2.11.0" +description = "Supabase client for Python." 
+optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "supabase-2.11.0-py3-none-any.whl", hash = "sha256:67a0da498895f4cd6554935e2854b4c41f87b297b78fb9c9414902a382041406"}, + {file = "supabase-2.11.0.tar.gz", hash = "sha256:2a906f7909fd9a50f944cd9332ce66c684e2d37c0864284d34c5815e6c63cc01"}, +] + +[package.dependencies] +gotrue = ">=2.11.0,<3.0.0" +httpx = ">=0.26,<0.29" +postgrest = ">=0.19,<0.20" +realtime = ">=2.0.0,<3.0.0" +storage3 = ">=0.10,<0.12" +supafunc = ">=0.9,<0.10" + +[[package]] +name = "supafunc" +version = "0.9.2" +description = "Library for Supabase Functions" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["main"] +files = [ + {file = "supafunc-0.9.2-py3-none-any.whl", hash = "sha256:be5ee9f53842c4b0ba5f4abfb5bddf9f9e37e69e755ec0526852bb15af9d2ff5"}, + {file = "supafunc-0.9.2.tar.gz", hash = "sha256:f5164114a3e65e7e552539f3f1050aa3d4970885abdd7405555c17fd216e2da1"}, +] + +[package.dependencies] +httpx = {version = ">=0.26,<0.29", extras = ["http2"]} +strenum = ">=0.4.15,<0.5.0" + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, +] + +[[package]] +name = "tqdm" +version = "4.67.1" 
+description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "tweepy" +version = "4.15.0" +description = "Twitter library for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tweepy-4.15.0-py3-none-any.whl", hash = "sha256:64adcea317158937059e4e2897b3ceb750b0c2dd5df58938c2da8f7eb3b88e6a"}, + {file = "tweepy-4.15.0.tar.gz", hash = "sha256:1345cbcdf0a75e2d89f424c559fd49fda4d8cd7be25cd5131e3b57bad8a21d76"}, +] + +[package.dependencies] +oauthlib = ">=3.2.0,<4" +requests = ">=2.27.0,<3" +requests-oauthlib = ">=1.2.0,<3" + +[package.extras] +async = ["aiohttp (>=3.7.3,<4)", "async-lru (>=1.0.3,<3)"] +dev = ["coverage (>=4.4.2)", "coveralls (>=2.1.0)", "tox (>=3.21.0)"] +docs = ["myst-parser (==0.15.2)", "readthedocs-sphinx-search (==0.1.1)", "sphinx (==4.2.0)", "sphinx-hoverxref (==0.7b1)", "sphinx-tabs (==3.2.0)", "sphinx_rtd_theme (==1.0.0)"] +socks = ["requests[socks] (>=2.27.0,<3)"] +test = ["urllib3 (<2)", "vcrpy (>=1.10.3)"] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "tzdata" +version = "2025.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +groups = ["main"] +markers = "platform_system == \"Windows\"" +files = [ + {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, + {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, +] + +[[package]] +name = "tzlocal" +version = "5.2" +description = "tzinfo object for the local timezone" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "tzlocal-5.2-py3-none-any.whl", hash = "sha256:49816ef2fe65ea8ac19d19aa7a1ae0551c834303d5014c6d5a62e4cbda8047b8"}, + {file = "tzlocal-5.2.tar.gz", hash = "sha256:8d399205578f1a9342816409cc1e46a93ebd5755e39ea2d85334bea911bf0e6e"}, +] + +[package.dependencies] +tzdata = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +devenv = ["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] + +[[package]] +name = "update-checker" +version = "0.18.0" +description = "A python module that will check for package updates." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "update_checker-0.18.0-py3-none-any.whl", hash = "sha256:cbba64760a36fe2640d80d85306e8fe82b6816659190993b7bdabadee4d4bbfd"}, + {file = "update_checker-0.18.0.tar.gz", hash = "sha256:6a2d45bb4ac585884a6b03f9eade9161cedd9e8111545141e9aa9058932acb13"}, +] + +[package.dependencies] +requests = ">=2.3.0" + +[package.extras] +dev = ["black", "flake8", "pytest (>=2.7.3)"] +lint = ["black", "flake8"] +test = ["pytest (>=2.7.3)"] + +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.34.0" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, + {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.6.3", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.21.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\"" +files = [ + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f38b2e090258d051d68a5b14d1da7203a3c3677321cf32a95a6f4db4dd8b6f26"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c43e0f13022b998eb9b973b5e97200c8b90823454d4bc06ab33829e09fb9bb"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10d66943def5fcb6e7b37310eb6b5639fd2ccbc38df1177262b0640c3ca68c1f"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:67dd654b8ca23aed0a8e99010b4c34aca62f4b7fce88f39d452ed7622c94845c"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, + {file = 
"uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:17df489689befc72c39a08359efac29bbee8eee5209650d4b9f34df73d22e414"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc09f0ff191e61c2d592a752423c767b4ebb2986daa9ed62908e2b1b9a9ae206"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0ce1b49560b1d2d8a2977e3ba4afb2414fb46b86a1b64056bc4ab929efdafbe"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e678ad6fe52af2c58d2ae3c73dc85524ba8abe637f134bf3564ed07f555c5e79"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:460def4412e473896ef179a1671b40c039c7012184b627898eea5072ef6f017a"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:10da8046cc4a8f12c91a1c39d1dd1585c41162a15caaef165c2174db9ef18bdc"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c097078b8031190c934ed0ebfee8cc5f9ba9642e6eb88322b9958b649750f72b"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46923b0b5ee7fc0020bef24afe7836cb068f5050ca04caf6b487c513dc1a20b2"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e420a3afe22cdcf2a0f4846e377d16e718bc70103d7088a4f7623567ba5fb0"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb67cdbc0e483da00af0b2c3cdad4b7c61ceb1ee0f33fe00e09c81e3a6cb75"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:221f4f2a1f46032b403bf3be628011caf75428ee3cc204a22addf96f586b19fd"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2d1f581393673ce119355d56da84fe1dd9d2bb8b3d13ce792524e1607139feff"}, + {file = "uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, +] + +[package.extras] +dev = ["Cython (>=3.0,<4.0)", "setuptools (>=60)"] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "watchfiles" +version = "1.0.4" +description = "Simple, modern and high performance file 
watching and code reload in python." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08"}, + {file = "watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2"}, + {file = "watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899"}, + {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff"}, + {file = "watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f"}, + {file = "watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f"}, + {file = "watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161"}, + {file = "watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19"}, + {file = "watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49"}, + {file = "watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c"}, + {file = "watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1"}, + {file = 
"watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226"}, + {file = "watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105"}, + {file = "watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74"}, + {file = "watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3"}, + {file = "watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2"}, + {file = "watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af"}, + {file = "watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a"}, + {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff"}, + {file = "watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e"}, + {file = "watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94"}, + {file = "watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c"}, + {file = "watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90"}, + {file = "watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9"}, + {file = "watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590"}, + {file = "watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902"}, + {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1"}, + {file = "watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303"}, + {file = "watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80"}, + {file = "watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc"}, + {file = "watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21"}, + {file = "watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3"}, + {file = "watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf"}, + {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a"}, + {file = "watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b"}, + {file = "watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27"}, + {file = "watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43"}, + {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18"}, + {file = "watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817"}, + {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0"}, + {file = "watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d"}, + {file = 
"watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3"}, + {file = "watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e"}, + {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb"}, + {file = "watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42"}, + {file = "watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "websocket-client" +version = "1.8.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "websockets" +version = "13.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file 
= "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = 
"websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, +] + +[[package]] +name = "wrapt" +version = "1.17.2" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash 
= "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = 
"wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, +] + +[[package]] +name = "yarl" +version = "1.18.3" +description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690"}, + {file = "yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6"}, + {file = "yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae"}, + {file = 
"yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a"}, + {file = "yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1"}, + {file = "yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, + {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = 
"sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, + {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8"}, + {file = "yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d"}, + {file = "yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1"}, + {file = "yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5"}, + {file = "yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9"}, + {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, + {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[[package]] +name = "youtube-transcript-api" +version = "0.6.3" +description = "This is an python API which allows you to get the transcripts/subtitles for a given YouTube video. It also works for automatically generated subtitles, supports translating subtitles and it does not require a headless browser, like other selenium based solutions do!" 
+optional = false +python-versions = "<3.14,>=3.8" +groups = ["main"] +files = [ + {file = "youtube_transcript_api-0.6.3-py3-none-any.whl", hash = "sha256:297a74c1863d9df88f6885229f33a7eda61493d73ecb13ec80e876b65423e9b4"}, + {file = "youtube_transcript_api-0.6.3.tar.gz", hash = "sha256:4d1f6451ae508390a5279f98519efb45e091bf60d3cca5ea0bb122800ab6a011"}, +] + +[package.dependencies] +defusedxml = ">=0.7.1,<0.8.0" +requests = "*" + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.10,<3.13" +content-hash = "38a5c750ddca1a6264fd98b7ee74d199c2bbf57d3acc189264bd9f8ec90febc2" diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml new file mode 100644 index 000000000000..ad7061468231 --- /dev/null +++ b/autogpt_platform/backend/pyproject.toml @@ -0,0 +1,104 @@ +[tool.poetry] +name = "autogpt-platform-backend" +version = "0.3.4" +description = "A platform for building AI-powered agentic workflows" +authors = ["AutoGPT "] +readme = "README.md" +packages = [{ include = "backend", format = "sdist" }] + + +[tool.poetry.dependencies] +python = ">=3.10,<3.13" +aio-pika = "^9.5.4" +anthropic = "^0.40.0" +apscheduler = "^3.11.0" +autogpt-libs = { path = "../autogpt_libs", develop = true } +click = "^8.1.7" +discord-py = "^2.4.0" +e2b-code-interpreter = "^1.0.1" +fastapi = "^0.115.5" +feedparser = "^6.0.11" +flake8 = "^7.0.0" +google-api-python-client = "^2.154.0" +google-auth-oauthlib = "^1.2.1" +groq = "^0.13.1" +jinja2 = "^3.1.4" +jsonref = "^1.1.0" +jsonschema = "^4.22.0" +ollama = "^0.4.1" +openai = "^1.57.4" +praw = "~7.8.1" +prisma = "^0.15.0" +psutil = "^6.1.0" +pydantic = "^2.9.2" +pydantic-settings = "^2.3.4" +pyro5 = "^5.15" +pytest = "^8.2.1" +pytest-asyncio = "^0.25.0" +python-dotenv = "^1.0.1" +redis = "^5.2.0" +sentry-sdk = "2.19.2" +strenum = "^0.4.9" +stripe = "^11.3.0" +supabase = "2.11.0" +tenacity = "^9.0.0" +tweepy = "^4.14.0" +uvicorn = { extras = ["standard"], version = "^0.34.0" } +websockets = "^13.1" +youtube-transcript-api = "^0.6.2" +googlemaps = "^4.10.0" +replicate = "^1.0.4" +pinecone = "^5.3.1" +cryptography = "^43.0" +python-multipart = "^0.0.20" +sqlalchemy = "^2.0.36" +psycopg2-binary = "^2.9.10" +google-cloud-storage = "^2.18.2" +launchdarkly-server-sdk = "^9.8.0" +mem0ai = "^0.1.44" +moviepy = "^2.1.2" + +[tool.poetry.group.dev.dependencies] +poethepoet = "^0.32.1" +httpx = "^0.27.0" +pytest-watcher = "^0.4.2" +requests = "^2.32.3" +ruff = "^0.9.2" +pyright = "^1.1.392" +isort = "^5.13.2" +black = "^24.10.0" +aiohappyeyeballs = "^2.4.4" +pytest-mock = "^3.14.0" +faker = "^33.3.1" + +[build-system] +requires = ["poetry-core"] +build-backend 
= "poetry.core.masonry.api" + +[tool.poetry.scripts] +app = "backend.app:main" +rest = "backend.rest:main" +ws = "backend.ws:main" +executor = "backend.exec:main" +cli = "backend.cli:main" +format = "linter:format" +lint = "linter:lint" +test = "run_tests:test" + +[tool.isort] +profile = "black" + +[tool.pytest-watcher] +now = false +clear = true +delay = 0.2 +runner = "pytest" +runner_args = [] +patterns = ["*.py"] +ignore_patterns = [] + +[tool.pytest.ini_options] +asyncio_mode = "auto" + +[tool.ruff] +target-version = "py310" diff --git a/autogpt_platform/backend/run_tests.py b/autogpt_platform/backend/run_tests.py new file mode 100644 index 000000000000..b2343b2082ca --- /dev/null +++ b/autogpt_platform/backend/run_tests.py @@ -0,0 +1,71 @@ +import subprocess +import sys +import time + + +def wait_for_postgres(max_retries=5, delay=5): + for _ in range(max_retries): + try: + result = subprocess.run( + [ + "docker", + "compose", + "-f", + "docker-compose.test.yaml", + "exec", + "postgres-test", + "pg_isready", + "-U", + "postgres", + "-d", + "postgres", + ], + check=True, + capture_output=True, + text=True, + ) + if "accepting connections" in result.stdout: + print("PostgreSQL is ready.") + return True + except subprocess.CalledProcessError: + print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...") + time.sleep(delay) + print("Failed to connect to PostgreSQL.") + return False + + +def run_command(command, check=True): + try: + subprocess.run(command, check=check) + except subprocess.CalledProcessError as e: + print(f"Command failed: {e}") + sys.exit(1) + + +def test(): + # Start PostgreSQL with Docker Compose + run_command( + [ + "docker", + "compose", + "-f", + "docker-compose.test.yaml", + "up", + "-d", + "postgres-test", + ] + ) + + if not wait_for_postgres(): + run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"]) + sys.exit(1) + + # Run Prisma migrations + run_command(["prisma", "migrate", "dev"]) + + # Run the tests + result = subprocess.run(["pytest"] + sys.argv[1:], check=False) + + run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"]) + + sys.exit(result.returncode) diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma new file mode 100644 index 000000000000..6fc645c3527f --- /dev/null +++ b/autogpt_platform/backend/schema.prisma @@ -0,0 +1,642 @@ +// THIS FILE IS AUTO-GENERATED, RUN `poetry run schema` TO UPDATE +datasource db { + provider = "postgresql" + url = env("DATABASE_URL") +} + +generator client { + provider = "prisma-client-py" + recursive_type_depth = 5 + interface = "asyncio" + previewFeatures = ["views"] +} + +// User model to mirror Auth provider users +model User { + id String @id // This should match the Supabase user ID + email String @unique + name String? + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + metadata Json @default("{}") + integrations String @default("") + stripeCustomerId String? + topUpConfig Json? 
+ + // Relations + AgentGraphs AgentGraph[] + AgentGraphExecutions AgentGraphExecution[] + AnalyticsDetails AnalyticsDetails[] + AnalyticsMetrics AnalyticsMetrics[] + CreditTransaction CreditTransaction[] + + AgentPreset AgentPreset[] + UserAgent UserAgent[] + + Profile Profile[] + StoreListing StoreListing[] + StoreListingReview StoreListingReview[] + StoreListingSubmission StoreListingSubmission[] + APIKeys APIKey[] + IntegrationWebhooks IntegrationWebhook[] + + @@index([id]) + @@index([email]) +} + +// This model describes the Agent Graph/Flow (Multi Agent System). +model AgentGraph { + id String @default(uuid()) + version Int @default(1) + createdAt DateTime @default(now()) + updatedAt DateTime? @updatedAt + + name String? + description String? + isActive Boolean @default(true) + isTemplate Boolean @default(false) + + // Link to User model + userId String + // FIX: Do not cascade delete the agent when the user is deleted + // This allows us to delete user data without deleting the agent, which may be in use by other users + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + AgentNodes AgentNode[] + AgentGraphExecution AgentGraphExecution[] + + AgentPreset AgentPreset[] + UserAgent UserAgent[] + StoreListing StoreListing[] + StoreListingVersion StoreListingVersion? + + @@id(name: "graphVersionId", [id, version]) + @@index([userId, isActive]) +} + +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////////////// USER SPECIFIC DATA //////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// + +// An AgentPreset is an Agent + User Configuration of that agent. +// For example, if someone has created a weather agent and they want to set it up to +// inform them of extreme weather warnings in Texas, the agent with the configuration to set it to +// monitor Texas, along with the cron setup or webhook triggers, is an AgentPreset +model AgentPreset { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + name String + description String + + // For agents that can be triggered by webhooks or a cronjob + // This bool allows us to disable a configured agent without deleting it + isActive Boolean @default(true) + + userId String + User User @relation(fields: [userId], references: [id], onDelete: Cascade) + + agentId String + agentVersion Int + Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade) + + InputPresets AgentNodeExecutionInputOutput[] @relation("AgentPresetsInputData") + UserAgents UserAgent[] + AgentExecution AgentGraphExecution[] + + @@index([userId]) +} + +// For the library page +// It is a user-controlled list of agents that they will see in their library +model UserAgent { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + userId String + User User @relation(fields: [userId], references: [id], onDelete: Cascade) + + agentId String + agentVersion Int + Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version]) + + agentPresetId String? + AgentPreset AgentPreset?
@relation(fields: [agentPresetId], references: [id]) + + isFavorite Boolean @default(false) + isCreatedByUser Boolean @default(false) + isArchived Boolean @default(false) + isDeleted Boolean @default(false) + + @@index([userId]) +} + +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////// AGENT DEFINITION AND EXECUTION TABLES //////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// + +// This model describes a single node in the Agent Graph/Flow (Multi Agent System). +model AgentNode { + id String @id @default(uuid()) + + agentBlockId String + AgentBlock AgentBlock @relation(fields: [agentBlockId], references: [id], onUpdate: Cascade) + + agentGraphId String + agentGraphVersion Int @default(1) + AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade) + + // List of consumed input, that the parent node should provide. + Input AgentNodeLink[] @relation("AgentNodeSink") + + // List of produced output, that the child node should be executed. + Output AgentNodeLink[] @relation("AgentNodeSource") + + // JSON serialized dict[str, str] containing predefined input values. + constantInput String @default("{}") + + // For webhook-triggered blocks: reference to the webhook that triggers the node + webhookId String? + Webhook IntegrationWebhook? @relation(fields: [webhookId], references: [id]) + + // JSON serialized dict[str, str] containing the node metadata. + metadata String @default("{}") + + ExecutionHistory AgentNodeExecution[] + + @@index([agentGraphId, agentGraphVersion]) + @@index([agentBlockId]) + @@index([webhookId]) +} + +// This model describes the link between two AgentNodes. +model AgentNodeLink { + id String @id @default(uuid()) + + // Output of a node is connected to the source of the link. + agentNodeSourceId String + AgentNodeSource AgentNode @relation("AgentNodeSource", fields: [agentNodeSourceId], references: [id], onDelete: Cascade) + sourceName String + + // Input of a node is connected to the sink of the link. + agentNodeSinkId String + AgentNodeSink AgentNode @relation("AgentNodeSink", fields: [agentNodeSinkId], references: [id], onDelete: Cascade) + sinkName String + + // Default: the data coming from the source can only be consumed by the sink once, Static: input data will be reused. + isStatic Boolean @default(false) + + @@index([agentNodeSourceId]) + @@index([agentNodeSinkId]) +} + +// This model describes a component that will be executed by the AgentNode. +model AgentBlock { + id String @id @default(uuid()) + name String @unique + + // We allow a block to have multiple types of input & output. + // Serialized object-typed `jsonschema` with top-level properties as input/output name. + inputSchema String + outputSchema String + + // Prisma requires explicit back-references. + ReferencedByAgentNode AgentNode[] +} + +// This model describes the status of an AgentGraphExecution or AgentNodeExecution. +enum AgentExecutionStatus { + INCOMPLETE + QUEUED + RUNNING + COMPLETED + TERMINATED + FAILED +} + +// This model describes the execution of an AgentGraph. +model AgentGraphExecution { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime? @updatedAt + startedAt DateTime? 
+ + executionStatus AgentExecutionStatus @default(COMPLETED) + + agentGraphId String + agentGraphVersion Int @default(1) + AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade) + + AgentNodeExecutions AgentNodeExecution[] + + // Link to User model -- Executed by this user + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + stats String? // JSON serialized object + AgentPreset AgentPreset? @relation(fields: [agentPresetId], references: [id]) + agentPresetId String? + + @@index([agentGraphId, agentGraphVersion]) + @@index([userId]) +} + +// This model describes the execution of an AgentNode. +model AgentNodeExecution { + id String @id @default(uuid()) + + agentGraphExecutionId String + AgentGraphExecution AgentGraphExecution @relation(fields: [agentGraphExecutionId], references: [id], onDelete: Cascade) + + agentNodeId String + AgentNode AgentNode @relation(fields: [agentNodeId], references: [id], onDelete: Cascade) + + Input AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionInput") + Output AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionOutput") + + executionStatus AgentExecutionStatus @default(COMPLETED) + // Final JSON serialized input data for the node execution. + executionData String? + addedTime DateTime @default(now()) + queuedTime DateTime? + startedTime DateTime? + endedTime DateTime? + + stats String? // JSON serialized object + + @@index([agentGraphExecutionId]) + @@index([agentNodeId]) +} + +// This model describes the output of an AgentNodeExecution. +model AgentNodeExecutionInputOutput { + id String @id @default(uuid()) + + name String + data String + time DateTime @default(now()) + + // Prisma requires explicit back-references. + referencedByInputExecId String? + ReferencedByInputExec AgentNodeExecution? @relation("AgentNodeExecutionInput", fields: [referencedByInputExecId], references: [id], onDelete: Cascade) + referencedByOutputExecId String? + ReferencedByOutputExec AgentNodeExecution? @relation("AgentNodeExecutionOutput", fields: [referencedByOutputExecId], references: [id], onDelete: Cascade) + + agentPresetId String? + AgentPreset AgentPreset? @relation("AgentPresetsInputData", fields: [agentPresetId], references: [id]) + + // Input and Output pin names are unique for each AgentNodeExecution. + @@unique([referencedByInputExecId, referencedByOutputExecId, name]) + @@index([referencedByOutputExecId]) +} + +// Webhook that is registered with a provider and propagates to one or more nodes +model IntegrationWebhook { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime? @updatedAt + + userId String + user User @relation(fields: [userId], references: [id], onDelete: Restrict) // Webhooks must be deregistered before deleting + + provider String // e.g. 'github' + credentialsId String // relation to the credentials that the webhook was created with + webhookType String // e.g. 'repo' + resource String // e.g. 'Significant-Gravitas/AutoGPT' + events String[] // e.g. 
['created', 'updated'] + config Json + secret String // crypto string, used to verify payload authenticity + + providerWebhookId String // Webhook ID assigned by the provider + + AgentNodes AgentNode[] + + @@index([userId]) +} + +model AnalyticsDetails { + // PK uses gen_random_uuid() to allow the db inserts to happen outside of prisma + // typical uuid() inserts are handled by prisma + id String @id @default(dbgenerated("gen_random_uuid()")) + + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + // Link to User model + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + // Analytics Categorical data used for filtering (indexable w and w/o userId) + type String + + // Analytic Specific Data. We should use a union type here, but prisma doesn't support it. + data Json? + + // Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc. + dataIndex String? + + @@index([userId, type], name: "analyticsDetails") + @@index([type]) +} + +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +////////////// METRICS TRACKING TABLES //////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +model AnalyticsMetrics { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + // Analytics Categorical data used for filtering (indexable w and w/o userId) + analyticMetric String + // Any numeric data that should be counted upon, summed, or otherwise aggregated. + value Float + // Any string data that should be used to identify the metric as distinct. + // ex: '/build' vs '/market' + dataString String? + + // Link to User model + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + @@index([userId]) +} + +enum CreditTransactionType { + TOP_UP + USAGE +} + +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +//////// ACCOUNTING AND CREDIT SYSTEM TABLES ////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +model CreditTransaction { + transactionKey String @default(uuid()) + createdAt DateTime @default(now()) + + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + amount Int + type CreditTransactionType + + runningBalance Int? + + isActive Boolean @default(true) + metadata Json? + + @@id(name: "creditTransactionIdentifier", [transactionKey, userId]) + @@index([userId, createdAt]) +} + +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// +////////////// Store TABLES /////////////////////////// +//////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////// + +model Profile { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + // Only 1 of user or group can be set. + // The user this profile belongs to, if any. + userId String? + User User? @relation(fields: [userId], references: [id], onDelete: Cascade) + + name String + username String @unique + description String + + links String[] + + avatarUrl String? 
+ + isFeatured Boolean @default(false) + + @@index([username]) + @@index([userId]) +} + +view Creator { + username String @unique + name String + avatar_url String + description String + + top_categories String[] + links String[] + + num_agents Int + agent_rating Float + agent_runs Int + is_featured Boolean + + // Index or unique are not applied to views +} + +view StoreAgent { + listing_id String @id + storeListingVersionId String + updated_at DateTime + + slug String + agent_name String + agent_video String? + agent_image String[] + + featured Boolean @default(false) + creator_username String + creator_avatar String + sub_heading String + description String + categories String[] + runs Int + rating Float + versions String[] + + // Index or unique are not applied to views +} + +view StoreSubmission { + listing_id String @id + user_id String + slug String + name String + sub_heading String + description String + image_urls String[] + date_submitted DateTime + status SubmissionStatus + runs Int + rating Float + agent_id String + agent_version Int + + // Index or unique are not applied to views +} + +model StoreListing { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + isDeleted Boolean @default(false) + // Not needed but makes lookups faster + isApproved Boolean @default(false) + + // The agent link here is only so we can do lookup on agentId, for the listing the StoreListingVersion is used. + agentId String + agentVersion Int + Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade) + + owningUserId String + OwningUser User @relation(fields: [owningUserId], references: [id]) + + StoreListingVersions StoreListingVersion[] + StoreListingSubmission StoreListingSubmission[] + + // Unique index on agentId to ensure only one listing per agent, regardless of number of versions the agent has. + @@unique([agentId]) + @@index([agentId, owningUserId]) + @@index([owningUserId]) + // Used in the view query + @@index([isDeleted, isApproved]) + @@index([isDeleted]) +} + +model StoreListingVersion { + id String @id @default(uuid()) + version Int @default(1) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + // The agent and version to be listed on the store + agentId String + agentVersion Int + Agent AgentGraph @relation(fields: [agentId, agentVersion], references: [id, version]) + + // The details for this version of the agent, this allows the author to update the details of the agent, + // But still allow using old versions of the agent with there original details. + // TODO: Create a database view that shows only the latest version of each store listing. + slug String + name String + subHeading String + videoUrl String? + imageUrls String[] + description String + categories String[] + + isFeatured Boolean @default(false) + + isDeleted Boolean @default(false) + // Old versions can be made unavailable by the author if desired + isAvailable Boolean @default(true) + // Not needed but makes lookups faster + isApproved Boolean @default(false) + StoreListing StoreListing? @relation(fields: [storeListingId], references: [id], onDelete: Cascade) + storeListingId String? + StoreListingSubmission StoreListingSubmission[] + + // Reviews are on a specific version, but then aggregated up to the listing. + // This allows us to provide a review filter to current version of the agent. 
+ StoreListingReview StoreListingReview[] + + @@unique([agentId, agentVersion]) + @@index([agentId, agentVersion, isDeleted]) +} + +model StoreListingReview { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + storeListingVersionId String + StoreListingVersion StoreListingVersion @relation(fields: [storeListingVersionId], references: [id], onDelete: Cascade) + + reviewByUserId String + ReviewByUser User @relation(fields: [reviewByUserId], references: [id]) + + score Int + comments String? + + @@unique([storeListingVersionId, reviewByUserId]) +} + +enum SubmissionStatus { + DAFT + PENDING + APPROVED + REJECTED +} + +model StoreListingSubmission { + id String @id @default(uuid()) + createdAt DateTime @default(now()) + updatedAt DateTime @default(now()) @updatedAt + + storeListingId String + StoreListing StoreListing @relation(fields: [storeListingId], references: [id], onDelete: Cascade) + + storeListingVersionId String + StoreListingVersion StoreListingVersion @relation(fields: [storeListingVersionId], references: [id], onDelete: Cascade) + + reviewerId String + Reviewer User @relation(fields: [reviewerId], references: [id]) + + Status SubmissionStatus @default(PENDING) + reviewComments String? + + @@unique([storeListingVersionId]) + @@index([storeListingId]) +} + +enum APIKeyPermission { + EXECUTE_GRAPH // Can execute agent graphs + READ_GRAPH // Can get graph versions and details + EXECUTE_BLOCK // Can execute individual blocks + READ_BLOCK // Can get block information +} + +model APIKey { + id String @id @default(uuid()) + name String + prefix String // First 8 chars for identification + postfix String + key String @unique // Hashed key + status APIKeyStatus @default(ACTIVE) + permissions APIKeyPermission[] + + createdAt DateTime @default(now()) + lastUsedAt DateTime? + revokedAt DateTime? + + description String? 
+ + // Relation to user + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + @@index([key]) + @@index([prefix]) + @@index([userId]) + @@index([status]) + @@index([userId, status]) +} + +enum APIKeyStatus { + ACTIVE + REVOKED + SUSPENDED +} diff --git a/autogpt_platform/backend/test/__init__.py b/autogpt_platform/backend/test/__init__.py new file mode 100644 index 000000000000..d10438719da5 --- /dev/null +++ b/autogpt_platform/backend/test/__init__.py @@ -0,0 +1,3 @@ +import os + +os.environ["ENABLE_AUTH"] = "false" diff --git a/autogpt_platform/backend/test/block/test_block.py b/autogpt_platform/backend/test/block/test_block.py new file mode 100644 index 000000000000..48d2616f613e --- /dev/null +++ b/autogpt_platform/backend/test/block/test_block.py @@ -0,0 +1,11 @@ +from typing import Type + +import pytest + +from backend.data.block import Block, get_blocks +from backend.util.test import execute_block_test + + +@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name) +def test_available_blocks(block: Type[Block]): + execute_block_test(block()) diff --git a/autogpt_platform/backend/test/conftest.py b/autogpt_platform/backend/test/conftest.py new file mode 100644 index 000000000000..d107a0b322ef --- /dev/null +++ b/autogpt_platform/backend/test/conftest.py @@ -0,0 +1,48 @@ +import logging + +import pytest + +from backend.util.test import SpinTestServer + +# NOTE: You can run tests like with the --log-cli-level=INFO to see the logs +# Set up logging +logger = logging.getLogger(__name__) + +# Create console handler with formatting +ch = logging.StreamHandler() +ch.setLevel(logging.INFO) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +ch.setFormatter(formatter) +logger.addHandler(ch) + + +@pytest.fixture(scope="session") +async def server(): + async with SpinTestServer() as server: + yield server + + +@pytest.fixture(scope="session", autouse=True) +async def graph_cleanup(server): + created_graph_ids = [] + original_create_graph = server.agent_server.test_create_graph + + async def create_graph_wrapper(*args, **kwargs): + created_graph = await original_create_graph(*args, **kwargs) + # Extract user_id correctly + user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None) + created_graph_ids.append((created_graph.id, user_id)) + return created_graph + + try: + server.agent_server.test_create_graph = create_graph_wrapper + yield # This runs the test function + finally: + server.agent_server.test_create_graph = original_create_graph + + # Delete the created graphs and assert they were deleted + for graph_id, user_id in created_graph_ids: + if user_id: + resp = await server.agent_server.test_delete_graph(graph_id, user_id) + num_deleted = resp["version_counts"] + assert num_deleted > 0, f"Graph {graph_id} was not deleted." 
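For orientation, the session-scoped `server` fixture in conftest.py above hands each test a `SpinTestServer`, and the test modules that follow all consume it the same way. The sketch below condenses that pattern; it is an illustrative sketch only, assembled from names that appear later in this diff (`create_test_graph`, `create_test_user`, `CreateGraph`, `wait_execution`, and the `test_*` helper methods on `agent_server`), not an additional test added by the change set.

import pytest

from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.test import SpinTestServer, wait_execution


@pytest.mark.asyncio(scope="session")
async def test_graph_roundtrip_sketch(server: SpinTestServer):
    # Create a user and register a sample graph via the agent server under test.
    user = await create_test_user()
    graph = await server.agent_server.test_create_graph(
        CreateGraph(graph=create_test_graph()), user.id
    )

    # Start an execution and wait (up to 30 seconds) for all node executions to finish.
    response = await server.agent_server.test_execute_graph(
        user_id=user.id,
        graph_id=graph.id,
        graph_version=graph.version,
        node_input={"node_input": {"input_1": "Hello", "input_2": "World"}},
    )
    results = await wait_execution(user.id, graph.id, response["id"], 30)
    assert results  # each entry is one node execution result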
diff --git a/autogpt_platform/backend/test/data/test_credit.py b/autogpt_platform/backend/test/data/test_credit.py new file mode 100644 index 000000000000..d247d1b9bd15 --- /dev/null +++ b/autogpt_platform/backend/test/data/test_credit.py @@ -0,0 +1,119 @@ +from datetime import datetime, timezone + +import pytest +from prisma.enums import CreditTransactionType +from prisma.models import CreditTransaction + +from backend.blocks.llm import AITextGeneratorBlock +from backend.data.credit import BetaUserCredit +from backend.data.execution import NodeExecutionEntry +from backend.data.user import DEFAULT_USER_ID +from backend.integrations.credentials_store import openai_credentials +from backend.util.test import SpinTestServer + +REFILL_VALUE = 1000 +user_credit = BetaUserCredit(REFILL_VALUE) + + +async def disable_test_user_transactions(): + await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID}) + + +async def top_up(amount: int): + await user_credit._add_transaction( + DEFAULT_USER_ID, + amount, + CreditTransactionType.TOP_UP, + ) + + +@pytest.mark.asyncio(scope="session") +async def test_block_credit_usage(server: SpinTestServer): + await disable_test_user_transactions() + await top_up(100) + current_credit = await user_credit.get_credits(DEFAULT_USER_ID) + + spending_amount_1 = await user_credit.spend_credits( + NodeExecutionEntry( + user_id=DEFAULT_USER_ID, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_graph_exec", + node_exec_id="test_node_exec", + block_id=AITextGeneratorBlock().id, + data={ + "model": "gpt-4-turbo", + "credentials": { + "id": openai_credentials.id, + "provider": openai_credentials.provider, + "type": openai_credentials.type, + }, + }, + ), + 0.0, + 0.0, + ) + assert spending_amount_1 > 0 + + spending_amount_2 = await user_credit.spend_credits( + NodeExecutionEntry( + user_id=DEFAULT_USER_ID, + graph_id="test_graph", + node_id="test_node", + graph_exec_id="test_graph_exec", + node_exec_id="test_node_exec", + block_id=AITextGeneratorBlock().id, + data={"model": "gpt-4-turbo", "api_key": "owned_api_key"}, + ), + 0.0, + 0.0, + ) + assert spending_amount_2 == 0 + + new_credit = await user_credit.get_credits(DEFAULT_USER_ID) + assert new_credit == current_credit - spending_amount_1 - spending_amount_2 + + +@pytest.mark.asyncio(scope="session") +async def test_block_credit_top_up(server: SpinTestServer): + await disable_test_user_transactions() + current_credit = await user_credit.get_credits(DEFAULT_USER_ID) + + await top_up(100) + + new_credit = await user_credit.get_credits(DEFAULT_USER_ID) + assert new_credit == current_credit + 100 + + +@pytest.mark.asyncio(scope="session") +async def test_block_credit_reset(server: SpinTestServer): + await disable_test_user_transactions() + month1 = 1 + month2 = 2 + + # set the calendar to month 2 but use current time from now + user_credit.time_now = lambda: datetime.now(timezone.utc).replace( + month=month2, day=1 + ) + month2credit = await user_credit.get_credits(DEFAULT_USER_ID) + + # Month 1 result should only affect month 1 + user_credit.time_now = lambda: datetime.now(timezone.utc).replace( + month=month1, day=1 + ) + month1credit = await user_credit.get_credits(DEFAULT_USER_ID) + await top_up(100) + assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100 + + # Month 2 balance is unaffected + user_credit.time_now = lambda: datetime.now(timezone.utc).replace( + month=month2, day=1 + ) + assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit + + 
+@pytest.mark.asyncio(scope="session") +async def test_credit_refill(server: SpinTestServer): + await disable_test_user_transactions() + balance = await user_credit.get_credits(DEFAULT_USER_ID) + assert balance == REFILL_VALUE diff --git a/autogpt_platform/backend/test/data/test_graph.py b/autogpt_platform/backend/test/data/test_graph.py new file mode 100644 index 000000000000..3141f62ec161 --- /dev/null +++ b/autogpt_platform/backend/test/data/test_graph.py @@ -0,0 +1,297 @@ +from typing import Any +from uuid import UUID + +import autogpt_libs.auth.models +import fastapi.exceptions +import pytest + +import backend.server.v2.store.model as store +from backend.blocks.basic import AgentInputBlock, AgentOutputBlock, StoreValueBlock +from backend.data.block import BlockSchema +from backend.data.graph import Graph, Link, Node +from backend.data.model import SchemaField +from backend.data.user import DEFAULT_USER_ID +from backend.server.model import CreateGraph +from backend.usecases.sample import create_test_user +from backend.util.test import SpinTestServer + + +@pytest.mark.asyncio(scope="session") +async def test_graph_creation(server: SpinTestServer): + """ + Test the creation of a graph with nodes and links. + + This test ensures that: + 1. A graph can be successfully created with valid connections. + 2. The created graph has the correct structure and properties. + + Args: + server (SpinTestServer): The test server instance. + """ + value_block = StoreValueBlock().id + input_block = AgentInputBlock().id + + graph = Graph( + id="test_graph", + name="TestGraph", + description="Test graph", + nodes=[ + Node(id="node_1", block_id=value_block), + Node(id="node_2", block_id=input_block, input_default={"name": "input"}), + Node(id="node_3", block_id=value_block), + ], + links=[ + Link( + source_id="node_1", + sink_id="node_2", + source_name="output", + sink_name="name", + ), + ], + ) + create_graph = CreateGraph(graph=graph) + created_graph = await server.agent_server.test_create_graph( + create_graph, DEFAULT_USER_ID + ) + + assert UUID(created_graph.id) + assert created_graph.name == "TestGraph" + + assert len(created_graph.nodes) == 3 + assert UUID(created_graph.nodes[0].id) + assert UUID(created_graph.nodes[1].id) + assert UUID(created_graph.nodes[2].id) + + nodes = created_graph.nodes + links = created_graph.links + assert len(links) == 1 + assert links[0].source_id != links[0].sink_id + assert links[0].source_id in {nodes[0].id, nodes[1].id, nodes[2].id} + assert links[0].sink_id in {nodes[0].id, nodes[1].id, nodes[2].id} + + +@pytest.mark.asyncio(scope="session") +async def test_get_input_schema(server: SpinTestServer): + """ + Test the get_input_schema method of a created graph. + + This test ensures that: + 1. A graph can be created with a single node. + 2. The input schema of the created graph is correctly generated. + 3. The input schema contains the expected input name and node id. + + Args: + server (SpinTestServer): The test server instance. 
+ """ + value_block = StoreValueBlock().id + input_block = AgentInputBlock().id + output_block = AgentOutputBlock().id + + graph = Graph( + name="TestInputSchema", + description="Test input schema", + nodes=[ + Node( + id="node_0_a", + block_id=input_block, + input_default={ + "name": "in_key_a", + "title": "Key A", + "value": "A", + "advanced": True, + }, + metadata={"id": "node_0_a"}, + ), + Node( + id="node_0_b", + block_id=input_block, + input_default={"name": "in_key_b", "advanced": True}, + metadata={"id": "node_0_b"}, + ), + Node(id="node_1", block_id=value_block, metadata={"id": "node_1"}), + Node( + id="node_2", + block_id=output_block, + input_default={ + "name": "out_key", + "description": "This is an output key", + }, + metadata={"id": "node_2"}, + ), + ], + links=[ + Link( + source_id="node_0_a", + sink_id="node_1", + source_name="result", + sink_name="input", + ), + Link( + source_id="node_0_b", + sink_id="node_1", + source_name="result", + sink_name="input", + ), + Link( + source_id="node_1", + sink_id="node_2", + source_name="output", + sink_name="value", + ), + ], + ) + + create_graph = CreateGraph(graph=graph) + created_graph = await server.agent_server.test_create_graph( + create_graph, DEFAULT_USER_ID + ) + + class ExpectedInputSchema(BlockSchema): + in_key_a: Any = SchemaField(title="Key A", default="A", advanced=True) + in_key_b: Any = SchemaField(title="in_key_b", advanced=False) + + class ExpectedOutputSchema(BlockSchema): + out_key: Any = SchemaField( + description="This is an output key", + title="out_key", + advanced=False, + ) + + input_schema = created_graph.input_schema + input_schema["title"] = "ExpectedInputSchema" + assert input_schema == ExpectedInputSchema.jsonschema() + + output_schema = created_graph.output_schema + output_schema["title"] = "ExpectedOutputSchema" + assert output_schema == ExpectedOutputSchema.jsonschema() + + +@pytest.mark.asyncio(scope="session") +async def test_clean_graph(server: SpinTestServer): + """ + Test the clean_graph function that: + 1. Clears input block values + 2. Removes credentials from nodes + """ + # Create a graph with input blocks and credentials + graph = Graph( + id="test_clean_graph", + name="Test Clean Graph", + description="Test graph cleaning", + nodes=[ + Node( + id="input_node", + block_id=AgentInputBlock().id, + input_default={ + "name": "test_input", + "value": "test value", + "description": "Test input description", + }, + ), + ], + links=[], + ) + + # Create graph and get model + create_graph = CreateGraph(graph=graph) + created_graph = await server.agent_server.test_create_graph( + create_graph, DEFAULT_USER_ID + ) + + # Clean the graph + created_graph.clean_graph() + + # # Verify input block value is cleared + input_node = next( + n for n in created_graph.nodes if n.block_id == AgentInputBlock().id + ) + assert input_node.input_default["value"] == "" + + +@pytest.mark.asyncio(scope="session") +async def test_access_store_listing_graph(server: SpinTestServer): + """ + Test the access of a store listing graph. 
+ """ + graph = Graph( + id="test_clean_graph", + name="Test Clean Graph", + description="Test graph cleaning", + nodes=[ + Node( + id="input_node", + block_id=AgentInputBlock().id, + input_default={ + "name": "test_input", + "value": "test value", + "description": "Test input description", + }, + ), + ], + links=[], + ) + + # Create graph and get model + create_graph = CreateGraph(graph=graph) + created_graph = await server.agent_server.test_create_graph( + create_graph, DEFAULT_USER_ID + ) + + store_submission_request = store.StoreSubmissionRequest( + agent_id=created_graph.id, + agent_version=created_graph.version, + slug="test-slug", + name="Test name", + sub_heading="Test sub heading", + video_url=None, + image_urls=[], + description="Test description", + categories=[], + ) + + # First we check the graph an not be accessed by a different user + with pytest.raises(fastapi.exceptions.HTTPException) as exc_info: + await server.agent_server.test_get_graph( + created_graph.id, + created_graph.version, + "3e53486c-cf57-477e-ba2a-cb02dc828e1b", + ) + assert exc_info.value.status_code == 404 + assert "Graph" in str(exc_info.value.detail) + + # Now we create a store listing + store_listing = await server.agent_server.test_create_store_listing( + store_submission_request, DEFAULT_USER_ID + ) + + if isinstance(store_listing, fastapi.responses.JSONResponse): + assert False, "Failed to create store listing" + + slv_id = ( + store_listing.store_listing_version_id + if store_listing.store_listing_version_id is not None + else None + ) + + assert slv_id is not None + + admin_user = await create_test_user(alt_user=True) + await server.agent_server.test_review_store_listing( + store.ReviewSubmissionRequest( + store_listing_version_id=slv_id, + is_approved=True, + comments="Test comments", + ), + autogpt_libs.auth.models.User( + user_id=admin_user.id, + role="admin", + email=admin_user.email, + phone_number="1234567890", + ), + ) + + # Now we check the graph can be accessed by a user that does not own the graph + got_graph = await server.agent_server.test_get_graph( + created_graph.id, created_graph.version, "3e53486c-cf57-477e-ba2a-cb02dc828e1b" + ) + assert got_graph is not None diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py new file mode 100644 index 000000000000..d98c89be33e0 --- /dev/null +++ b/autogpt_platform/backend/test/executor/test_manager.py @@ -0,0 +1,359 @@ +import logging + +import autogpt_libs.auth.models +import fastapi.responses +import pytest +from prisma.models import User + +import backend.server.v2.store.model +from backend.blocks.basic import FindInDictionaryBlock, StoreValueBlock +from backend.blocks.maths import CalculatorBlock, Operation +from backend.data import execution, graph +from backend.server.model import CreateGraph +from backend.server.rest_api import AgentServer +from backend.usecases.sample import create_test_graph, create_test_user +from backend.util.test import SpinTestServer, wait_execution + +logger = logging.getLogger(__name__) + + +async def create_graph(s: SpinTestServer, g: graph.Graph, u: User) -> graph.Graph: + logger.info(f"Creating graph for user {u.id}") + return await s.agent_server.test_create_graph(CreateGraph(graph=g), u.id) + + +async def execute_graph( + agent_server: AgentServer, + test_graph: graph.Graph, + test_user: User, + input_data: dict, + num_execs: int = 4, +) -> str: + logger.info(f"Executing graph {test_graph.id} for user {test_user.id}") + 
logger.info(f"Input data: {input_data}") + + # --- Test adding new executions --- # + response = await agent_server.test_execute_graph( + user_id=test_user.id, + graph_id=test_graph.id, + graph_version=test_graph.version, + node_input=input_data, + ) + graph_exec_id = response["id"] + logger.info(f"Created execution with ID: {graph_exec_id}") + + # Execution queue should be empty + logger.info("Waiting for execution to complete...") + result = await wait_execution(test_user.id, test_graph.id, graph_exec_id, 30) + logger.info(f"Execution completed with {len(result)} results") + assert len(result) == num_execs + return graph_exec_id + + +async def assert_sample_graph_executions( + agent_server: AgentServer, + test_graph: graph.Graph, + test_user: User, + graph_exec_id: str, +): + logger.info(f"Checking execution results for graph {test_graph.id}") + executions = await agent_server.test_get_graph_run_node_execution_results( + test_graph.id, + graph_exec_id, + test_user.id, + ) + + output_list = [{"result": ["Hello"]}, {"result": ["World"]}] + input_list = [ + { + "name": "input_1", + "value": "Hello", + }, + { + "name": "input_2", + "value": "World", + }, + ] + + # Executing StoreValueBlock + exec = executions[0] + logger.info(f"Checking first StoreValueBlock execution: {exec}") + assert exec.status == execution.ExecutionStatus.COMPLETED + assert exec.graph_exec_id == graph_exec_id + assert ( + exec.output_data in output_list + ), f"Output data: {exec.output_data} and {output_list}" + assert ( + exec.input_data in input_list + ), f"Input data: {exec.input_data} and {input_list}" + assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id] + + # Executing StoreValueBlock + exec = executions[1] + logger.info(f"Checking second StoreValueBlock execution: {exec}") + assert exec.status == execution.ExecutionStatus.COMPLETED + assert exec.graph_exec_id == graph_exec_id + assert ( + exec.output_data in output_list + ), f"Output data: {exec.output_data} and {output_list}" + assert ( + exec.input_data in input_list + ), f"Input data: {exec.input_data} and {input_list}" + assert exec.node_id in [test_graph.nodes[0].id, test_graph.nodes[1].id] + + # Executing FillTextTemplateBlock + exec = executions[2] + logger.info(f"Checking FillTextTemplateBlock execution: {exec}") + assert exec.status == execution.ExecutionStatus.COMPLETED + assert exec.graph_exec_id == graph_exec_id + assert exec.output_data == {"output": ["Hello, World!!!"]} + assert exec.input_data == { + "format": "{{a}}, {{b}}{{c}}", + "values": {"a": "Hello", "b": "World", "c": "!!!"}, + "values_#_a": "Hello", + "values_#_b": "World", + "values_#_c": "!!!", + } + assert exec.node_id == test_graph.nodes[2].id + + # Executing PrintToConsoleBlock + exec = executions[3] + logger.info(f"Checking PrintToConsoleBlock execution: {exec}") + assert exec.status == execution.ExecutionStatus.COMPLETED + assert exec.graph_exec_id == graph_exec_id + assert exec.output_data == {"status": ["printed"]} + assert exec.input_data == {"text": "Hello, World!!!"} + assert exec.node_id == test_graph.nodes[3].id + + +@pytest.mark.asyncio(scope="session") +async def test_agent_execution(server: SpinTestServer): + logger.info("Starting test_agent_execution") + test_user = await create_test_user() + test_graph = await create_graph(server, create_test_graph(), test_user) + data = {"node_input": {"input_1": "Hello", "input_2": "World"}} + graph_exec_id = await execute_graph( + server.agent_server, + test_graph, + test_user, + data, + 4, + ) + await 
assert_sample_graph_executions( + server.agent_server, test_graph, test_user, graph_exec_id + ) + logger.info("Completed test_agent_execution") + + +@pytest.mark.asyncio(scope="session") +async def test_input_pin_always_waited(server: SpinTestServer): + """ + This test is asserting that the input pin should always be waited for the execution, + even when default value on that pin is defined, the value has to be ignored. + + Test scenario: + StoreValueBlock1 + \\ input + >------- FindInDictionaryBlock | input_default: key: "", input: {} + // key + StoreValueBlock2 + """ + logger.info("Starting test_input_pin_always_waited") + nodes = [ + graph.Node( + block_id=StoreValueBlock().id, + input_default={"input": {"key1": "value1", "key2": "value2"}}, + ), + graph.Node( + block_id=StoreValueBlock().id, + input_default={"input": "key2"}, + ), + graph.Node( + block_id=FindInDictionaryBlock().id, + input_default={"key": "", "input": {}}, + ), + ] + links = [ + graph.Link( + source_id=nodes[0].id, + sink_id=nodes[2].id, + source_name="output", + sink_name="input", + ), + graph.Link( + source_id=nodes[1].id, + sink_id=nodes[2].id, + source_name="output", + sink_name="key", + ), + ] + test_graph = graph.Graph( + name="TestGraph", + description="Test graph", + nodes=nodes, + links=links, + ) + test_user = await create_test_user() + test_graph = await create_graph(server, test_graph, test_user) + graph_exec_id = await execute_graph( + server.agent_server, test_graph, test_user, {}, 3 + ) + + logger.info("Checking execution results") + executions = await server.agent_server.test_get_graph_run_node_execution_results( + test_graph.id, graph_exec_id, test_user.id + ) + assert len(executions) == 3 + # FindInDictionaryBlock should wait for the input pin to be provided, + # Hence executing extraction of "key" from {"key1": "value1", "key2": "value2"} + assert executions[2].status == execution.ExecutionStatus.COMPLETED + assert executions[2].output_data == {"output": ["value2"]} + logger.info("Completed test_input_pin_always_waited") + + +@pytest.mark.asyncio(scope="session") +async def test_static_input_link_on_graph(server: SpinTestServer): + """ + This test is asserting the behaviour of static input link, e.g: reusable input link. + + Test scenario: + *StoreValueBlock1*===a=========\\ + *StoreValueBlock2*===a=====\\ || + *StoreValueBlock3*===a===*MathBlock*====b / static====*StoreValueBlock5* + *StoreValueBlock4*=========================================// + + In this test, there will be three input waiting in the MathBlock input pin `a`. + And later, another output is produced on input pin `b`, which is a static link, + this input will complete the input of those three incomplete executions. 
+ """ + logger.info("Starting test_static_input_link_on_graph") + nodes = [ + graph.Node(block_id=StoreValueBlock().id, input_default={"input": 4}), # a + graph.Node(block_id=StoreValueBlock().id, input_default={"input": 4}), # a + graph.Node(block_id=StoreValueBlock().id, input_default={"input": 4}), # a + graph.Node(block_id=StoreValueBlock().id, input_default={"input": 5}), # b + graph.Node(block_id=StoreValueBlock().id), + graph.Node( + block_id=CalculatorBlock().id, + input_default={"operation": Operation.ADD.value}, + ), + ] + links = [ + graph.Link( + source_id=nodes[0].id, + sink_id=nodes[5].id, + source_name="output", + sink_name="a", + ), + graph.Link( + source_id=nodes[1].id, + sink_id=nodes[5].id, + source_name="output", + sink_name="a", + ), + graph.Link( + source_id=nodes[2].id, + sink_id=nodes[5].id, + source_name="output", + sink_name="a", + ), + graph.Link( + source_id=nodes[3].id, + sink_id=nodes[4].id, + source_name="output", + sink_name="input", + ), + graph.Link( + source_id=nodes[4].id, + sink_id=nodes[5].id, + source_name="output", + sink_name="b", + is_static=True, # This is the static link to test. + ), + ] + test_graph = graph.Graph( + name="TestGraph", + description="Test graph", + nodes=nodes, + links=links, + ) + test_user = await create_test_user() + test_graph = await create_graph(server, test_graph, test_user) + graph_exec_id = await execute_graph( + server.agent_server, test_graph, test_user, {}, 8 + ) + logger.info("Checking execution results") + executions = await server.agent_server.test_get_graph_run_node_execution_results( + test_graph.id, graph_exec_id, test_user.id + ) + assert len(executions) == 8 + # The last 3 executions will be a+b=4+5=9 + for i, exec_data in enumerate(executions[-3:]): + logger.info(f"Checking execution {i+1} of last 3: {exec_data}") + assert exec_data.status == execution.ExecutionStatus.COMPLETED + assert exec_data.output_data == {"result": [9]} + logger.info("Completed test_static_input_link_on_graph") + + +@pytest.mark.asyncio(scope="session") +async def test_store_listing_graph(server: SpinTestServer): + logger.info("Starting test_agent_execution") + test_user = await create_test_user() + test_graph = await create_graph(server, create_test_graph(), test_user) + + store_submission_request = backend.server.v2.store.model.StoreSubmissionRequest( + agent_id=test_graph.id, + agent_version=test_graph.version, + slug="test-slug", + name="Test name", + sub_heading="Test sub heading", + video_url=None, + image_urls=[], + description="Test description", + categories=[], + ) + + store_listing = await server.agent_server.test_create_store_listing( + store_submission_request, test_user.id + ) + + if isinstance(store_listing, fastapi.responses.JSONResponse): + assert False, "Failed to create store listing" + + slv_id = ( + store_listing.store_listing_version_id + if store_listing.store_listing_version_id is not None + else None + ) + + assert slv_id is not None + + admin_user = await create_test_user(alt_user=True) + await server.agent_server.test_review_store_listing( + backend.server.v2.store.model.ReviewSubmissionRequest( + store_listing_version_id=slv_id, + is_approved=True, + comments="Test comments", + ), + autogpt_libs.auth.models.User( + user_id=admin_user.id, + role="admin", + email=admin_user.email, + phone_number="1234567890", + ), + ) + alt_test_user = admin_user + + data = {"node_input": {"input_1": "Hello", "input_2": "World"}} + graph_exec_id = await execute_graph( + server.agent_server, + test_graph, + alt_test_user, + 
data, + 4, + ) + + await assert_sample_graph_executions( + server.agent_server, test_graph, alt_test_user, graph_exec_id + ) + logger.info("Completed test_agent_execution") diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py new file mode 100644 index 000000000000..5e9fbc2bc9a6 --- /dev/null +++ b/autogpt_platform/backend/test/executor/test_scheduler.py @@ -0,0 +1,39 @@ +import pytest + +from backend.data import db +from backend.executor import ExecutionScheduler +from backend.server.model import CreateGraph +from backend.usecases.sample import create_test_graph, create_test_user +from backend.util.service import get_service_client +from backend.util.test import SpinTestServer + + +@pytest.mark.asyncio(scope="session") +async def test_agent_schedule(server: SpinTestServer): + await db.connect() + test_user = await create_test_user() + test_graph = await server.agent_server.test_create_graph( + create_graph=CreateGraph(graph=create_test_graph()), + user_id=test_user.id, + ) + + scheduler = get_service_client(ExecutionScheduler) + schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) + assert len(schedules) == 0 + + schedule = scheduler.add_execution_schedule( + graph_id=test_graph.id, + user_id=test_user.id, + graph_version=1, + cron="0 0 * * *", + input_data={"input": "data"}, + ) + assert schedule + + schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id) + assert len(schedules) == 1 + assert schedules[0].cron == "0 0 * * *" + + scheduler.delete_schedule(schedule.id, user_id=test_user.id) + schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id) + assert len(schedules) == 0 diff --git a/autogpt_platform/backend/test/server/test_con_manager.py b/autogpt_platform/backend/test/server/test_con_manager.py new file mode 100644 index 000000000000..80f9e08f5a48 --- /dev/null +++ b/autogpt_platform/backend/test/server/test_con_manager.py @@ -0,0 +1,118 @@ +from datetime import datetime, timezone +from unittest.mock import AsyncMock + +import pytest +from fastapi import WebSocket + +from backend.data.execution import ExecutionResult, ExecutionStatus +from backend.server.conn_manager import ConnectionManager +from backend.server.model import Methods, WsMessage + + +@pytest.fixture +def connection_manager() -> ConnectionManager: + return ConnectionManager() + + +@pytest.fixture +def mock_websocket() -> AsyncMock: + websocket: AsyncMock = AsyncMock(spec=WebSocket) + websocket.send_text = AsyncMock() + return websocket + + +@pytest.mark.asyncio +async def test_connect( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + await connection_manager.connect(mock_websocket) + assert mock_websocket in connection_manager.active_connections + mock_websocket.accept.assert_called_once() + + +def test_disconnect( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + connection_manager.active_connections.add(mock_websocket) + connection_manager.subscriptions["test_graph"] = {mock_websocket} + + connection_manager.disconnect(mock_websocket) + + assert mock_websocket not in connection_manager.active_connections + assert mock_websocket not in connection_manager.subscriptions["test_graph"] + + +@pytest.mark.asyncio +async def test_subscribe( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + await connection_manager.subscribe("test_graph", mock_websocket) + assert mock_websocket in 
connection_manager.subscriptions["test_graph"] + + +@pytest.mark.asyncio +async def test_unsubscribe( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + connection_manager.subscriptions["test_graph"] = {mock_websocket} + + await connection_manager.unsubscribe("test_graph", mock_websocket) + + assert "test_graph" not in connection_manager.subscriptions + + +@pytest.mark.asyncio +async def test_send_execution_result( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + connection_manager.subscriptions["test_graph"] = {mock_websocket} + result: ExecutionResult = ExecutionResult( + graph_id="test_graph", + graph_version=1, + graph_exec_id="test_exec_id", + node_exec_id="test_node_exec_id", + node_id="test_node_id", + block_id="test_block_id", + status=ExecutionStatus.COMPLETED, + input_data={"input1": "value1"}, + output_data={"output1": ["result1"]}, + add_time=datetime.now(tz=timezone.utc), + queue_time=None, + start_time=datetime.now(tz=timezone.utc), + end_time=datetime.now(tz=timezone.utc), + ) + + await connection_manager.send_execution_result(result) + + mock_websocket.send_text.assert_called_once_with( + WsMessage( + method=Methods.EXECUTION_EVENT, + channel="test_graph", + data=result.model_dump(), + ).model_dump_json() + ) + + +@pytest.mark.asyncio +async def test_send_execution_result_no_subscribers( + connection_manager: ConnectionManager, mock_websocket: AsyncMock +) -> None: + result: ExecutionResult = ExecutionResult( + graph_id="test_graph", + graph_version=1, + graph_exec_id="test_exec_id", + node_exec_id="test_node_exec_id", + node_id="test_node_id", + block_id="test_block_id", + status=ExecutionStatus.COMPLETED, + input_data={"input1": "value1"}, + output_data={"output1": ["result1"]}, + add_time=datetime.now(), + queue_time=None, + start_time=datetime.now(), + end_time=datetime.now(), + ) + + await connection_manager.send_execution_result(result) + + mock_websocket.send_text.assert_not_called() diff --git a/autogpt_platform/backend/test/server/test_ws_api.py b/autogpt_platform/backend/test/server/test_ws_api.py new file mode 100644 index 000000000000..cedcc935b404 --- /dev/null +++ b/autogpt_platform/backend/test/server/test_ws_api.py @@ -0,0 +1,154 @@ +from typing import cast +from unittest.mock import AsyncMock + +import pytest +from fastapi import WebSocket, WebSocketDisconnect + +from backend.server.conn_manager import ConnectionManager +from backend.server.ws_api import ( + Methods, + WsMessage, + handle_subscribe, + handle_unsubscribe, + websocket_router, +) + + +@pytest.fixture +def mock_websocket() -> AsyncMock: + return AsyncMock(spec=WebSocket) + + +@pytest.fixture +def mock_manager() -> AsyncMock: + return AsyncMock(spec=ConnectionManager) + + +@pytest.mark.asyncio +async def test_websocket_router_subscribe( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + mock_websocket.receive_text.side_effect = [ + WsMessage( + method=Methods.SUBSCRIBE, data={"graph_id": "test_graph"} + ).model_dump_json(), + WebSocketDisconnect(), + ] + + await websocket_router( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager) + ) + + mock_manager.connect.assert_called_once_with(mock_websocket) + mock_manager.subscribe.assert_called_once_with("test_graph", mock_websocket) + mock_websocket.send_text.assert_called_once() + assert '"method":"subscribe"' in mock_websocket.send_text.call_args[0][0] + assert '"success":true' in mock_websocket.send_text.call_args[0][0] + 
mock_manager.disconnect.assert_called_once_with(mock_websocket) + + +@pytest.mark.asyncio +async def test_websocket_router_unsubscribe( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + mock_websocket.receive_text.side_effect = [ + WsMessage( + method=Methods.UNSUBSCRIBE, data={"graph_id": "test_graph"} + ).model_dump_json(), + WebSocketDisconnect(), + ] + + await websocket_router( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager) + ) + + mock_manager.connect.assert_called_once_with(mock_websocket) + mock_manager.unsubscribe.assert_called_once_with("test_graph", mock_websocket) + mock_websocket.send_text.assert_called_once() + assert '"method":"unsubscribe"' in mock_websocket.send_text.call_args[0][0] + assert '"success":true' in mock_websocket.send_text.call_args[0][0] + mock_manager.disconnect.assert_called_once_with(mock_websocket) + + +@pytest.mark.asyncio +async def test_websocket_router_invalid_method( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + mock_websocket.receive_text.side_effect = [ + WsMessage(method=Methods.EXECUTION_EVENT).model_dump_json(), + WebSocketDisconnect(), + ] + + await websocket_router( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager) + ) + + mock_manager.connect.assert_called_once_with(mock_websocket) + mock_websocket.send_text.assert_called_once() + assert '"method":"error"' in mock_websocket.send_text.call_args[0][0] + assert '"success":false' in mock_websocket.send_text.call_args[0][0] + mock_manager.disconnect.assert_called_once_with(mock_websocket) + + +@pytest.mark.asyncio +async def test_handle_subscribe_success( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + message = WsMessage(method=Methods.SUBSCRIBE, data={"graph_id": "test_graph"}) + + await handle_subscribe( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager), message + ) + + mock_manager.subscribe.assert_called_once_with("test_graph", mock_websocket) + mock_websocket.send_text.assert_called_once() + assert '"method":"subscribe"' in mock_websocket.send_text.call_args[0][0] + assert '"success":true' in mock_websocket.send_text.call_args[0][0] + + +@pytest.mark.asyncio +async def test_handle_subscribe_missing_data( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + message = WsMessage(method=Methods.SUBSCRIBE) + + await handle_subscribe( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager), message + ) + + mock_manager.subscribe.assert_not_called() + mock_websocket.send_text.assert_called_once() + assert '"method":"error"' in mock_websocket.send_text.call_args[0][0] + assert '"success":false' in mock_websocket.send_text.call_args[0][0] + + +@pytest.mark.asyncio +async def test_handle_unsubscribe_success( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + message = WsMessage(method=Methods.UNSUBSCRIBE, data={"graph_id": "test_graph"}) + + await handle_unsubscribe( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager), message + ) + + mock_manager.unsubscribe.assert_called_once_with("test_graph", mock_websocket) + mock_websocket.send_text.assert_called_once() + assert '"method":"unsubscribe"' in mock_websocket.send_text.call_args[0][0] + assert '"success":true' in mock_websocket.send_text.call_args[0][0] + + +@pytest.mark.asyncio +async def test_handle_unsubscribe_missing_data( + mock_websocket: AsyncMock, mock_manager: AsyncMock +) -> None: + message = WsMessage(method=Methods.UNSUBSCRIBE) + + await 
handle_unsubscribe( + cast(WebSocket, mock_websocket), cast(ConnectionManager, mock_manager), message + ) + + mock_manager.unsubscribe.assert_not_called() + mock_websocket.send_text.assert_called_once() + assert '"method":"error"' in mock_websocket.send_text.call_args[0][0] + assert '"success":false' in mock_websocket.send_text.call_args[0][0] diff --git a/autogpt_platform/backend/test/test_data_creator.py b/autogpt_platform/backend/test/test_data_creator.py new file mode 100644 index 000000000000..d5235043d355 --- /dev/null +++ b/autogpt_platform/backend/test/test_data_creator.py @@ -0,0 +1,435 @@ +import asyncio +import random +from datetime import datetime + +import prisma.enums +from faker import Faker +from prisma import Prisma + +faker = Faker() + +# Constants for data generation limits + +# Base entities +NUM_USERS = 100 # Creates 100 user records +NUM_AGENT_BLOCKS = 100 # Creates 100 agent block templates + +# Per-user entities +MIN_GRAPHS_PER_USER = 1 # Each user will have between 1-5 graphs +MAX_GRAPHS_PER_USER = 5 # Total graphs: 500-2500 (NUM_USERS * MIN/MAX_GRAPHS) + +# Per-graph entities +MIN_NODES_PER_GRAPH = 2 # Each graph will have between 2-5 nodes +MAX_NODES_PER_GRAPH = ( + 5 # Total nodes: 1000-2500 (GRAPHS_PER_USER * NUM_USERS * MIN/MAX_NODES) +) + +# Additional per-user entities +MIN_PRESETS_PER_USER = 1 # Each user will have between 1-2 presets +MAX_PRESETS_PER_USER = 5 # Total presets: 500-2500 (NUM_USERS * MIN/MAX_PRESETS) +MIN_AGENTS_PER_USER = 1 # Each user will have between 1-2 agents +MAX_AGENTS_PER_USER = 10 # Total agents: 500-5000 (NUM_USERS * MIN/MAX_AGENTS) + +# Execution and review records +MIN_EXECUTIONS_PER_GRAPH = 1 # Each graph will have between 1-5 execution records +MAX_EXECUTIONS_PER_GRAPH = ( + 20 # Total executions: 1000-5000 (TOTAL_GRAPHS * MIN/MAX_EXECUTIONS) +) +MIN_REVIEWS_PER_VERSION = 1 # Each version will have between 1-3 reviews +MAX_REVIEWS_PER_VERSION = 5 # Total reviews depends on number of versions created + + +def get_image(): + url = faker.image_url() + while "placekitten.com" in url: + url = faker.image_url() + return url + + +async def main(): + db = Prisma() + await db.connect() + + # Insert Users + print(f"Inserting {NUM_USERS} users") + users = [] + for _ in range(NUM_USERS): + user = await db.user.create( + data={ + "id": str(faker.uuid4()), + "email": faker.unique.email(), + "name": faker.name(), + "metadata": prisma.Json({}), + "integrations": "", + } + ) + users.append(user) + + # Insert AgentBlocks + agent_blocks = [] + print(f"Inserting {NUM_AGENT_BLOCKS} agent blocks") + for _ in range(NUM_AGENT_BLOCKS): + block = await db.agentblock.create( + data={ + "name": f"{faker.word()}_{str(faker.uuid4())[:8]}", + "inputSchema": "{}", + "outputSchema": "{}", + } + ) + agent_blocks.append(block) + + # Insert AgentGraphs + agent_graphs = [] + print(f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER} agent graphs") + for user in users: + for _ in range( + random.randint(MIN_GRAPHS_PER_USER, MAX_GRAPHS_PER_USER) + ): # Adjust the range to create more graphs per user if desired + graph = await db.agentgraph.create( + data={ + "name": faker.sentence(nb_words=3), + "description": faker.text(max_nb_chars=200), + "userId": user.id, + "isActive": True, + "isTemplate": False, + } + ) + agent_graphs.append(graph) + + # Insert AgentNodes + agent_nodes = [] + print( + f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER * MAX_NODES_PER_GRAPH} agent nodes" + ) + for graph in agent_graphs: + num_nodes = random.randint(MIN_NODES_PER_GRAPH, MAX_NODES_PER_GRAPH) 
+ for _ in range(num_nodes): # Create 5 AgentNodes per graph + block = random.choice(agent_blocks) + node = await db.agentnode.create( + data={ + "agentBlockId": block.id, + "agentGraphId": graph.id, + "agentGraphVersion": graph.version, + "constantInput": "{}", + "metadata": "{}", + } + ) + agent_nodes.append(node) + + # Insert AgentPresets + agent_presets = [] + print(f"Inserting {NUM_USERS * MAX_PRESETS_PER_USER} agent presets") + for user in users: + num_presets = random.randint(MIN_PRESETS_PER_USER, MAX_PRESETS_PER_USER) + for _ in range(num_presets): # Create 1 AgentPreset per user + graph = random.choice(agent_graphs) + preset = await db.agentpreset.create( + data={ + "name": faker.sentence(nb_words=3), + "description": faker.text(max_nb_chars=200), + "userId": user.id, + "agentId": graph.id, + "agentVersion": graph.version, + "isActive": True, + } + ) + agent_presets.append(preset) + + # Insert UserAgents + user_agents = [] + print(f"Inserting {NUM_USERS * MAX_AGENTS_PER_USER} user agents") + for user in users: + num_agents = random.randint(MIN_AGENTS_PER_USER, MAX_AGENTS_PER_USER) + for _ in range(num_agents): # Create 1 UserAgent per user + graph = random.choice(agent_graphs) + preset = random.choice(agent_presets) + user_agent = await db.useragent.create( + data={ + "userId": user.id, + "agentId": graph.id, + "agentVersion": graph.version, + "agentPresetId": preset.id, + "isFavorite": random.choice([True, False]), + "isCreatedByUser": random.choice([True, False]), + "isArchived": random.choice([True, False]), + "isDeleted": random.choice([True, False]), + } + ) + user_agents.append(user_agent) + + # Insert AgentGraphExecutions + # Insert AgentGraphExecutions + agent_graph_executions = [] + print( + f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER * MAX_EXECUTIONS_PER_GRAPH} agent graph executions" + ) + graph_execution_data = [] + for graph in agent_graphs: + user = random.choice(users) + num_executions = random.randint( + MIN_EXECUTIONS_PER_GRAPH, MAX_EXECUTIONS_PER_GRAPH + ) + for _ in range(num_executions): + matching_presets = [p for p in agent_presets if p.agentId == graph.id] + preset = ( + random.choice(matching_presets) + if matching_presets and random.random() < 0.5 + else None + ) + + graph_execution_data.append( + { + "agentGraphId": graph.id, + "agentGraphVersion": graph.version, + "userId": user.id, + "executionStatus": prisma.enums.AgentExecutionStatus.COMPLETED, + "startedAt": faker.date_time_this_year(), + "agentPresetId": preset.id if preset else None, + } + ) + + agent_graph_executions = await db.agentgraphexecution.create_many( + data=graph_execution_data + ) + # Need to fetch the created records since create_many doesn't return them + agent_graph_executions = await db.agentgraphexecution.find_many() + + # Insert AgentNodeExecutions + print( + f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER * MAX_EXECUTIONS_PER_GRAPH} agent node executions" + ) + node_execution_data = [] + for execution in agent_graph_executions: + nodes = [ + node for node in agent_nodes if node.agentGraphId == execution.agentGraphId + ] + for node in nodes: + node_execution_data.append( + { + "agentGraphExecutionId": execution.id, + "agentNodeId": node.id, + "executionStatus": prisma.enums.AgentExecutionStatus.COMPLETED, + "addedTime": datetime.now(), + } + ) + + agent_node_executions = await db.agentnodeexecution.create_many( + data=node_execution_data + ) + # Need to fetch the created records since create_many doesn't return them + agent_node_executions = await db.agentnodeexecution.find_many() + 
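
The data-generation constants at the top of this script multiply out per user and per graph; a small hypothetical helper (not part of the seeding script) makes the expected record ranges explicit:

```python
# Hypothetical helper, not part of the seeding script: shows how the MIN_*/MAX_*
# constants above translate into expected record counts per seeding run.
def expected_range(parents: int, min_per_parent: int, max_per_parent: int) -> tuple[int, int]:
    return parents * min_per_parent, parents * max_per_parent


# With NUM_USERS = 100, MIN_GRAPHS_PER_USER = 1, MAX_GRAPHS_PER_USER = 5:
print(expected_range(100, 1, 5))  # (100, 500) agent graphs per run
# Each graph gets 2-5 nodes, so for e.g. 500 graphs:
print(expected_range(500, 2, 5))  # (1000, 2500) agent nodes
```
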
+ # Insert AgentNodeExecutionInputOutput + print( + f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER * MAX_EXECUTIONS_PER_GRAPH} agent node execution input/outputs" + ) + input_output_data = [] + for node_execution in agent_node_executions: + # Input data + input_output_data.append( + { + "name": "input1", + "data": "{}", + "time": datetime.now(), + "referencedByInputExecId": node_execution.id, + } + ) + # Output data + input_output_data.append( + { + "name": "output1", + "data": "{}", + "time": datetime.now(), + "referencedByOutputExecId": node_execution.id, + } + ) + + await db.agentnodeexecutioninputoutput.create_many(data=input_output_data) + + # Insert AgentNodeLinks + print(f"Inserting {NUM_USERS * MAX_GRAPHS_PER_USER} agent node links") + for graph in agent_graphs: + nodes = [node for node in agent_nodes if node.agentGraphId == graph.id] + if len(nodes) >= 2: + source_node = nodes[0] + sink_node = nodes[1] + await db.agentnodelink.create( + data={ + "agentNodeSourceId": source_node.id, + "sourceName": "output1", + "agentNodeSinkId": sink_node.id, + "sinkName": "input1", + "isStatic": False, + } + ) + + # Insert AnalyticsDetails + print(f"Inserting {NUM_USERS} analytics details") + for user in users: + for _ in range(1): + await db.analyticsdetails.create( + data={ + "userId": user.id, + "type": faker.word(), + "data": prisma.Json({}), + "dataIndex": faker.word(), + } + ) + + # Insert AnalyticsMetrics + print(f"Inserting {NUM_USERS} analytics metrics") + for user in users: + for _ in range(1): + await db.analyticsmetrics.create( + data={ + "userId": user.id, + "analyticMetric": faker.word(), + "value": random.uniform(0, 100), + "dataString": faker.word(), + } + ) + + # Insert CreditTransaction (formerly UserBlockCredit) + print(f"Inserting {NUM_USERS} credit transactions") + for user in users: + for _ in range(1): + block = random.choice(agent_blocks) + await db.credittransaction.create( + data={ + "transactionKey": str(faker.uuid4()), + "userId": user.id, + "amount": random.randint(1, 100), + "type": ( + prisma.enums.CreditTransactionType.TOP_UP + if random.random() < 0.5 + else prisma.enums.CreditTransactionType.USAGE + ), + "metadata": prisma.Json({}), + } + ) + + # Insert Profiles + profiles = [] + print(f"Inserting {NUM_USERS} profiles") + for user in users: + profile = await db.profile.create( + data={ + "userId": user.id, + "name": user.name or faker.name(), + "username": faker.unique.user_name(), + "description": faker.text(), + "links": [faker.url() for _ in range(3)], + "avatarUrl": get_image(), + } + ) + profiles.append(profile) + + # Insert StoreListings + store_listings = [] + print(f"Inserting {NUM_USERS} store listings") + for graph in agent_graphs: + user = random.choice(users) + listing = await db.storelisting.create( + data={ + "agentId": graph.id, + "agentVersion": graph.version, + "owningUserId": user.id, + "isApproved": random.choice([True, False]), + } + ) + store_listings.append(listing) + + # Insert StoreListingVersions + store_listing_versions = [] + print(f"Inserting {NUM_USERS} store listing versions") + for listing in store_listings: + graph = [g for g in agent_graphs if g.id == listing.agentId][0] + version = await db.storelistingversion.create( + data={ + "agentId": graph.id, + "agentVersion": graph.version, + "slug": faker.slug(), + "name": graph.name or faker.sentence(nb_words=3), + "subHeading": faker.sentence(), + "videoUrl": faker.url(), + "imageUrls": [get_image() for _ in range(3)], + "description": faker.text(), + "categories": [faker.word() for _ in 
range(3)], + "isFeatured": random.choice([True, False]), + "isAvailable": True, + "isApproved": random.choice([True, False]), + "storeListingId": listing.id, + } + ) + store_listing_versions.append(version) + + # Insert StoreListingReviews + print(f"Inserting {NUM_USERS * MAX_REVIEWS_PER_VERSION} store listing reviews") + for version in store_listing_versions: + # Create a copy of users list and shuffle it to avoid duplicates + available_reviewers = users.copy() + random.shuffle(available_reviewers) + + # Limit number of reviews to available unique reviewers + num_reviews = min( + random.randint(MIN_REVIEWS_PER_VERSION, MAX_REVIEWS_PER_VERSION), + len(available_reviewers), + ) + + # Take only the first num_reviews reviewers + for reviewer in available_reviewers[:num_reviews]: + await db.storelistingreview.create( + data={ + "storeListingVersionId": version.id, + "reviewByUserId": reviewer.id, + "score": random.randint(1, 5), + "comments": faker.text(), + } + ) + + # Insert StoreListingSubmissions + print(f"Inserting {NUM_USERS} store listing submissions") + for listing in store_listings: + version = random.choice(store_listing_versions) + reviewer = random.choice(users) + status: prisma.enums.SubmissionStatus = random.choice( + [ + prisma.enums.SubmissionStatus.PENDING, + prisma.enums.SubmissionStatus.APPROVED, + prisma.enums.SubmissionStatus.REJECTED, + ] + ) + await db.storelistingsubmission.create( + data={ + "storeListingId": listing.id, + "storeListingVersionId": version.id, + "reviewerId": reviewer.id, + "Status": status, + "reviewComments": faker.text(), + } + ) + + # Insert APIKeys + print(f"Inserting {NUM_USERS} api keys") + for user in users: + await db.apikey.create( + data={ + "name": faker.word(), + "prefix": str(faker.uuid4())[:8], + "postfix": str(faker.uuid4())[-8:], + "key": str(faker.sha256()), + "status": prisma.enums.APIKeyStatus.ACTIVE, + "permissions": [ + prisma.enums.APIKeyPermission.EXECUTE_GRAPH, + prisma.enums.APIKeyPermission.READ_GRAPH, + ], + "description": faker.text(), + "userId": user.id, + } + ) + + await db.disconnect() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/autogpt_platform/backend/test/util/test_decorator.py b/autogpt_platform/backend/test/util/test_decorator.py new file mode 100644 index 000000000000..de38b747d8ae --- /dev/null +++ b/autogpt_platform/backend/test/util/test_decorator.py @@ -0,0 +1,26 @@ +import time + +from backend.util.decorator import error_logged, time_measured + + +@time_measured +def example_function(a: int, b: int, c: int) -> int: + time.sleep(0.5) + return a + b + c + + +@error_logged +def example_function_with_error(a: int, b: int, c: int) -> int: + raise ValueError("This is a test error") + + +def test_timer_decorator(): + info, res = example_function(1, 2, 3) + assert info.cpu_time >= 0 + assert info.wall_time >= 0.4 + assert res == 6 + + +def test_error_decorator(): + res = example_function_with_error(1, 2, 3) + assert res is None diff --git a/autogpt_platform/backend/test/util/test_request.py b/autogpt_platform/backend/test/util/test_request.py new file mode 100644 index 000000000000..703ef9eb5bb4 --- /dev/null +++ b/autogpt_platform/backend/test/util/test_request.py @@ -0,0 +1,79 @@ +import pytest + +from backend.util.request import validate_url + + +def test_validate_url(): + # Rejected IP ranges + with pytest.raises(ValueError): + validate_url("localhost", []) + + with pytest.raises(ValueError): + validate_url("192.168.1.1", []) + + with pytest.raises(ValueError): + validate_url("127.0.0.1", 
[]) + + with pytest.raises(ValueError): + validate_url("0.0.0.0", []) + + # Normal URLs + assert validate_url("google.com/a?b=c", []) == "http://google.com/a?b=c" + assert validate_url("github.com?key=!@!@", []) == "http://github.com?key=!@!@" + + # Scheme Enforcement + with pytest.raises(ValueError): + validate_url("ftp://example.com", []) + with pytest.raises(ValueError): + validate_url("file://example.com", []) + + # International domain that converts to punycode - should be allowed if public + assert validate_url("http://xn--exmple-cua.com", []) == "http://xn--exmple-cua.com" + # If the domain fails IDNA encoding or is invalid, it should raise an error + with pytest.raises(ValueError): + validate_url("http://exa◌mple.com", []) + + # IPv6 Addresses + with pytest.raises(ValueError): + validate_url("::1", []) # IPv6 loopback should be blocked + with pytest.raises(ValueError): + validate_url("http://[::1]", []) # IPv6 loopback in URL form + + # Suspicious Characters in Hostname + with pytest.raises(ValueError): + validate_url("http://example_underscore.com", []) + with pytest.raises(ValueError): + validate_url("http://exa mple.com", []) # Space in hostname + + # Malformed URLs + with pytest.raises(ValueError): + validate_url("http://", []) # No hostname + with pytest.raises(ValueError): + validate_url("://missing-scheme", []) # Missing proper scheme + + # Trusted Origins + trusted = ["internal-api.company.com", "10.0.0.5"] + assert ( + validate_url("internal-api.company.com", trusted) + == "http://internal-api.company.com" + ) + assert validate_url("10.0.0.5", ["10.0.0.5"]) == "http://10.0.0.5" + + # Special Characters in Path or Query + assert ( + validate_url("example.com/path%20with%20spaces", []) + == "http://example.com/path%20with%20spaces" + ) + + # Backslashes should be replaced with forward slashes + assert ( + validate_url("http://example.com\\backslash", []) + == "http://example.com/backslash" + ) + + # Check defaulting scheme behavior for valid domains + assert validate_url("example.com", []) == "http://example.com" + assert validate_url("https://secure.com", []) == "https://secure.com" + + # Non-ASCII Characters in Query/Fragment + assert validate_url("example.com?param=äöü", []) == "http://example.com?param=äöü" diff --git a/autogpt_platform/backend/test/util/test_retry.py b/autogpt_platform/backend/test/util/test_retry.py new file mode 100644 index 000000000000..d3192f4f9fac --- /dev/null +++ b/autogpt_platform/backend/test/util/test_retry.py @@ -0,0 +1,49 @@ +import asyncio + +import pytest + +from backend.util.retry import conn_retry + + +def test_conn_retry_sync_function(): + retry_count = 0 + + @conn_retry("Test", "Test function", max_retry=2, max_wait=0.1, min_wait=0.1) + def test_function(): + nonlocal retry_count + retry_count -= 1 + if retry_count > 0: + raise ValueError("Test error") + return "Success" + + retry_count = 2 + res = test_function() + assert res == "Success" + + retry_count = 100 + with pytest.raises(ValueError) as e: + test_function() + assert str(e.value) == "Test error" + + +@pytest.mark.asyncio +async def test_conn_retry_async_function(): + retry_count = 0 + + @conn_retry("Test", "Test function", max_retry=2, max_wait=0.1, min_wait=0.1) + async def test_function(): + nonlocal retry_count + await asyncio.sleep(1) + retry_count -= 1 + if retry_count > 0: + raise ValueError("Test error") + return "Success" + + retry_count = 2 + res = await test_function() + assert res == "Success" + + retry_count = 100 + with pytest.raises(ValueError) as e: + await 
test_function() + assert str(e.value) == "Test error" diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py new file mode 100644 index 000000000000..a20810dbb1e5 --- /dev/null +++ b/autogpt_platform/backend/test/util/test_service.py @@ -0,0 +1,38 @@ +import pytest + +from backend.util.service import AppService, expose, get_service_client + +TEST_SERVICE_PORT = 8765 + + +class ServiceTest(AppService): + def __init__(self): + super().__init__() + + @classmethod + def get_port(cls) -> int: + return TEST_SERVICE_PORT + + @expose + def add(self, a: int, b: int) -> int: + return a + b + + @expose + def subtract(self, a: int, b: int) -> int: + return a - b + + @expose + def fun_with_async(self, a: int, b: int) -> int: + async def add_async(a: int, b: int) -> int: + return a + b + + return self.run_and_wait(add_async(a, b)) + + +@pytest.mark.asyncio(scope="session") +async def test_service_creation(server): + with ServiceTest(): + client = get_service_client(ServiceTest) + assert client.add(5, 3) == 8 + assert client.subtract(10, 4) == 6 + assert client.fun_with_async(5, 3) == 8 diff --git a/autogpt_platform/backend/test/util/test_type.py b/autogpt_platform/backend/test/util/test_type.py new file mode 100644 index 000000000000..f9a14d10a00c --- /dev/null +++ b/autogpt_platform/backend/test/util/test_type.py @@ -0,0 +1,32 @@ +from backend.util.type import convert + + +def test_type_conversion(): + assert convert(5.5, int) == 5 + assert convert("5.5", int) == 5 + assert convert([1, 2, 3], int) == 3 + + assert convert("5.5", float) == 5.5 + assert convert(5, float) == 5.0 + + assert convert("True", bool) is True + assert convert("False", bool) is False + + assert convert(5, str) == "5" + assert convert({"a": 1, "b": 2}, str) == '{"a": 1, "b": 2}' + assert convert([1, 2, 3], str) == "[1, 2, 3]" + + assert convert("5", list) == ["5"] + assert convert((1, 2, 3), list) == [1, 2, 3] + assert convert({1, 2, 3}, list) == [1, 2, 3] + + assert convert("5", dict) == {"value": 5} + assert convert('{"a": 1, "b": 2}', dict) == {"a": 1, "b": 2} + assert convert([1, 2, 3], dict) == {0: 1, 1: 2, 2: 3} + assert convert((1, 2, 3), dict) == {0: 1, 1: 2, 2: 3} + + from typing import List + + assert convert("5", List[int]) == [5] + assert convert("[5,4,2]", List[int]) == [5, 4, 2] + assert convert([5, 4, 2], List[str]) == ["5", "4", "2"] diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml new file mode 100644 index 000000000000..e013616db505 --- /dev/null +++ b/autogpt_platform/docker-compose.platform.yml @@ -0,0 +1,176 @@ +services: + migrate: + build: + context: ../ + dockerfile: autogpt_platform/backend/Dockerfile + target: server + command: ["sh", "-c", "poetry run prisma migrate deploy"] + develop: + watch: + - path: ./ + target: autogpt_platform/backend/migrations + action: rebuild + depends_on: + db: + condition: service_healthy + environment: + - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform + networks: + - app-network + restart: on-failure + healthcheck: + test: ["CMD", "poetry", "run", "prisma", "migrate", "status"] + interval: 10s + timeout: 5s + retries: 5 + + redis: + image: redis:latest + command: redis-server --requirepass password + ports: + - "6379:6379" + networks: + - app-network + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + + rest_server: + build: + 
context: ../ + dockerfile: autogpt_platform/backend/Dockerfile + target: server + command: ["python", "-m", "backend.rest"] + develop: + watch: + - path: ./ + target: autogpt_platform/backend/ + action: rebuild + depends_on: + redis: + condition: service_healthy + db: + condition: service_healthy + migrate: + condition: service_completed_successfully + environment: + - SUPABASE_URL=http://kong:8000 + - SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long + - SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q + - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=password + - ENABLE_AUTH=true + - PYRO_HOST=0.0.0.0 + - EXECUTIONSCHEDULER_HOST=rest_server + - EXECUTIONMANAGER_HOST=executor + - FRONTEND_BASE_URL=http://localhost:3000 + - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] + - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! + ports: + - "8006:8006" + - "8003:8003" # execution scheduler + networks: + - app-network + + executor: + build: + context: ../ + dockerfile: autogpt_platform/backend/Dockerfile + target: server + command: ["python", "-m", "backend.exec"] + develop: + watch: + - path: ./ + target: autogpt_platform/backend/ + action: rebuild + depends_on: + redis: + condition: service_healthy + db: + condition: service_healthy + migrate: + condition: service_completed_successfully + environment: + - SUPABASE_URL=http://kong:8000 + - SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long + - SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q + - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=password + - ENABLE_AUTH=true + - PYRO_HOST=0.0.0.0 + - AGENTSERVER_HOST=rest_server + - ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!! 
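
The rest_server and executor services above point at the redis service defined earlier in this file (requirepass password, published on host port 6379). A quick way to confirm the dev stack's Redis is reachable from the host, assuming redis-py is installed; this is a local sanity check only, not part of the compose setup:

```python
# Local sanity check for the dev stack's Redis (credentials come from this compose
# file and are for development only). Assumes `pip install redis`.
import redis

r = redis.Redis(host="localhost", port=6379, password="password")
print(r.ping())  # True once the redis service from this compose file is running
```
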
+ ports: + - "8002:8000" + networks: + - app-network + + websocket_server: + build: + context: ../ + dockerfile: autogpt_platform/backend/Dockerfile + target: server + command: ["python", "-m", "backend.ws"] + develop: + watch: + - path: ./ + target: autogpt_platform/backend/ + action: rebuild + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + migrate: + condition: service_completed_successfully + environment: + - SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long + - DATABASE_URL=postgresql://postgres:your-super-secret-and-long-postgres-password@db:5432/postgres?connect_timeout=60&schema=platform + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=password + - ENABLE_AUTH=true + - PYRO_HOST=0.0.0.0 + - BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"] + + ports: + - "8001:8001" + networks: + - app-network + +# frontend: +# build: +# context: ../ +# dockerfile: autogpt_platform/frontend/Dockerfile +# target: dev +# depends_on: +# db: +# condition: service_healthy +# rest_server: +# condition: service_started +# websocket_server: +# condition: service_started +# migrate: +# condition: service_completed_successfully +# environment: +# - NEXT_PUBLIC_SUPABASE_URL=http://kong:8000 +# - NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +# - DATABASE_URL=postgresql://agpt_user:pass123@postgres:5432/postgres?connect_timeout=60&schema=platform +# - NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api +# - NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws +# - NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market +# - NEXT_PUBLIC_BEHAVE_AS=LOCAL +# ports: +# - "3000:3000" +# networks: +# - app-network + +networks: + app-network: + driver: bridge diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml new file mode 100644 index 000000000000..9166e0e327d0 --- /dev/null +++ b/autogpt_platform/docker-compose.yml @@ -0,0 +1,148 @@ +networks: + app-network: + name: app-network + shared-network: + name: shared-network + +volumes: + db-config: + +x-agpt-services: + &agpt-services + networks: + - app-network + - shared-network + +x-supabase-services: + &supabase-services + networks: + - app-network + - shared-network + +services: + # AGPT services + migrate: + <<: *agpt-services + extends: + file: ./docker-compose.platform.yml + service: migrate + + redis: + <<: *agpt-services + extends: + file: ./docker-compose.platform.yml + service: redis + + rest_server: + <<: *agpt-services + extends: + file: ./docker-compose.platform.yml + service: rest_server + + executor: + <<: *agpt-services + extends: + file: ./docker-compose.platform.yml + service: executor + + websocket_server: + <<: *agpt-services + extends: + file: ./docker-compose.platform.yml + service: websocket_server + +# frontend: +# <<: *agpt-services +# extends: +# file: ./docker-compose.platform.yml +# service: frontend + + # Supabase services + studio: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: studio + + kong: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: kong + + auth: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: auth + environment: + GOTRUE_MAILER_AUTOCONFIRM: true + + rest: + <<: 
*supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: rest + + realtime: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: realtime + + storage: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: storage + + imgproxy: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: imgproxy + + meta: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: meta + + functions: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: functions + + analytics: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: analytics + + db: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: db + vector: + <<: *supabase-services + extends: + file: ./supabase/docker/docker-compose.yml + service: vector + + deps: + <<: *supabase-services + profiles: + - local + image: busybox + command: /bin/true + depends_on: + - studio + - kong + - auth + - meta + - analytics + - db + - vector + - redis diff --git a/autogpt_platform/frontend/.env.example b/autogpt_platform/frontend/.env.example new file mode 100644 index 000000000000..9903c14fe457 --- /dev/null +++ b/autogpt_platform/frontend/.env.example @@ -0,0 +1,27 @@ +NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:8006/auth/callback +NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api +NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws +NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market +NEXT_PUBLIC_LAUNCHDARKLY_ENABLED=false +NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID= +NEXT_PUBLIC_APP_ENV=dev +NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY= + +## Locale settings + +NEXT_PUBLIC_DEFAULT_LOCALE=en +NEXT_PUBLIC_LOCALES=en,es + +## Supabase credentials + +NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000 +NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE + +## OAuth Callback URL +## This should be {domain}/auth/callback +## Only used if you're using Supabase and OAuth +AUTH_CALLBACK_URL=http://localhost:3000/auth/callback +GA_MEASUREMENT_ID=G-FH2XK2W4GN + +# When running locally, set NEXT_PUBLIC_BEHAVE_AS=CLOUD to use the a locally hosted marketplace (as is typical in development, and the cloud deployment), otherwise set it to LOCAL to have the marketplace open in a new tab +NEXT_PUBLIC_BEHAVE_AS=LOCAL diff --git a/autogpt_platform/frontend/.eslintrc.json b/autogpt_platform/frontend/.eslintrc.json new file mode 100644 index 000000000000..bb8b1c099d49 --- /dev/null +++ b/autogpt_platform/frontend/.eslintrc.json @@ -0,0 +1,3 @@ +{ + "extends": ["next/core-web-vitals", "plugin:storybook/recommended"] +} diff --git a/autogpt_platform/frontend/.gitignore b/autogpt_platform/frontend/.gitignore new file mode 100644 index 000000000000..22cda21b7759 --- /dev/null +++ b/autogpt_platform/frontend/.gitignore @@ -0,0 +1,50 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
+ +# dependencies +/node_modules +/.pnp +.pnp.js +.yarn/install-state.gz + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts + +# Sentry Config File +.env.sentry-build-plugin +node_modules/ +/test-results/ +/playwright-report/ +/blob-report/ +/playwright/.cache/ + +*storybook.log +storybook-static +*.ignore.* +*.ign.* +.cursorrules \ No newline at end of file diff --git a/autogpt_platform/frontend/.prettierignore b/autogpt_platform/frontend/.prettierignore new file mode 100644 index 000000000000..1ec91b0d1acb --- /dev/null +++ b/autogpt_platform/frontend/.prettierignore @@ -0,0 +1,4 @@ +node_modules +.next +build +public diff --git a/autogpt_platform/frontend/.prettierrc b/autogpt_platform/frontend/.prettierrc new file mode 100644 index 000000000000..b4bfed3579c4 --- /dev/null +++ b/autogpt_platform/frontend/.prettierrc @@ -0,0 +1,3 @@ +{ + "plugins": ["prettier-plugin-tailwindcss"] +} diff --git a/autogpt_platform/frontend/.storybook/main.ts b/autogpt_platform/frontend/.storybook/main.ts new file mode 100644 index 000000000000..62db72c32018 --- /dev/null +++ b/autogpt_platform/frontend/.storybook/main.ts @@ -0,0 +1,21 @@ +import type { StorybookConfig } from "@storybook/nextjs"; + +const config: StorybookConfig = { + stories: ["../src/**/*.mdx", "../src/**/*.stories.@(js|jsx|mjs|ts|tsx)"], + addons: [ + "@storybook/addon-a11y", + "@storybook/addon-onboarding", + "@storybook/addon-links", + "@storybook/addon-essentials", + "@storybook/addon-interactions", + ], + features: { + experimentalRSC: true, + }, + framework: { + name: "@storybook/nextjs", + options: {}, + }, + staticDirs: ["../public"], +}; +export default config; diff --git a/autogpt_platform/frontend/.storybook/preview.ts b/autogpt_platform/frontend/.storybook/preview.ts new file mode 100644 index 000000000000..b8bef1a320f4 --- /dev/null +++ b/autogpt_platform/frontend/.storybook/preview.ts @@ -0,0 +1,23 @@ +import type { Preview } from "@storybook/react"; +import { initialize, mswLoader } from "msw-storybook-addon"; +import "../src/app/globals.css"; + +// Initialize MSW +initialize(); + +const preview: Preview = { + parameters: { + nextjs: { + appDirectory: true, + }, + controls: { + matchers: { + color: /(background|color)$/i, + date: /Date$/i, + }, + }, + }, + loaders: [mswLoader], +}; + +export default preview; diff --git a/autogpt_platform/frontend/.storybook/test-runner.ts b/autogpt_platform/frontend/.storybook/test-runner.ts new file mode 100644 index 000000000000..fc12dcc48dd0 --- /dev/null +++ b/autogpt_platform/frontend/.storybook/test-runner.ts @@ -0,0 +1,22 @@ +import type { TestRunnerConfig } from "@storybook/test-runner"; +import { injectAxe, checkA11y } from "axe-playwright"; + +/* + * See https://storybook.js.org/docs/writing-tests/test-runner#test-hook-api + * to learn more about the test-runner hooks API. 
+ */ +const config: TestRunnerConfig = { + async preVisit(page) { + await injectAxe(page); + }, + async postVisit(page) { + await checkA11y(page, "#storybook-root", { + detailedReport: true, + detailedReportOptions: { + html: true, + }, + }); + }, +}; + +export default config; diff --git a/autogpt_platform/frontend/Dockerfile b/autogpt_platform/frontend/Dockerfile new file mode 100644 index 000000000000..8d509922120e --- /dev/null +++ b/autogpt_platform/frontend/Dockerfile @@ -0,0 +1,41 @@ +# Base stage for both dev and prod +FROM node:21-alpine AS base +WORKDIR /app +COPY autogpt_platform/frontend/package.json autogpt_platform/frontend/yarn.lock ./ +RUN --mount=type=cache,target=/usr/local/share/.cache yarn install --frozen-lockfile + +# Dev stage +FROM base AS dev +ENV NODE_ENV=development +ENV HOSTNAME=0.0.0.0 +COPY autogpt_platform/frontend/ . +EXPOSE 3000 +CMD ["yarn", "run", "dev", "--hostname", "0.0.0.0"] + +# Build stage for prod +FROM base AS build +COPY autogpt_platform/frontend/ . +ENV SKIP_STORYBOOK_TESTS=true +RUN yarn build + +# Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile +FROM node:21-alpine AS prod +ENV NODE_ENV=production +ENV HOSTNAME=0.0.0.0 +WORKDIR /app + +RUN addgroup --system --gid 1001 nodejs +RUN adduser --system --uid 1001 nextjs + +RUN mkdir .next +RUN chown nextjs:nodejs .next + +COPY --from=build --chown=nextjs:nodejs /app/.next/standalone ./ +COPY --from=build --chown=nextjs:nodejs /app/.next/static ./.next/static + +COPY --from=build /app/public ./public + +USER nextjs + +EXPOSE 3000 +CMD ["node", "server.js"] diff --git a/autogpt_platform/frontend/README.md b/autogpt_platform/frontend/README.md new file mode 100644 index 000000000000..a89d28f406df --- /dev/null +++ b/autogpt_platform/frontend/README.md @@ -0,0 +1,88 @@ +This is the frontend for AutoGPT's next generation + +## Getting Started + +Run the following installation once. + +```bash +npm install +# or +yarn install +# or +pnpm install +# or +bun install +``` + +Next, run the development server: + +```bash +npm run dev +# or +yarn dev +# or +pnpm dev +# or +bun dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. + +You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. + +For subsequent runs, you do not have to `npm install` again. Simply do `npm run dev`. + +If the project is updated via git, you will need to `npm install` after each update. + +This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. + +## Deploy + +TODO + +## Storybook + +Storybook is a powerful development environment for UI components. It allows you to build UI components in isolation, making it easier to develop, test, and document your components independently from your main application. + +### Purpose in the Development Process + +1. **Component Development**: Develop and test UI components in isolation. +2. **Visual Testing**: Easily spot visual regressions. +3. **Documentation**: Automatically document components and their props. +4. **Collaboration**: Share components with your team or stakeholders for feedback. + +### How to Use Storybook + +1. **Start Storybook**: + Run the following command to start the Storybook development server: + + ```bash + npm run storybook + ``` + + This will start Storybook on port 6006. 
Open [http://localhost:6006](http://localhost:6006) in your browser to view your component library. + +2. **Build Storybook**: + To build a static version of Storybook for deployment, use: + + ```bash + npm run build-storybook + ``` + +3. **Running Storybook Tests**: + Storybook tests can be run using: + + ```bash + npm run test-storybook + ``` + + For CI environments, use: + + ```bash + npm run test-storybook:ci + ``` + +4. **Writing Stories**: + Create `.stories.tsx` files alongside your components to define different states and variations of your components. + +By integrating Storybook into our development workflow, we can streamline UI development, improve component reusability, and maintain a consistent design system across the project. diff --git a/autogpt_platform/frontend/components.json b/autogpt_platform/frontend/components.json new file mode 100644 index 000000000000..1d586e3d7101 --- /dev/null +++ b/autogpt_platform/frontend/components.json @@ -0,0 +1,17 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.ts", + "css": "src/app/globals.css", + "baseColor": "neutral", + "cssVariables": false, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils" + } +} diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs new file mode 100644 index 000000000000..6700943d43e2 --- /dev/null +++ b/autogpt_platform/frontend/next.config.mjs @@ -0,0 +1,76 @@ +import { withSentryConfig } from "@sentry/nextjs"; + +/** @type {import('next').NextConfig} */ +const nextConfig = { + images: { + domains: [ + "images.unsplash.com", + "ddz4ak4pa3d19.cloudfront.net", + "upload.wikimedia.org", + "storage.googleapis.com", + + "picsum.photos", // for placeholder images + "dummyimage.com", // for placeholder images + "placekitten.com", // for placeholder images + ], + }, + output: "standalone", + // TODO: Re-enable TypeScript checks once current issues are resolved + typescript: { + ignoreBuildErrors: true, + }, +}; + +export default withSentryConfig(nextConfig, { + // For all available options, see: + // https://github.com/getsentry/sentry-webpack-plugin#options + + org: "significant-gravitas", + project: "builder", + + // Only print logs for uploading source maps in CI + silent: !process.env.CI, + + // For all available options, see: + // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/ + + // Upload a larger set of source maps for prettier stack traces (increases build time) + widenClientFileUpload: true, + + // Automatically annotate React components to show their full name in breadcrumbs and session replay + reactComponentAnnotation: { + enabled: true, + }, + + // Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers. + // This can increase your server load as well as your hosting bill. + // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client- + // side errors will fail. + tunnelRoute: "/store", + + // Hides source maps from generated client bundles + hideSourceMaps: true, + + // Automatically tree-shake Sentry logger statements to reduce bundle size + disableLogger: true, + + // Enables automatic instrumentation of Vercel Cron Monitors. (Does not yet work with App Router route handlers.) 
+ // See the following for more information: + // https://docs.sentry.io/product/crons/ + // https://vercel.com/docs/cron-jobs + automaticVercelMonitors: true, + + async headers() { + return [ + { + source: "/:path*", + headers: [ + { + key: "Document-Policy", + value: "js-profiling", + }, + ], + }, + ]; + }, +}); diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json new file mode 100644 index 000000000000..acaa2d9eedaf --- /dev/null +++ b/autogpt_platform/frontend/package.json @@ -0,0 +1,123 @@ +{ + "name": "frontend", + "version": "0.3.4", + "private": true, + "scripts": { + "dev": "next dev", + "dev:nosentry": "NODE_ENV=development && DISABLE_SENTRY=true && next dev", + "dev:test": "NODE_ENV=test && next dev", + "build": "SKIP_STORYBOOK_TESTS=true next build", + "start": "next start", + "lint": "next lint && prettier --check .", + "format": "prettier --write .", + "type-check": "tsc --noEmit", + "test": "playwright test", + "test-ui": "playwright test --ui", + "gentests": "playwright codegen http://localhost:3000", + "storybook": "storybook dev -p 6006", + "build-storybook": "storybook build", + "test-storybook": "test-storybook", + "test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"npm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && npm run test-storybook\"" + }, + "browserslist": [ + "defaults" + ], + "dependencies": { + "@faker-js/faker": "^9.4.0", + "@hookform/resolvers": "^3.10.0", + "@next/third-parties": "^15.1.6", + "@radix-ui/react-alert-dialog": "^1.1.5", + "@radix-ui/react-avatar": "^1.1.1", + "@radix-ui/react-checkbox": "^1.1.2", + "@radix-ui/react-collapsible": "^1.1.1", + "@radix-ui/react-context-menu": "^2.2.5", + "@radix-ui/react-dialog": "^1.1.2", + "@radix-ui/react-dropdown-menu": "^2.1.5", + "@radix-ui/react-icons": "^1.3.2", + "@radix-ui/react-label": "^2.1.0", + "@radix-ui/react-popover": "^1.1.5", + "@radix-ui/react-radio-group": "^1.2.1", + "@radix-ui/react-scroll-area": "^1.2.1", + "@radix-ui/react-select": "^2.1.5", + "@radix-ui/react-separator": "^1.1.0", + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.1", + "@radix-ui/react-toast": "^1.2.5", + "@radix-ui/react-tooltip": "^1.1.7", + "@sentry/nextjs": "^8", + "@stripe/stripe-js": "^5.5.0", + "@supabase/ssr": "^0.5.2", + "@supabase/supabase-js": "^2.48.1", + "@tanstack/react-table": "^8.20.6", + "@xyflow/react": "^12.4.2", + "ajv": "^8.17.1", + "boring-avatars": "^1.11.2", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "1.0.4", + "cookie": "1.0.2", + "date-fns": "^4.1.0", + "dotenv": "^16.4.7", + "elliptic": "6.6.1", + "embla-carousel-react": "^8.5.2", + "framer-motion": "^11.16.0", + "geist": "^1.3.1", + "launchdarkly-react-client-sdk": "^3.6.0", + "lucide-react": "^0.474.0", + "moment": "^2.30.1", + "next": "^14.2.21", + "next-themes": "^0.4.4", + "react": "^18", + "react-day-picker": "^9.5.1", + "react-dom": "^18", + "react-hook-form": "^7.54.2", + "react-icons": "^5.4.0", + "react-markdown": "^9.0.3", + "react-modal": "^3.16.3", + "react-shepherd": "^6.1.7", + "recharts": "^2.14.1", + "tailwind-merge": "^2.6.0", + "tailwindcss-animate": "^1.0.7", + "uuid": "^11.0.5", + "zod": "^3.23.8" + }, + "devDependencies": { + "@chromatic-com/storybook": "^3.2.4", + "@playwright/test": "^1.50.0", + "@storybook/addon-a11y": "^8.5.2", + "@storybook/addon-essentials": "^8.5.2", + "@storybook/addon-interactions": "^8.5.2", + 
"@storybook/addon-links": "^8.5.2", + "@storybook/addon-onboarding": "^8.5.2", + "@storybook/blocks": "^8.5.2", + "@storybook/nextjs": "^8.5.2", + "@storybook/react": "^8.3.5", + "@storybook/test": "^8.3.5", + "@storybook/test-runner": "^0.21.0", + "@types/negotiator": "^0.6.3", + "@types/node": "^22.10.10", + "@types/react": "^18", + "@types/react-dom": "^18", + "@types/react-modal": "^3.16.3", + "axe-playwright": "^2.0.3", + "chromatic": "^11.25.1", + "concurrently": "^9.1.2", + "eslint": "^8", + "eslint-config-next": "15.1.6", + "eslint-plugin-storybook": "^0.11.2", + "msw": "^2.7.0", + "msw-storybook-addon": "^2.0.3", + "postcss": "^8", + "prettier": "^3.3.3", + "prettier-plugin-tailwindcss": "^0.6.11", + "storybook": "^8.5.2", + "tailwindcss": "^3.4.17", + "typescript": "^5" + }, + "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e", + "msw": { + "workerDirectory": [ + "public" + ] + } +} diff --git a/autogpt_platform/frontend/playwright.config.ts b/autogpt_platform/frontend/playwright.config.ts new file mode 100644 index 000000000000..ed0bdc445c45 --- /dev/null +++ b/autogpt_platform/frontend/playwright.config.ts @@ -0,0 +1,84 @@ +import { defineConfig, devices } from "@playwright/test"; + +/** + * Read environment variables from file. + * https://github.com/motdotla/dotenv + */ +import dotenv from "dotenv"; +import path from "path"; +dotenv.config({ path: path.resolve(__dirname, ".env") }); +dotenv.config({ path: path.resolve(__dirname, "../backend/.env") }); +/** + * See https://playwright.dev/docs/test-configuration. + */ +export default defineConfig({ + testDir: "./src/tests", + /* Run tests in files in parallel */ + fullyParallel: true, + /* Fail the build on CI if you accidentally left test.only in the source code. */ + forbidOnly: !!process.env.CI, + /* Retry on CI only */ + retries: process.env.CI ? 2 : 0, + /* Opt out of parallel tests on CI. */ + workers: process.env.CI ? 1 : undefined, + /* Reporter to use. See https://playwright.dev/docs/test-reporters */ + reporter: "html", + /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ + use: { + /* Base URL to use in actions like `await page.goto('/')`. */ + baseURL: "http://localhost:3000/", + + /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */ + trace: "on-first-retry", + screenshot: "only-on-failure", + bypassCSP: true, + }, + /* Maximum time one test can run for */ + timeout: 60000, + + /* Configure projects for major browsers */ + projects: [ + { + name: "chromium", + use: { ...devices["Desktop Chrome"] }, + }, + + // { + // name: "firefox", + // use: { ...devices["Desktop Firefox"] }, + // }, + + { + name: "webkit", + use: { ...devices["Desktop Safari"] }, + }, + + // /* Test against mobile viewports. */ + // // { + // // name: 'Mobile Chrome', + // // use: { ...devices['Pixel 5'] }, + // // }, + // // { + // // name: 'Mobile Safari', + // // use: { ...devices['iPhone 12'] }, + // // }, + + // /* Test against branded browsers. 
*/ + // { + // name: "Microsoft Edge", + // use: { ...devices["Desktop Edge"], channel: "msedge" }, + // }, + // { + // name: 'Google Chrome', + // use: { ...devices['Desktop Chrome'], channel: 'chrome' }, + // }, + ], + + /* Run your local dev server before starting the tests */ + webServer: { + command: "npm run build && npm run start", + url: "http://localhost:3000/", + reuseExistingServer: !process.env.CI, + timeout: 120 * 1000, + }, +}); diff --git a/autogpt_platform/frontend/postcss.config.mjs b/autogpt_platform/frontend/postcss.config.mjs new file mode 100644 index 000000000000..1a69fd2a450a --- /dev/null +++ b/autogpt_platform/frontend/postcss.config.mjs @@ -0,0 +1,8 @@ +/** @type {import('postcss-load-config').Config} */ +const config = { + plugins: { + tailwindcss: {}, + }, +}; + +export default config; diff --git a/autogpt_platform/frontend/public/AUTOgpt_Logo_dark.png b/autogpt_platform/frontend/public/AUTOgpt_Logo_dark.png new file mode 100644 index 000000000000..622dd6a544ee Binary files /dev/null and b/autogpt_platform/frontend/public/AUTOgpt_Logo_dark.png differ diff --git a/autogpt_platform/frontend/public/AUTOgpt_Logo_light.png b/autogpt_platform/frontend/public/AUTOgpt_Logo_light.png new file mode 100644 index 000000000000..c5dda2d41bd2 Binary files /dev/null and b/autogpt_platform/frontend/public/AUTOgpt_Logo_light.png differ diff --git a/autogpt_platform/frontend/public/favicon.ico b/autogpt_platform/frontend/public/favicon.ico new file mode 100644 index 000000000000..74db31967107 Binary files /dev/null and b/autogpt_platform/frontend/public/favicon.ico differ diff --git a/autogpt_platform/frontend/public/mockServiceWorker.js b/autogpt_platform/frontend/public/mockServiceWorker.js new file mode 100644 index 000000000000..ec47a9a50a24 --- /dev/null +++ b/autogpt_platform/frontend/public/mockServiceWorker.js @@ -0,0 +1,307 @@ +/* eslint-disable */ +/* tslint:disable */ + +/** + * Mock Service Worker. + * @see https://github.com/mswjs/msw + * - Please do NOT modify this file. + * - Please do NOT serve this file on production. 
+ */ + +const PACKAGE_VERSION = '2.7.0' +const INTEGRITY_CHECKSUM = '00729d72e3b82faf54ca8b9621dbb96f' +const IS_MOCKED_RESPONSE = Symbol('isMockedResponse') +const activeClientIds = new Set() + +self.addEventListener('install', function () { + self.skipWaiting() +}) + +self.addEventListener('activate', function (event) { + event.waitUntil(self.clients.claim()) +}) + +self.addEventListener('message', async function (event) { + const clientId = event.source.id + + if (!clientId || !self.clients) { + return + } + + const client = await self.clients.get(clientId) + + if (!client) { + return + } + + const allClients = await self.clients.matchAll({ + type: 'window', + }) + + switch (event.data) { + case 'KEEPALIVE_REQUEST': { + sendToClient(client, { + type: 'KEEPALIVE_RESPONSE', + }) + break + } + + case 'INTEGRITY_CHECK_REQUEST': { + sendToClient(client, { + type: 'INTEGRITY_CHECK_RESPONSE', + payload: { + packageVersion: PACKAGE_VERSION, + checksum: INTEGRITY_CHECKSUM, + }, + }) + break + } + + case 'MOCK_ACTIVATE': { + activeClientIds.add(clientId) + + sendToClient(client, { + type: 'MOCKING_ENABLED', + payload: { + client: { + id: client.id, + frameType: client.frameType, + }, + }, + }) + break + } + + case 'MOCK_DEACTIVATE': { + activeClientIds.delete(clientId) + break + } + + case 'CLIENT_CLOSED': { + activeClientIds.delete(clientId) + + const remainingClients = allClients.filter((client) => { + return client.id !== clientId + }) + + // Unregister itself when there are no more clients + if (remainingClients.length === 0) { + self.registration.unregister() + } + + break + } + } +}) + +self.addEventListener('fetch', function (event) { + const { request } = event + + // Bypass navigation requests. + if (request.mode === 'navigate') { + return + } + + // Opening the DevTools triggers the "only-if-cached" request + // that cannot be handled by the worker. Bypass such requests. + if (request.cache === 'only-if-cached' && request.mode !== 'same-origin') { + return + } + + // Bypass all requests when there are no active clients. + // Prevents the self-unregistered worked from handling requests + // after it's been deleted (still remains active until the next reload). + if (activeClientIds.size === 0) { + return + } + + // Generate unique request ID. + const requestId = crypto.randomUUID() + event.respondWith(handleRequest(event, requestId)) +}) + +async function handleRequest(event, requestId) { + const client = await resolveMainClient(event) + const response = await getResponse(event, client, requestId) + + // Send back the response clone for the "response:*" life-cycle events. + // Ensure MSW is active and ready to handle the message, otherwise + // this message will pend indefinitely. + if (client && activeClientIds.has(client.id)) { + ;(async function () { + const responseClone = response.clone() + + sendToClient( + client, + { + type: 'RESPONSE', + payload: { + requestId, + isMockedResponse: IS_MOCKED_RESPONSE in response, + type: responseClone.type, + status: responseClone.status, + statusText: responseClone.statusText, + body: responseClone.body, + headers: Object.fromEntries(responseClone.headers.entries()), + }, + }, + [responseClone.body], + ) + })() + } + + return response +} + +// Resolve the main client for the given event. +// Client that issues a request doesn't necessarily equal the client +// that registered the worker. It's with the latter the worker should +// communicate with during the response resolving phase. 
+async function resolveMainClient(event) { + const client = await self.clients.get(event.clientId) + + if (activeClientIds.has(event.clientId)) { + return client + } + + if (client?.frameType === 'top-level') { + return client + } + + const allClients = await self.clients.matchAll({ + type: 'window', + }) + + return allClients + .filter((client) => { + // Get only those clients that are currently visible. + return client.visibilityState === 'visible' + }) + .find((client) => { + // Find the client ID that's recorded in the + // set of clients that have registered the worker. + return activeClientIds.has(client.id) + }) +} + +async function getResponse(event, client, requestId) { + const { request } = event + + // Clone the request because it might've been already used + // (i.e. its body has been read and sent to the client). + const requestClone = request.clone() + + function passthrough() { + // Cast the request headers to a new Headers instance + // so the headers can be manipulated with. + const headers = new Headers(requestClone.headers) + + // Remove the "accept" header value that marked this request as passthrough. + // This prevents request alteration and also keeps it compliant with the + // user-defined CORS policies. + const acceptHeader = headers.get('accept') + if (acceptHeader) { + const values = acceptHeader.split(',').map((value) => value.trim()) + const filteredValues = values.filter( + (value) => value !== 'msw/passthrough', + ) + + if (filteredValues.length > 0) { + headers.set('accept', filteredValues.join(', ')) + } else { + headers.delete('accept') + } + } + + return fetch(requestClone, { headers }) + } + + // Bypass mocking when the client is not active. + if (!client) { + return passthrough() + } + + // Bypass initial page load requests (i.e. static assets). + // The absence of the immediate/parent client in the map of the active clients + // means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet + // and is not ready to handle requests. + if (!activeClientIds.has(client.id)) { + return passthrough() + } + + // Notify the client that a request has been intercepted. + const requestBuffer = await request.arrayBuffer() + const clientMessage = await sendToClient( + client, + { + type: 'REQUEST', + payload: { + id: requestId, + url: request.url, + mode: request.mode, + method: request.method, + headers: Object.fromEntries(request.headers.entries()), + cache: request.cache, + credentials: request.credentials, + destination: request.destination, + integrity: request.integrity, + redirect: request.redirect, + referrer: request.referrer, + referrerPolicy: request.referrerPolicy, + body: requestBuffer, + keepalive: request.keepalive, + }, + }, + [requestBuffer], + ) + + switch (clientMessage.type) { + case 'MOCK_RESPONSE': { + return respondWithMock(clientMessage.data) + } + + case 'PASSTHROUGH': { + return passthrough() + } + } + + return passthrough() +} + +function sendToClient(client, message, transferrables = []) { + return new Promise((resolve, reject) => { + const channel = new MessageChannel() + + channel.port1.onmessage = (event) => { + if (event.data && event.data.error) { + return reject(event.data.error) + } + + resolve(event.data) + } + + client.postMessage( + message, + [channel.port2].concat(transferrables.filter(Boolean)), + ) + }) +} + +async function respondWithMock(response) { + // Setting response status code to 0 is a no-op. + // However, when responding with a "Response.error()", the produced Response + // instance will have status code set to 0. 
Since it's not possible to create + // a Response instance with status code 0, handle that use-case separately. + if (response.status === 0) { + return Response.error() + } + + const mockedResponse = new Response(response.body, response) + + Reflect.defineProperty(mockedResponse, IS_MOCKED_RESPONSE, { + value: true, + enumerable: true, + }) + + return mockedResponse +} diff --git a/autogpt_platform/frontend/sentry.client.config.ts b/autogpt_platform/frontend/sentry.client.config.ts new file mode 100644 index 000000000000..bbfe73ea514e --- /dev/null +++ b/autogpt_platform/frontend/sentry.client.config.ts @@ -0,0 +1,51 @@ +// This file configures the initialization of Sentry on the client. +// The config you add here will be used whenever a users loads a page in their browser. +// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from "@sentry/nextjs"; + +Sentry.init({ + dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288", + + enabled: process.env.DISABLE_SENTRY !== "true", + + // Add optional integrations for additional features + integrations: [ + Sentry.replayIntegration(), + Sentry.httpClientIntegration(), + Sentry.replayCanvasIntegration(), + Sentry.reportingObserverIntegration(), + Sentry.browserProfilingIntegration(), + // Sentry.feedbackIntegration({ + // // Additional SDK configuration goes in here, for example: + // colorScheme: "system", + // }), + ], + + // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + + // Set `tracePropagationTargets` to control for which URLs trace propagation should be enabled + tracePropagationTargets: [ + "localhost", + /^https:\/\/dev\-builder\.agpt\.co\/api/, + ], + + // Define how likely Replay events are sampled. + // This sets the sample rate to be 10%. You may want this to be 100% while + // in development and sample at a lower rate in production + replaysSessionSampleRate: 0.1, + + // Define how likely Replay events are sampled when an error occurs. + replaysOnErrorSampleRate: 1.0, + + // Setting this option to true will print useful information to the console while you're setting up Sentry. + debug: false, + + // Set profilesSampleRate to 1.0 to profile every transaction. + // Since profilesSampleRate is relative to tracesSampleRate, + // the final profiling rate can be computed as tracesSampleRate * profilesSampleRate + // For example, a tracesSampleRate of 0.5 and profilesSampleRate of 0.5 would + // result in 25% of transactions being profiled (0.5*0.5=0.25) + profilesSampleRate: 1.0, +}); diff --git a/autogpt_platform/frontend/sentry.edge.config.ts b/autogpt_platform/frontend/sentry.edge.config.ts new file mode 100644 index 000000000000..8a566e17b783 --- /dev/null +++ b/autogpt_platform/frontend/sentry.edge.config.ts @@ -0,0 +1,18 @@ +// This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on). +// The config you add here will be used whenever one of the edge features is loaded. +// Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally. +// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from "@sentry/nextjs"; + +Sentry.init({ + dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288", + + enabled: process.env.NODE_ENV !== "development", + + // Define how likely traces are sampled. 
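The sampling comments in these Sentry configs point to `tracesSampler` as the finer-grained alternative to a fixed `tracesSampleRate`. A rough sketch of what that could look like — illustrative only: the `NEXT_PUBLIC_SENTRY_DSN` variable is hypothetical (the configs in this diff hardcode the DSN), and the exact fields available on the sampling context vary by `@sentry/nextjs` version:

```ts
import * as Sentry from "@sentry/nextjs";

Sentry.init({
  // Hypothetical env var used for the sketch; the real configs inline the DSN.
  dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,

  tracesSampler: (samplingContext) => {
    // Follow the upstream sampling decision when one is propagated.
    if (typeof samplingContext.parentSampled === "boolean") {
      return samplingContext.parentSampled;
    }
    // Otherwise sample everything locally and 10% of traces in production.
    return process.env.NODE_ENV === "production" ? 0.1 : 1.0;
  },
});
```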
Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + + // Setting this option to true will print useful information to the console while you're setting up Sentry. + debug: false, +}); diff --git a/autogpt_platform/frontend/sentry.server.config.ts b/autogpt_platform/frontend/sentry.server.config.ts new file mode 100644 index 000000000000..20f0df5a3983 --- /dev/null +++ b/autogpt_platform/frontend/sentry.server.config.ts @@ -0,0 +1,25 @@ +// This file configures the initialization of Sentry on the server. +// The config you add here will be used whenever the server handles a request. +// https://docs.sentry.io/platforms/javascript/guides/nextjs/ + +import * as Sentry from "@sentry/nextjs"; +// import { NodeProfilingIntegration } from "@sentry/profiling-node"; + +Sentry.init({ + dsn: "https://fe4e4aa4a283391808a5da396da20159@o4505260022104064.ingest.us.sentry.io/4507946746380288", + + enabled: process.env.NODE_ENV !== "development", + + // Define how likely traces are sampled. Adjust this value in production, or use tracesSampler for greater control. + tracesSampleRate: 1, + + // Setting this option to true will print useful information to the console while you're setting up Sentry. + debug: false, + + // Integrations + integrations: [ + Sentry.anrIntegration(), + // NodeProfilingIntegration, + // Sentry.fsIntegration(), + ], +}); diff --git a/autogpt_platform/frontend/src/app/admin/dashboard/page.tsx b/autogpt_platform/frontend/src/app/admin/dashboard/page.tsx new file mode 100644 index 000000000000..a275fb15a903 --- /dev/null +++ b/autogpt_platform/frontend/src/app/admin/dashboard/page.tsx @@ -0,0 +1,18 @@ +import { withRoleAccess } from "@/lib/withRoleAccess"; +import React from "react"; + +function AdminDashboard() { + return ( +
+

Admin Dashboard

+ {/* Add your admin-only content here */} +
+ ); +} + +export default async function AdminDashboardPage() { + "use server"; + const withAdminAccess = await withRoleAccess(["admin"]); + const ProtectedAdminDashboard = await withAdminAccess(AdminDashboard); + return ; +} diff --git a/autogpt_platform/frontend/src/app/admin/layout.tsx b/autogpt_platform/frontend/src/app/admin/layout.tsx new file mode 100644 index 000000000000..0ff36ced3ac9 --- /dev/null +++ b/autogpt_platform/frontend/src/app/admin/layout.tsx @@ -0,0 +1,100 @@ +"use client"; + +import { useState } from "react"; +import Link from "next/link"; +import { BinaryIcon, XIcon } from "lucide-react"; +import { usePathname } from "next/navigation"; // Add this import + +const tabs = [ + { name: "Dashboard", href: "/admin/dashboard" }, + { name: "Marketplace", href: "/admin/marketplace" }, + { name: "Users", href: "/admin/users" }, + { name: "Settings", href: "/admin/settings" }, +]; + +export default function AdminLayout({ + children, +}: { + children: React.ReactNode; +}) { + const pathname = usePathname(); // Get the current pathname + const [activeTab, setActiveTab] = useState(() => { + // Set active tab based on the current route + return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name; + }); + const [mobileMenuOpen, setMobileMenuOpen] = useState(false); + + return ( +
+ + +
+
{children}
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx b/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx new file mode 100644 index 000000000000..0dba6c7e9cab --- /dev/null +++ b/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx @@ -0,0 +1,25 @@ +import { withRoleAccess } from "@/lib/withRoleAccess"; + +import React from "react"; +// import { getReviewableAgents } from "@/components/admin/marketplace/actions"; +// import AdminMarketplaceAgentList from "@/components/admin/marketplace/AdminMarketplaceAgentList"; +// import AdminFeaturedAgentsControl from "@/components/admin/marketplace/AdminFeaturedAgentsControl"; +import { Separator } from "@/components/ui/separator"; +async function AdminMarketplace() { + // const reviewableAgents = await getReviewableAgents(); + + return ( + <> + {/* + + */} + + ); +} + +export default async function AdminDashboardPage() { + "use server"; + const withAdminAccess = await withRoleAccess(["admin"]); + const ProtectedAdminMarketplace = await withAdminAccess(AdminMarketplace); + return ; +} diff --git a/autogpt_platform/frontend/src/app/admin/settings/page.tsx b/autogpt_platform/frontend/src/app/admin/settings/page.tsx new file mode 100644 index 000000000000..e57ea08b3a01 --- /dev/null +++ b/autogpt_platform/frontend/src/app/admin/settings/page.tsx @@ -0,0 +1,18 @@ +import { withRoleAccess } from "@/lib/withRoleAccess"; +import React from "react"; + +function AdminSettings() { + return ( +
+

Admin Settings

+ {/* Add your admin-only settings content here */} +
+ ); +} + +export default async function AdminSettingsPage() { + "use server"; + const withAdminAccess = await withRoleAccess(["admin"]); + const ProtectedAdminSettings = await withAdminAccess(AdminSettings); + return ; +} diff --git a/autogpt_platform/frontend/src/app/admin/users/page.tsx b/autogpt_platform/frontend/src/app/admin/users/page.tsx new file mode 100644 index 000000000000..5c25206cd1a7 --- /dev/null +++ b/autogpt_platform/frontend/src/app/admin/users/page.tsx @@ -0,0 +1,18 @@ +import { withRoleAccess } from "@/lib/withRoleAccess"; +import React from "react"; + +function AdminUsers() { + return ( +
+

Users Dashboard

+ {/* Add your admin-only content here */} +
+ ); +} + +export default async function AdminUsersPage() { + "use server"; + const withAdminAccess = await withRoleAccess(["admin"]); + const ProtectedAdminUsers = await withAdminAccess(AdminUsers); + return ; +} diff --git a/autogpt_platform/frontend/src/app/auth/auth-code-error/page.tsx b/autogpt_platform/frontend/src/app/auth/auth-code-error/page.tsx new file mode 100644 index 000000000000..797cad062846 --- /dev/null +++ b/autogpt_platform/frontend/src/app/auth/auth-code-error/page.tsx @@ -0,0 +1,36 @@ +"use client"; + +import { useEffect, useState } from "react"; + +export default function AuthErrorPage() { + const [errorType, setErrorType] = useState(null); + const [errorCode, setErrorCode] = useState(null); + const [errorDescription, setErrorDescription] = useState(null); + + useEffect(() => { + // This code only runs on the client side + if (typeof window !== "undefined") { + const hash = window.location.hash.substring(1); // Remove the leading '#' + const params = new URLSearchParams(hash); + + setErrorType(params.get("error")); + setErrorCode(params.get("error_code")); + setErrorDescription( + params.get("error_description")?.replace(/\+/g, " ") ?? null, + ); // Replace '+' with space + } + }, []); + + if (!errorType && !errorCode && !errorDescription) { + return
Loading...
; + } + + return ( +
+

Authentication Error

+ {errorType &&

Error Type: {errorType}

} + {errorCode &&

Error Code: {errorCode}

} + {errorDescription &&

Error Description: {errorDescription}

} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/auth/callback/route.ts b/autogpt_platform/frontend/src/app/auth/callback/route.ts new file mode 100644 index 000000000000..ba5bc0db14a2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/auth/callback/route.ts @@ -0,0 +1,36 @@ +import getServerSupabase from "@/lib/supabase/getServerSupabase"; +import { NextResponse } from "next/server"; + +// Handle the callback to complete the user session login +export async function GET(request: Request) { + const { searchParams, origin } = new URL(request.url); + const code = searchParams.get("code"); + // if "next" is in param, use it as the redirect URL + const next = searchParams.get("next") ?? "/"; + + if (code) { + const supabase = getServerSupabase(); + + if (!supabase) { + return NextResponse.redirect(`${origin}/error`); + } + + const { data, error } = await supabase.auth.exchangeCodeForSession(code); + // data.session?.refresh_token is available if you need to store it for later use + if (!error) { + const forwardedHost = request.headers.get("x-forwarded-host"); // original origin before load balancer + const isLocalEnv = process.env.NODE_ENV === "development"; + if (isLocalEnv) { + // we can be sure that there is no load balancer in between, so no need to watch for X-Forwarded-Host + return NextResponse.redirect(`${origin}${next}`); + } else if (forwardedHost) { + return NextResponse.redirect(`https://${forwardedHost}${next}`); + } else { + return NextResponse.redirect(`${origin}${next}`); + } + } + } + + // return the user to an error page with instructions + return NextResponse.redirect(`${origin}/auth/auth-code-error`); +} diff --git a/autogpt_platform/frontend/src/app/auth/confirm/route.ts b/autogpt_platform/frontend/src/app/auth/confirm/route.ts new file mode 100644 index 000000000000..e5d432b8fe8c --- /dev/null +++ b/autogpt_platform/frontend/src/app/auth/confirm/route.ts @@ -0,0 +1,33 @@ +import { type EmailOtpType } from "@supabase/supabase-js"; +import { type NextRequest } from "next/server"; + +import { redirect } from "next/navigation"; +import getServerSupabase from "@/lib/supabase/getServerSupabase"; + +// Email confirmation route +export async function GET(request: NextRequest) { + const { searchParams } = new URL(request.url); + const token_hash = searchParams.get("token_hash"); + const type = searchParams.get("type") as EmailOtpType | null; + const next = searchParams.get("next") ?? "/"; + + if (token_hash && type) { + const supabase = getServerSupabase(); + + if (!supabase) { + redirect("/error"); + } + + const { error } = await supabase.auth.verifyOtp({ + type, + token_hash, + }); + if (!error) { + // redirect user to specified redirect URL or root of app + redirect(next); + } + } + + // redirect the user to an error page with some instructions + redirect("/error"); +} diff --git a/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts b/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts new file mode 100644 index 000000000000..5d4100d48e64 --- /dev/null +++ b/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts @@ -0,0 +1,41 @@ +import { OAuthPopupResultMessage } from "@/components/integrations/credentials-input"; +import { NextResponse } from "next/server"; + +// This route is intended to be used as the callback for integration OAuth flows, +// controlled by the CredentialsInput component. 
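For context on the pop-up flow described in this comment: the route below builds an `OAuthPopupResultMessage` and hands it back to the window that opened the pop-up. The real handling lives in the `CredentialsInput` component, which is not part of this diff; the helper below is only a sketch of how an opener could consume that message, with a local type mirroring the shape used in the route:

```ts
// Sketch only: one way the opening window could wait for the pop-up result.
type OAuthPopupResult =
  | { message_type: "oauth_popup_result"; success: true; code: string; state: string }
  | { message_type: "oauth_popup_result"; success: false; message: string };

function waitForOAuthResult(): Promise<OAuthPopupResult> {
  return new Promise((resolve) => {
    const onMessage = (event: MessageEvent) => {
      // A real implementation should also verify event.origin before trusting the data.
      const data = event.data as Partial<OAuthPopupResult> | undefined;
      if (data?.message_type !== "oauth_popup_result") return;
      window.removeEventListener("message", onMessage);
      resolve(data as OAuthPopupResult);
    };
    window.addEventListener("message", onMessage);
  });
}

// Usage (conceptual): open the provider's login page in a pop-up, then await the callback.
// const popup = window.open(loginUrl, "_blank", "popup=yes");
// const result = await waitForOAuthResult();
```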
The CredentialsInput opens the login +// page in a pop-up window, which then redirects to this route to close the loop. +export async function GET(request: Request) { + const { searchParams, origin } = new URL(request.url); + const code = searchParams.get("code"); + const state = searchParams.get("state"); + + console.debug("OAuth callback received:", { code, state }); + + const message: OAuthPopupResultMessage = + code && state + ? { message_type: "oauth_popup_result", success: true, code, state } + : { + message_type: "oauth_popup_result", + success: false, + message: `Incomplete query: ${searchParams.toString()}`, + }; + + console.debug("Sending message to opener:", message); + + // Return a response with the message as JSON and a script to close the window + return new NextResponse( + ` + + + + + + `, + { + headers: { "Content-Type": "text/html" }, + }, + ); +} diff --git a/autogpt_platform/frontend/src/app/build/page.tsx b/autogpt_platform/frontend/src/app/build/page.tsx new file mode 100644 index 000000000000..8c0ace995fb9 --- /dev/null +++ b/autogpt_platform/frontend/src/app/build/page.tsx @@ -0,0 +1,15 @@ +"use client"; + +import { useSearchParams } from "next/navigation"; +import FlowEditor from "@/components/Flow"; + +export default function Home() { + const query = useSearchParams(); + + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/app/dictionaries/en.json b/autogpt_platform/frontend/src/app/dictionaries/en.json new file mode 100644 index 000000000000..d5f6f3d7798a --- /dev/null +++ b/autogpt_platform/frontend/src/app/dictionaries/en.json @@ -0,0 +1,22 @@ +{ + "auth": { + "signIn": "Sign In", + "email": "Email", + "password": "Password", + "submit": "Submit", + "error": "Invalid login credentials" + }, + "dashboard": { + "welcome": "Welcome to your dashboard", + "stats": "Your Stats", + "recentActivity": "Recent Activity" + }, + "admin": { + "title": "Admin Dashboard", + "users": "Users Management", + "settings": "System Settings" + }, + "home": { + "welcome": "Welcome to the Home Page" + } +} diff --git a/autogpt_platform/frontend/src/app/dictionaries/es.json b/autogpt_platform/frontend/src/app/dictionaries/es.json new file mode 100644 index 000000000000..5f71068e703e --- /dev/null +++ b/autogpt_platform/frontend/src/app/dictionaries/es.json @@ -0,0 +1,22 @@ +{ + "auth": { + "signIn": "Iniciar Sesión", + "email": "Correo electrónico", + "password": "Contraseña", + "submit": "Enviar", + "error": "Credenciales inválidas" + }, + "dashboard": { + "welcome": "Bienvenido a tu panel", + "stats": "Tus Estadísticas", + "recentActivity": "Actividad Reciente" + }, + "admin": { + "title": "Panel de Administración", + "users": "Gestión de Usuarios", + "settings": "Configuración del Sistema" + }, + "home": { + "welcome": "Bienvenido a la Página de Inicio" + } +} diff --git a/autogpt_platform/frontend/src/app/error.tsx b/autogpt_platform/frontend/src/app/error.tsx new file mode 100644 index 000000000000..ce4db030c65e --- /dev/null +++ b/autogpt_platform/frontend/src/app/error.tsx @@ -0,0 +1,43 @@ +"use client"; + +import { useEffect } from "react"; +import { IconCircleAlert } from "@/components/ui/icons"; +import { Button } from "@/components/ui/button"; +import Link from "next/link"; + +export default function Error({ + error, + reset, +}: { + error: Error & { digest?: string }; + reset: () => void; +}) { + useEffect(() => { + console.error(error); + }, [error]); + + return ( +
+
+
+ +
+

+ Oops, something went wrong! +

+

+ We're sorry, but an unexpected error has occurred. Please try + again later or contact support if the issue persists. +

+
+ + +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/global-error.tsx b/autogpt_platform/frontend/src/app/global-error.tsx new file mode 100644 index 000000000000..9388e06e02bd --- /dev/null +++ b/autogpt_platform/frontend/src/app/global-error.tsx @@ -0,0 +1,27 @@ +"use client"; + +import * as Sentry from "@sentry/nextjs"; +import NextError from "next/error"; +import { useEffect } from "react"; + +export default function GlobalError({ + error, +}: { + error: Error & { digest?: string }; +}) { + useEffect(() => { + Sentry.captureException(error); + }, [error]); + + return ( + + + {/* `NextError` is the default Next.js error page component. Its type + definition requires a `statusCode` prop. However, since the App Router + does not expose status codes for errors, we simply pass 0 to render a + generic error message. */} + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css new file mode 100644 index 000000000000..c2998c08d07d --- /dev/null +++ b/autogpt_platform/frontend/src/app/globals.css @@ -0,0 +1,122 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +@layer base { + .font-neue { + font-family: "PP Neue Montreal TT", sans-serif; + } +} + +@layer utilities { + .w-110 { + width: 27.5rem; + } + .h-7\.5 { + height: 1.1875rem; + } + .h-18 { + height: 4.5rem; + } + .h-238 { + height: 14.875rem; + } + .top-158 { + top: 9.875rem; + } + .top-254 { + top: 15.875rem; + } + .top-284 { + top: 17.75rem; + } + .top-360 { + top: 22.5rem; + } + .left-297 { + left: 18.5625rem; + } + .left-34 { + left: 2.125rem; + } +} + +@layer utilities { + .text-balance { + text-wrap: balance; + } +} + +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 240 10% 3.9%; + --card: 0 0% 100%; + --card-foreground: 240 10% 3.9%; + --popover: 0 0% 100%; + --popover-foreground: 240 10% 3.9%; + --primary: 240 5.9% 10%; + --primary-foreground: 0 0% 98%; + --secondary: 240 4.8% 95.9%; + --secondary-foreground: 240 5.9% 10%; + --muted: 240 4.8% 95.9%; + --muted-foreground: 240 3.8% 46.1%; + --accent: 240 4.8% 95.9%; + --accent-foreground: 240 5.9% 10%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 240 5.9% 90%; + --input: 240 5.9% 85%; + --ring: 240 5.9% 10%; + --radius: 0.5rem; + --chart-1: 12 76% 61%; + --chart-2: 173 58% 39%; + --chart-3: 197 37% 24%; + --chart-4: 43 74% 66%; + --chart-5: 27 87% 67%; + } + + .dark { + --background: 240 10% 3.9%; + --foreground: 0 0% 98%; + --card: 240 10% 3.9%; + --card-foreground: 0 0% 98%; + --popover: 240 10% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; + --primary-foreground: 240 5.9% 10%; + --secondary: 240 3.7% 15.9%; + --secondary-foreground: 0 0% 98%; + --muted: 240 3.7% 15.9%; + --muted-foreground: 240 5% 64.9%; + --accent: 240 3.7% 15.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 240 3.7% 15.9%; + --input: 240 3.7% 15.9%; + --ring: 240 4.9% 83.9%; + --chart-1: 220 70% 50%; + --chart-2: 160 60% 45%; + --chart-3: 30 80% 55%; + --chart-4: 280 65% 60%; + --chart-5: 340 75% 55%; + } +} + +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + } + + .agpt-border-input { + @apply border border-input focus-visible:border-gray-400 focus-visible:outline-none; + } + + .agpt-shadow-input { + @apply shadow-sm focus-visible:shadow-md; + } +} diff --git a/autogpt_platform/frontend/src/app/health/page.tsx 
b/autogpt_platform/frontend/src/app/health/page.tsx new file mode 100644 index 000000000000..4586a010b43e --- /dev/null +++ b/autogpt_platform/frontend/src/app/health/page.tsx @@ -0,0 +1,3 @@ +export default function HealthPage() { + return
Yay, I'm healthy
; +} diff --git a/autogpt_platform/frontend/src/app/layout.tsx b/autogpt_platform/frontend/src/app/layout.tsx new file mode 100644 index 000000000000..9f05958370bf --- /dev/null +++ b/autogpt_platform/frontend/src/app/layout.tsx @@ -0,0 +1,117 @@ +import React from "react"; +import type { Metadata } from "next"; +import { Inter, Poppins } from "next/font/google"; +import { Providers } from "@/app/providers"; +import { cn } from "@/lib/utils"; +import { Navbar } from "@/components/agptui/Navbar"; + +import "./globals.css"; +import TallyPopupSimple from "@/components/TallyPopup"; +import { GoogleAnalytics } from "@next/third-parties/google"; +import { Toaster } from "@/components/ui/toaster"; +import { IconType } from "@/components/ui/icons"; +import { GeistSans } from "geist/font/sans"; +import { GeistMono } from "geist/font/mono"; + +// Fonts +const inter = Inter({ subsets: ["latin"], variable: "--font-inter" }); +const poppins = Poppins({ + subsets: ["latin"], + weight: ["400", "500", "600", "700"], + variable: "--font-poppins", +}); + +export const metadata: Metadata = { + title: "NextGen AutoGPT", + description: "Your one stop shop to creating AI Agents", +}; + +export default async function RootLayout({ + children, +}: Readonly<{ + children: React.ReactNode; +}>) { + return ( + + + +
+ +
{children}
+ +
+ +
+ + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/login/actions.ts b/autogpt_platform/frontend/src/app/login/actions.ts new file mode 100644 index 000000000000..ba696c094fd5 --- /dev/null +++ b/autogpt_platform/frontend/src/app/login/actions.ts @@ -0,0 +1,92 @@ +"use server"; +import { revalidatePath } from "next/cache"; +import { redirect } from "next/navigation"; +import { z } from "zod"; +import * as Sentry from "@sentry/nextjs"; +import getServerSupabase from "@/lib/supabase/getServerSupabase"; +import BackendAPI from "@/lib/autogpt-server-api"; +import { loginFormSchema, LoginProvider } from "@/types/auth"; + +export async function logout() { + return await Sentry.withServerActionInstrumentation( + "logout", + {}, + async () => { + const supabase = getServerSupabase(); + + if (!supabase) { + redirect("/error"); + } + + const { error } = await supabase.auth.signOut(); + + if (error) { + console.error("Error logging out", error); + return error.message; + } + + revalidatePath("/", "layout"); + redirect("/login"); + }, + ); +} + +export async function login(values: z.infer) { + return await Sentry.withServerActionInstrumentation("login", {}, async () => { + const supabase = getServerSupabase(); + const api = new BackendAPI(); + + if (!supabase) { + redirect("/error"); + } + + // We are sure that the values are of the correct type because zod validates the form + const { data, error } = await supabase.auth.signInWithPassword(values); + + if (error) { + console.error("Error logging in", error); + return error.message; + } + + await api.createUser(); + + if (data.session) { + await supabase.auth.setSession(data.session); + } + console.log("Logged in"); + revalidatePath("/", "layout"); + redirect("/"); + }); +} + +export async function providerLogin(provider: LoginProvider) { + return await Sentry.withServerActionInstrumentation( + "providerLogin", + {}, + async () => { + const supabase = getServerSupabase(); + const api = new BackendAPI(); + + if (!supabase) { + redirect("/error"); + } + + const { error } = await supabase!.auth.signInWithOAuth({ + provider: provider, + options: { + redirectTo: + process.env.AUTH_CALLBACK_URL ?? 
+ `http://localhost:3000/auth/callback`, + }, + }); + + if (error) { + console.error("Error logging in", error); + return error.message; + } + + await api.createUser(); + console.log("Logged in"); + }, + ); +} diff --git a/autogpt_platform/frontend/src/app/login/page.tsx b/autogpt_platform/frontend/src/app/login/page.tsx new file mode 100644 index 000000000000..797c8ad85e9b --- /dev/null +++ b/autogpt_platform/frontend/src/app/login/page.tsx @@ -0,0 +1,159 @@ +"use client"; +import { login, providerLogin } from "./actions"; +import { + Form, + FormControl, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { useForm } from "react-hook-form"; +import { Input } from "@/components/ui/input"; +import { z } from "zod"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useCallback, useState } from "react"; +import { useRouter } from "next/navigation"; +import Link from "next/link"; +import useSupabase from "@/hooks/useSupabase"; +import Spinner from "@/components/Spinner"; +import { + AuthCard, + AuthHeader, + AuthButton, + AuthFeedback, + AuthBottomText, + PasswordInput, +} from "@/components/auth"; +import { loginFormSchema } from "@/types/auth"; + +export default function LoginPage() { + const { supabase, user, isUserLoading } = useSupabase(); + const [feedback, setFeedback] = useState(null); + const router = useRouter(); + const [isLoading, setIsLoading] = useState(false); + + const form = useForm>({ + resolver: zodResolver(loginFormSchema), + defaultValues: { + email: "", + password: "", + }, + }); + + // TODO: uncomment when we enable social login + // const onProviderLogin = useCallback(async ( + // provider: LoginProvider, + // ) => { + // setIsLoading(true); + // const error = await providerLogin(provider); + // setIsLoading(false); + // if (error) { + // setFeedback(error); + // return; + // } + // setFeedback(null); + // }, [supabase]); + + const onLogin = useCallback( + async (data: z.infer) => { + setIsLoading(true); + + if (!(await form.trigger())) { + setIsLoading(false); + return; + } + + const error = await login(data); + setIsLoading(false); + if (error) { + setFeedback(error); + return; + } + setFeedback(null); + }, + [form], + ); + + if (user) { + console.debug("User exists, redirecting to /"); + router.push("/"); + } + + if (isUserLoading || user) { + return ; + } + + if (!supabase) { + return ( +
+ User accounts are disabled because Supabase client is unavailable +
+ ); + } + + return ( + + Login to your account +
+ + ( + + Email + + + + + + )} + /> + ( + + + Password + + Forgot your password? + + + + + + + + )} + /> + onLogin(form.getValues())} + isLoading={isLoading} + type="submit" + > + Login + + + + + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/api_keys/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/api_keys/page.tsx new file mode 100644 index 000000000000..87f3d58b4eb3 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/api_keys/page.tsx @@ -0,0 +1,11 @@ +import { APIKeysSection } from "@/components/agptui/composite/APIKeySection"; + +const ApiKeysPage = () => { + return ( +
+ +
+ ); +}; + +export default ApiKeysPage; diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/credits/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/credits/page.tsx new file mode 100644 index 000000000000..378f592ef78e --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/credits/page.tsx @@ -0,0 +1,186 @@ +"use client"; +import { useEffect, useCallback } from "react"; +import { Button } from "@/components/agptui/Button"; +import useCredits from "@/hooks/useCredits"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { useSearchParams, useRouter } from "next/navigation"; +import { useToast } from "@/components/ui/use-toast"; + +export default function CreditsPage() { + const api = useBackendAPI(); + const { requestTopUp, autoTopUpConfig, updateAutoTopUpConfig } = useCredits(); + const router = useRouter(); + const searchParams = useSearchParams(); + const topupStatus = searchParams.get("topup") as "success" | "cancel" | null; + const { toast } = useToast(); + + const toastOnFail = useCallback( + (action: string, fn: () => Promise) => { + fn().catch((e) => { + toast({ + title: `Unable to ${action}`, + description: e.message, + variant: "destructive", + duration: 10000, + }); + }); + }, + [toast], + ); + + useEffect(() => { + if (api && topupStatus === "success") { + toastOnFail("fulfill checkout", () => api.fulfillCheckout()); + } + }, [api, topupStatus, toastOnFail]); + + const openBillingPortal = async () => { + toastOnFail("open billing portal", async () => { + const portal = await api.getUserPaymentPortalLink(); + router.push(portal.url); + }); + }; + + const submitTopUp = (e: React.FormEvent) => { + e.preventDefault(); + const form = e.currentTarget; + const amount = parseInt(new FormData(form).get("topUpAmount") as string); + toastOnFail("request top-up", () => requestTopUp(amount)); + }; + + const submitAutoTopUpConfig = (e: React.FormEvent) => { + e.preventDefault(); + const form = e.currentTarget; + const formData = new FormData(form); + const amount = parseInt(formData.get("topUpAmount") as string); + const threshold = parseInt(formData.get("threshold") as string); + toastOnFail("update auto top-up config", () => + updateAutoTopUpConfig(amount, threshold).then(() => { + toast({ title: "Auto top-up config updated! 🎉" }); + }), + ); + }; + + return ( +
+

+ Credits +

+ +
+ {/* Top-up Form */} +
+

Top-up Credits

+ +

+ {topupStatus === "success" && ( + + Your payment was successful. Your credits will be updated + shortly. You can click the refresh icon 🔄 in case it is not + updated. + + )} + {topupStatus === "cancel" && ( + + Payment failed. Your payment method has not been charged. + + )} +

+ +
+
+ + +
+ + +
+ + {/* Auto Top-up Form */} +
+

Auto Top-up Configuration

+ +
+ + +
+ +
+ + +
+ + +
+
+ +
+ {/* Payment Portal */} +

Manage Your Payment Methods

+
+

+ You can manage your cards and see your payment history in the + billing portal. +

+
+ + +
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/dashboard/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/dashboard/page.tsx new file mode 100644 index 000000000000..221e751b40f8 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/dashboard/page.tsx @@ -0,0 +1,136 @@ +"use client"; + +import * as React from "react"; +import { AgentTable } from "@/components/agptui/AgentTable"; +import { AgentTableRowProps } from "@/components/agptui/AgentTableRow"; +import { Button } from "@/components/agptui/Button"; +import { Separator } from "@/components/ui/separator"; +import { StatusType } from "@/components/agptui/Status"; +import { PublishAgentPopout } from "@/components/agptui/composite/PublishAgentPopout"; +import { useCallback, useEffect, useState } from "react"; +import { + StoreSubmissionsResponse, + StoreSubmissionRequest, +} from "@/lib/autogpt-server-api/types"; +import useSupabase from "@/hooks/useSupabase"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +export default function Page({}: {}) { + const { supabase } = useSupabase(); + const api = useBackendAPI(); + const [submissions, setSubmissions] = useState(); + const [openPopout, setOpenPopout] = useState(false); + const [submissionData, setSubmissionData] = + useState(); + const [popoutStep, setPopoutStep] = useState<"select" | "info" | "review">( + "info", + ); + + const fetchData = useCallback(async () => { + try { + const submissions = await api.getStoreSubmissions(); + setSubmissions(submissions); + } catch (error) { + console.error("Error fetching submissions:", error); + } + }, [api]); + + useEffect(() => { + if (!supabase) { + return; + } + fetchData(); + }, [supabase, fetchData]); + + const onEditSubmission = useCallback((submission: StoreSubmissionRequest) => { + setSubmissionData(submission); + setPopoutStep("review"); + setOpenPopout(true); + }, []); + + const onDeleteSubmission = useCallback( + (submission_id: string) => { + if (!supabase) { + return; + } + api.deleteStoreSubmission(submission_id); + fetchData(); + }, + [api, supabase, fetchData], + ); + + const onOpenPopout = useCallback(() => { + setPopoutStep("select"); + setOpenPopout(true); + }, []); + + return ( +
+ {/* Header Section */} +
+
+

+ Agent dashboard +

+
+

+ Submit a New Agent +

+

+ Select from the list of agents you currently have, or upload from + your local machine. +

+
+
+ + Submit agent + + } + openPopout={openPopout} + inputStep={popoutStep} + submissionData={submissionData} + /> +
+ + + + {/* Agents Section */} +
+

+ Your uploaded agents +

+ {submissions && ( + ({ + id: index, + agent_id: submission.agent_id, + agent_version: submission.agent_version, + sub_heading: submission.sub_heading, + date_submitted: submission.date_submitted, + agentName: submission.name, + description: submission.description, + imageSrc: submission.image_urls || [""], + dateSubmitted: new Date( + submission.date_submitted, + ).toLocaleDateString(), + status: submission.status.toLowerCase() as StatusType, + runs: submission.runs, + rating: submission.rating, + })) || [] + } + onEditSubmission={onEditSubmission} + onDeleteSubmission={onDeleteSubmission} + /> + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx new file mode 100644 index 000000000000..f92d63344de6 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx @@ -0,0 +1,232 @@ +"use client"; +import { Button } from "@/components/ui/button"; +import { useRouter } from "next/navigation"; +import { useCallback, useContext, useMemo, useState } from "react"; +import { Separator } from "@/components/ui/separator"; +import { useToast } from "@/components/ui/use-toast"; +import { IconKey, IconUser } from "@/components/ui/icons"; +import { LogOutIcon, Trash2Icon } from "lucide-react"; +import { providerIcons } from "@/components/integrations/credentials-input"; +import { CredentialsProvidersContext } from "@/components/integrations/credentials-provider"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import { CredentialsProviderName } from "@/lib/autogpt-server-api"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import useSupabase from "@/hooks/useSupabase"; +import Spinner from "@/components/Spinner"; + +export default function PrivatePage() { + const { supabase, user, isUserLoading } = useSupabase(); + const router = useRouter(); + const providers = useContext(CredentialsProvidersContext); + const { toast } = useToast(); + + const [confirmationDialogState, setConfirmationDialogState] = useState< + | { + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } + | { open: false } + >({ open: false }); + + const removeCredentials = useCallback( + async ( + provider: CredentialsProviderName, + id: string, + force: boolean = false, + ) => { + if (!providers || !providers[provider]) { + return; + } + + let result; + try { + result = await providers[provider].deleteCredentials(id, force); + } catch (error: any) { + toast({ + title: "Something went wrong when deleting credentials: " + error, + variant: "destructive", + duration: 2000, + }); + setConfirmationDialogState({ open: false }); + return; + } + if (result.deleted) { + if (result.revoked) { + toast({ + title: "Credentials deleted", + duration: 2000, + }); + } else { + toast({ + title: "Credentials deleted from AutoGPT", + description: `You may also manually remove the connection to AutoGPT at ${provider}!`, + duration: 3000, + }); + } + setConfirmationDialogState({ open: false }); + } else if (result.need_confirmation) { + setConfirmationDialogState({ + open: true, + message: result.message, + onConfirm: () => removeCredentials(provider, id, true), + onReject: () => setConfirmationDialogState({ open: false }), + }); + } + }, + [providers, toast], + ); + + //TODO: remove when the way system credentials are handled is updated + // This contains ids for built-in "Use Credits for X" credentials + const hiddenCredentials = useMemo( + () => [ + "744fdc56-071a-4761-b5a5-0af0ce10a2b5", // Ollama + "fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", // Revid + "760f84fc-b270-42de-91f6-08efe1b512d0", // Ideogram + "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate + "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI + "24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic + "4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq + 
"7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID + "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina + "66f20754-1b81-48e4-91d0-f4f0dd82145f", // Unreal Speech + "b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", // Open Router + "6c0f5bd0-9008-4638-9d79-4b40b631803e", // FAL + "96153e04-9c6c-4486-895f-5bb683b1ecec", // Exa + "78d19fd7-4d59-4a16-8277-3ce310acf2b7", // E2B + "96b83908-2789-4dec-9968-18f0ece4ceb3", // Nvidia + "ed55ac19-356e-4243-a6cb-bc599e9b716f", // Mem0 + ], + [], + ); + + if (isUserLoading) { + return ; + } + + if (!user || !supabase) { + router.push("/login"); + return null; + } + + const allCredentials = providers + ? Object.values(providers).flatMap((provider) => + [ + ...provider.savedOAuthCredentials, + ...provider.savedApiKeys, + ...provider.savedUserPasswordCredentials, + ] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { + oauth2: IconUser, + api_key: IconKey, + user_password: IconKey, + }[credentials.type], + })), + ) + : []; + + return ( +
+

Connections & Credentials

+ + + + Provider + Name + Actions + + + + {allCredentials.map((cred) => ( + + +
+ + {cred.providerName} +
+
+ +
+ + {cred.title || cred.username} +
+ + { + { + oauth2: "OAuth2 credentials", + api_key: "API key", + user_password: "Username & password", + }[cred.type] + }{" "} + - {cred.id} + +
+ + + +
+ ))} +
+
+ + + + + Are you sure? + + {confirmationDialogState.open && confirmationDialogState.message} + + + + + confirmationDialogState.open && + confirmationDialogState.onReject() + } + > + Cancel + + + confirmationDialogState.open && + confirmationDialogState.onConfirm() + } + > + Continue + + + + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/layout.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/layout.tsx new file mode 100644 index 000000000000..07b7d3f4b444 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/layout.tsx @@ -0,0 +1,25 @@ +import * as React from "react"; +import { Sidebar } from "@/components/agptui/Sidebar"; + +export default function Layout({ children }: { children: React.ReactNode }) { + const sidebarLinkGroups = [ + { + links: [ + { text: "Creator Dashboard", href: "/marketplace/dashboard" }, + { text: "Agent dashboard", href: "/marketplace/agent-dashboard" }, + { text: "Credits", href: "/marketplace/credits" }, + { text: "Integrations", href: "/marketplace/integrations" }, + { text: "API Keys", href: "/marketplace/api_keys" }, + { text: "Profile", href: "/marketplace/profile" }, + { text: "Settings", href: "/marketplace/settings" }, + ], + }, + ]; + + return ( +
+ +
{children}
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/profile/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/profile/page.tsx new file mode 100644 index 000000000000..c9c475e03220 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/profile/page.tsx @@ -0,0 +1,37 @@ +import * as React from "react"; +import { ProfileInfoForm } from "@/components/agptui/ProfileInfoForm"; +import BackendAPI from "@/lib/autogpt-server-api"; +import { CreatorDetails } from "@/lib/autogpt-server-api/types"; + +async function getProfileData(api: BackendAPI) { + try { + const profile = await api.getStoreProfile("profile"); + return { + profile, + }; + } catch (error) { + console.error("Error fetching profile:", error); + return { + profile: null, + }; + } +} + +export default async function Page({}: {}) { + const api = new BackendAPI(); + const { profile } = await getProfileData(api); + + if (!profile) { + return ( +
+

Please log in to view your profile

+
+ ); + } + + return ( +
+ +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/settings/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/settings/page.tsx new file mode 100644 index 000000000000..761f5bb692fe --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/(user)/settings/page.tsx @@ -0,0 +1,6 @@ +import * as React from "react"; +import { SettingsInputForm } from "@/components/agptui/SettingsInputForm"; + +export default function Page() { + return ; +} diff --git a/autogpt_platform/frontend/src/app/marketplace/agent/[creator]/[slug]/page.tsx b/autogpt_platform/frontend/src/app/marketplace/agent/[creator]/[slug]/page.tsx new file mode 100644 index 000000000000..cd6b44439644 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/agent/[creator]/[slug]/page.tsx @@ -0,0 +1,102 @@ +import BackendAPI from "@/lib/autogpt-server-api"; +import { BreadCrumbs } from "@/components/agptui/BreadCrumbs"; +import { AgentInfo } from "@/components/agptui/AgentInfo"; +import { AgentImages } from "@/components/agptui/AgentImages"; +import { AgentsSection } from "@/components/agptui/composite/AgentsSection"; +import { BecomeACreator } from "@/components/agptui/BecomeACreator"; +import { Separator } from "@/components/ui/separator"; +import { Metadata } from "next"; + +export async function generateMetadata({ + params, +}: { + params: { creator: string; slug: string }; +}): Promise { + const api = new BackendAPI(); + const agent = await api.getStoreAgent(params.creator, params.slug); + + return { + title: `${agent.agent_name} - AutoGPT Store`, + description: agent.description, + }; +} + +// export async function generateStaticParams() { +// const api = new BackendAPI(); +// const agents = await api.getStoreAgents({ featured: true }); +// return agents.agents.map((agent) => ({ +// creator: agent.creator, +// slug: agent.slug, +// })); +// } + +export default async function Page({ + params, +}: { + params: { creator: string; slug: string }; +}) { + const creator_lower = params.creator.toLowerCase(); + const api = new BackendAPI(); + const agent = await api.getStoreAgent(creator_lower, params.slug); + const otherAgents = await api.getStoreAgents({ creator: creator_lower }); + const similarAgents = await api.getStoreAgents({ + // We are using slug as we know its has been sanitized and is not null + search_query: agent.slug.replace(/-/g, " "), + }); + + const breadcrumbs = [ + { name: "Store", link: "/marketplace" }, + { + name: agent.creator, + link: `/marketplace/creator/${encodeURIComponent(agent.creator)}`, + }, + { name: agent.agent_name, link: "#" }, + ]; + + return ( +
+
+ + +
+
+ +
+ +
+ + + + + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/creator/[creator]/page.tsx b/autogpt_platform/frontend/src/app/marketplace/creator/[creator]/page.tsx new file mode 100644 index 000000000000..904f02b93f40 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/creator/[creator]/page.tsx @@ -0,0 +1,100 @@ +import BackendAPI from "@/lib/autogpt-server-api"; +import { + CreatorDetails as Creator, + StoreAgent, +} from "@/lib/autogpt-server-api"; +import { AgentsSection } from "@/components/agptui/composite/AgentsSection"; +import { BreadCrumbs } from "@/components/agptui/BreadCrumbs"; +import { Metadata } from "next"; +import { CreatorInfoCard } from "@/components/agptui/CreatorInfoCard"; +import { CreatorLinks } from "@/components/agptui/CreatorLinks"; + +export async function generateMetadata({ + params, +}: { + params: { creator: string }; +}): Promise { + const api = new BackendAPI(); + const creator = await api.getStoreCreator(params.creator.toLowerCase()); + + return { + title: `${creator.name} - AutoGPT Store`, + description: creator.description, + }; +} + +// export async function generateStaticParams() { +// const api = new BackendAPI(); +// const creators = await api.getStoreCreators({ featured: true }); +// return creators.creators.map((creator) => ({ +// creator: creator.username, +// })); +// } + +export default async function Page({ + params, +}: { + params: { creator: string }; +}) { + const api = new BackendAPI(); + + try { + const creator = await api.getStoreCreator(params.creator); + const creatorAgents = await api.getStoreAgents({ creator: params.creator }); + + return ( +
+
+ + +
+
+ +
+
+

+ About +

+
+ {creator.description} +
+ + +
+
+
+
+ +
+
+
+ ); + } catch (error) { + return ( +
+
+ Creator not found +
+
+ ); + } +} diff --git a/autogpt_platform/frontend/src/app/marketplace/page.tsx b/autogpt_platform/frontend/src/app/marketplace/page.tsx new file mode 100644 index 000000000000..297fee0c7fca --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/page.tsx @@ -0,0 +1,179 @@ +import * as React from "react"; +import { HeroSection } from "@/components/agptui/composite/HeroSection"; +import { + FeaturedSection, + FeaturedAgent, +} from "@/components/agptui/composite/FeaturedSection"; +import { + AgentsSection, + Agent, +} from "@/components/agptui/composite/AgentsSection"; +import { BecomeACreator } from "@/components/agptui/BecomeACreator"; +import { + FeaturedCreators, + FeaturedCreator, +} from "@/components/agptui/composite/FeaturedCreators"; +import { Separator } from "@/components/ui/separator"; +import { Metadata } from "next"; +import { + StoreAgentsResponse, + CreatorsResponse, +} from "@/lib/autogpt-server-api/types"; +import BackendAPI from "@/lib/autogpt-server-api"; + +async function getStoreData() { + try { + const api = new BackendAPI(); + + // Add error handling and default values + let featuredAgents: StoreAgentsResponse = { + agents: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }; + let topAgents: StoreAgentsResponse = { + agents: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }; + let featuredCreators: CreatorsResponse = { + creators: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }; + + try { + [featuredAgents, topAgents, featuredCreators] = await Promise.all([ + api.getStoreAgents({ featured: true }), + api.getStoreAgents({ sorted_by: "runs" }), + api.getStoreCreators({ featured: true, sorted_by: "num_agents" }), + ]); + } catch (error) { + console.error("Error fetching store data:", error); + } + + return { + featuredAgents, + topAgents, + featuredCreators, + }; + } catch (error) { + console.error("Error in getStoreData:", error); + return { + featuredAgents: { + agents: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }, + topAgents: { + agents: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }, + featuredCreators: { + creators: [], + pagination: { + total_items: 0, + total_pages: 0, + current_page: 0, + page_size: 0, + }, + }, + }; + } +} + +// FIX: Correct metadata +export const metadata: Metadata = { + title: "Marketplace - NextGen AutoGPT", + description: "Find and use AI Agents created by our community", + applicationName: "NextGen AutoGPT Store", + authors: [{ name: "AutoGPT Team" }], + keywords: [ + "AI agents", + "automation", + "artificial intelligence", + "AutoGPT", + "marketplace", + ], + robots: { + index: true, + follow: true, + }, + openGraph: { + title: "Marketplace - NextGen AutoGPT", + description: "Find and use AI Agents created by our community", + type: "website", + siteName: "NextGen AutoGPT Store", + images: [ + { + url: "/images/store-og.png", + width: 1200, + height: 630, + alt: "NextGen AutoGPT Store", + }, + ], + }, + twitter: { + card: "summary_large_image", + title: "Marketplace - NextGen AutoGPT", + description: "Find and use AI Agents created by our community", + images: ["/images/store-twitter.png"], + }, + icons: { + icon: "/favicon.ico", + shortcut: "/favicon-16x16.png", + apple: "/apple-touch-icon.png", + }, +}; + +export default async function Page({}: {}) { + // Get data 
server-side + const { featuredAgents, topAgents, featuredCreators } = await getStoreData(); + + return ( +
+
+ + + + + + + + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/marketplace/search/page.tsx b/autogpt_platform/frontend/src/app/marketplace/search/page.tsx new file mode 100644 index 000000000000..717ac96cb549 --- /dev/null +++ b/autogpt_platform/frontend/src/app/marketplace/search/page.tsx @@ -0,0 +1,182 @@ +"use client"; + +import { useState, useEffect } from "react"; +import { AgentsSection } from "@/components/agptui/composite/AgentsSection"; +import { SearchBar } from "@/components/agptui/SearchBar"; +import { FeaturedCreators } from "@/components/agptui/composite/FeaturedCreators"; +import { Separator } from "@/components/ui/separator"; +import { SearchFilterChips } from "@/components/agptui/SearchFilterChips"; +import { SortDropdown } from "@/components/agptui/SortDropdown"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +export default function Page({ + searchParams, +}: { + searchParams: { searchTerm?: string; sort?: string }; +}) { + return ( + + ); +} + +function SearchResults({ + searchTerm, + sort, +}: { + searchTerm: string; + sort: string; +}) { + const [showAgents, setShowAgents] = useState(true); + const [showCreators, setShowCreators] = useState(true); + const [agents, setAgents] = useState([]); + const [creators, setCreators] = useState([]); + const [isLoading, setIsLoading] = useState(true); + const api = useBackendAPI(); + + useEffect(() => { + const fetchData = async () => { + setIsLoading(true); + + try { + const [agentsRes, creatorsRes] = await Promise.all([ + api.getStoreAgents({ + search_query: searchTerm, + sorted_by: sort, + }), + api.getStoreCreators({ + search_query: searchTerm, + }), + ]); + + setAgents(agentsRes.agents || []); + setCreators(creatorsRes.creators || []); + } catch (error) { + console.error("Error fetching data:", error); + } finally { + setIsLoading(false); + } + }; + + fetchData(); + }, [api, searchTerm, sort]); + + const agentsCount = agents.length; + const creatorsCount = creators.length; + const totalCount = agentsCount + creatorsCount; + + const handleFilterChange = (value: string) => { + if (value === "agents") { + setShowAgents(true); + setShowCreators(false); + } else if (value === "creators") { + setShowAgents(false); + setShowCreators(true); + } else { + setShowAgents(true); + setShowCreators(true); + } + }; + + const handleSortChange = (sortValue: string) => { + let sortBy = "recent"; + if (sortValue === "runs") { + sortBy = "runs"; + } else if (sortValue === "rating") { + sortBy = "rating"; + } + + const sortedAgents = [...agents].sort((a, b) => { + if (sortBy === "runs") { + return b.runs - a.runs; + } else if (sortBy === "rating") { + return b.rating - a.rating; + } else { + return ( + new Date(b.updated_at).getTime() - new Date(a.updated_at).getTime() + ); + } + }); + + const sortedCreators = [...creators].sort((a, b) => { + if (sortBy === "runs") { + return b.agent_runs - a.agent_runs; + } else if (sortBy === "rating") { + return b.agent_rating - a.agent_rating; + } else { + // Creators don't have updated_at, sort by number of agents as fallback + return b.num_agents - a.num_agents; + } + }); + + setAgents(sortedAgents); + setCreators(sortedCreators); + }; + + return ( +
+
+
+
+

+ Results for: +

+

+ {searchTerm} +

+
+
+ +
+
+ + {isLoading ? ( +
+

Loading...

+
+ ) : totalCount > 0 ? ( + <> +
+ + +
+ {/* Content section */} +
+ {showAgents && agentsCount > 0 && ( +
+ +
+ )} + + {showAgents && agentsCount > 0 && creatorsCount > 0 && ( + + )} + {showCreators && creatorsCount > 0 && ( + + )} +
+ + ) : ( +
+

+ No results found +

+

+ Try adjusting your search terms or filters +

+
+ )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/monitoring/loading.tsx b/autogpt_platform/frontend/src/app/monitoring/loading.tsx new file mode 100644 index 000000000000..b0be672e03c2 --- /dev/null +++ b/autogpt_platform/frontend/src/app/monitoring/loading.tsx @@ -0,0 +1,21 @@ +import AgentFlowListSkeleton from "@/components/monitor/skeletons/AgentFlowListSkeleton"; +import React from "react"; +import FlowRunsListSkeleton from "@/components/monitor/skeletons/FlowRunsListSkeleton"; +import FlowRunsStatusSkeleton from "@/components/monitor/skeletons/FlowRunsStatusSkeleton"; + +export default function MonitorLoadingSkeleton() { + return ( +
+
+ {/* Agents Section */} + + + {/* Runs Section */} + + + {/* Stats Section */} + +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/app/monitoring/page.tsx b/autogpt_platform/frontend/src/app/monitoring/page.tsx new file mode 100644 index 000000000000..0be12aaee5c5 --- /dev/null +++ b/autogpt_platform/frontend/src/app/monitoring/page.tsx @@ -0,0 +1,141 @@ +"use client"; +import React, { useCallback, useEffect, useState } from "react"; + +import { GraphExecution, Schedule, GraphMeta } from "@/lib/autogpt-server-api"; + +import { Card } from "@/components/ui/card"; +import { + AgentFlowList, + FlowInfo, + FlowRunInfo, + FlowRunsList, + FlowRunsStats, +} from "@/components/monitor"; +import { SchedulesTable } from "@/components/monitor/scheduleTable"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +const Monitor = () => { + const [flows, setFlows] = useState([]); + const [executions, setExecutions] = useState([]); + const [schedules, setSchedules] = useState([]); + const [selectedFlow, setSelectedFlow] = useState(null); + const [selectedRun, setSelectedRun] = useState(null); + const [sortColumn, setSortColumn] = useState("id"); + const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc"); + const api = useBackendAPI(); + + const fetchSchedules = useCallback(async () => { + setSchedules(await api.listSchedules()); + }, [api]); + + const removeSchedule = useCallback( + async (scheduleId: string) => { + const removedSchedule = await api.deleteSchedule(scheduleId); + setSchedules(schedules.filter((s) => s.id !== removedSchedule.id)); + }, + [schedules, api], + ); + + const fetchAgents = useCallback(() => { + api.listLibraryAgents().then((agent) => { + setFlows(agent); + }); + api.getExecutions().then((executions) => { + setExecutions(executions); + }); + }, [api]); + + useEffect(() => { + fetchAgents(); + }, [fetchAgents]); + + useEffect(() => { + fetchSchedules(); + }, [fetchSchedules]); + + useEffect(() => { + const intervalId = setInterval(() => fetchAgents(), 5000); + return () => clearInterval(intervalId); + }, [fetchAgents, flows]); + + const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2"; + const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3"; + const column3 = "col-span-full xl:col-span-4 xxl:col-span-5"; + + const handleSort = (column: keyof Schedule) => { + if (sortColumn === column) { + setSortDirection(sortDirection === "asc" ? "desc" : "asc"); + } else { + setSortColumn(column); + setSortDirection("asc"); + } + }; + + return ( +
+ { + setSelectedRun(null); + setSelectedFlow(f.id == selectedFlow?.id ? null : (f as GraphMeta)); + }} + /> + v.graph_id == selectedFlow.id) + : executions), + ].sort((a, b) => Number(b.started_at) - Number(a.started_at))} + selectedRun={selectedRun} + onSelectRun={(r) => + setSelectedRun(r.execution_id == selectedRun?.execution_id ? null : r) + } + /> + {(selectedRun && ( + f.id == selectedRun.graph_id)! + } + execution={selectedRun} + className={column3} + /> + )) || + (selectedFlow && ( + e.graph_id == selectedFlow.id)} + className={column3} + refresh={() => { + fetchAgents(); + setSelectedFlow(null); + setSelectedRun(null); + }} + /> + )) || ( + + + + )} +
+ +
+
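The monitor page above refreshes its data by polling: fetchAgents() is re-run on a 5-second interval inside a useEffect whose cleanup clears the timer. A minimal standalone sketch of that pattern — the hook name and the fetch callback are placeholders, not part of the PR:

    import { useEffect } from "react";

    // Re-run `fetch` every `intervalMs` while the component is mounted;
    // clearing the interval in the cleanup prevents leaked timers when the
    // callback identity or interval changes, or on unmount.
    function usePolling(fetch: () => void, intervalMs: number = 5000) {
      useEffect(() => {
        const intervalId = setInterval(fetch, intervalMs);
        return () => clearInterval(intervalId);
      }, [fetch, intervalMs]);
    }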
+ ); +}; + +export default Monitor; diff --git a/autogpt_platform/frontend/src/app/page.tsx b/autogpt_platform/frontend/src/app/page.tsx new file mode 100644 index 000000000000..5a079f262994 --- /dev/null +++ b/autogpt_platform/frontend/src/app/page.tsx @@ -0,0 +1,7 @@ +"use client"; + +import { redirect } from "next/navigation"; + +export default function Page() { + redirect("/marketplace"); +} diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx new file mode 100644 index 000000000000..445077dabe4e --- /dev/null +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -0,0 +1,242 @@ +"use client"; +import { Button } from "@/components/ui/button"; +import { useRouter } from "next/navigation"; +import { useCallback, useContext, useMemo, useState } from "react"; +import { Separator } from "@/components/ui/separator"; +import { useToast } from "@/components/ui/use-toast"; +import { IconKey, IconUser } from "@/components/ui/icons"; +import { LogOutIcon, Trash2Icon } from "lucide-react"; +import { providerIcons } from "@/components/integrations/credentials-input"; +import { CredentialsProvidersContext } from "@/components/integrations/credentials-provider"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import { CredentialsProviderName } from "@/lib/autogpt-server-api"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; +import useSupabase from "@/hooks/useSupabase"; +import Spinner from "@/components/Spinner"; + +export default function PrivatePage() { + const { supabase, user, isUserLoading } = useSupabase(); + const router = useRouter(); + const providers = useContext(CredentialsProvidersContext); + const { toast } = useToast(); + + const [confirmationDialogState, setConfirmationDialogState] = useState< + | { + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } + | { open: false } + >({ open: false }); + + const removeCredentials = useCallback( + async ( + provider: CredentialsProviderName, + id: string, + force: boolean = false, + ) => { + if (!providers || !providers[provider]) { + return; + } + + let result; + try { + result = await providers[provider].deleteCredentials(id, force); + } catch (error: any) { + toast({ + title: "Something went wrong when deleting credentials: " + error, + variant: "destructive", + duration: 2000, + }); + setConfirmationDialogState({ open: false }); + return; + } + if (result.deleted) { + if (result.revoked) { + toast({ + title: "Credentials deleted", + duration: 2000, + }); + } else { + toast({ + title: "Credentials deleted from AutoGPT", + description: `You may also manually remove the connection to AutoGPT at ${provider}!`, + duration: 3000, + }); + } + setConfirmationDialogState({ open: false }); + } else if (result.need_confirmation) { + setConfirmationDialogState({ + open: true, + message: result.message, + onConfirm: () => removeCredentials(provider, id, true), + onReject: () => setConfirmationDialogState({ open: false }), + }); + } + }, + [providers, toast], + ); + + //TODO: remove when the way system credentials are handled is updated + // This contains ids for built-in "Use Credits for X" credentials + const hiddenCredentials = useMemo( + () => [ + "744fdc56-071a-4761-b5a5-0af0ce10a2b5", // Ollama + 
"fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", // Revid + "760f84fc-b270-42de-91f6-08efe1b512d0", // Ideogram + "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate + "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI + "24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic + "4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq + "7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID + "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina + "66f20754-1b81-48e4-91d0-f4f0dd82145f", // Unreal Speech + "b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", // Open Router + "6c0f5bd0-9008-4638-9d79-4b40b631803e", // FAL + "96153e04-9c6c-4486-895f-5bb683b1ecec", // Exa + "78d19fd7-4d59-4a16-8277-3ce310acf2b7", // E2B + "96b83908-2789-4dec-9968-18f0ece4ceb3", // Nvidia + "ed55ac19-356e-4243-a6cb-bc599e9b716f", // Mem0 + ], + [], + ); + + if (isUserLoading) { + return ; + } + + if (!user || !supabase) { + router.push("/login"); + return null; + } + + const allCredentials = providers + ? Object.values(providers).flatMap((provider) => + [ + ...provider.savedOAuthCredentials, + ...provider.savedApiKeys, + ...provider.savedUserPasswordCredentials, + ] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { + oauth2: IconUser, + api_key: IconKey, + user_password: IconKey, + }[credentials.type], + })), + ) + : []; + + return ( +
+
+

+ Hello {user.email} +

+ +
+ +

Connections & Credentials

+ + + + Provider + Name + Actions + + + + {allCredentials.map((cred) => ( + + +
+ + {cred.providerName} +
+
+ +
+ + {cred.title || cred.username} +
+ + { + { + oauth2: "OAuth2 credentials", + api_key: "API key", + user_password: "User password", + }[cred.type] + }{" "} + - {cred.id} + +
+ + + +
+ ))} +
+
+ + + + + Are you sure? + + {confirmationDialogState.open && confirmationDialogState.message} + + + + + confirmationDialogState.open && + confirmationDialogState.onReject() + } + > + Cancel + + + confirmationDialogState.open && + confirmationDialogState.onConfirm() + } + > + Continue + + + + +
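A sketch of the two-phase deletion flow that removeCredentials above implements: the first deleteCredentials call may answer with need_confirmation, and only after the user confirms is the same call repeated with force = true. The confirm callback and the loose providers type below are stand-ins for the AlertDialog wiring and the context value, not part of the PR:

    type CredentialsDeletionResult = {
      deleted: boolean;
      revoked?: boolean;
      need_confirmation?: boolean;
      message?: string;
    };

    async function deleteWithConfirmation(
      providers: Record<
        string,
        {
          deleteCredentials: (
            id: string,
            force?: boolean,
          ) => Promise<CredentialsDeletionResult>;
        }
      >,
      provider: string,
      id: string,
      confirm: (message: string) => Promise<boolean>,
    ): Promise<CredentialsDeletionResult> {
      // First attempt: the backend may refuse and ask for explicit confirmation.
      let result = await providers[provider].deleteCredentials(id);
      if (!result.deleted && result.need_confirmation) {
        // Repeat the same call with force=true once the user has confirmed.
        if (await confirm(result.message ?? "Are you sure?")) {
          result = await providers[provider].deleteCredentials(id, true);
        }
      }
      return result;
    }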
+ ); +} diff --git a/autogpt_platform/frontend/src/app/providers.tsx b/autogpt_platform/frontend/src/app/providers.tsx new file mode 100644 index 000000000000..bae111b84dea --- /dev/null +++ b/autogpt_platform/frontend/src/app/providers.tsx @@ -0,0 +1,23 @@ +"use client"; + +import * as React from "react"; +import { ThemeProvider as NextThemesProvider } from "next-themes"; +import { ThemeProviderProps } from "next-themes"; +import { BackendAPIProvider } from "@/lib/autogpt-server-api/context"; +import { TooltipProvider } from "@/components/ui/tooltip"; +import CredentialsProvider from "@/components/integrations/credentials-provider"; +import { LaunchDarklyProvider } from "@/components/feature-flag/feature-flag-provider"; + +export function Providers({ children, ...props }: ThemeProviderProps) { + return ( + + + + + {children} + + + + + ); +} diff --git a/autogpt_platform/frontend/src/app/reset_password/actions.ts b/autogpt_platform/frontend/src/app/reset_password/actions.ts new file mode 100644 index 000000000000..eebea08b979f --- /dev/null +++ b/autogpt_platform/frontend/src/app/reset_password/actions.ts @@ -0,0 +1,60 @@ +"use server"; +import getServerSupabase from "@/lib/supabase/getServerSupabase"; +import { redirect } from "next/navigation"; +import * as Sentry from "@sentry/nextjs"; +import { headers } from "next/headers"; + +export async function sendResetEmail(email: string) { + return await Sentry.withServerActionInstrumentation( + "sendResetEmail", + {}, + async () => { + const supabase = getServerSupabase(); + const headersList = headers(); + const host = headersList.get("host"); + const protocol = + process.env.NODE_ENV === "development" ? "http" : "https"; + const origin = `${protocol}://${host}`; + + if (!supabase) { + redirect("/error"); + } + + const { error } = await supabase.auth.resetPasswordForEmail(email, { + redirectTo: `${origin}/reset_password`, + }); + + if (error) { + console.error("Error sending reset email", error); + return error.message; + } + + console.log("Reset email sent"); + redirect("/reset_password"); + }, + ); +} + +export async function changePassword(password: string) { + return await Sentry.withServerActionInstrumentation( + "changePassword", + {}, + async () => { + const supabase = getServerSupabase(); + + if (!supabase) { + redirect("/error"); + } + + const { error } = await supabase.auth.updateUser({ password }); + + if (error) { + console.error("Error changing password", error); + return error.message; + } + + await supabase.auth.signOut(); + redirect("/login"); + }, + ); +} diff --git a/autogpt_platform/frontend/src/app/reset_password/page.tsx b/autogpt_platform/frontend/src/app/reset_password/page.tsx new file mode 100644 index 000000000000..36d1413bfef0 --- /dev/null +++ b/autogpt_platform/frontend/src/app/reset_password/page.tsx @@ -0,0 +1,186 @@ +"use client"; +import { + AuthCard, + AuthHeader, + AuthButton, + AuthFeedback, + PasswordInput, +} from "@/components/auth"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { Input } from "@/components/ui/input"; +import useSupabase from "@/hooks/useSupabase"; +import { sendEmailFormSchema, changePasswordFormSchema } from "@/types/auth"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useCallback, useState } from "react"; +import { useForm } from "react-hook-form"; +import { z } from "zod"; +import { changePassword, sendResetEmail } from "./actions"; +import Spinner from 
"@/components/Spinner"; + +export default function ResetPasswordPage() { + const { supabase, user, isUserLoading } = useSupabase(); + const [isLoading, setIsLoading] = useState(false); + const [feedback, setFeedback] = useState(null); + const [isError, setIsError] = useState(false); + const [disabled, setDisabled] = useState(false); + + const sendEmailForm = useForm>({ + resolver: zodResolver(sendEmailFormSchema), + defaultValues: { + email: "", + }, + }); + + const changePasswordForm = useForm>({ + resolver: zodResolver(changePasswordFormSchema), + defaultValues: { + password: "", + confirmPassword: "", + }, + }); + + const onSendEmail = useCallback( + async (data: z.infer) => { + setIsLoading(true); + setFeedback(null); + + if (!(await sendEmailForm.trigger())) { + setIsLoading(false); + return; + } + + const error = await sendResetEmail(data.email); + setIsLoading(false); + if (error) { + setFeedback(error); + setIsError(true); + return; + } + setDisabled(true); + setFeedback( + "Password reset email sent if user exists. Please check your email.", + ); + setIsError(false); + }, + [sendEmailForm], + ); + + const onChangePassword = useCallback( + async (data: z.infer) => { + setIsLoading(true); + setFeedback(null); + + if (!(await changePasswordForm.trigger())) { + setIsLoading(false); + return; + } + + const error = await changePassword(data.password); + setIsLoading(false); + if (error) { + setFeedback(error); + setIsError(true); + return; + } + setFeedback("Password changed successfully. Redirecting to login."); + setIsError(false); + }, + [changePasswordForm], + ); + + if (isUserLoading) { + return ; + } + + if (!supabase) { + return ( +
+ User accounts are disabled because the Supabase client is unavailable
+ ); + } + + return ( + + Reset Password + {user ? ( +
+ + ( + + Password + + + + + + )} + /> + ( + + Confirm Password + + + + + Password needs to be at least 6 characters long + + + + )} + /> + onChangePassword(changePasswordForm.getValues())} + isLoading={isLoading} + type="submit" + > + Update password + + + + + ) : ( +
+ + ( + + Email + + + + + + )} + /> + onSendEmail(sendEmailForm.getValues())} + isLoading={isLoading} + disabled={disabled} + type="submit" + > + Send reset email + + + + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/signup/actions.ts b/autogpt_platform/frontend/src/app/signup/actions.ts new file mode 100644 index 000000000000..80fd19aa90b8 --- /dev/null +++ b/autogpt_platform/frontend/src/app/signup/actions.ts @@ -0,0 +1,44 @@ +"use server"; +import { revalidatePath } from "next/cache"; +import { redirect } from "next/navigation"; +import { z } from "zod"; +import * as Sentry from "@sentry/nextjs"; +import getServerSupabase from "@/lib/supabase/getServerSupabase"; +import { signupFormSchema } from "@/types/auth"; + +export async function signup(values: z.infer) { + "use server"; + return await Sentry.withServerActionInstrumentation( + "signup", + {}, + async () => { + const supabase = getServerSupabase(); + + if (!supabase) { + redirect("/error"); + } + + // We are sure that the values are of the correct type because zod validates the form + const { data, error } = await supabase.auth.signUp(values); + + if (error) { + console.error("Error signing up", error); + // FIXME: supabase doesn't return the correct error message for this case + if (error.message.includes("P0001")) { + return "Please join our waitlist for your turn: https://agpt.co/waitlist"; + } + if (error.code?.includes("user_already_exists")) { + redirect("/login"); + } + return error.message; + } + + if (data.session) { + await supabase.auth.setSession(data.session); + } + console.log("Signed up"); + revalidatePath("/", "layout"); + redirect("/marketplace/profile"); + }, + ); +} diff --git a/autogpt_platform/frontend/src/app/signup/page.tsx b/autogpt_platform/frontend/src/app/signup/page.tsx new file mode 100644 index 000000000000..b7df6d03411b --- /dev/null +++ b/autogpt_platform/frontend/src/app/signup/page.tsx @@ -0,0 +1,224 @@ +"use client"; +import { signup } from "./actions"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { useForm } from "react-hook-form"; +import { Input } from "@/components/ui/input"; +import { z } from "zod"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { useCallback, useState } from "react"; +import { useRouter } from "next/navigation"; +import Link from "next/link"; +import { Checkbox } from "@/components/ui/checkbox"; +import useSupabase from "@/hooks/useSupabase"; +import Spinner from "@/components/Spinner"; +import { + AuthCard, + AuthHeader, + AuthButton, + AuthFeedback, + AuthBottomText, + PasswordInput, +} from "@/components/auth"; +import { signupFormSchema } from "@/types/auth"; + +export default function SignupPage() { + const { supabase, user, isUserLoading } = useSupabase(); + const [feedback, setFeedback] = useState(null); + const router = useRouter(); + const [isLoading, setIsLoading] = useState(false); + const [showWaitlistPrompt, setShowWaitlistPrompt] = useState(false); + + const form = useForm>({ + resolver: zodResolver(signupFormSchema), + defaultValues: { + email: "", + password: "", + confirmPassword: "", + agreeToTerms: false, + }, + }); + + const onSignup = useCallback( + async (data: z.infer) => { + setIsLoading(true); + + if (!(await form.trigger())) { + setIsLoading(false); + return; + } + + const error = await signup(data); + setIsLoading(false); + if (error) { + setShowWaitlistPrompt(true); + return; + } + setFeedback(null); + }, + [form], + ); + + if (user) { + console.debug("User exists, redirecting to /"); + router.push("/"); + } + + if (isUserLoading || user) { + return ; + } + + if (!supabase) { + return ( +
+ User accounts are disabled because the Supabase client is unavailable
+ ); + } + + return ( + + Create a new account +
+ + ( + + Email + + + + + + )} + /> + ( + + Password + + + + + + )} + /> + ( + + Confirm Password + + + + + Password needs to be at least 6 characters long + + + + )} + /> + onSignup(form.getValues())} + isLoading={isLoading} + type="submit" + > + Sign up + + ( + + + + +
+ + + I agree to the + + + Terms of Use + + + and + + + Privacy Policy + + + +
+
+ )} + /> + + + + {showWaitlistPrompt && ( +
+ + The provided email may not be allowed to sign up. + +
+ + - AutoGPT Platform is currently in closed beta. You can join + + + the waitlist here. + +
+ + - Make sure you use the same email address you used to sign up for + the waitlist. + +
+ + - You can self-host the platform; visit our + + + GitHub repository. + +
+ )} + +
+ ); +} diff --git a/autogpt_platform/frontend/src/app/unauthorized/page.tsx b/autogpt_platform/frontend/src/app/unauthorized/page.tsx new file mode 100644 index 000000000000..cca282b3f00e --- /dev/null +++ b/autogpt_platform/frontend/src/app/unauthorized/page.tsx @@ -0,0 +1,9 @@ +// app/unauthorized/page.tsx +export default function Unauthorized() { + return ( +
+

Unauthorized Access

+

You do not have permission to view this page.

+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/ConnectionLine.tsx b/autogpt_platform/frontend/src/components/ConnectionLine.tsx new file mode 100644 index 000000000000..655d5bef19bc --- /dev/null +++ b/autogpt_platform/frontend/src/components/ConnectionLine.tsx @@ -0,0 +1,34 @@ +import { + BaseEdge, + ConnectionLineComponentProps, + getBezierPath, + Position, +} from "@xyflow/react"; + +const ConnectionLine: React.FC = ({ + fromPosition, + fromHandle, + fromX, + fromY, + toPosition, + toX, + toY, +}) => { + const sourceX = + fromPosition === Position.Right + ? fromX + (fromHandle?.width! / 2 - 5) + : fromX - (fromHandle?.width! / 2 - 5); + + const [path] = getBezierPath({ + sourceX: sourceX, + sourceY: fromY, + sourcePosition: fromPosition, + targetX: toX, + targetY: toY, + targetPosition: toPosition, + }); + + return ; +}; + +export default ConnectionLine; diff --git a/autogpt_platform/frontend/src/components/CustomEdge.tsx b/autogpt_platform/frontend/src/components/CustomEdge.tsx new file mode 100644 index 000000000000..4f0d3def7d01 --- /dev/null +++ b/autogpt_platform/frontend/src/components/CustomEdge.tsx @@ -0,0 +1,235 @@ +import React, { useCallback, useContext, useEffect, useState } from "react"; +import { + BaseEdge, + EdgeLabelRenderer, + EdgeProps, + useReactFlow, + XYPosition, + Edge, + Node, +} from "@xyflow/react"; +import "./customedge.css"; +import { X } from "lucide-react"; +import { useBezierPath } from "@/hooks/useBezierPath"; +import { FlowContext } from "./Flow"; + +export type CustomEdgeData = { + edgeColor: string; + sourcePos?: XYPosition; + isStatic?: boolean; + beadUp?: number; + beadDown?: number; + beadData?: any[]; +}; + +type Bead = { + t: number; + targetT: number; + startTime: number; +}; + +export type CustomEdge = Edge; + +export function CustomEdge({ + id, + data, + selected, + sourceX, + sourceY, + targetX, + targetY, + markerEnd, +}: EdgeProps) { + const [isHovered, setIsHovered] = useState(false); + const [beads, setBeads] = useState<{ + beads: Bead[]; + created: number; + destroyed: number; + }>({ beads: [], created: 0, destroyed: 0 }); + const { svgPath, length, getPointForT, getTForDistance } = useBezierPath( + sourceX - 5, + sourceY - 5, + targetX + 3, + targetY - 5, + ); + const { deleteElements } = useReactFlow(); + const { visualizeBeads } = useContext(FlowContext) ?? { + visualizeBeads: "no", + }; + + const onEdgeRemoveClick = () => { + deleteElements({ edges: [{ id }] }); + }; + + const animationDuration = 500; // Duration in milliseconds for bead to travel the curve + const beadDiameter = 12; + const deltaTime = 16; + + const setTargetPositions = useCallback( + (beads: Bead[]) => { + const distanceBetween = Math.min( + (length - beadDiameter) / (beads.length + 1), + beadDiameter, + ); + + return beads.map((bead, index) => { + const distanceFromEnd = beadDiameter * 1.35; + const targetPosition = distanceBetween * index + distanceFromEnd; + const t = getTForDistance(-targetPosition); + + return { + ...bead, + t: visualizeBeads === "animate" ? 
bead.t : t, + targetT: t, + } as Bead; + }); + }, + [getTForDistance, length, visualizeBeads], + ); + + useEffect(() => { + if (data?.beadUp === 0 && data?.beadDown === 0) { + setBeads({ beads: [], created: 0, destroyed: 0 }); + return; + } + + const beadUp = data?.beadUp!; + + // Add beads + setBeads(({ beads, created, destroyed }) => { + const newBeads = []; + for (let i = 0; i < beadUp - created; i++) { + newBeads.push({ t: 0, targetT: 0, startTime: Date.now() }); + } + + const b = setTargetPositions([...beads, ...newBeads]); + return { beads: b, created: beadUp, destroyed }; + }); + + // Remove beads if not animating + if (visualizeBeads !== "animate") { + setBeads(({ beads, created, destroyed }) => { + let destroyedCount = 0; + + const newBeads = beads + .map((bead) => ({ ...bead })) + .filter((bead, index) => { + const beadDown = data?.beadDown!; + + // Remove always one less bead in case of static edge, so it stays at the connection point + const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0); + if (bead.t >= bead.targetT && index < removeCount) { + destroyedCount++; + return false; + } + return true; + }); + + return { + beads: setTargetPositions(newBeads), + created, + destroyed: destroyed + destroyedCount, + }; + }); + return; + } + + // Animate and remove beads + const interval = setInterval(() => { + setBeads(({ beads, created, destroyed }) => { + let destroyedCount = 0; + + const newBeads = beads + .map((bead) => { + const progressIncrement = deltaTime / animationDuration; + const t = Math.min( + bead.t + bead.targetT * progressIncrement, + bead.targetT, + ); + + return { + ...bead, + t, + }; + }) + .filter((bead, index) => { + const beadDown = data?.beadDown!; + + // Remove always one less bead in case of static edge, so it stays at the connection point + const removeCount = beadDown - destroyed - (data?.isStatic ? 1 : 0); + if (bead.t >= bead.targetT && index < removeCount) { + destroyedCount++; + return false; + } + return true; + }); + + return { + beads: setTargetPositions(newBeads), + created, + destroyed: destroyed + destroyedCount, + }; + }); + }, deltaTime); + + return () => clearInterval(interval); + }, [data, setTargetPositions, visualizeBeads]); + + const middle = getPointForT(0.5); + + return ( + <> + + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + /> + +
+ +
+
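For reference, the per-tick update applied to each bead by the interval above reduces to the following pure step (names taken from the component, startTime omitted): each tick advances t by a fixed fraction of targetT so the full travel takes roughly animationDuration milliseconds.

    type BeadStep = { t: number; targetT: number };

    // Advance one bead by a single animation tick; clamped so it never
    // overshoots its target position on the curve.
    function stepBead(
      bead: BeadStep,
      deltaTime: number = 16,
      animationDuration: number = 500,
    ): BeadStep {
      const progressIncrement = deltaTime / animationDuration;
      return {
        ...bead,
        t: Math.min(bead.t + bead.targetT * progressIncrement, bead.targetT),
      };
    }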
+ {beads.beads.map((bead, index) => { + const pos = getPointForT(bead.t); + return ( + + ); + })} + + ); +} diff --git a/autogpt_platform/frontend/src/components/CustomNode.tsx b/autogpt_platform/frontend/src/components/CustomNode.tsx new file mode 100644 index 000000000000..82c633b8f67b --- /dev/null +++ b/autogpt_platform/frontend/src/components/CustomNode.tsx @@ -0,0 +1,899 @@ +import React, { + useState, + useEffect, + useCallback, + useRef, + useContext, + useMemo, +} from "react"; +import { NodeProps, useReactFlow, Node as XYNode, Edge } from "@xyflow/react"; +import "@xyflow/react/dist/style.css"; +import "./customnode.css"; +import InputModalComponent from "./InputModalComponent"; +import OutputModalComponent from "./OutputModalComponent"; +import { + BlockIORootSchema, + BlockIOSubSchema, + BlockIOStringSubSchema, + Category, + Node, + NodeExecutionResult, + BlockUIType, + BlockCost, +} from "@/lib/autogpt-server-api"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { + beautifyString, + cn, + getValue, + hasNonNullNonObjectValue, + parseKeys, + setNestedProperty, +} from "@/lib/utils"; +import { Button } from "@/components/ui/button"; +import { Switch } from "@/components/ui/switch"; +import { TextRenderer } from "@/components/ui/render"; +import { history } from "./history"; +import NodeHandle from "./NodeHandle"; +import { + NodeGenericInputField, + NodeTextBoxInput, +} from "./node-input-components"; +import { getPrimaryCategoryColor } from "@/lib/utils"; +import { FlowContext } from "./Flow"; +import { Badge } from "./ui/badge"; +import NodeOutputs from "./NodeOutputs"; +import SchemaTooltip from "./SchemaTooltip"; +import { IconCoin } from "./ui/icons"; +import * as Separator from "@radix-ui/react-separator"; +import * as ContextMenu from "@radix-ui/react-context-menu"; +import { + DotsVerticalIcon, + TrashIcon, + CopyIcon, + ExitIcon, +} from "@radix-ui/react-icons"; + +export type ConnectionData = Array<{ + edge_id: string; + source: string; + sourceHandle: string; + target: string; + targetHandle: string; +}>; + +export type CustomNodeData = { + blockType: string; + blockCosts: BlockCost[]; + title: string; + description: string; + categories: Category[]; + inputSchema: BlockIORootSchema; + outputSchema: BlockIORootSchema; + hardcodedValues: { [key: string]: any }; + connections: ConnectionData; + webhook?: Node["webhook"]; + isOutputOpen: boolean; + status?: NodeExecutionResult["status"]; + /** executionResults contains outputs across multiple executions + * with the last element being the most recent output */ + executionResults?: { + execId: string; + data: NodeExecutionResult["output_data"]; + }[]; + block_id: string; + backend_id?: string; + errors?: { [key: string]: string }; + isOutputStatic?: boolean; + uiType: BlockUIType; +}; + +export type CustomNode = XYNode; + +export function CustomNode({ + data, + id, + width, + height, + selected, +}: NodeProps) { + const [isOutputOpen, setIsOutputOpen] = useState(data.isOutputOpen || false); + const [isAdvancedOpen, setIsAdvancedOpen] = useState(false); + const [isModalOpen, setIsModalOpen] = useState(false); + const [activeKey, setActiveKey] = useState(null); + const [inputModalValue, setInputModalValue] = useState(""); + const [isOutputModalOpen, setIsOutputModalOpen] = useState(false); + const { updateNodeData, deleteElements, addNodes, getNode } = useReactFlow< + CustomNode, + Edge + >(); + const isInitialSetup = useRef(true); + const flowContext = useContext(FlowContext); + const api = 
useBackendAPI(); + let nodeFlowId = ""; + + if (data.uiType === BlockUIType.AGENT) { + // Display the graph's schema instead AgentExecutorBlock's schema. + data.inputSchema = data.hardcodedValues?.input_schema || {}; + data.outputSchema = data.hardcodedValues?.output_schema || {}; + nodeFlowId = data.hardcodedValues?.graph_id || nodeFlowId; + } + + if (!flowContext) { + throw new Error("FlowContext consumer must be inside FlowEditor component"); + } + + const { setIsAnyModalOpen, getNextNodeId } = flowContext; + + useEffect(() => { + if (data.executionResults || data.status) { + setIsOutputOpen(true); + } + }, [data.executionResults, data.status]); + + useEffect(() => { + setIsOutputOpen(data.isOutputOpen); + }, [data.isOutputOpen]); + + useEffect(() => { + setIsAnyModalOpen?.(isModalOpen || isOutputModalOpen); + }, [isModalOpen, isOutputModalOpen, data, setIsAnyModalOpen]); + + useEffect(() => { + isInitialSetup.current = false; + }, []); + + const setHardcodedValues = (values: any) => { + updateNodeData(id, { hardcodedValues: values }); + }; + + const setErrors = (errors: { [key: string]: string }) => { + updateNodeData(id, { errors }); + }; + + const toggleOutput = (checked: boolean) => { + setIsOutputOpen(checked); + }; + + const toggleAdvancedSettings = (checked: boolean) => { + setIsAdvancedOpen(checked); + }; + + const generateOutputHandles = ( + schema: BlockIORootSchema, + nodeType: BlockUIType, + ) => { + if ( + !schema?.properties || + nodeType === BlockUIType.OUTPUT || + nodeType === BlockUIType.NOTE + ) + return null; + + const renderHandles = ( + propSchema: { [key: string]: BlockIOSubSchema }, + keyPrefix = "", + titlePrefix = "", + ) => { + return Object.keys(propSchema).map((propKey) => { + const fieldSchema = propSchema[propKey]; + const fieldTitle = + titlePrefix + (fieldSchema.title || beautifyString(propKey)); + + return ( +
+ + {"properties" in fieldSchema && + renderHandles( + fieldSchema.properties, + `${keyPrefix}${propKey}_#_`, + `${fieldTitle}.`, + )} +
+ ); + }); + }; + + return renderHandles(schema.properties); + }; + + const generateInputHandles = ( + schema: BlockIORootSchema, + nodeType: BlockUIType, + ) => { + if (!schema?.properties) return null; + let keys = Object.entries(schema.properties); + switch (nodeType) { + case BlockUIType.NOTE: + // For NOTE blocks, don't render any input handles + const [noteKey, noteSchema] = keys[0]; + return ( +
+ +
+ ); + + default: + const getInputPropKey = (key: string) => + nodeType == BlockUIType.AGENT ? `data.${key}` : key; + + return keys.map(([propKey, propSchema]) => { + const isRequired = data.inputSchema.required?.includes(propKey); + const isAdvanced = propSchema.advanced; + const isHidden = propSchema.hidden; + const isConnectable = + // No input connection handles on INPUT and WEBHOOK blocks + ![ + BlockUIType.INPUT, + BlockUIType.WEBHOOK, + BlockUIType.WEBHOOK_MANUAL, + ].includes(nodeType) && + // No input connection handles for credentials + propKey !== "credentials" && + !propKey.endsWith("_credentials") && + // For OUTPUT blocks, only show the 'value' (hides 'name') input connection handle + !(nodeType == BlockUIType.OUTPUT && propKey == "name"); + const isConnected = isInputHandleConnected(propKey); + return ( + !isHidden && + (isRequired || isAdvancedOpen || isConnected || !isAdvanced) && ( +
+ {isConnectable && + !( + "oneOf" in propSchema && + propSchema.oneOf && + "discriminator" in propSchema && + propSchema.discriminator + ) ? ( + + ) : ( + propKey !== "credentials" && + !propKey.endsWith("_credentials") && ( +
+ + {propSchema.title || beautifyString(propKey)} + + +
+ ) + )} + {isConnected || ( + + )} +
+ ) + ); + }); + } + }; + const handleInputChange = (path: string, value: any) => { + const keys = parseKeys(path); + const newValues = JSON.parse(JSON.stringify(data.hardcodedValues)); + let current = newValues; + + for (let i = 0; i < keys.length - 1; i++) { + const { key: currentKey, index } = keys[i]; + if (index !== undefined) { + if (!current[currentKey]) current[currentKey] = []; + if (!current[currentKey][index]) current[currentKey][index] = {}; + current = current[currentKey][index]; + } else { + if (!current[currentKey]) current[currentKey] = {}; + current = current[currentKey]; + } + } + + const lastKey = keys[keys.length - 1]; + if (lastKey.index !== undefined) { + if (!current[lastKey.key]) current[lastKey.key] = []; + current[lastKey.key][lastKey.index] = value; + } else { + current[lastKey.key] = value; + } + + if (!isInitialSetup.current) { + history.push({ + type: "UPDATE_INPUT", + payload: { nodeId: id, oldValues: data.hardcodedValues, newValues }, + undo: () => setHardcodedValues(data.hardcodedValues), + redo: () => setHardcodedValues(newValues), + }); + } + + setHardcodedValues(newValues); + const errors = data.errors || {}; + // Remove error with the same key + setNestedProperty(errors, path, null); + setErrors({ ...errors }); + }; + + const isInputHandleConnected = (key: string) => { + return ( + data.connections && + data.connections.some((conn: any) => { + if (typeof conn === "string") { + const [_source, target] = conn.split(" -> "); + return target.includes(key) && target.includes(data.title); + } + return conn.target === id && conn.targetHandle === key; + }) + ); + }; + + const isOutputHandleConnected = (key: string) => { + return ( + data.connections && + data.connections.some((conn: any) => { + if (typeof conn === "string") { + const [source, _target] = conn.split(" -> "); + return source.includes(key) && source.includes(data.title); + } + return conn.source === id && conn.sourceHandle === key; + }) + ); + }; + + const handleInputClick = (key: string) => { + console.debug(`Opening modal for key: ${key}`); + setActiveKey(key); + const value = getValue(key, data.hardcodedValues); + setInputModalValue( + typeof value === "object" ? JSON.stringify(value, null, 2) : value, + ); + setIsModalOpen(true); + }; + + const handleModalSave = (value: string) => { + if (activeKey) { + try { + const parsedValue = JSON.parse(value); + handleInputChange(activeKey, parsedValue); + } catch (error) { + handleInputChange(activeKey, value); + } + } + setIsModalOpen(false); + setActiveKey(null); + }; + + const handleOutputClick = () => { + setIsOutputModalOpen(true); + }; + + const deleteNode = useCallback(() => { + console.debug("Deleting node:", id); + + // Remove the node + deleteElements({ nodes: [{ id }] }); + }, [id, deleteElements]); + + const copyNode = useCallback(() => { + const newId = getNextNodeId(); + const currentNode = getNode(id); + + if (!currentNode) { + console.error("Cannot copy node: current node not found"); + return; + } + + const verticalOffset = height ?? 
100; + + const newNode: CustomNode = { + id: newId, + type: currentNode.type, + position: { + x: currentNode.position.x, + y: currentNode.position.y - verticalOffset - 20, + }, + data: { + ...data, + title: `${data.title} (Copy)`, + block_id: data.block_id, + connections: [], + isOutputOpen: false, + }, + }; + + addNodes(newNode); + + history.push({ + type: "ADD_NODE", + payload: { node: { ...newNode, ...newNode.data } as CustomNodeData }, + undo: () => deleteElements({ nodes: [{ id: newId }] }), + redo: () => addNodes(newNode), + }); + }, [id, data, height, addNodes, deleteElements, getNode, getNextNodeId]); + + const hasConfigErrors = data.errors && hasNonNullNonObjectValue(data.errors); + const outputData = data.executionResults?.at(-1)?.data; + const hasOutputError = + typeof outputData === "object" && + outputData !== null && + "error" in outputData; + + useEffect(() => { + if (hasConfigErrors) { + const filteredErrors = Object.fromEntries( + Object.entries(data.errors || {}).filter(([, value]) => + hasNonNullNonObjectValue(value), + ), + ); + console.error( + "Block configuration errors for", + data.title, + ":", + filteredErrors, + ); + } + if (hasOutputError) { + console.error( + "Block output contains error for", + data.title, + ":", + outputData.error, + ); + } + }, [hasConfigErrors, hasOutputError, data.errors, outputData, data.title]); + + const blockClasses = [ + "custom-node", + "dark-theme", + "rounded-xl", + "bg-white/[.9] dark:bg-gray-800/[.9]", + "border border-gray-300 dark:border-gray-600", + data.uiType === BlockUIType.NOTE ? "w-[300px]" : "w-[500px]", + data.uiType === BlockUIType.NOTE + ? "bg-yellow-100 dark:bg-yellow-900" + : "bg-white dark:bg-gray-800", + selected ? "shadow-2xl" : "", + ] + .filter(Boolean) + .join(" "); + + const errorClass = + hasConfigErrors || hasOutputError + ? "border-red-200 dark:border-red-800 border-2" + : ""; + + const statusClass = (() => { + if (hasConfigErrors || hasOutputError) + return "border-red-200 dark:border-red-800 border-4"; + switch (data.status?.toLowerCase()) { + case "completed": + return "border-green-200 dark:border-green-800 border-4"; + case "running": + return "border-yellow-200 dark:border-yellow-800 border-4"; + case "failed": + return "border-red-200 dark:border-red-800 border-4"; + case "incomplete": + return "border-purple-200 dark:border-purple-800 border-4"; + case "queued": + return "border-cyan-200 dark:border-cyan-800 border-4"; + default: + return ""; + } + })(); + + const statusBackgroundClass = (() => { + if (hasConfigErrors || hasOutputError) return "bg-red-200 dark:bg-red-800"; + switch (data.status?.toLowerCase()) { + case "completed": + return "bg-green-200 dark:bg-green-800"; + case "running": + return "bg-yellow-200 dark:bg-yellow-800"; + case "failed": + return "bg-red-200 dark:bg-red-800"; + case "incomplete": + return "bg-purple-200 dark:bg-purple-800"; + case "queued": + return "bg-cyan-200 dark:bg-cyan-800"; + default: + return ""; + } + })(); + + const hasAdvancedFields = + data.inputSchema && + Object.entries(data.inputSchema.properties).some(([key, value]) => { + return ( + value.advanced === true && !data.inputSchema.required?.includes(key) + ); + }); + + const inputValues = data.hardcodedValues; + + const isCostFilterMatch = (costFilter: any, inputValues: any): boolean => { + /* + Filter rules: + - If costFilter is an object, then check if costFilter is the subset of inputValues + - Otherwise, check if costFilter is equal to inputValues. 
+ - Undefined, null, and empty string are considered as equal. + */ + return typeof costFilter === "object" && typeof inputValues === "object" + ? Object.entries(costFilter).every( + ([k, v]) => + (!v && !inputValues[k]) || isCostFilterMatch(v, inputValues[k]), + ) + : costFilter === inputValues; + }; + + const blockCost = + data.blockCosts && + data.blockCosts.find((cost) => + isCostFilterMatch(cost.cost_filter, inputValues), + ); + + const [webhookStatus, setWebhookStatus] = useState< + "works" | "exists" | "broken" | "none" | "pending" | null + >(null); + + useEffect(() => { + if ( + ![BlockUIType.WEBHOOK, BlockUIType.WEBHOOK_MANUAL].includes(data.uiType) + ) + return; + if (!data.webhook) { + setWebhookStatus("none"); + return; + } + + setWebhookStatus("pending"); + api + .pingWebhook(data.webhook.id) + .then((pinged) => setWebhookStatus(pinged ? "works" : "exists")) + .catch((error: Error) => + error.message.includes("ping timed out") + ? setWebhookStatus("broken") + : setWebhookStatus("none"), + ); + }, [data.uiType, data.webhook, api, setWebhookStatus]); + + const webhookStatusDot = useMemo( + () => + webhookStatus && ( +
+ ), + [webhookStatus], + ); + + const LineSeparator = () => ( +
+ +
+ ); + + const ContextMenuContent = () => ( + + + + Copy + + {nodeFlowId && ( + window.open(`/build?flowID=${nodeFlowId}`)} + className="flex cursor-pointer items-center rounded-md px-3 py-2 hover:bg-gray-100 dark:hover:bg-gray-700" + > + + Open agent + + )} + + + + Delete + + + ); + + const onContextButtonTrigger = (e: React.MouseEvent) => { + e.preventDefault(); + const rect = e.currentTarget.getBoundingClientRect(); + const event = new MouseEvent("contextmenu", { + bubbles: true, + clientX: rect.left + rect.width / 2, + clientY: rect.top + rect.height / 2, + }); + e.currentTarget.dispatchEvent(event); + }; + + const stripeColor = getPrimaryCategoryColor(data.categories); + + const nodeContent = () => ( +
+ {/* Header */} +
+ {/* Color Stripe */} +
+ +
+
+

+ +

+ #{id.split("-")[0]} + +
+ + {webhookStatusDot} + +
+
+ {blockCost && ( +
+ + {" "} + + {blockCost.cost_amount} + {" "} + credits/{blockCost.cost_type} + +
+ )} + {data.categories.map((category) => ( + + {beautifyString(category.category.toLowerCase())} + + ))} +
+
+ + +
+ + {/* Body */} +
+ {/* Input Handles */} + {data.uiType !== BlockUIType.NOTE ? ( +
+
+ {data.uiType === BlockUIType.WEBHOOK_MANUAL && + (data.webhook ? ( +
+ Webhook URL: +
+ + {data.webhook.url} + + +
+
+ ) : ( +

+ (A Webhook URL will be generated when you save the agent) +

+ ))} + {data.inputSchema && + generateInputHandles(data.inputSchema, data.uiType)} +
+
+ ) : ( +
+ {data.inputSchema && + generateInputHandles(data.inputSchema, data.uiType)} +
+ )} + + {/* Advanced Settings */} + {data.uiType !== BlockUIType.NOTE && hasAdvancedFields && ( + <> + +
+ Advanced + +
+ + )} + {/* Output Handles */} + {data.uiType !== BlockUIType.NOTE && ( + <> + +
+
+ {data.outputSchema && + generateOutputHandles(data.outputSchema, data.uiType)} +
+
+ + )} +
+ {/* End Body */} + {/* Footer */} +
+ {/* Display Outputs */} + {isOutputOpen && data.uiType !== BlockUIType.NOTE && ( +
+ {(data.executionResults?.length ?? 0) > 0 ? ( +
+ + +
+ +
+
+ ) : ( +
+ )} +
+ + {hasConfigErrors || hasOutputError + ? "Error" + : data.status + ? beautifyString(data.status) + : "Not Run"} + +
+
+ )} +
+ setIsModalOpen(false)} + onSave={handleModalSave} + defaultValue={inputModalValue} + key={activeKey} + /> + setIsOutputModalOpen(false)} + executionResults={data.executionResults?.toReversed() || []} + /> +
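A worked example of the matching rule implemented by isCostFilterMatch above, with a self-contained copy of the matcher; the cost-filter and input values here are made up purely for illustration:

    // An object filter matches when every entry it specifies (recursively) also
    // matches the inputs; undefined, null and "" are treated as equal.
    const matches = (filter: any, inputs: any): boolean =>
      typeof filter === "object" && typeof inputs === "object"
        ? Object.entries(filter).every(
            ([k, v]) => (!v && !inputs[k]) || matches(v, inputs[k]),
          )
        : filter === inputs;

    // Hypothetical cost filter and node input values:
    matches(
      { model: "gpt-4o", options: { stream: null } },
      { model: "gpt-4o", prompt: "hi", options: { stream: "" } },
    ); // => true: the filter is a subset of the inputs
    matches({ model: "gpt-4o-mini" }, { model: "gpt-4o" }); // => false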
+ ); + + return ( + + {nodeContent()} + + ); +} diff --git a/autogpt_platform/frontend/src/components/DataTable.tsx b/autogpt_platform/frontend/src/components/DataTable.tsx new file mode 100644 index 000000000000..aba62af6c275 --- /dev/null +++ b/autogpt_platform/frontend/src/components/DataTable.tsx @@ -0,0 +1,94 @@ +import React from "react"; +import { beautifyString } from "@/lib/utils"; +import { Button } from "./ui/button"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "./ui/table"; +import { Clipboard } from "lucide-react"; +import { useToast } from "./ui/use-toast"; +import { ContentRenderer } from "./ui/render"; + +type DataTableProps = { + title?: string; + truncateLongData?: boolean; + data: { [key: string]: Array }; +}; + +export default function DataTable({ + title, + truncateLongData, + data, +}: DataTableProps) { + const { toast } = useToast(); + const maxChars = 100; + + const copyData = (pin: string, data: string) => { + navigator.clipboard.writeText(data).then(() => { + toast({ + title: `"${pin}" data copied to clipboard!`, + duration: 2000, + }); + }); + }; + + return ( + <> + {title && {title}} + + + + Pin + Data + + + + {Object.entries(data).map(([key, value]) => ( + + + {beautifyString(key)} + + +
+ + {value.map((item, index) => ( + + + {index < value.length - 1 && ", "} + + ))} +
+
+
+ ))} +
+
+ + ); +} diff --git a/autogpt_platform/frontend/src/components/Flow.tsx b/autogpt_platform/frontend/src/components/Flow.tsx new file mode 100644 index 000000000000..df6a6b7b1d4c --- /dev/null +++ b/autogpt_platform/frontend/src/components/Flow.tsx @@ -0,0 +1,744 @@ +"use client"; +import React, { + useState, + useCallback, + useEffect, + useRef, + MouseEvent, + createContext, +} from "react"; +import { + ReactFlow, + ReactFlowProvider, + Controls, + Background, + Node, + OnConnect, + Connection, + MarkerType, + NodeChange, + EdgeChange, + useReactFlow, + applyEdgeChanges, + applyNodeChanges, + useViewport, +} from "@xyflow/react"; +import "@xyflow/react/dist/style.css"; +import { CustomNode } from "./CustomNode"; +import "./flow.css"; +import { BlockUIType, Link } from "@/lib/autogpt-server-api"; +import { getTypeColor, findNewlyAddedBlockCoordinates } from "@/lib/utils"; +import { history } from "./history"; +import { CustomEdge } from "./CustomEdge"; +import ConnectionLine from "./ConnectionLine"; +import { Control, ControlPanel } from "@/components/edit/control/ControlPanel"; +import { SaveControl } from "@/components/edit/control/SaveControl"; +import { BlocksControl } from "@/components/edit/control/BlocksControl"; +import { IconUndo2, IconRedo2 } from "@/components/ui/icons"; +import { startTutorial } from "./tutorial"; +import useAgentGraph from "@/hooks/useAgentGraph"; +import { v4 as uuidv4 } from "uuid"; +import { useRouter, usePathname, useSearchParams } from "next/navigation"; +import RunnerUIWrapper, { + RunnerUIWrapperRef, +} from "@/components/RunnerUIWrapper"; +import PrimaryActionBar from "@/components/PrimaryActionButton"; +import { useToast } from "@/components/ui/use-toast"; +import { useCopyPaste } from "../hooks/useCopyPaste"; +import { CronScheduler } from "./cronScheduler"; + +// This is for the history, this is the minimum distance a block must move before it is logged +// It helps to prevent spamming the history with small movements especially when pressing on a input in a block +const MINIMUM_MOVE_BEFORE_LOG = 50; + +type FlowContextType = { + visualizeBeads: "no" | "static" | "animate"; + setIsAnyModalOpen: (isOpen: boolean) => void; + getNextNodeId: () => string; +}; + +export type NodeDimension = { + [nodeId: string]: { + x: number; + y: number; + width: number; + height: number; + }; +}; + +export const FlowContext = createContext(null); + +const FlowEditor: React.FC<{ + flowID?: string; + className?: string; +}> = ({ flowID, className }) => { + const { + addNodes, + addEdges, + getNode, + deleteElements, + updateNode, + setViewport, + } = useReactFlow(); + const [nodeId, setNodeId] = useState(1); + const [isAnyModalOpen, setIsAnyModalOpen] = useState(false); + const [visualizeBeads, setVisualizeBeads] = useState< + "no" | "static" | "animate" + >("animate"); + const { + agentName, + setAgentName, + agentDescription, + setAgentDescription, + savedAgent, + availableNodes, + availableFlows, + getOutputType, + requestSave, + requestSaveAndRun, + requestStopRun, + scheduleRunner, + isSaving, + isRunning, + isStopping, + isScheduling, + setIsScheduling, + nodes, + setNodes, + edges, + setEdges, + } = useAgentGraph(flowID, visualizeBeads !== "no"); + + const router = useRouter(); + const pathname = usePathname(); + const params = useSearchParams(); + const initialPositionRef = useRef<{ + [key: string]: { x: number; y: number }; + }>({}); + const isDragging = useRef(false); + + // State to control if blocks menu should be pinned open + const [pinBlocksPopover, 
setPinBlocksPopover] = useState(false); + // State to control if save popover should be pinned open + const [pinSavePopover, setPinSavePopover] = useState(false); + + const runnerUIRef = useRef(null); + + const [openCron, setOpenCron] = useState(false); + + const { toast } = useToast(); + + const TUTORIAL_STORAGE_KEY = "shepherd-tour"; + + // It stores the dimension of all nodes with position as well + const [nodeDimensions, setNodeDimensions] = useState({}); + + useEffect(() => { + if (params.get("resetTutorial") === "true") { + localStorage.removeItem(TUTORIAL_STORAGE_KEY); + router.push(pathname); + } else if (!localStorage.getItem(TUTORIAL_STORAGE_KEY)) { + const emptyNodes = (forceRemove: boolean = false) => + forceRemove ? (setNodes([]), setEdges([]), true) : nodes.length === 0; + startTutorial(emptyNodes, setPinBlocksPopover, setPinSavePopover); + localStorage.setItem(TUTORIAL_STORAGE_KEY, "yes"); + } + }, [ + availableNodes, + router, + pathname, + params, + setEdges, + setNodes, + nodes.length, + ]); + + useEffect(() => { + if (params.get("open_scheduling") === "true") { + setOpenCron(true); + } + }, [params]); + + useEffect(() => { + const handleKeyDown = (event: KeyboardEvent) => { + const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0; + const isUndo = + (isMac ? event.metaKey : event.ctrlKey) && event.key === "z"; + const isRedo = + (isMac ? event.metaKey : event.ctrlKey) && + (event.key === "y" || (event.shiftKey && event.key === "Z")); + + if (isUndo) { + event.preventDefault(); + handleUndo(); + } + + if (isRedo) { + event.preventDefault(); + handleRedo(); + } + }; + + window.addEventListener("keydown", handleKeyDown); + + return () => { + window.removeEventListener("keydown", handleKeyDown); + }; + }, []); + + const onNodeDragStart = (_: MouseEvent, node: Node) => { + initialPositionRef.current[node.id] = { ...node.position }; + isDragging.current = true; + }; + + const onNodeDragEnd = (_: MouseEvent, node: Node | null) => { + if (!node) return; + + isDragging.current = false; + const oldPosition = initialPositionRef.current[node.id]; + const newPosition = node.position; + + // Calculate the movement distance + if (!oldPosition || !newPosition) return; + + const distanceMoved = Math.sqrt( + Math.pow(newPosition.x - oldPosition.x, 2) + + Math.pow(newPosition.y - oldPosition.y, 2), + ); + + if (distanceMoved > MINIMUM_MOVE_BEFORE_LOG) { + // Minimum movement threshold + history.push({ + type: "UPDATE_NODE_POSITION", + payload: { nodeId: node.id, oldPosition, newPosition }, + undo: () => updateNode(node.id, { position: oldPosition }), + redo: () => updateNode(node.id, { position: newPosition }), + }); + } + delete initialPositionRef.current[node.id]; + }; + + // Function to clear status, output, and close the output info dropdown of all nodes + // and reset data beads on edges + const clearNodesStatusAndOutput = useCallback(() => { + setNodes((nds) => { + const newNodes = nds.map((node) => ({ + ...node, + data: { + ...node.data, + status: undefined, + isOutputOpen: false, + }, + })); + + return newNodes; + }); + }, [setNodes]); + + const onNodesChange = useCallback( + (nodeChanges: NodeChange[]) => { + // Persist the changes + setNodes((prev) => applyNodeChanges(nodeChanges, prev)); + + // Remove all edges that were connected to deleted nodes + nodeChanges + .filter((change) => change.type === "remove") + .forEach((deletedNode) => { + const nodeID = deletedNode.id; + const deletedNodeData = nodes.find((node) => node.id === nodeID); + + if (deletedNodeData) { + 
history.push({ + type: "DELETE_NODE", + payload: { node: deletedNodeData.data }, + undo: () => addNodes(deletedNodeData), + redo: () => deleteElements({ nodes: [{ id: nodeID }] }), + }); + } + + const connectedEdges = edges.filter((edge) => + [edge.source, edge.target].includes(nodeID), + ); + deleteElements({ + edges: connectedEdges.map((edge) => ({ id: edge.id })), + }); + }); + }, + [deleteElements, setNodes, nodes, edges, addNodes], + ); + + const formatEdgeID = useCallback((conn: Link | Connection): string => { + if ("sink_id" in conn) { + return `${conn.source_id}_${conn.source_name}_${conn.sink_id}_${conn.sink_name}`; + } else { + return `${conn.source}_${conn.sourceHandle}_${conn.target}_${conn.targetHandle}`; + } + }, []); + + const onConnect: OnConnect = useCallback( + (connection: Connection) => { + // Check if this exact connection already exists + const existingConnection = edges.find( + (edge) => + edge.source === connection.source && + edge.target === connection.target && + edge.sourceHandle === connection.sourceHandle && + edge.targetHandle === connection.targetHandle, + ); + + if (existingConnection) { + console.warn("This exact connection already exists."); + return; + } + + const edgeColor = getTypeColor( + getOutputType(nodes, connection.source!, connection.sourceHandle!), + ); + const sourceNode = getNode(connection.source!); + const newEdge: CustomEdge = { + id: formatEdgeID(connection), + type: "custom", + markerEnd: { + type: MarkerType.ArrowClosed, + strokeWidth: 2, + color: edgeColor, + }, + data: { + edgeColor, + sourcePos: sourceNode!.position, + isStatic: sourceNode!.data.isOutputStatic, + }, + ...connection, + source: connection.source!, + target: connection.target!, + }; + + addEdges(newEdge); + history.push({ + type: "ADD_EDGE", + payload: { edge: newEdge }, + undo: () => { + deleteElements({ edges: [{ id: newEdge.id }] }); + }, + redo: () => { + addEdges(newEdge); + }, + }); + clearNodesStatusAndOutput(); // Clear status and output on connection change + }, + [ + getNode, + addEdges, + deleteElements, + clearNodesStatusAndOutput, + nodes, + edges, + formatEdgeID, + getOutputType, + ], + ); + + const onEdgesChange = useCallback( + (edgeChanges: EdgeChange[]) => { + // Persist the changes + setEdges((prev) => applyEdgeChanges(edgeChanges, prev)); + + // Propagate edge changes to node data + const addedEdges = edgeChanges.filter((change) => change.type === "add"), + replaceEdges = edgeChanges.filter( + (change) => change.type === "replace", + ), + removedEdges = edgeChanges.filter((change) => change.type === "remove"), + selectedEdges = edgeChanges.filter( + (change) => change.type === "select", + ); + + if (addedEdges.length > 0 || removedEdges.length > 0) { + setNodes((nds) => { + const newNodes = nds.map((node) => ({ + ...node, + data: { + ...node.data, + connections: [ + // Remove node connections for deleted edges + ...node.data.connections.filter( + (conn) => + !removedEdges.some( + (removedEdge) => removedEdge.id === conn.edge_id, + ), + ), + // Add node connections for added edges + ...addedEdges.map((addedEdge) => ({ + edge_id: addedEdge.item.id, + source: addedEdge.item.source, + target: addedEdge.item.target, + sourceHandle: addedEdge.item.sourceHandle!, + targetHandle: addedEdge.item.targetHandle!, + })), + ], + }, + })); + + return newNodes; + }); + + if (removedEdges.length > 0) { + clearNodesStatusAndOutput(); // Clear status and output on edge deletion + } + } + + if (replaceEdges.length > 0) { + // Reset node connections for all edges + 
console.warn( + "useReactFlow().setRootEdges was used to overwrite all edges. " + + "Use addEdges, deleteElements, or reconnectEdge for incremental changes.", + replaceEdges, + ); + setNodes((nds) => + nds.map((node) => ({ + ...node, + data: { + ...node.data, + connections: [ + ...replaceEdges.map((replaceEdge) => ({ + edge_id: replaceEdge.item.id, + source: replaceEdge.item.source, + target: replaceEdge.item.target, + sourceHandle: replaceEdge.item.sourceHandle!, + targetHandle: replaceEdge.item.targetHandle!, + })), + ], + }, + })), + ); + clearNodesStatusAndOutput(); + } + }, + [setNodes, clearNodesStatusAndOutput, setEdges], + ); + + const getNextNodeId = useCallback(() => { + return uuidv4(); + }, []); + + const { x, y, zoom } = useViewport(); + + // Set the initial view port to center the canvas. + useEffect(() => { + if (nodes.length <= 0 || x !== 0 || y !== 0) { + return; + } + + const topLeft = { x: Infinity, y: Infinity }; + const bottomRight = { x: -Infinity, y: -Infinity }; + + nodes.forEach((node) => { + const { x, y } = node.position; + topLeft.x = Math.min(topLeft.x, x); + topLeft.y = Math.min(topLeft.y, y); + // Rough estimate of the width and height of the node: 500x400. + bottomRight.x = Math.max(bottomRight.x, x + 500); + bottomRight.y = Math.max(bottomRight.y, y + 400); + }); + + const centerX = (topLeft.x + bottomRight.x) / 2; + const centerY = (topLeft.y + bottomRight.y) / 2; + const zoom = 0.8; + + setViewport({ + x: window.innerWidth / 2 - centerX * zoom, + y: window.innerHeight / 2 - centerY * zoom, + zoom: zoom, + }); + }, [nodes, setViewport, x, y]); + + const addNode = useCallback( + (blockId: string, nodeType: string, hardcodedValues: any = {}) => { + const nodeSchema = availableNodes.find((node) => node.id === blockId); + if (!nodeSchema) { + console.error(`Schema not found for block ID: ${blockId}`); + return; + } + + /* + Calculate a position to the right of the newly added block, allowing for some margin. + If adding to the right side causes the new block to collide with an existing block, attempt to place it at the bottom or left. + Why not the top? Because the height of the new block is unknown. + If it still collides, run a loop to find the best position where it does not collide. + Then, adjust the canvas to center on the newly added block. + Note: The width is known, e.g., w = 300px for a note and w = 500px for others, but the height is dynamic. + */ + + // Alternative: We could also use D3 force, Intersection for this (React flow Pro examples) + + const viewportCoordinates = + nodeDimensions && Object.keys(nodeDimensions).length > 0 + ? // we will get all the dimension of nodes, then store + findNewlyAddedBlockCoordinates( + nodeDimensions, + nodeSchema.uiType == BlockUIType.NOTE ? 
300 : 500, + 60, + 1.0, + ) + : // we will get all the dimension of nodes, then store + { + x: window.innerWidth / 2 - x, + y: window.innerHeight / 2 - y, + }; + + const newNode: CustomNode = { + id: nodeId.toString(), + type: "custom", + position: viewportCoordinates, // Set the position to the calculated viewport center + data: { + blockType: nodeType, + blockCosts: nodeSchema.costs, + title: `${nodeType} ${nodeId}`, + description: nodeSchema.description, + categories: nodeSchema.categories, + inputSchema: nodeSchema.inputSchema, + outputSchema: nodeSchema.outputSchema, + hardcodedValues: hardcodedValues, + connections: [], + isOutputOpen: false, + block_id: blockId, + isOutputStatic: nodeSchema.staticOutput, + uiType: nodeSchema.uiType, + }, + }; + + addNodes(newNode); + setNodeId((prevId) => prevId + 1); + clearNodesStatusAndOutput(); // Clear status and output when a new node is added + + setViewport( + { + // Rough estimate of the dimension of the node is: 500x400px. + // Though we skip shifting the X, considering the block menu side-bar. + x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2, + y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2, + zoom: 0.8, + }, + { duration: 500 }, + ); + + history.push({ + type: "ADD_NODE", + payload: { node: { ...newNode, ...newNode.data } }, + undo: () => deleteElements({ nodes: [{ id: newNode.id }] }), + redo: () => addNodes(newNode), + }); + }, + [ + nodeId, + setViewport, + availableNodes, + addNodes, + nodeDimensions, + deleteElements, + clearNodesStatusAndOutput, + x, + y, + ], + ); + + const findNodeDimensions = useCallback(() => { + const newNodeDimensions: NodeDimension = nodes.reduce((acc, node) => { + const nodeElement = document.querySelector( + `[data-id="custom-node-${node.id}"]`, + ); + if (nodeElement) { + const rect = nodeElement.getBoundingClientRect(); + const { left, top, width, height } = rect; + + // Convert screen coordinates to flow coordinates + const flowX = (left - x) / zoom; + const flowY = (top - y) / zoom; + const flowWidth = width / zoom; + const flowHeight = height / zoom; + + acc[node.id] = { + x: flowX, + y: flowY, + width: flowWidth, + height: flowHeight, + }; + } + return acc; + }, {} as NodeDimension); + + setNodeDimensions(newNodeDimensions); + }, [nodes, x, y, zoom]); + + useEffect(() => { + findNodeDimensions(); + }, [nodes, findNodeDimensions]); + + const handleUndo = () => { + history.undo(); + }; + + const handleRedo = () => { + history.redo(); + }; + + const handleCopyPaste = useCopyPaste(getNextNodeId); + + const handleKeyDown = useCallback( + (event: KeyboardEvent) => { + // Prevent copy/paste if any modal is open or if the focus is on an input element + const activeElement = document.activeElement; + const isInputField = + activeElement?.tagName === "INPUT" || + activeElement?.tagName === "TEXTAREA" || + activeElement?.getAttribute("contenteditable") === "true"; + + if (isAnyModalOpen || isInputField) return; + + handleCopyPaste(event); + }, + [isAnyModalOpen, handleCopyPaste], + ); + + useEffect(() => { + window.addEventListener("keydown", handleKeyDown); + return () => { + window.removeEventListener("keydown", handleKeyDown); + }; + }, [handleKeyDown]); + + const onNodesDelete = useCallback(() => { + clearNodesStatusAndOutput(); + }, [clearNodesStatusAndOutput]); + + const editorControls: Control[] = [ + { + label: "Undo", + icon: , + onClick: handleUndo, + }, + { + label: "Redo", + icon: , + onClick: handleRedo, + }, + ]; + + // This function is called after cron 
expression is created + // So you can collect inputs for scheduling + const afterCronCreation = (cronExpression: string) => { + runnerUIRef.current?.collectInputsForScheduling(cronExpression); + }; + + // This function opens the form for creating a cron expression + const handleScheduleButton = () => { + if (!savedAgent) { + toast({ + title: `Please save the agent using the button in the left sidebar before running it.`, + duration: 2000, + }); + return; + } + setOpenCron(true); + }; + + return ( + +
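The placement comment above (try the right side of the last block first, fall back to placing below, and loop until there is no collision) is implemented by findNewlyAddedBlockCoordinates, which is not shown in this hunk. A minimal sketch of that idea under assumed names — illustrative only, not the PR's actual implementation:

type Rect = { x: number; y: number; width: number; height: number };

// True when two rectangles come closer than `margin` on either axis.
function collides(a: Rect, b: Rect, margin: number): boolean {
  return !(
    a.x + a.width + margin <= b.x ||
    b.x + b.width + margin <= a.x ||
    a.y + a.height + margin <= b.y ||
    b.y + b.height + margin <= a.y
  );
}

// Start to the right of the most recent block and step downwards until the
// candidate overlaps nothing. The height (400) is assumed because, as the
// comment notes, only the width of the new block is known up front.
function placeNewBlock(existing: Rect[], width: number, margin = 60): { x: number; y: number } {
  if (existing.length === 0) return { x: 0, y: 0 };
  const last = existing[existing.length - 1];
  const candidate: Rect = { x: last.x + last.width + margin, y: last.y, width, height: 400 };
  while (existing.some((rect) => collides(candidate, rect, margin))) {
    candidate.y += candidate.height + margin;
  }
  return { x: candidate.x, y: candidate.y };
}

Similarly, the viewport math used above (the centering effect and the DOM-to-flow conversion in findNodeDimensions) boils down to one translate-then-scale transform. A small sketch of the two directions, using the same formulas as the surrounding code; the helper names are assumptions, not part of this PR:

type Viewport = { x: number; y: number; zoom: number };

// DOM/screen position -> flow position: undo the pan, then undo the zoom.
function screenToFlow(point: { x: number; y: number }, vp: Viewport) {
  return { x: (point.x - vp.x) / vp.zoom, y: (point.y - vp.y) / vp.zoom };
}

// Pick a pan so that a flow-space point lands in the middle of the window.
function centerViewportOn(center: { x: number; y: number }, zoom: number): Viewport {
  return {
    x: window.innerWidth / 2 - center.x * zoom,
    y: window.innerHeight / 2 - center.y * zoom,
    zoom,
  };
}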
+ + + + + } + botChildren={ + requestSave()} + agentDescription={agentDescription} + onDescriptionChange={setAgentDescription} + agentName={agentName} + onNameChange={setAgentName} + pinSavePopover={pinSavePopover} + /> + } + > + runnerUIRef.current?.openRunnerOutput()} + onClickRunAgent={() => { + if (!savedAgent) { + toast({ + title: `Please save the agent using the button in the left sidebar before running it.`, + duration: 2000, + }); + return; + } + if (!isRunning) { + runnerUIRef.current?.runOrOpenInput(); + } else { + requestStopRun(); + } + }} + onClickScheduleButton={handleScheduleButton} + isScheduling={isScheduling} + isDisabled={!savedAgent} + isRunning={isRunning} + requestStopRun={requestStopRun} + runAgentTooltip={!isRunning ? "Run Agent" : "Stop Agent"} + /> + + +
+ +
+ ); +}; + +const WrappedFlowEditor: typeof FlowEditor = (props) => ( + + + +); + +export default WrappedFlowEditor; diff --git a/autogpt_platform/frontend/src/components/InputModalComponent.tsx b/autogpt_platform/frontend/src/components/InputModalComponent.tsx new file mode 100644 index 000000000000..08fe0700eea8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/InputModalComponent.tsx @@ -0,0 +1,107 @@ +import React, { FC, useEffect, useState } from "react"; +import { Button } from "./ui/button"; +import { Textarea } from "./ui/textarea"; +import { Maximize2, Minimize2, Clipboard } from "lucide-react"; +import { createPortal } from "react-dom"; +import { toast } from "./ui/use-toast"; + +interface ModalProps { + isOpen: boolean; + onClose: () => void; + onSave: (value: string) => void; + title?: string; + defaultValue: string; +} + +const InputModalComponent: FC = ({ + isOpen, + onClose, + onSave, + title, + defaultValue, +}) => { + const [tempValue, setTempValue] = useState(defaultValue); + const [isMaximized, setIsMaximized] = useState(false); + + useEffect(() => { + if (isOpen) { + setTempValue(defaultValue); + setIsMaximized(false); + } + }, [isOpen, defaultValue]); + + const handleSave = () => { + onSave(tempValue); + onClose(); + }; + + const toggleSize = () => { + setIsMaximized(!isMaximized); + }; + + const copyValue = () => { + navigator.clipboard.writeText(tempValue).then(() => { + toast({ + title: "Input value copied to clipboard!", + duration: 2000, + }); + }); + }; + + if (!isOpen) { + return null; + } + + const modalContent = ( + + +
+ + +
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/RatingCard.stories.tsx b/autogpt_platform/frontend/src/components/agptui/RatingCard.stories.tsx new file mode 100644 index 000000000000..c122b7f5666b --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/RatingCard.stories.tsx @@ -0,0 +1,47 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { RatingCard } from "./RatingCard"; + +const meta = { + title: "AGPT UI/RatingCard", + component: RatingCard, + parameters: { + layout: "centered", + }, + tags: ["autodocs"], +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + agentName: "Test Agent", + // onSubmit: (rating) => { + // console.log("Rating submitted:", rating); + // }, + // onClose: () => { + // console.log("Rating card closed"); + // }, + storeListingVersionId: "1", + }, +}; + +export const LongAgentName: Story = { + args: { + agentName: "Very Long Agent Name That Might Need Special Handling", + // onSubmit: (rating) => { + // console.log("Rating submitted:", rating); + // }, + // onClose: () => { + // console.log("Rating card closed"); + // }, + storeListingVersionId: "1", + }, +}; + +export const WithoutCallbacks: Story = { + args: { + agentName: "Test Agent", + storeListingVersionId: "1", + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/RatingCard.tsx b/autogpt_platform/frontend/src/components/agptui/RatingCard.tsx new file mode 100644 index 000000000000..3486d5482bde --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/RatingCard.tsx @@ -0,0 +1,109 @@ +"use client"; + +import * as React from "react"; +import { Cross1Icon } from "@radix-ui/react-icons"; +import { IconStar, IconStarFilled } from "@/components/ui/icons"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +interface RatingCardProps { + agentName: string; + storeListingVersionId: string; +} + +export const RatingCard: React.FC = ({ + agentName, + storeListingVersionId, +}) => { + const [rating, setRating] = React.useState(0); + const [hoveredRating, setHoveredRating] = React.useState(0); + const [isVisible, setIsVisible] = React.useState(true); + const api = useBackendAPI(); + + const handleClose = () => { + setIsVisible(false); + }; + + if (!isVisible) return null; + + const handleSubmit = async (rating: number) => { + if (rating > 0) { + console.log(`Rating submitted for ${agentName}:`, rating); + await api.reviewAgent("--", agentName, { + store_listing_version_id: storeListingVersionId, + score: rating, + }); + handleClose(); + } + }; + + const getRatingText = (rating: number) => { + switch (rating) { + case 1: + return "Needs improvement"; + case 2: + return "Meh"; + case 3: + return "Average"; + case 4: + return "Good"; + case 5: + return "Awesome!"; + default: + return "Rate it!"; + } + }; + + return ( +
+ + +
Rate agent
+ +
+ Could you rate the {agentName} agent for us? +
+ +
+
+ {[1, 2, 3, 4, 5].map((star) => ( + + ))} +
+ +
+ {getRatingText(hoveredRating || rating)} +
+
+ +
+ +
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SearchBar.stories.tsx b/autogpt_platform/frontend/src/components/agptui/SearchBar.stories.tsx new file mode 100644 index 000000000000..a807a1d1b8bb --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SearchBar.stories.tsx @@ -0,0 +1,86 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { SearchBar } from "./SearchBar"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Search Bar", + component: SearchBar, + parameters: { + layout: { + center: true, + padding: 0, + }, + nextjs: { + appDirectory: true, + navigation: { + pathname: "/search", + query: { + searchTerm: "", + }, + }, + }, + }, + tags: ["autodocs"], + argTypes: { + placeholder: { control: "text" }, + backgroundColor: { control: "text" }, + iconColor: { control: "text" }, + textColor: { control: "text" }, + placeholderColor: { control: "text" }, + }, + decorators: [ + (Story) => ( +
+ +
+ ), + ], +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + placeholder: 'Search for tasks like "optimise SEO"', + }, +}; + +export const CustomStyles: Story = { + args: { + placeholder: "Enter your search query", + backgroundColor: "bg-blue-100", + iconColor: "text-blue-500", + textColor: "text-blue-700", + placeholderColor: "text-blue-400", + }, +}; + +export const WithInteraction: Story = { + args: { + placeholder: "Type and press Enter", + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const input = canvas.getByPlaceholderText("Type and press Enter"); + + await userEvent.type(input, "test query"); + await userEvent.keyboard("{Enter}"); + + await expect(input).toHaveValue("test query"); + }, +}; + +export const EmptySubmit: Story = { + args: { + placeholder: "Empty submit test", + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const input = canvas.getByPlaceholderText("Empty submit test"); + + await userEvent.keyboard("{Enter}"); + + await expect(input).toHaveValue(""); + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SearchBar.tsx b/autogpt_platform/frontend/src/components/agptui/SearchBar.tsx new file mode 100644 index 000000000000..7f23d87f0777 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SearchBar.tsx @@ -0,0 +1,60 @@ +"use client"; + +import * as React from "react"; +import { useRouter } from "next/navigation"; + +import { MagnifyingGlassIcon } from "@radix-ui/react-icons"; + +interface SearchBarProps { + placeholder?: string; + backgroundColor?: string; + iconColor?: string; + textColor?: string; + placeholderColor?: string; + width?: string; + height?: string; +} + +/** SearchBar component for user input and search functionality. */ +export const SearchBar: React.FC = ({ + placeholder = 'Search for tasks like "optimise SEO"', + backgroundColor = "bg-neutral-100 dark:bg-neutral-800", + iconColor = "text-[#646464] dark:text-neutral-400", + textColor = "text-[#707070] dark:text-neutral-200", + placeholderColor = "text-[#707070] dark:text-neutral-400", + width = "w-9/10 lg:w-[56.25rem]", + height = "h-[60px]", +}) => { + const router = useRouter(); + + const [searchQuery, setSearchQuery] = React.useState(""); + + const handleSubmit = (event: React.FormEvent) => { + event.preventDefault(); + console.log(searchQuery); + + if (searchQuery.trim()) { + // Encode the search term and navigate to the desired path + const encodedTerm = encodeURIComponent(searchQuery); + router.push(`/marketplace/search?searchTerm=${encodedTerm}`); + } + }; + + return ( +
+ + setSearchQuery(e.target.value)} + placeholder={placeholder} + className={`flex-grow border-none bg-transparent ${textColor} font-['Geist'] text-lg font-normal leading-[2.25rem] tracking-tight md:text-xl placeholder:${placeholderColor} focus:outline-none`} + data-testid="store-search-input" + /> + + ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SearchFilterChips.tsx b/autogpt_platform/frontend/src/components/agptui/SearchFilterChips.tsx new file mode 100644 index 000000000000..44b651ca3e29 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SearchFilterChips.tsx @@ -0,0 +1,64 @@ +"use client"; + +import * as React from "react"; + +interface FilterOption { + label: string; + count: number; + value: string; +} + +interface SearchFilterChipsProps { + totalCount?: number; + agentsCount?: number; + creatorsCount?: number; + onFilterChange?: (value: string) => void; +} + +export const SearchFilterChips: React.FC = ({ + totalCount = 10, + agentsCount = 8, + creatorsCount = 2, + onFilterChange, +}) => { + const [selected, setSelected] = React.useState("all"); + + const filters: FilterOption[] = [ + { label: "All", count: totalCount, value: "all" }, + { label: "Agents", count: agentsCount, value: "agents" }, + { label: "Creators", count: creatorsCount, value: "creators" }, + ]; + + const handleFilterClick = (value: string) => { + setSelected(value); + onFilterChange?.(value); + console.log(`Filter selected: ${value}`); + }; + + return ( +
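The SearchBar above only pushes /marketplace/search?searchTerm=<encoded query>; the page that consumes that parameter is outside this hunk. A minimal sketch of how such a page might read it with the Next.js App Router — the file and component names are assumptions for illustration, not code from this PR:

"use client";

import { useSearchParams } from "next/navigation";

// Hypothetical consumer of the ?searchTerm= parameter set by SearchBar.
export default function MarketplaceSearchResults() {
  const params = useSearchParams();
  const searchTerm = params.get("searchTerm") ?? "";
  return <p>Results for: {searchTerm}</p>;
}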
+ {filters.map((filter) => ( + + ))} +
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.stories.tsx b/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.stories.tsx new file mode 100644 index 000000000000..f7898c855708 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.stories.tsx @@ -0,0 +1,23 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { SettingsInputForm } from "./SettingsInputForm"; + +const meta: Meta = { + title: "AGPT UI/Settings/Settings Input Form", + component: SettingsInputForm, + parameters: { + layout: "fullscreen", + }, +}; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + email: "johndoe@email.com", + desktopNotifications: { + first: false, + second: true, + }, + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.tsx b/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.tsx new file mode 100644 index 000000000000..9ea01bc68785 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SettingsInputForm.tsx @@ -0,0 +1,134 @@ +"use client"; + +import * as React from "react"; +import { Button } from "@/components/ui/button"; +import useSupabase from "@/hooks/useSupabase"; + +interface SettingsInputFormProps { + email?: string; + desktopNotifications?: { + first: boolean; + second: boolean; + }; +} + +export const SettingsInputForm = ({ + email = "johndoe@email.com", + desktopNotifications = { first: false, second: true }, +}: SettingsInputFormProps) => { + const [password, setPassword] = React.useState(""); + const [confirmPassword, setConfirmPassword] = React.useState(""); + const [passwordsMatch, setPasswordsMatch] = React.useState(true); + const { supabase } = useSupabase(); + + const handleSaveChanges = async () => { + if (password !== confirmPassword) { + setPasswordsMatch(false); + return; + } + setPasswordsMatch(true); + if (supabase) { + try { + const { error } = await supabase.auth.updateUser({ + password: password, + }); + if (error) { + console.error("Error updating user:", error); + } else { + console.log("User updated successfully"); + } + } catch (error) { + console.error("Error updating user:", error); + } + } + }; + + const handleCancel = () => { + setPassword(""); + setConfirmPassword(""); + setPasswordsMatch(true); + }; + + return ( +
+

+ Settings +

+ + {/* My Account Section */} +
+

+ My account +

+
+ {/* Password Input */} +
+
+ + setPassword(e.target.value)} + className="h-[50px] w-full rounded-[35px] border border-neutral-200 bg-transparent px-6 py-3 text-base text-slate-950 dark:border-neutral-700 dark:text-white" + aria-label="Password field" + /> +
+
+ + {/* Confirm Password Input */} +
+
+ + setConfirmPassword(e.target.value)} + className="h-[50px] w-full rounded-[35px] border border-neutral-200 bg-transparent px-6 py-3 text-base text-slate-950 dark:border-neutral-700 dark:text-white" + aria-label="Confirm Password field" + /> +
+
+
+
+ +
+ +
+
+ + +
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/Sidebar.stories.tsx b/autogpt_platform/frontend/src/components/agptui/Sidebar.stories.tsx new file mode 100644 index 000000000000..24a10df1ab3f --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/Sidebar.stories.tsx @@ -0,0 +1,34 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { Sidebar } from "./Sidebar"; + +const meta = { + title: "AGPT UI/Sidebar", + component: Sidebar, + parameters: { + layout: "centered", + }, + tags: ["autodocs"], + argTypes: { + linkGroups: { control: "object" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +const defaultLinkGroups = [ + { + links: [ + { text: "Agent dashboard", href: "/dashboard" }, + { text: "Integrations", href: "/integrations" }, + { text: "Profile", href: "/profile" }, + { text: "Settings", href: "/settings" }, + ], + }, +]; + +export const Default: Story = { + args: { + linkGroups: defaultLinkGroups, + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/Sidebar.tsx b/autogpt_platform/frontend/src/components/agptui/Sidebar.tsx new file mode 100644 index 000000000000..08f56faa1fb5 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/Sidebar.tsx @@ -0,0 +1,173 @@ +import * as React from "react"; +import Link from "next/link"; +import { Button } from "./Button"; +import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"; +import { KeyIcon, Menu } from "lucide-react"; +import { + IconDashboardLayout, + IconIntegrations, + IconProfile, + IconSliders, + IconCoin, +} from "../ui/icons"; + +interface SidebarLinkGroup { + links: { + text: string; + href: string; + }[]; +} + +interface SidebarProps { + linkGroups: SidebarLinkGroup[]; +} + +export const Sidebar: React.FC = ({ linkGroups }) => { + const stripeAvailable = Boolean( + process.env.NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY, + ); + + return ( + <> + + + + + +
+
+ + +
+ Creator dashboard +
+ + {stripeAvailable && ( + + +
+ Credits +
+ + )} + + +
+ Integrations +
+ + + +
+ API Keys +
+ + + +
+ Profile +
+ + + +
+ Settings +
+ +
+
+
+
+ +
+
+
+ + +
+ Agent dashboard +
+ + {stripeAvailable && ( + + +
+ Credits +
+ + )} + + +
+ Integrations +
+ + + +
+ API Keys +
+ + + +
+ Profile +
+ + + +
+ Settings +
+ +
+
+
+ + ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/SortDropdown.tsx b/autogpt_platform/frontend/src/components/agptui/SortDropdown.tsx new file mode 100644 index 000000000000..df0a1ae169a7 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/SortDropdown.tsx @@ -0,0 +1,65 @@ +"use client"; + +import * as React from "react"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { ChevronDownIcon } from "@radix-ui/react-icons"; + +const sortOptions: SortOption[] = [ + { label: "Most Recent", value: "recent" }, + { label: "Most Runs", value: "runs" }, + { label: "Highest Rated", value: "rating" }, +]; + +interface SortOption { + label: string; + value: string; +} + +export const SortDropdown: React.FC<{ + onSort: (sortValue: string) => void; +}> = ({ onSort }) => { + const [selected, setSelected] = React.useState(sortOptions[0]); + + const handleSelect = (option: SortOption) => { + setSelected(option); + onSort(option.value); + console.log(`Sorting by: ${option.label} (${option.value})`); + }; + + return ( + + + + Sort by + + + {selected.label} + + + + + {sortOptions.map((option) => ( + handleSelect(option)} + > + {option.label} + + ))} + + + ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/Status.stories.tsx b/autogpt_platform/frontend/src/components/agptui/Status.stories.tsx new file mode 100644 index 000000000000..390dc0b50141 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/Status.stories.tsx @@ -0,0 +1,58 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { Status, StatusType } from "./Status"; + +const meta = { + title: "AGPT UI/Status", + component: Status, + parameters: { + layout: "centered", + }, + tags: ["autodocs"], + argTypes: { + status: { + control: "select", + options: ["draft", "awaiting_review", "approved", "rejected"], + }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Draft: Story = { + args: { + status: "draft" as StatusType, + }, +}; + +export const AwaitingReview: Story = { + args: { + status: "awaiting_review" as StatusType, + }, +}; + +export const Approved: Story = { + args: { + status: "approved" as StatusType, + }, +}; + +export const Rejected: Story = { + args: { + status: "rejected" as StatusType, + }, +}; + +export const AllStatuses: Story = { + args: { + status: "draft" as StatusType, + }, + render: () => ( +
+ + + + +
+ ), +}; diff --git a/autogpt_platform/frontend/src/components/agptui/Status.tsx b/autogpt_platform/frontend/src/components/agptui/Status.tsx new file mode 100644 index 000000000000..7e34a7c74841 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/Status.tsx @@ -0,0 +1,75 @@ +import * as React from "react"; + +export type StatusType = "draft" | "awaiting_review" | "approved" | "rejected"; + +interface StatusProps { + status: StatusType; +} + +const statusConfig: Record< + StatusType, + { + bgColor: string; + dotColor: string; + text: string; + darkBgColor: string; + darkDotColor: string; + } +> = { + draft: { + bgColor: "bg-blue-50", + dotColor: "bg-blue-500", + text: "Draft", + darkBgColor: "dark:bg-blue-900", + darkDotColor: "dark:bg-blue-300", + }, + awaiting_review: { + bgColor: "bg-amber-50", + dotColor: "bg-amber-500", + text: "Awaiting review", + darkBgColor: "dark:bg-amber-900", + darkDotColor: "dark:bg-amber-300", + }, + approved: { + bgColor: "bg-green-50", + dotColor: "bg-green-500", + text: "Approved", + darkBgColor: "dark:bg-green-900", + darkDotColor: "dark:bg-green-300", + }, + rejected: { + bgColor: "bg-red-50", + dotColor: "bg-red-500", + text: "Rejected", + darkBgColor: "dark:bg-red-900", + darkDotColor: "dark:bg-red-300", + }, +}; + +export const Status: React.FC = ({ status }) => { + /** + * Status component displays a badge with a colored dot and text indicating the agent's status + * @param status - The current status of the agent + * Valid values: 'draft', 'awaiting_review', 'approved', 'rejected' + */ + if (!status) { + return ; + } else if (!statusConfig[status]) { + return ; + } + + const config = statusConfig[status]; + + return ( +
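One useful property of the statusConfig shape above: declaring it as Record<StatusType, …> makes the mapping exhaustive, so extending the StatusType union without adding a matching entry is a compile-time error. A tiny sketch of the same pattern with a hypothetical extra status, for illustration only:

// Hypothetical: add "archived" to the union and the Record below must grow too.
type DemoStatus = "draft" | "awaiting_review" | "approved" | "rejected" | "archived";

const demoLabels: Record<DemoStatus, string> = {
  draft: "Draft",
  awaiting_review: "Awaiting review",
  approved: "Approved",
  rejected: "Rejected",
  archived: "Archived", // removing this line would fail type-checking
};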
+
+
+ {config.text} +
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/StoreCard.stories.tsx b/autogpt_platform/frontend/src/components/agptui/StoreCard.stories.tsx new file mode 100644 index 000000000000..48eb7fdfa959 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/StoreCard.stories.tsx @@ -0,0 +1,115 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { StoreCard } from "./StoreCard"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/StoreCard", + component: StoreCard, + parameters: { + layout: { + center: true, + fullscreen: true, + padding: 0, + }, + }, + tags: ["autodocs"], + argTypes: { + agentName: { control: "text" }, + agentImage: { control: "text" }, + description: { control: "text" }, + runs: { control: "number" }, + rating: { control: "number", min: 0, max: 5, step: 0.1 }, + onClick: { action: "clicked" }, + avatarSrc: { control: "text" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + agentName: "SEO Optimizer", + agentImage: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: "Optimize your website's SEO with AI-powered suggestions", + runs: 10000, + rating: 4.5, + onClick: () => console.log("Default StoreCard clicked"), + avatarSrc: "https://github.com/shadcn.png", + }, +}; + +export const LowRating: Story = { + args: { + agentName: "Data Analyzer", + agentImage: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: "Analyze complex datasets with machine learning algorithms", + runs: 5000, + rating: 2.7, + onClick: () => console.log("LowRating StoreCard clicked"), + avatarSrc: "https://example.com/avatar2.jpg", + }, +}; + +export const HighRuns: Story = { + args: { + agentName: "Code Assistant", + agentImage: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: "Get AI-powered coding help for various programming languages", + runs: 1000000, + rating: 4.8, + onClick: () => console.log("HighRuns StoreCard clicked"), + avatarSrc: "https://example.com/avatar3.jpg", + }, +}; + +export const WithInteraction: Story = { + args: { + agentName: "Task Planner", + agentImage: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: "Plan and organize your tasks efficiently with AI", + runs: 50000, + rating: 4.2, + onClick: () => console.log("WithInteraction StoreCard clicked"), + avatarSrc: "https://example.com/avatar4.jpg", + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const storeCard = canvas.getByText("Task Planner"); + + await userEvent.hover(storeCard); + await userEvent.click(storeCard); + }, +}; + +export const LongDescription: Story = { + args: { + agentName: "AI Writing Assistant", + agentImage: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: + "Enhance your writing with our advanced AI-powered assistant. 
It offers real-time suggestions for grammar, style, and tone, helps with research and fact-checking.", + runs: 75000, + rating: 4.7, + onClick: () => console.log("LongDescription StoreCard clicked"), + avatarSrc: "https://example.com/avatar5.jpg", + }, +}; + +export const HiddenAvatar: Story = { + args: { + agentName: "Data Visualizer", + agentImage: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: "Create stunning visualizations from complex datasets", + runs: 60000, + rating: 4.6, + onClick: () => console.log("HiddenAvatar StoreCard clicked"), + avatarSrc: "https://example.com/avatar6.jpg", + hideAvatar: true, + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx b/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx new file mode 100644 index 000000000000..d8e892561b80 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/StoreCard.tsx @@ -0,0 +1,109 @@ +import * as React from "react"; +import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"; +import Image from "next/image"; +import { StarRatingIcons } from "@/components/ui/icons"; + +interface StoreCardProps { + agentName: string; + agentImage: string; + description: string; + runs: number; + rating: number; + onClick: () => void; + avatarSrc: string; + hideAvatar?: boolean; + creatorName?: string; +} + +export const StoreCard: React.FC = ({ + agentName, + agentImage, + description, + runs, + rating, + onClick, + avatarSrc, + hideAvatar = false, + creatorName, +}) => { + const handleClick = () => { + onClick(); + }; + + return ( +
{ + if (e.key === "Enter" || e.key === " ") { + handleClick(); + } + }} + > + {/* Header Image Section with Avatar */} +
+ {`${agentName} + {!hideAvatar && ( +
+ + + + {(creatorName || agentName).charAt(0)} + + +
+ )} +
+ + {/* Content Section */} +
+ {/* Title and Creator */} +

+ {agentName} +

+ {!hideAvatar && creatorName && ( +

+ by {creatorName} +

+ )} + + {/* Description */} +

+ {description} +

+ + {/* Stats Row */} +
+
+ {runs.toLocaleString()} runs +
+
+ + {rating.toFixed(1)} + +
+ {StarRatingIcons(rating)} +
+
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/ThemeToggle.tsx b/autogpt_platform/frontend/src/components/agptui/ThemeToggle.tsx new file mode 100644 index 000000000000..515a98cda1e3 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/ThemeToggle.tsx @@ -0,0 +1,45 @@ +"use client"; + +import * as React from "react"; +import { useTheme } from "next-themes"; +import { IconMoon, IconSun } from "@/components/ui/icons"; +import { Button } from "./Button"; + +export function ThemeToggle() { + const { theme, setTheme } = useTheme(); + const [mounted, setMounted] = React.useState(false); + + React.useEffect(() => { + setMounted(true); + }, []); + + if (!mounted) { + return ( +
+
+
+ ); + } + + return ( +
setTheme(theme === "light" ? "dark" : "light")} + role="button" + tabIndex={0} + > +
+ {theme === "light" ? ( + + ) : ( + + )} +
+ Toggle theme +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/agptui/composite/APIKeySection.tsx b/autogpt_platform/frontend/src/components/agptui/composite/APIKeySection.tsx new file mode 100644 index 000000000000..3eaa0a5270b9 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/APIKeySection.tsx @@ -0,0 +1,296 @@ +"use client"; + +import { useState, useEffect } from "react"; + +import { APIKey, APIKeyPermission } from "@/lib/autogpt-server-api/types"; + +import { LuCopy } from "react-icons/lu"; +import { Loader2, MoreVertical } from "lucide-react"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { useToast } from "@/components/ui/use-toast"; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from "@/components/ui/card"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogFooter, + DialogHeader, + DialogTitle, + DialogTrigger, +} from "@/components/ui/dialog"; +import { Button } from "@/components/ui/button"; +import { Label } from "@/components/ui/label"; +import { Input } from "@/components/ui/input"; +import { Checkbox } from "@/components/ui/checkbox"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import { Badge } from "@/components/ui/badge"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; + +export function APIKeysSection() { + const [apiKeys, setApiKeys] = useState([]); + const [isLoading, setIsLoading] = useState(true); + const [isCreateOpen, setIsCreateOpen] = useState(false); + const [isKeyDialogOpen, setIsKeyDialogOpen] = useState(false); + const [newKeyName, setNewKeyName] = useState(""); + const [newKeyDescription, setNewKeyDescription] = useState(""); + const [newApiKey, setNewApiKey] = useState(""); + const [selectedPermissions, setSelectedPermissions] = useState< + APIKeyPermission[] + >([]); + const { toast } = useToast(); + const api = useBackendAPI(); + + useEffect(() => { + loadAPIKeys(); + }, []); + + const loadAPIKeys = async () => { + setIsLoading(true); + try { + const keys = await api.listAPIKeys(); + setApiKeys(keys.filter((key) => key.status === "ACTIVE")); + } finally { + setIsLoading(false); + } + }; + + const handleCreateKey = async () => { + try { + const response = await api.createAPIKey( + newKeyName, + selectedPermissions, + newKeyDescription, + ); + + setNewApiKey(response.plain_text_key); + setIsCreateOpen(false); + setIsKeyDialogOpen(true); + loadAPIKeys(); + } catch (error) { + toast({ + title: "Error", + description: "Failed to create AutoGPT Platform API key", + variant: "destructive", + }); + } + }; + + const handleCopyKey = () => { + navigator.clipboard.writeText(newApiKey); + toast({ + title: "Copied", + description: "AutoGPT Platform API key copied to clipboard", + }); + }; + + const handleRevokeKey = async (keyId: string) => { + try { + await api.revokeAPIKey(keyId); + toast({ + title: "Success", + description: "AutoGPT Platform API key revoked successfully", + }); + loadAPIKeys(); + } catch (error) { + toast({ + title: "Error", + description: "Failed to revoke AutoGPT Platform API key", + variant: "destructive", + }); + } + }; + + return ( + + + AutoGPT Platform API Keys + + Manage your AutoGPT Platform API keys for programmatic access + + + +
+ + + + + + + Create New API Key + + Create a new AutoGPT Platform API key + + +
+
+ + setNewKeyName(e.target.value)} + placeholder="My AutoGPT Platform API Key" + /> +
+
+ + setNewKeyDescription(e.target.value)} + placeholder="Used for..." + /> +
+
+ + {Object.values(APIKeyPermission).map((permission) => ( +
+ { + setSelectedPermissions( + checked + ? [...selectedPermissions, permission] + : selectedPermissions.filter( + (p) => p !== permission, + ), + ); + }} + /> + +
+ ))} +
+
+ + + + +
+
+ + + + + AutoGPT Platform API Key Created + + Please copy your AutoGPT API key now. You won't be able + to see it again! + + +
+ + {newApiKey} + + +
+ + + +
+
+
+ + {isLoading ? ( +
+ +
+ ) : ( + apiKeys.length > 0 && ( + + + + Name + API Key + Status + Created + Last Used + + + + + {apiKeys.map((key) => ( + + {key.name} + +
+ {`${key.prefix}******************${key.postfix}`} +
+
+ + + {key.status} + + + + {new Date(key.created_at).toLocaleDateString()} + + + {key.last_used_at + ? new Date(key.last_used_at).toLocaleDateString() + : "Never"} + + + + + + + + handleRevokeKey(key.id)} + > + Revoke + + + + +
+ ))} +
+
+ ) + )} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.stories.tsx b/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.stories.tsx new file mode 100644 index 000000000000..7e23f22e6c9c --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.stories.tsx @@ -0,0 +1,199 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { Agent, AgentsSection } from "./AgentsSection"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Composite/Agents Section", + component: AgentsSection, + parameters: { + layout: { + center: true, + fullscreen: true, + padding: 0, + }, + }, + tags: ["autodocs"], + argTypes: { + sectionTitle: { control: "text" }, + agents: { control: "object" }, + // onCardClick: { action: "clicked" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +const mockTopAgents = [ + { + agent_name: "SEO Optimizer Pro", + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: + "Boost your website's search engine rankings with our advanced AI-powered SEO optimization tool.", + runs: 50000, + rating: 4.7, + creator_avatar: "https://example.com/avatar1.jpg", + slug: "seo-optimizer-pro", + creator: "John Doe", + sub_heading: "SEO Expert", + }, + { + agent_name: "Content Writer AI", + agent_image: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: + "Generate high-quality, engaging content for your blog, social media, or marketing campaigns.", + runs: 75000, + rating: 4.5, + creator_avatar: "https://example.com/avatar2.jpg", + slug: "content-writer-ai", + creator: "Jane Doe", + sub_heading: "Content Writer", + }, + { + agent_name: "Data Analyzer Lite", + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: "A basic tool for analyzing small to medium-sized datasets.", + runs: 10000, + rating: 3.8, + creator_avatar: "https://example.com/avatar3.jpg", + slug: "data-analyzer-lite", + creator: "John Doe", + sub_heading: "Data Analyst", + }, +] satisfies Agent[]; + +export const Default: Story = { + args: { + sectionTitle: "Top Agents", + agents: mockTopAgents, + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const SingleAgent: Story = { + args: { + sectionTitle: "Top Agents", + agents: [mockTopAgents[0]], + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const NoAgents: Story = { + args: { + sectionTitle: "Top Agents", + agents: [], + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const WithInteraction: Story = { + args: { + sectionTitle: "Top Agents", + agents: mockTopAgents, + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const firstCard = canvas.getAllByRole("store-card")[0]; + await userEvent.click(firstCard); + await expect(firstCard).toHaveAttribute("aria-pressed", "true"); + }, +}; + +export const MultiRowAgents: Story = { + args: { + sectionTitle: "Top Agents", + agents: [ + ...mockTopAgents, + { + agent_name: "Image Recognition AI", + agent_image: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: + "Accurately identify and classify objects in images using 
state-of-the-art machine learning algorithms.", + runs: 60000, + rating: 4.6, + creator_avatar: "https://example.com/avatar4.jpg", + slug: "image-recognition-ai", + creator: "John Doe", + sub_heading: "Image Recognition", + }, + { + agent_name: "Natural Language Processor", + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: + "Analyze and understand human language with advanced NLP techniques.", + runs: 80000, + rating: 4.8, + creator_avatar: "https://example.com/avatar5.jpg", + slug: "natural-language-processor", + creator: "John Doe", + sub_heading: "Natural Language Processing", + }, + { + agent_name: "Sentiment Analyzer", + agent_image: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: + "Determine the emotional tone of text data for customer feedback analysis.", + runs: 45000, + rating: 4.3, + creator_avatar: "https://example.com/avatar6.jpg", + slug: "sentiment-analyzer", + creator: "John Doe", + sub_heading: "Sentiment Analysis", + }, + { + agent_name: "Chatbot Builder", + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: + "Create intelligent chatbots for customer service and engagement.", + runs: 55000, + rating: 4.4, + creator_avatar: "https://example.com/avatar7.jpg", + slug: "chatbot-builder", + creator: "John Doe", + sub_heading: "Chatbot Developer", + }, + { + agent_name: "Predictive Analytics Tool", + agent_image: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + description: + "Forecast future trends and outcomes based on historical data.", + runs: 40000, + rating: 4.2, + creator_avatar: "https://example.com/avatar8.jpg", + slug: "predictive-analytics-tool", + creator: "John Doe", + sub_heading: "Predictive Analytics", + }, + { + agent_name: "Text-to-Speech Converter", + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + description: + "Convert written text into natural-sounding speech in multiple languages.", + runs: 35000, + rating: 4.1, + creator_avatar: "https://example.com/avatar9.jpg", + slug: "text-to-speech-converter", + creator: "John Doe", + sub_heading: "Text-to-Speech", + }, + ], + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const HiddenAvatars: Story = { + args: { + ...Default.args, + hideAvatars: true, + sectionTitle: "Agents with Hidden Avatars", + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.tsx b/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.tsx new file mode 100644 index 000000000000..2a2c00480674 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/AgentsSection.tsx @@ -0,0 +1,106 @@ +"use client"; + +import * as React from "react"; +import { StoreCard } from "@/components/agptui/StoreCard"; +import { + Carousel, + CarouselContent, + CarouselItem, +} from "@/components/ui/carousel"; +import { useRouter } from "next/navigation"; +import { cn } from "@/lib/utils"; + +export interface Agent { + slug: string; + agent_name: string; + agent_image: string; + creator: string; + creator_avatar: string; + sub_heading: string; + description: string; + runs: number; + rating: number; +} + +interface AgentsSectionProps { + sectionTitle: string; + agents: Agent[]; + hideAvatars?: boolean; +} + +export const AgentsSection: React.FC = ({ + sectionTitle, + agents: allAgents, + hideAvatars = 
false, +}) => { + const router = useRouter(); + + // TODO: Update this when we have pagination + const displayedAgents = allAgents; + + const handleCardClick = (creator: string, slug: string) => { + router.push( + `/marketplace/agent/${encodeURIComponent(creator)}/${encodeURIComponent(slug)}`, + ); + }; + + return ( +
+
+
+ {sectionTitle} +
+ {!displayedAgents || displayedAgents.length === 0 ? ( +
+ No agents found +
+ ) : ( + <> + {/* Mobile Carousel View */} + + + {displayedAgents.map((agent, index) => ( + + handleCardClick(agent.creator, agent.slug)} + /> + + ))} + + + +
+ {displayedAgents.map((agent, index) => ( + handleCardClick(agent.creator, agent.slug)} + /> + ))} +
+ + )} +
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.stories.tsx b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.stories.tsx new file mode 100644 index 000000000000..96c184ae7634 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.stories.tsx @@ -0,0 +1,121 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { FeaturedCreators } from "./FeaturedCreators"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Composite/Featured Creators", + component: FeaturedCreators, + parameters: { + layout: { + center: true, + fullscreen: true, + padding: 0, + }, + }, + tags: ["autodocs"], + argTypes: { + featuredCreators: { control: "object" }, + // onCardClick: { action: "cardClicked" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +const defaultCreators = [ + { + name: "AI Innovator", + username: "ai_innovator", + description: + "Pushing the boundaries of AI technology with cutting-edge solutions and innovative approaches to machine learning.", + avatar_url: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + num_agents: 15, + }, + { + name: "Code Wizard", + username: "code_wizard", + description: + "Crafting elegant solutions with AI and helping others learn the magic of coding.", + avatar_url: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + num_agents: 8, + }, + { + name: "Data Alchemist", + username: "data_alchemist", + description: + "Transforming raw data into AI gold. Specializing in data processing and analytics.", + avatar_url: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + num_agents: 12, + }, + { + name: "ML Master", + username: "ml_master", + description: + "Specializing in machine learning algorithms and neural network architectures.", + avatar_url: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + num_agents: 20, + }, +]; + +export const Default: Story = { + args: { + featuredCreators: defaultCreators, + // onCardClick: (creatorName) => console.log(`Clicked on ${creatorName}`), + }, +}; + +export const SingleCreator: Story = { + args: { + featuredCreators: [defaultCreators[0]], + // onCardClick: (creatorName) => console.log(`Clicked on ${creatorName}`), + }, +}; + +export const ManyCreators: Story = { + args: { + featuredCreators: [ + ...defaultCreators, + { + name: "NLP Ninja", + username: "nlp_ninja", + description: + "Expert in natural language processing and text analysis systems.", + avatar_url: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + num_agents: 18, + }, + { + name: "AI Explorer", + username: "ai_explorer", + description: + "Discovering new frontiers in artificial intelligence and autonomous systems.", + avatar_url: + "https://upload.wikimedia.org/wikipedia/commons/c/c5/Big_buck_bunny_poster_big.jpg", + num_agents: 25, + }, + ], + // onCardClick: (creatorName) => console.log(`Clicked on ${creatorName}`), + }, +}; + +export const WithInteraction: Story = { + args: { + featuredCreators: defaultCreators, + // onCardClick: (creatorName) => console.log(`Clicked on ${creatorName}`), + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const creatorCards = canvas.getAllByRole("creator-card"); + const firstCreatorCard = creatorCards[0]; + + await userEvent.hover(firstCreatorCard); + await 
userEvent.click(firstCreatorCard); + + // Check if the card has the expected hover and click effects + await expect(firstCreatorCard).toHaveClass("hover:shadow-lg"); + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.tsx b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.tsx new file mode 100644 index 000000000000..bca4c1fc859f --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedCreators.tsx @@ -0,0 +1,56 @@ +"use client"; + +import * as React from "react"; +import { CreatorCard } from "@/components/agptui/CreatorCard"; +import { useRouter } from "next/navigation"; + +export interface FeaturedCreator { + name: string; + username: string; + description: string; + avatar_url: string; + num_agents: number; +} + +interface FeaturedCreatorsProps { + title?: string; + featuredCreators: FeaturedCreator[]; +} + +export const FeaturedCreators: React.FC = ({ + featuredCreators, + title = "Featured Creators", +}) => { + const router = useRouter(); + + const handleCardClick = (creator: string) => { + router.push(`/marketplace/creator/${encodeURIComponent(creator)}`); + }; + + // Only show first 4 creators + const displayedCreators = featuredCreators.slice(0, 4); + + return ( +
+
+

+ {title} +

+ +
+ {displayedCreators.map((creator, index) => ( + handleCardClick(creator.username)} + index={index} + /> + ))} +
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.stories.tsx b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.stories.tsx new file mode 100644 index 000000000000..b57b6477f341 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.stories.tsx @@ -0,0 +1,133 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { FeaturedAgent, FeaturedSection } from "./FeaturedSection"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Composite/Featured Agents", + component: FeaturedSection, + parameters: { + layout: { + center: true, + fullscreen: true, + padding: 0, + }, + }, + tags: ["autodocs"], + argTypes: { + featuredAgents: { control: "object" }, + // onCardClick: { action: "clicked" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +const mockFeaturedAgents = [ + { + agent_name: "Personalized Morning Coffee Newsletter example of three lines", + sub_heading: + "Transform ideas into breathtaking images with this AI-powered Image Generator.", + creator: "AI Solutions Inc.", + description: + "Elevate your web content with this powerful AI Webpage Copy Improver. Designed for marketers, SEO specialists, and web developers, this tool analyses and enhances website copy for maximum impact. Using advanced language models, it optimizes text for better clarity, SEO performance, and increased conversion rates.", + runs: 50000, + rating: 4.7, + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + creator_avatar: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + slug: "personalized-morning-coffee-newsletter", + }, + { + agent_name: "Data Analyzer Lite", + sub_heading: "Basic data analysis tool", + creator: "DataTech", + description: + "A lightweight data analysis tool for basic data processing needs.", + runs: 10000, + rating: 2.8, + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + creator_avatar: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + slug: "data-analyzer-lite", + }, + { + agent_name: "CodeAssist AI", + sub_heading: "Your AI coding companion", + creator: "DevTools Co.", + description: + "An intelligent coding assistant that helps developers write better code faster.", + runs: 1000000, + rating: 4.9, + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + creator_avatar: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + slug: "codeassist-ai", + }, + { + agent_name: "MultiTasker", + sub_heading: "All-in-one productivity suite", + creator: "Productivity Plus", + description: + "A comprehensive productivity suite that combines task management, note-taking, and project planning into one seamless interface. 
Features include smart task prioritization, automated scheduling, and AI-powered insights to help you work more efficiently.", + runs: 75000, + rating: 4.5, + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + creator_avatar: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + slug: "multitasker", + }, + { + agent_name: "QuickTask", + sub_heading: "Fast task automation", + creator: "EfficientWorks", + description: "Simple and efficient task automation tool.", + runs: 50000, + rating: 4.2, + agent_image: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + creator_avatar: + "https://framerusercontent.com/images/KCIpxr9f97EGJgpaoqnjKsrOPwI.jpg", + slug: "quicktask", + }, +] satisfies FeaturedAgent[]; + +export const Default: Story = { + args: { + featuredAgents: mockFeaturedAgents, + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const SingleAgent: Story = { + args: { + featuredAgents: [mockFeaturedAgents[0]], + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const NoAgents: Story = { + args: { + featuredAgents: [], + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, +}; + +export const WithInteraction: Story = { + args: { + featuredAgents: mockFeaturedAgents, + // onCardClick: (agentName: string) => console.log(`Clicked on ${agentName}`), + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const featuredCard = canvas.getByText( + "Personalized Morning Coffee Newsletter example of three lines", + ); + + await userEvent.hover(featuredCard); + await userEvent.click(featuredCard); + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx new file mode 100644 index 000000000000..5c1d257d922c --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/FeaturedSection.tsx @@ -0,0 +1,111 @@ +"use client"; + +import * as React from "react"; +import { FeaturedStoreCard } from "@/components/agptui/FeaturedStoreCard"; +import { + Carousel, + CarouselContent, + CarouselItem, + CarouselPrevious, + CarouselNext, + CarouselIndicator, +} from "@/components/ui/carousel"; +import { useCallback, useState } from "react"; +import { useRouter } from "next/navigation"; + +const BACKGROUND_COLORS = [ + "bg-violet-200 dark:bg-violet-800", // #ddd6fe / #5b21b6 + "bg-blue-200 dark:bg-blue-800", // #bfdbfe / #1e3a8a + "bg-green-200 dark:bg-green-800", // #bbf7d0 / #065f46 +]; + +export interface FeaturedAgent { + slug: string; + agent_name: string; + agent_image: string; + creator: string; + creator_avatar: string; + sub_heading: string; + description: string; + runs: number; + rating: number; +} + +interface FeaturedSectionProps { + featuredAgents: FeaturedAgent[]; +} + +export const FeaturedSection: React.FC = ({ + featuredAgents, +}) => { + const [currentSlide, setCurrentSlide] = useState(0); + const router = useRouter(); + + const handleCardClick = (creator: string, slug: string) => { + router.push( + `/marketplace/agent/${encodeURIComponent(creator)}/${encodeURIComponent(slug)}`, + ); + }; + + const handlePrevSlide = useCallback(() => { + setCurrentSlide((prev) => + prev === 0 ? 
featuredAgents.length - 1 : prev - 1, + ); + }, [featuredAgents.length]); + + const handleNextSlide = useCallback(() => { + setCurrentSlide((prev) => + prev === featuredAgents.length - 1 ? 0 : prev + 1, + ); + }, [featuredAgents.length]); + + const getBackgroundColor = (index: number) => { + return BACKGROUND_COLORS[index % BACKGROUND_COLORS.length]; + }; + + return ( +
+
+

+ Featured agents +

+ +
+ + + {featuredAgents.map((agent, index) => ( + + handleCardClick(agent.creator, agent.slug)} + /> + + ))} + +
+ + + +
+
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.stories.tsx b/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.stories.tsx new file mode 100644 index 000000000000..11b101bd6bd2 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.stories.tsx @@ -0,0 +1,70 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { HeroSection } from "./HeroSection"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Composite/Hero Section", + component: HeroSection, + parameters: { + layout: { + center: true, + fullscreen: true, + padding: 0, + }, + }, + tags: ["autodocs"], + argTypes: { + onSearch: { action: "searched" }, + onFilterChange: { action: "filtersChanged" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: { + onSearch: (query: string) => console.log(`Searched: ${query}`), + onFilterChange: (selectedFilters: string[]) => + console.log(`Filters changed: ${selectedFilters.join(", ")}`), + }, +}; + +export const WithInteraction: Story = { + args: { + onSearch: (query: string) => console.log(`Searched: ${query}`), + onFilterChange: (selectedFilters: string[]) => + console.log(`Filters changed: ${selectedFilters.join(", ")}`), + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const searchInput = canvas.getByRole("store-search-input"); + + await userEvent.type(searchInput, "test query"); + await userEvent.keyboard("{Enter}"); + + await expect(searchInput).toHaveValue("test query"); + + const filterChip = canvas.getByText("Marketing"); + await userEvent.click(filterChip); + + await expect(filterChip).toHaveClass("text-[#474747]"); + }, +}; + +export const EmptySearch: Story = { + args: { + onSearch: (query: string) => console.log(`Searched: ${query}`), + onFilterChange: (selectedFilters: string[]) => + console.log(`Filters changed: ${selectedFilters.join(", ")}`), + }, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + const searchInput = canvas.getByRole("store-search-input"); + + await userEvent.click(searchInput); + await userEvent.keyboard("{Enter}"); + + await expect(searchInput).toHaveValue(""); + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.tsx b/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.tsx new file mode 100644 index 000000000000..61d025c35158 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/HeroSection.tsx @@ -0,0 +1,60 @@ +"use client"; + +import * as React from "react"; +import { SearchBar } from "@/components/agptui/SearchBar"; +import { FilterChips } from "@/components/agptui/FilterChips"; +import { useRouter } from "next/navigation"; + +export const HeroSection: React.FC = () => { + const router = useRouter(); + + function onFilterChange(selectedFilters: string[]) { + const encodedTerm = encodeURIComponent(selectedFilters.join(", ")); + router.push(`/marketplace/search?searchTerm=${encodedTerm}`); + } + + return ( +
+
+
+

+ + Explore AI agents built for{" "} + + + you + +
+ + by the{" "} + + + community + +

+
+

+ Bringing you AI agents designed by thinkers from around the world +

+
+ +
+
+
+ +
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.stories.tsx b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.stories.tsx new file mode 100644 index 000000000000..9326edc3d736 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.stories.tsx @@ -0,0 +1,52 @@ +import type { Meta, StoryObj } from "@storybook/react"; +import { PublishAgentPopout } from "@/components/agptui/composite/PublishAgentPopout"; +import { userEvent, within, expect } from "@storybook/test"; + +const meta = { + title: "AGPT UI/Composite/Publish Agent Popout", + component: PublishAgentPopout, + parameters: { + layout: "centered", + }, + tags: ["autodocs"], + argTypes: { + trigger: { control: "object" }, + }, +} satisfies Meta; + +export default meta; +type Story = StoryObj; + +export const Default: Story = { + args: {}, +}; + +export const WithCustomTrigger: Story = { + args: { + trigger: , + }, +}; + +export const PublishFlow: Story = { + args: {}, + play: async ({ canvasElement }) => { + const canvas = within(canvasElement); + + // Open popout + const publishButton = canvas.getByText("Publish Agent"); + await userEvent.click(publishButton); + + // Select an agent (assuming one exists) + const agentCard = await canvas.findByRole("button", { + name: /select agent/i, + }); + await userEvent.click(agentCard); + + // Click next + const nextButton = canvas.getByText("Next"); + await userEvent.click(nextButton); + + // Fill out info form + // Note: Actual form interactions would need to be added based on PublishAgentInfo implementation + }, +}; diff --git a/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx new file mode 100644 index 000000000000..8842f6dbbb40 --- /dev/null +++ b/autogpt_platform/frontend/src/components/agptui/composite/PublishAgentPopout.tsx @@ -0,0 +1,302 @@ +"use client"; + +import * as React from "react"; +import { + Popover, + PopoverTrigger, + PopoverContent, + PopoverAnchor, +} from "@/components/ui/popover"; +import { PublishAgentSelect } from "../PublishAgentSelect"; +import { + PublishAgentInfo, + PublishAgentInfoInitialData, +} from "../PublishAgentSelectInfo"; +import { PublishAgentAwaitingReview } from "../PublishAgentAwaitingReview"; +import { Button } from "../Button"; +import { + StoreSubmissionRequest, + MyAgentsResponse, +} from "@/lib/autogpt-server-api"; +import { useRouter } from "next/navigation"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { useToast } from "@/components/ui/use-toast"; +interface PublishAgentPopoutProps { + trigger?: React.ReactNode; + openPopout?: boolean; + inputStep?: "select" | "info" | "review"; + submissionData?: StoreSubmissionRequest; +} + +export const PublishAgentPopout: React.FC = ({ + trigger, + openPopout = false, + inputStep = "select", + submissionData = { + name: "", + sub_heading: "", + slug: "", + description: "", + image_urls: [], + agent_id: "", + agent_version: 0, + categories: [], + }, +}) => { + const [step, setStep] = React.useState<"select" | "info" | "review">( + inputStep, + ); + const [myAgents, setMyAgents] = React.useState(null); + const [selectedAgent, setSelectedAgent] = React.useState(null); + const [initialData, setInitialData] = + React.useState({ + agent_id: "", + title: "", + subheader: "", + slug: "", + thumbnailSrc: "", + youtubeLink: "", + category: "", + description: 
"", + }); + const [publishData, setPublishData] = + React.useState(submissionData); + const [selectedAgentId, setSelectedAgentId] = React.useState( + null, + ); + const [selectedAgentVersion, setSelectedAgentVersion] = React.useState< + number | null + >(null); + const [open, setOpen] = React.useState(false); + + const popupId = React.useId(); + const router = useRouter(); + const api = useBackendAPI(); + + const { toast } = useToast(); + + React.useEffect(() => { + console.log("PublishAgentPopout Effect"); + setOpen(openPopout); + setStep(inputStep); + setPublishData(submissionData); + }, [openPopout]); // eslint-disable-line react-hooks/exhaustive-deps + + React.useEffect(() => { + console.log("LoadMyAgents Effect"); + if (open) { + const loadMyAgents = async () => { + try { + const response = await api.getMyAgents(); + setMyAgents(response); + } catch (error) { + console.error("Failed to load my agents:", error); + } + }; + + loadMyAgents(); + } + }, [open, api]); + + const handleClose = () => { + setStep("select"); + setSelectedAgent(null); + setPublishData({ + name: "", + sub_heading: "", + description: "", + image_urls: [], + agent_id: "", + agent_version: 0, + slug: "", + categories: [], + }); + setOpen(false); + }; + + const handleAgentSelect = (agentName: string) => { + setSelectedAgent(agentName); + }; + + const handleNextFromSelect = (agentId: string, agentVersion: number) => { + const selectedAgentData = myAgents?.agents.find( + (agent) => agent.agent_id === agentId, + ); + + const name = selectedAgentData?.agent_name || ""; + const description = selectedAgentData?.description || ""; + setInitialData({ + agent_id: agentId, + title: name, + subheader: "", + description: description, + thumbnailSrc: "", + youtubeLink: "", + category: "", + slug: name.replace(/ /g, "-"), + additionalImages: [], + }); + + setStep("info"); + setSelectedAgentId(agentId); + setSelectedAgentVersion(agentVersion); + }; + + const handleNextFromInfo = async ( + name: string, + subHeading: string, + slug: string, + description: string, + imageUrls: string[], + videoUrl: string, + categories: string[], + ) => { + const missingFields: string[] = []; + + if (!name) missingFields.push("Name"); + if (!subHeading) missingFields.push("Sub-heading"); + if (!description) missingFields.push("Description"); + if (!imageUrls.length) missingFields.push("Image"); + if (!categories.length) missingFields.push("Categories"); + + if (missingFields.length > 0) { + toast({ + title: "Missing Required Fields", + description: `Please fill in: ${missingFields.join(", ")}`, + duration: 3000, + }); + return; + } + + setPublishData({ + name, + sub_heading: subHeading, + description, + image_urls: imageUrls, + video_url: videoUrl, + agent_id: selectedAgentId || "", + agent_version: selectedAgentVersion || 0, + slug, + categories, + }); + + // Create store submission + try { + const submission = await api.createStoreSubmission({ + name: name, + sub_heading: subHeading, + description: description, + image_urls: imageUrls, + video_url: videoUrl, + agent_id: selectedAgentId || "", + agent_version: selectedAgentVersion || 0, + slug: slug.replace(/\s+/g, "-"), + categories: categories, + }); + console.log("Store submission created:", submission); + } catch (error) { + console.error("Error creating store submission:", error); + } + setStep("review"); + }; + + const handleBack = () => { + if (step === "info") { + setStep("select"); + } else if (step === "review") { + setStep("info"); + } + }; + + const renderContent = () => { + switch (step) 
{ + case "select": + return ( +
+
+
+ ({ + name: agent.agent_name, + id: agent.agent_id, + version: agent.agent_version, + lastEdited: agent.last_edited, + imageSrc: "https://picsum.photos/300/200", // Fallback image if none provided + })) || [] + } + onSelect={handleAgentSelect} + onCancel={handleClose} + onNext={handleNextFromSelect} + onClose={handleClose} + onOpenBuilder={() => router.push("/build")} + /> +
+
+
+ ); + case "info": + return ( +
+
+
+ +
+
+
+ ); + case "review": + return publishData ? ( +
+
+
+ { + router.push("/marketplace/dashboard"); + handleClose(); + }} + /> +
+
+
+ ) : null; + } + }; + + return ( + { + if (isOpen !== open) { + setOpen(isOpen); + } + }} + > + + {trigger || } + + +
+
+ + + {renderContent()} + +
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/auth/AuthBottomText.tsx b/autogpt_platform/frontend/src/components/auth/AuthBottomText.tsx new file mode 100644 index 000000000000..7dcaecf48930 --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/AuthBottomText.tsx @@ -0,0 +1,37 @@ +import { cn } from "@/lib/utils"; +import Link from "next/link"; + +interface Props { + className?: string; + text: string; + linkText?: string; + href?: string; +} + +export default function AuthBottomText({ + className = "", + text, + linkText, + href = "", +}: Props) { + return ( +
+ + {text} + + {linkText && ( + + {linkText} + + )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/auth/AuthButton.tsx b/autogpt_platform/frontend/src/components/auth/AuthButton.tsx new file mode 100644 index 000000000000..be8c86f981c4 --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/AuthButton.tsx @@ -0,0 +1,36 @@ +import { ReactNode } from "react"; +import { Button } from "../ui/button"; +import { FaSpinner } from "react-icons/fa"; + +interface Props { + children?: ReactNode; + onClick: () => void; + isLoading?: boolean; + disabled?: boolean; + type?: "button" | "submit" | "reset"; +} + +export default function AuthButton({ + children, + onClick, + isLoading = false, + disabled = false, + type = "button", +}: Props) { + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/components/auth/AuthCard.tsx b/autogpt_platform/frontend/src/components/auth/AuthCard.tsx new file mode 100644 index 000000000000..c23ed0511e74 --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/AuthCard.tsx @@ -0,0 +1,22 @@ +import { ReactNode } from "react"; +import { cn } from "@/lib/utils"; + +interface Props { + children: ReactNode; + className?: string; +} + +export default function AuthCard({ children, className }: Props) { + return ( +
+
+ {children} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/auth/AuthFeedback.tsx b/autogpt_platform/frontend/src/components/auth/AuthFeedback.tsx new file mode 100644 index 000000000000..216a01b3660b --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/AuthFeedback.tsx @@ -0,0 +1,16 @@ +interface Props { + message?: string | null; + isError?: boolean; +} + +export default function AuthFeedback({ message = "", isError = false }: Props) { + return ( +
+ {isError ? ( +
{message}
+ ) : ( +
{message}
+ )} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/auth/AuthHeader.tsx b/autogpt_platform/frontend/src/components/auth/AuthHeader.tsx new file mode 100644 index 000000000000..221a40b4611b --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/AuthHeader.tsx @@ -0,0 +1,13 @@ +import { ReactNode } from "react"; + +interface Props { + children: ReactNode; +} + +export default function AuthHeader({ children }: Props) { + return ( +
+ {children} +
+ ); +} diff --git a/autogpt_platform/frontend/src/components/auth/PasswordInput.tsx b/autogpt_platform/frontend/src/components/auth/PasswordInput.tsx new file mode 100644 index 000000000000..8bafe0641d1f --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/PasswordInput.tsx @@ -0,0 +1,58 @@ +import { forwardRef, useState } from "react"; +import { EyeIcon, EyeOffIcon } from "lucide-react"; +import { Button } from "@/components/ui/button"; +import { Input, InputProps } from "@/components/ui/input"; +import { cn } from "@/lib/utils"; + +const PasswordInput = forwardRef( + ({ className, ...props }, ref) => { + const [showPassword, setShowPassword] = useState(false); + const disabled = + props.value === "" || props.value === undefined || props.disabled; + + return ( +
+ + + + {/* hides browsers password toggles */} + +
+ ); + }, +); +PasswordInput.displayName = "PasswordInput"; + +export { PasswordInput }; diff --git a/autogpt_platform/frontend/src/components/auth/index.ts b/autogpt_platform/frontend/src/components/auth/index.ts new file mode 100644 index 000000000000..348b6997ff2f --- /dev/null +++ b/autogpt_platform/frontend/src/components/auth/index.ts @@ -0,0 +1,15 @@ +import AuthBottomText from "./AuthBottomText"; +import AuthButton from "./AuthButton"; +import AuthCard from "./AuthCard"; +import AuthFeedback from "./AuthFeedback"; +import AuthHeader from "./AuthHeader"; +import { PasswordInput } from "./PasswordInput"; + +export { + AuthBottomText, + AuthButton, + AuthCard, + AuthFeedback, + AuthHeader, + PasswordInput, +}; diff --git a/autogpt_platform/frontend/src/components/cronScheduler.tsx b/autogpt_platform/frontend/src/components/cronScheduler.tsx new file mode 100644 index 000000000000..9625b2dd19f5 --- /dev/null +++ b/autogpt_platform/frontend/src/components/cronScheduler.tsx @@ -0,0 +1,422 @@ +import { useState } from "react"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { Label } from "@/components/ui/label"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { + Dialog, + DialogContent, + DialogTitle, + DialogTrigger, +} from "@/components/ui/dialog"; +import { Separator } from "./ui/separator"; +import { CronExpressionManager } from "@/lib/monitor/cronExpressionManager"; + +interface CronSchedulerProps { + setOpen: React.Dispatch>; + open: boolean; + afterCronCreation: (cronExpression: string) => void; +} + +export function CronScheduler({ + setOpen, + open, + afterCronCreation, +}: CronSchedulerProps) { + const [frequency, setFrequency] = useState< + "minute" | "hour" | "daily" | "weekly" | "monthly" | "yearly" | "custom" + >("minute"); + const [selectedDays, setSelectedDays] = useState([]); + const [selectedTime, setSelectedTime] = useState("00:00"); + const [showCustomDays, setShowCustomDays] = useState(false); + const [selectedMinute, setSelectedMinute] = useState("0"); + const [customInterval, setCustomInterval] = useState<{ + value: number; + unit: "minutes" | "hours" | "days"; + }>({ value: 1, unit: "minutes" }); + + // const [endType, setEndType] = useState<"never" | "on" | "after">("never"); + // const [endDate, setEndDate] = useState(); + // const [occurrences, setOccurrences] = useState(1); + + const weekDays = [ + { label: "Su", value: 0 }, + { label: "Mo", value: 1 }, + { label: "Tu", value: 2 }, + { label: "We", value: 3 }, + { label: "Th", value: 4 }, + { label: "Fr", value: 5 }, + { label: "Sa", value: 6 }, + ]; + + const months = [ + { label: "Jan", value: "January" }, + { label: "Feb", value: "February" }, + { label: "Mar", value: "March" }, + { label: "Apr", value: "April" }, + { label: "May", value: "May" }, + { label: "Jun", value: "June" }, + { label: "Jul", value: "July" }, + { label: "Aug", value: "August" }, + { label: "Sep", value: "September" }, + { label: "Oct", value: "October" }, + { label: "Nov", value: "November" }, + { label: "Dec", value: "December" }, + ]; + + const cron_manager = new CronExpressionManager(); + + return ( + + + Schedule Task +
+
+ + + + + {frequency === "hour" && ( +
+ + +
+ )} + + {frequency === "custom" && ( +
+ + + setCustomInterval({ + ...customInterval, + value: parseInt(e.target.value), + }) + } + /> + +
+ )} +
+ + {frequency === "weekly" && ( +
+
+ + + + +
+
+ {weekDays.map((day) => ( + + ))} +
+
+ )} + {frequency === "monthly" && ( +
+ +
+ + + + +
+ {showCustomDays && ( +
+ {Array.from({ length: 31 }, (_, i) => ( + + ))} +
+ )} +
+ )} + {frequency === "yearly" && ( +
+ +
+ +
+
+ {months.map((month, index) => ( + + ))} +
+
+ )} + + {frequency !== "minute" && frequency !== "hour" && ( +
+ + setSelectedTime(e.target.value)} + /> +
+ )} + + + {/* + + On the backend we use standard cron expressions, which makes it challenging to add an end date or to stop execution after a certain time using cron expressions alone (standard cron has limitations, such as the lack of a year field). + + We could also use ranges in the cron expression to approximate end dates, but that doesn't cover all cases and sometimes breaks. + + To end a schedule automatically, we would need to store the end date and the number of occurrences in the database and modify scheduler.add_job accordingly. Currently, a schedule can only be stopped manually from the monitor tab. + + See the sketch after the commented-out end-date UI below for how the dialog state maps onto a plain 5-field cron expression. + + */} + + {/*
+ + + setEndType(value) + } + > +
+ + +
+ +
+ + + + + + + + date < new Date()} + fromDate={new Date()} + /> + + +
+
+ + + setOccurrences(Number(e.target.value))} + /> + times +
+
+
*/} + +
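For reference, here is a minimal sketch of how the dialog state for the weekly case could map onto a standard 5-field cron expression (minute, hour, day-of-month, month, day-of-week). This is illustrative only: the component actually delegates to `CronExpressionManager` from `@/lib/monitor/cronExpressionManager`, whose implementation is not part of this diff, and the helper name below is hypothetical.

```ts
// Hypothetical helper, not the real CronExpressionManager: builds a standard
// 5-field cron expression for a weekly schedule at a given time of day.
// Note there is no year field, which is why an end date cannot be encoded in
// the expression itself and has to be stored alongside the schedule instead.
export function weeklyCronExpression(
  selectedTime: string, // "HH:MM", e.g. "09:30"
  selectedDays: number[], // 0 (Sunday) through 6 (Saturday)
): string {
  const [hour, minute] = selectedTime.split(":").map(Number);
  const daysOfWeek = selectedDays.length
    ? [...selectedDays].sort((a, b) => a - b).join(",")
    : "*";
  return `${minute} ${hour} * * ${daysOfWeek}`;
}

// weeklyCronExpression("09:30", [1, 3, 5]) => "30 9 * * 1,3,5"
```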
+ + +
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/customedge.css b/autogpt_platform/frontend/src/components/customedge.css new file mode 100644 index 000000000000..6babb8e77065 --- /dev/null +++ b/autogpt_platform/frontend/src/components/customedge.css @@ -0,0 +1,48 @@ +.edge-label-renderer { + position: absolute; + pointer-events: all; +} + +.edge-label-button { + width: 20px; + height: 20px; + background: #eee; + border: 1px solid #fff; + cursor: pointer; + border-radius: 50%; + display: flex; + justify-content: center; + align-items: center; + padding: 0; + color: #555; + opacity: 0; + transition: + opacity 0.2s ease-in-out, + background-color 0.2s ease-in-out; +} + +.edge-label-button.visible { + opacity: 1; +} + +.edge-label-button:hover { + box-shadow: 0 0 6px 2px rgba(0, 0, 0, 0.08); + background: #f0f0f0; +} + +.edge-label-button svg { + width: 14px; + height: 14px; +} + +.react-flow__edge-interaction { + cursor: pointer; +} + +.react-flow__edges > svg:has(> g.selected) { + z-index: 10 !important; +} + +.react-flow__edgelabel-renderer { + z-index: 11 !important; +} diff --git a/autogpt_platform/frontend/src/components/customnode.css b/autogpt_platform/frontend/src/components/customnode.css new file mode 100644 index 000000000000..8e4ed0c87df2 --- /dev/null +++ b/autogpt_platform/frontend/src/components/customnode.css @@ -0,0 +1,159 @@ +.custom-node { + color: #000000; + box-sizing: border-box; + transition: border-color 0.3s ease-in-out; +} + +.custom-node [data-id="input-handles"], +.custom-node [data-id="input-handles"] > div > div { + margin-bottom: 1rem; +} + +.custom-node input:not([type="checkbox"]), +.custom-node textarea, +.custom-node select, +.custom-node [data-id^="date-picker"], +.custom-node [data-list-container], +.custom-node [data-add-item], +.custom-node [data-content-settings]. 
.array-item-container { + display: flex; + align-items: center; + min-width: calc(100% - 2.5rem); + max-width: 100%; +} + +.custom-node .custom-switch { + padding: 0.5rem 1.25rem; + display: flex; + align-items: center; + justify-content: space-between; +} + +.error-message { + color: #d9534f; + font-size: 13px; + padding-left: 0.5rem; +} + +/* Existing styles */ +.handle-container { + display: flex; + position: relative; + margin-bottom: 0px; + padding: 5px; + min-height: 44px; + height: 100%; +} + +.react-flow__handle { + background: transparent; + width: auto; + height: auto; + border: 0; + position: relative; + transform: none; +} + +.border-error { + border: 1px solid #d9534f; +} + +.select-input { + width: 100%; + padding: 5px; + border-radius: 4px; + border: 1px solid #000; + background: #fff; + color: #000; +} + +.radio-label { + display: block; + margin: 5px 0; + color: #000; +} + +.number-input { + width: 100%; + padding: 5px; + border-radius: 4px; + background: #fff; + color: #000; +} + +.array-item-container { + display: flex; + align-items: center; + margin-bottom: 5px; +} + +.array-item-input { + flex-grow: 1; + padding: 5px; + border-radius: 4px; + border: 1px solid #000; + background: #fff; + color: #000; +} + +.array-item-remove { + background: #d9534f; + border: none; + color: white; + cursor: pointer; + margin-left: 5px; + border-radius: 4px; + padding: 5px 10px; +} + +.array-item-add { + background: #5bc0de; + border: none; + color: white; + cursor: pointer; + border-radius: 4px; + padding: 5px 10px; + margin-top: 5px; +} + +.error-message { + color: #d9534f; + font-size: 13px; + margin-top: 5px; + margin-left: 5px; +} + +/* Styles for node states */ +.completed { + border-color: #27ae60; /* Green border for completed nodes */ +} + +.running { + border-color: #f39c12; /* Orange border for running nodes */ +} + +.failed { + border-color: #c0392b; /* Red border for failed nodes */ +} + +.incomplete { + border-color: #9f14ab; /* Pink border for incomplete nodes */ +} + +.queued { + border-color: #25e6e6; /* Cyan border for queued nodes */ +} + +.custom-switch { + padding-left: 2px; +} + +input[type="number"]::-webkit-outer-spin-button, +input[type="number"]::-webkit-inner-spin-button { + -webkit-appearance: none; + margin: 0; +} + +input[type="number"] { + -moz-appearance: textfield; +} diff --git a/autogpt_platform/frontend/src/components/edit/control/BlocksControl.tsx b/autogpt_platform/frontend/src/components/edit/control/BlocksControl.tsx new file mode 100644 index 000000000000..dc5addd17030 --- /dev/null +++ b/autogpt_platform/frontend/src/components/edit/control/BlocksControl.tsx @@ -0,0 +1,285 @@ +import React, { useState, useMemo } from "react"; +import { Card, CardContent, CardHeader } from "@/components/ui/card"; +import { Label } from "@/components/ui/label"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { TextRenderer } from "@/components/ui/render"; +import { ScrollArea } from "@/components/ui/scroll-area"; +import { CustomNode } from "@/components/CustomNode"; +import { beautifyString } from "@/lib/utils"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; +import { Block, BlockUIType, SpecialBlockID } from "@/lib/autogpt-server-api"; +import { MagnifyingGlassIcon, PlusIcon } from "@radix-ui/react-icons"; +import { IconToyBrick } from "@/components/ui/icons"; +import { getPrimaryCategoryColor } from "@/lib/utils"; +import { + Tooltip, + TooltipContent, + 
TooltipTrigger, +} from "@/components/ui/tooltip"; +import { GraphMeta } from "@/lib/autogpt-server-api"; + +interface BlocksControlProps { + blocks: Block[]; + addBlock: ( + id: string, + name: string, + hardcodedValues: Record, + ) => void; + pinBlocksPopover: boolean; + flows: GraphMeta[]; + nodes: CustomNode[]; +} + +/** + * A React functional component that displays a control for managing blocks. + * + * @component + * @param {Object} BlocksControlProps - The properties for the BlocksControl component. + * @param {Block[]} BlocksControlProps.blocks - An array of blocks to be displayed and filtered. + * @param {(id: string, name: string) => void} BlocksControlProps.addBlock - A function to call when a block is added. + * @returns The rendered BlocksControl component. + */ +export const BlocksControl: React.FC = ({ + blocks, + addBlock, + pinBlocksPopover, + flows, + nodes, +}) => { + const [searchQuery, setSearchQuery] = useState(""); + const [selectedCategory, setSelectedCategory] = useState(null); + + const graphHasWebhookNodes = nodes.some( + (n) => n.data.uiType == BlockUIType.WEBHOOK, + ); + const graphHasInputNodes = nodes.some( + (n) => n.data.uiType == BlockUIType.INPUT, + ); + + const filteredAvailableBlocks = useMemo(() => { + const blockList = blocks + .filter((b) => b.uiType !== BlockUIType.AGENT) + .sort((a, b) => a.name.localeCompare(b.name)); + const agentBlockList = flows.map( + (flow) => + ({ + id: SpecialBlockID.AGENT, + name: flow.name, + description: + `Ver.${flow.version}` + + (flow.description ? ` | ${flow.description}` : ""), + categories: [{ category: "AGENT", description: "" }], + inputSchema: flow.input_schema, + outputSchema: flow.output_schema, + staticOutput: false, + uiType: BlockUIType.AGENT, + uiKey: flow.id, + costs: [], + hardcodedValues: { + graph_id: flow.id, + graph_version: flow.version, + input_schema: flow.input_schema, + output_schema: flow.output_schema, + }, + }) satisfies Block, + ); + + return blockList + .concat(agentBlockList) + .filter( + (block: Block) => + (block.name.toLowerCase().includes(searchQuery.toLowerCase()) || + beautifyString(block.name) + .toLowerCase() + .includes(searchQuery.toLowerCase()) || + block.description + .toLowerCase() + .includes(searchQuery.toLowerCase())) && + (!selectedCategory || + block.categories.some((cat) => cat.category === selectedCategory)), + ) + .map((block) => ({ + ...block, + notAvailable: + (block.uiType == BlockUIType.WEBHOOK && + graphHasWebhookNodes && + "Agents can only have one webhook-triggered block") || + (block.uiType == BlockUIType.WEBHOOK && + graphHasInputNodes && + "Webhook-triggered blocks can't be used together with input blocks") || + (block.uiType == BlockUIType.INPUT && + graphHasWebhookNodes && + "Input blocks can't be used together with a webhook-triggered block") || + null, + })); + }, [ + blocks, + flows, + searchQuery, + selectedCategory, + graphHasInputNodes, + graphHasWebhookNodes, + ]); + + const resetFilters = React.useCallback(() => { + setSearchQuery(""); + setSelectedCategory(null); + }, []); + + // Extract unique categories from blocks + const categories = Array.from( + new Set([ + null, + ...blocks + .flatMap((block) => block.categories.map((cat) => cat.category)) + .sort(), + ]), + ); + + return ( + open || resetFilters()} + > + + + + + + + Blocks + + + + +
+ +
+
+ + setSearchQuery(e.target.value)} + className="rounded-lg px-8 py-5 dark:bg-slate-800 dark:text-white" + data-id="blocks-control-search-input" + /> +
+
+ {categories.map((category) => { + const color = getPrimaryCategoryColor([ + { category: category || "All", description: "" }, + ]); + const colorClass = + selectedCategory === category ? `${color}` : ""; + return ( +
+ setSelectedCategory( + selectedCategory === category ? null : category, + ) + } + > + {beautifyString((category || "All").toLowerCase())} +
+ ); + })} +
+
+ + + {filteredAvailableBlocks.map((block) => ( + + !block.notAvailable && + addBlock(block.id, block.name, block?.hardcodedValues || {}) + } + title={block.notAvailable ?? undefined} + > +
+ +
+
+ + + + + + +
+
+ +
+
+
+ ))} +
+
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/edit/control/ControlPanel.tsx b/autogpt_platform/frontend/src/components/edit/control/ControlPanel.tsx new file mode 100644 index 000000000000..870d34582283 --- /dev/null +++ b/autogpt_platform/frontend/src/components/edit/control/ControlPanel.tsx @@ -0,0 +1,86 @@ +import { Card, CardContent } from "@/components/ui/card"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { Button } from "@/components/ui/button"; +import { Separator } from "@/components/ui/separator"; +import { cn } from "@/lib/utils"; +import React from "react"; + +/** + * Represents a control element for the ControlPanel component. + * @type {Object} Control + * @property {React.ReactNode} icon - The icon of the control, from lucide-react https://lucide.dev/icons/ + * @property {string} label - The label of the control, to be leveraged by Tooltip. + * @property {boolean} [disabled] - Whether the control is disabled. + * @property {() => void} onClick - The function to be executed when the control is clicked. + */ +export type Control = { + icon: React.ReactNode; + label: string; + disabled?: boolean; + onClick: () => void; +}; + +interface ControlPanelProps { + controls: Control[]; + topChildren?: React.ReactNode; + botChildren?: React.ReactNode; + className?: string; +} + +/** + * ControlPanel component displays a panel of controls rendered as icons, with the ability to take in children. + * @param {Object} ControlPanelProps - The properties of the control panel component. + * @param {Array} ControlPanelProps.controls - An array of control objects representing actions to be performed. + * @param {React.ReactNode} ControlPanelProps.topChildren - Content rendered above the controls. + * @param {React.ReactNode} ControlPanelProps.botChildren - Content rendered below the controls. + * @param {string} ControlPanelProps.className - Additional CSS class names for the control panel. + * @returns The rendered control panel component. + */ +export const ControlPanel = ({ + controls, + topChildren, + botChildren, + className, +}: ControlPanelProps) => { + return ( + +
+ {topChildren} + + {controls.map((control, index) => ( + + +
+ +
+
+ + {control.label} + +
+ ))} + + {botChildren} +
+
+
+ ); +}; +export default ControlPanel; diff --git a/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx b/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx new file mode 100644 index 000000000000..bfa0408dbad4 --- /dev/null +++ b/autogpt_platform/frontend/src/components/edit/control/SaveControl.tsx @@ -0,0 +1,169 @@ +import React, { useCallback, useEffect } from "react"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; +import { Card, CardContent, CardFooter } from "@/components/ui/card"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import { GraphMeta } from "@/lib/autogpt-server-api"; +import { Label } from "@/components/ui/label"; +import { IconSave } from "@/components/ui/icons"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { useToast } from "@/components/ui/use-toast"; + +interface SaveControlProps { + agentMeta: GraphMeta | null; + agentName: string; + agentDescription: string; + canSave: boolean; + onSave: () => void; + onNameChange: (name: string) => void; + onDescriptionChange: (description: string) => void; + pinSavePopover: boolean; +} + +/** + * A SaveControl component to be used within the ControlPanel. It allows the user to save the agent. + * @param {Object} SaveControlProps - The properties of the SaveControl component. + * @param {GraphMeta | null} SaveControlProps.agentMeta - The agent's metadata, or null if creating a new agent. + * @param {string} SaveControlProps.agentName - The agent's name. + * @param {string} SaveControlProps.agentDescription - The agent's description. + * @param {boolean} SaveControlProps.canSave - Whether the button to save the agent should be enabled. + * @param {() => void} SaveControlProps.onSave - Function to save the agent. + * @param {(name: string) => void} SaveControlProps.onNameChange - Function to handle name changes. + * @param {(description: string) => void} SaveControlProps.onDescriptionChange - Function to handle description changes. + * @returns The SaveControl component. + */ +export const SaveControl = ({ + agentMeta, + canSave, + onSave, + agentName, + onNameChange, + agentDescription, + onDescriptionChange, + pinSavePopover, +}: SaveControlProps) => { + /** + * Note for improvement: + * At the moment we are leveraging onDescriptionChange and onNameChange to handle the changes in the description and name of the agent. + * We should migrate this to be handled with form controls and a form library. + */ + + const handleSave = useCallback(() => { + onSave(); + }, [onSave]); + + const { toast } = useToast(); + + useEffect(() => { + const handleKeyDown = (event: KeyboardEvent) => { + if ((event.ctrlKey || event.metaKey) && event.key === "s") { + event.preventDefault(); // Stop the browser default action + handleSave(); // Call your save function + toast({ + duration: 2000, + title: "All changes saved successfully!", + }); + } + }; + + window.addEventListener("keydown", handleKeyDown); + + return () => { + window.removeEventListener("keydown", handleKeyDown); + }; + }, [handleSave, toast]); + + return ( + + + + + + + + Save + + + + +
+ + onNameChange(e.target.value)} + data-id="save-control-name-input" + data-testid="save-control-name-input" + maxLength={100} + /> + + onDescriptionChange(e.target.value)} + data-id="save-control-description-input" + data-testid="save-control-description-input" + maxLength={500} + /> + {agentMeta?.version && ( + <> + + + + )} +
+
+ + + +
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/feature-flag/feature-flag-provider.tsx b/autogpt_platform/frontend/src/components/feature-flag/feature-flag-provider.tsx new file mode 100644 index 000000000000..a9b133ff920e --- /dev/null +++ b/autogpt_platform/frontend/src/components/feature-flag/feature-flag-provider.tsx @@ -0,0 +1,15 @@ +import { LDProvider } from "launchdarkly-react-client-sdk"; +import { ReactNode } from "react"; + +export function LaunchDarklyProvider({ children }: { children: ReactNode }) { + const clientId = process.env.NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID; + const enabled = process.env.NEXT_PUBLIC_LAUNCHDARKLY_ENABLED === "true"; + + if (!enabled) return <>{children}; + + if (!clientId) { + throw new Error("NEXT_PUBLIC_LAUNCHDARKLY_CLIENT_ID is not defined"); + } + + return {children}; +} diff --git a/autogpt_platform/frontend/src/components/feature-flag/with-feature-flag.tsx b/autogpt_platform/frontend/src/components/feature-flag/with-feature-flag.tsx new file mode 100644 index 000000000000..79ed9d92ce0d --- /dev/null +++ b/autogpt_platform/frontend/src/components/feature-flag/with-feature-flag.tsx @@ -0,0 +1,46 @@ +"use client"; + +import { useFlags } from "launchdarkly-react-client-sdk"; +import { useRouter } from "next/navigation"; +import { useEffect, useState } from "react"; + +export function withFeatureFlag

<P extends object>( + WrappedComponent: React.ComponentType<P>, + flagKey: string, +) { + return function FeatureFlaggedComponent(props: P) { + const flags = useFlags(); + const router = useRouter(); + const [hasFlagLoaded, setHasFlagLoaded] = useState(false); + + useEffect(() => { + // Only proceed if flags received + if (flags && flagKey in flags) { + setHasFlagLoaded(true); + } + }, [flags]); + + useEffect(() => { + if (hasFlagLoaded && !flags[flagKey]) { + router.push("/404"); + } + }, [hasFlagLoaded, flags, router]); + + // Show loading state until flags loaded + if (!hasFlagLoaded) { + return ( +

+
+
+ ); + } + + // If flag is loaded but false, return null (will redirect) + if (!flags[flagKey]) { + return null; + } + + // Flag is loaded and true, show component + return ; + }; +} diff --git a/autogpt_platform/frontend/src/components/flow.css b/autogpt_platform/frontend/src/components/flow.css new file mode 100644 index 000000000000..cafd54659f15 --- /dev/null +++ b/autogpt_platform/frontend/src/components/flow.css @@ -0,0 +1,103 @@ +/* flow.css or index.css */ + +body { + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", + "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", + "Helvetica Neue", sans-serif; +} + +code { + font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", + monospace; +} + +.modal { + position: absolute; + top: 50%; + left: 50%; + right: auto; + bottom: auto; + margin-right: -50%; + transform: translate(-50%, -50%); + background: #ffffff; + padding: 20px; + border: 1px solid #ccc; + border-radius: 4px; + color: #000000; +} + +.overlay { + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: rgba(0, 0, 0, 0.75); +} + +.modal h2 { + margin-top: 0; +} + +.modal button { + margin-right: 10px; +} + +.modal form { + display: flex; + flex-direction: column; +} + +.modal form div { + margin-bottom: 15px; +} + +.sidebar { + position: fixed; + top: 0; + left: -600px; + width: 350px; + height: calc(100vh - 68px); /* Full height minus top offset */ + background-color: #ffffff; + color: #000000; + padding: 20px; + transition: left 0.3s ease; + z-index: 1000; + overflow-y: auto; + margin-top: 68px; /* Margin to push content below the top fixed area */ +} + +.sidebar.open { + left: 0; +} + +.sidebar h3 { + margin: 0 0 10px; +} + +.sidebar input { + margin: 0 0 10px; +} + +.sidebarNodeRowStyle { + display: flex; + justify-content: space-between; + align-items: center; + background-color: #e2e2e2; + padding: 10px; + margin-bottom: 10px; + border-radius: 10px; + cursor: grab; +} + +.sidebarNodeRowStyle.dragging { + opacity: 0.5; +} + +.flow-container { + position: absolute; + top: 0; + left: 0; + width: 100vw; + height: 100vh; +} diff --git a/autogpt_platform/frontend/src/components/history.ts b/autogpt_platform/frontend/src/components/history.ts new file mode 100644 index 000000000000..a9995c83e7a5 --- /dev/null +++ b/autogpt_platform/frontend/src/components/history.ts @@ -0,0 +1,96 @@ +// history.ts +import { CustomNodeData } from "./CustomNode"; +import { CustomEdgeData } from "./CustomEdge"; +import { Edge } from "@xyflow/react"; + +type ActionType = + | "ADD_NODE" + | "DELETE_NODE" + | "ADD_EDGE" + | "DELETE_EDGE" + | "UPDATE_NODE" + | "MOVE_NODE" + | "UPDATE_INPUT" + | "UPDATE_NODE_POSITION"; + +type AddNodePayload = { node: CustomNodeData }; +type DeleteNodePayload = { nodeId: string }; +type AddEdgePayload = { edge: Edge }; +type DeleteEdgePayload = { edgeId: string }; +type UpdateNodePayload = { nodeId: string; newData: Partial }; +type MoveNodePayload = { nodeId: string; position: { x: number; y: number } }; +type UpdateInputPayload = { + nodeId: string; + oldValues: { [key: string]: any }; + newValues: { [key: string]: any }; +}; +type UpdateNodePositionPayload = { + nodeId: string; + oldPosition: { x: number; y: number }; + newPosition: { x: number; y: number }; +}; + +type ActionPayload = + | AddNodePayload + | DeleteNodePayload + | AddEdgePayload + | DeleteEdgePayload + | UpdateNodePayload + | MoveNodePayload + | UpdateInputPayload + | UpdateNodePositionPayload; + +type Action = { + type: 
ActionType; + payload: ActionPayload; + undo: () => void; + redo: () => void; +}; + +class History { + private past: Action[] = []; + private future: Action[] = []; + + push(action: Action) { + this.past.push(action); + this.future = []; + } + + undo() { + const action = this.past.pop(); + if (action) { + action.undo(); + this.future.push(action); + } + } + + redo() { + const action = this.future.pop(); + if (action) { + action.redo(); + this.past.push(action); + } + } + + canUndo(): boolean { + return this.past.length > 0; + } + + canRedo(): boolean { + return this.future.length > 0; + } + + clear() { + this.past = []; + this.future = []; + } + + getHistoryState() { + return { + past: [...this.past], + future: [...this.future], + }; + } +} + +export const history = new History(); diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx new file mode 100644 index 000000000000..1fd21fcc58e7 --- /dev/null +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -0,0 +1,723 @@ +import { z } from "zod"; +import { beautifyString, cn } from "@/lib/utils"; +import { useForm } from "react-hook-form"; +import { Input } from "@/components/ui/input"; +import { Button } from "@/components/ui/button"; +import SchemaTooltip from "@/components/SchemaTooltip"; +import useCredentials from "@/hooks/useCredentials"; +import { zodResolver } from "@hookform/resolvers/zod"; +import { NotionLogoIcon } from "@radix-ui/react-icons"; +import { + FaDiscord, + FaGithub, + FaTwitter, + FaGoogle, + FaMedium, + FaKey, + FaHubspot, +} from "react-icons/fa"; +import { FC, useMemo, useState } from "react"; +import { + CredentialsMetaInput, + CredentialsProviderName, +} from "@/lib/autogpt-server-api/types"; +import { IconKey, IconKeyPlus, IconUserPlus } from "@/components/ui/icons"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { + Form, + FormControl, + FormDescription, + FormField, + FormItem, + FormLabel, + FormMessage, +} from "@/components/ui/form"; +import { + Select, + SelectContent, + SelectItem, + SelectSeparator, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +const fallbackIcon = FaKey; + +// --8<-- [start:ProviderIconsEmbed] +export const providerIcons: Record< + CredentialsProviderName, + React.FC<{ className?: string }> +> = { + anthropic: fallbackIcon, + e2b: fallbackIcon, + github: FaGithub, + google: FaGoogle, + groq: fallbackIcon, + notion: NotionLogoIcon, + nvidia: fallbackIcon, + discord: FaDiscord, + d_id: fallbackIcon, + google_maps: FaGoogle, + jina: fallbackIcon, + ideogram: fallbackIcon, + linear: fallbackIcon, + medium: FaMedium, + mem0: fallbackIcon, + ollama: fallbackIcon, + openai: fallbackIcon, + openweathermap: fallbackIcon, + open_router: fallbackIcon, + pinecone: fallbackIcon, + slant3d: fallbackIcon, + smtp: fallbackIcon, + replicate: fallbackIcon, + reddit: fallbackIcon, + fal: fallbackIcon, + revid: fallbackIcon, + twitter: FaTwitter, + unreal_speech: fallbackIcon, + exa: fallbackIcon, + hubspot: FaHubspot, +}; +// --8<-- [end:ProviderIconsEmbed] + +export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( + | { + success: true; + code: string; + state: string; + } + | { + success: false; + message: string; + } +); + +export const CredentialsInput: FC<{ + 
selfKey: string; + className?: string; + selectedCredentials?: CredentialsMetaInput; + onSelectCredentials: (newValue?: CredentialsMetaInput) => void; +}> = ({ selfKey, className, selectedCredentials, onSelectCredentials }) => { + const api = useBackendAPI(); + const credentials = useCredentials(selfKey); + const [isAPICredentialsModalOpen, setAPICredentialsModalOpen] = + useState(false); + const [ + isUserPasswordCredentialsModalOpen, + setUserPasswordCredentialsModalOpen, + ] = useState(false); + const [isOAuth2FlowInProgress, setOAuth2FlowInProgress] = useState(false); + const [oAuthPopupController, setOAuthPopupController] = + useState(null); + const [oAuthError, setOAuthError] = useState(null); + + if (!credentials || credentials.isLoading) { + return null; + } + + const { + schema, + provider, + providerName, + supportsApiKey, + supportsOAuth2, + supportsUserPassword, + savedApiKeys, + savedOAuthCredentials, + savedUserPasswordCredentials, + oAuthCallback, + } = credentials; + + async function handleOAuthLogin() { + setOAuthError(null); + const { login_url, state_token } = await api.oAuthLogin( + provider, + schema.credentials_scopes, + ); + setOAuth2FlowInProgress(true); + const popup = window.open(login_url, "_blank", "popup=true"); + + if (!popup) { + throw new Error( + "Failed to open popup window. Please allow popups for this site.", + ); + } + + const controller = new AbortController(); + setOAuthPopupController(controller); + controller.signal.onabort = () => { + console.debug("OAuth flow aborted"); + setOAuth2FlowInProgress(false); + popup.close(); + }; + + const handleMessage = async (e: MessageEvent) => { + console.debug("Message received:", e.data); + if ( + typeof e.data != "object" || + !("message_type" in e.data) || + e.data.message_type !== "oauth_popup_result" + ) { + console.debug("Ignoring irrelevant message"); + return; + } + + if (!e.data.success) { + console.error("OAuth flow failed:", e.data.message); + setOAuthError(`OAuth flow failed: ${e.data.message}`); + setOAuth2FlowInProgress(false); + return; + } + + if (e.data.state !== state_token) { + console.error("Invalid state token received"); + setOAuthError("Invalid state token received"); + setOAuth2FlowInProgress(false); + return; + } + + try { + console.debug("Processing OAuth callback"); + const credentials = await oAuthCallback(e.data.code, e.data.state); + console.debug("OAuth callback processed successfully"); + onSelectCredentials({ + id: credentials.id, + type: "oauth2", + title: credentials.title, + provider, + }); + } catch (error) { + console.error("Error in OAuth callback:", error); + setOAuthError( + // type of error is unkown so we need to use String(error) + `Error in OAuth callback: ${ + error instanceof Error ? 
error.message : String(error) + }`, + ); + } finally { + console.debug("Finalizing OAuth flow"); + setOAuth2FlowInProgress(false); + controller.abort("success"); + } + }; + + console.debug("Adding message event listener"); + window.addEventListener("message", handleMessage, { + signal: controller.signal, + }); + + setTimeout( + () => { + console.debug("OAuth flow timed out"); + controller.abort("timeout"); + setOAuth2FlowInProgress(false); + setOAuthError("OAuth flow timed out"); + }, + 5 * 60 * 1000, + ); + } + + const ProviderIcon = providerIcons[provider]; + const modals = ( + <> + {supportsApiKey && ( + setAPICredentialsModalOpen(false)} + onCredentialsCreate={(credsMeta) => { + onSelectCredentials(credsMeta); + setAPICredentialsModalOpen(false); + }} + /> + )} + {supportsOAuth2 && ( + oAuthPopupController?.abort("canceled")} + providerName={providerName} + /> + )} + {supportsUserPassword && ( + setUserPasswordCredentialsModalOpen(false)} + onCredentialsCreate={(creds) => { + onSelectCredentials(creds); + setUserPasswordCredentialsModalOpen(false); + }} + /> + )} + + ); + + // Deselect credentials if they do not exist (e.g. provider was changed) + if ( + selectedCredentials && + !savedApiKeys + .concat(savedOAuthCredentials) + .concat(savedUserPasswordCredentials) + .some((c) => c.id === selectedCredentials.id) + ) { + onSelectCredentials(undefined); + } + + // No saved credentials yet + if ( + savedApiKeys.length === 0 && + savedOAuthCredentials.length === 0 && + savedUserPasswordCredentials.length === 0 + ) { + return ( + <> +
+ + {providerName} Credentials + + +
+
+ {supportsOAuth2 && ( + + )} + {supportsApiKey && ( + + )} + {supportsUserPassword && ( + + )} +
+ {modals} + {oAuthError && ( +
Error: {oAuthError}
+ )} + + ); + } + + const getCredentialCounts = () => ({ + apiKeys: savedApiKeys.length, + oauth: savedOAuthCredentials.length, + userPass: savedUserPasswordCredentials.length, + }); + + const getSingleCredential = () => { + const counts = getCredentialCounts(); + const totalCredentials = Object.values(counts).reduce( + (sum, count) => sum + count, + 0, + ); + + if (totalCredentials !== 1) return null; + + if (counts.apiKeys === 1) return savedApiKeys[0]; + if (counts.oauth === 1) return savedOAuthCredentials[0]; + if (counts.userPass === 1) return savedUserPasswordCredentials[0]; + + return null; + }; + + const singleCredential = getSingleCredential(); + + if (singleCredential) { + if (!selectedCredentials) { + onSelectCredentials({ + id: singleCredential.id, + type: singleCredential.type, + provider, + title: singleCredential.title, + }); + } + return null; + } + + function handleValueChange(newValue: string) { + if (newValue === "sign-in") { + // Trigger OAuth2 sign in flow + handleOAuthLogin(); + } else if (newValue === "add-api-key") { + // Open API key dialog + setAPICredentialsModalOpen(true); + } else { + const selectedCreds = savedApiKeys + .concat(savedOAuthCredentials) + .concat(savedUserPasswordCredentials) + .find((c) => c.id == newValue)!; + + onSelectCredentials({ + id: selectedCreds.id, + type: selectedCreds.type, + provider: provider, + // title: customTitle, // TODO: add input for title + }); + } + } + + // Saved credentials exist + return ( + <> +
+ + {providerName} Credentials + + +
+ + {modals} + {oAuthError && ( +
Error: {oAuthError}
+ )} + + ); +}; + +export const APIKeyCredentialsModal: FC<{ + credentialsFieldName: string; + open: boolean; + onClose: () => void; + onCredentialsCreate: (creds: CredentialsMetaInput) => void; +}> = ({ credentialsFieldName, open, onClose, onCredentialsCreate }) => { + const credentials = useCredentials(credentialsFieldName); + + const formSchema = z.object({ + apiKey: z.string().min(1, "API Key is required"), + title: z.string().min(1, "Name is required"), + expiresAt: z.string().optional(), + }); + + const form = useForm>({ + resolver: zodResolver(formSchema), + defaultValues: { + apiKey: "", + title: "", + expiresAt: "", + }, + }); + + if (!credentials || credentials.isLoading || !credentials.supportsApiKey) { + return null; + } + + const { schema, provider, providerName, createAPIKeyCredentials } = + credentials; + + async function onSubmit(values: z.infer) { + const expiresAt = values.expiresAt + ? new Date(values.expiresAt).getTime() / 1000 + : undefined; + const newCredentials = await createAPIKeyCredentials({ + api_key: values.apiKey, + title: values.title, + expires_at: expiresAt, + }); + onCredentialsCreate({ + provider, + id: newCredentials.id, + type: "api_key", + title: newCredentials.title, + }); + } + + return ( + { + if (!open) onClose(); + }} + > + + + Add new API key for {providerName} + {schema.description && ( + {schema.description} + )} + + +
+ + ( + + API Key + {schema.credentials_scopes && ( + + Required scope(s) for this block:{" "} + {schema.credentials_scopes?.map((s, i, a) => ( + + {s} + {i < a.length - 1 && ", "} + + ))} + + )} + + + + + + )} + /> + ( + + Name + + + + + + )} + /> + ( + + Expiration Date (Optional) + + + + + + )} + /> + + + +
+
+ ); +}; + +export const UserPasswordCredentialsModal: FC<{ + credentialsFieldName: string; + open: boolean; + onClose: () => void; + onCredentialsCreate: (creds: CredentialsMetaInput) => void; +}> = ({ credentialsFieldName, open, onClose, onCredentialsCreate }) => { + const credentials = useCredentials(credentialsFieldName); + + const formSchema = z.object({ + username: z.string().min(1, "Username is required"), + password: z.string().min(1, "Password is required"), + title: z.string().min(1, "Name is required"), + }); + + const form = useForm>({ + resolver: zodResolver(formSchema), + defaultValues: { + username: "", + password: "", + title: "", + }, + }); + + if ( + !credentials || + credentials.isLoading || + !credentials.supportsUserPassword + ) { + return null; + } + + const { schema, provider, providerName, createUserPasswordCredentials } = + credentials; + + async function onSubmit(values: z.infer) { + const newCredentials = await createUserPasswordCredentials({ + username: values.username, + password: values.password, + title: values.title, + }); + onCredentialsCreate({ + provider, + id: newCredentials.id, + type: "user_password", + title: newCredentials.title, + }); + } + + return ( + { + if (!open) onClose(); + }} + > + + + + Add new username & password for {providerName} + + +
+ + ( + + Username + + + + + + )} + /> + ( + + Password + + + + + + )} + /> + ( + + Name + + + + + + )} + /> + + + +
+
+ ); +}; + +export const OAuth2FlowWaitingModal: FC<{ + open: boolean; + onClose: () => void; + providerName: string; +}> = ({ open, onClose, providerName }) => { + return ( + { + if (!open) onClose(); + }} + > + + + + Waiting on {providerName} sign-in process... + + + Complete the sign-in process in the pop-up window. +
+ Closing this dialog will cancel the sign-in process. +
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx new file mode 100644 index 000000000000..8fdf0932b38d --- /dev/null +++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -0,0 +1,315 @@ +import { + APIKeyCredentials, + CredentialsDeleteNeedConfirmationResponse, + CredentialsDeleteResponse, + CredentialsMetaResponse, + CredentialsProviderName, + PROVIDER_NAMES, + UserPasswordCredentials, +} from "@/lib/autogpt-server-api"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; +import { createContext, useCallback, useEffect, useState } from "react"; + +// Get keys from CredentialsProviderName type +const CREDENTIALS_PROVIDER_NAMES = Object.values( + PROVIDER_NAMES, +) as CredentialsProviderName[]; + +// --8<-- [start:CredentialsProviderNames] +const providerDisplayNames: Record = { + anthropic: "Anthropic", + discord: "Discord", + d_id: "D-ID", + e2b: "E2B", + exa: "Exa", + fal: "FAL", + github: "GitHub", + google: "Google", + google_maps: "Google Maps", + groq: "Groq", + hubspot: "Hubspot", + ideogram: "Ideogram", + jina: "Jina", + linear: "Linear", + medium: "Medium", + mem0: "Mem0", + notion: "Notion", + nvidia: "Nvidia", + ollama: "Ollama", + openai: "OpenAI", + openweathermap: "OpenWeatherMap", + open_router: "Open Router", + pinecone: "Pinecone", + slant3d: "Slant3D", + smtp: "SMTP", + reddit: "Reddit", + replicate: "Replicate", + revid: "Rev.ID", + twitter: "Twitter", + unreal_speech: "Unreal Speech", +} as const; +// --8<-- [end:CredentialsProviderNames] + +type APIKeyCredentialsCreatable = Omit< + APIKeyCredentials, + "id" | "provider" | "type" +>; + +type UserPasswordCredentialsCreatable = Omit< + UserPasswordCredentials, + "id" | "provider" | "type" +>; + +export type CredentialsProviderData = { + provider: CredentialsProviderName; + providerName: string; + savedApiKeys: CredentialsMetaResponse[]; + savedOAuthCredentials: CredentialsMetaResponse[]; + savedUserPasswordCredentials: CredentialsMetaResponse[]; + oAuthCallback: ( + code: string, + state_token: string, + ) => Promise; + createAPIKeyCredentials: ( + credentials: APIKeyCredentialsCreatable, + ) => Promise; + createUserPasswordCredentials: ( + credentials: UserPasswordCredentialsCreatable, + ) => Promise; + deleteCredentials: ( + id: string, + force?: boolean, + ) => Promise< + CredentialsDeleteResponse | CredentialsDeleteNeedConfirmationResponse + >; +}; + +export type CredentialsProvidersContextType = { + [key in CredentialsProviderName]?: CredentialsProviderData; +}; + +export const CredentialsProvidersContext = + createContext(null); + +export default function CredentialsProvider({ + children, +}: { + children: React.ReactNode; +}) { + const [providers, setProviders] = + useState(null); + const api = useBackendAPI(); + + const addCredentials = useCallback( + ( + provider: CredentialsProviderName, + credentials: CredentialsMetaResponse, + ) => { + setProviders((prev) => { + if (!prev || !prev[provider]) return prev; + + const updatedProvider = { ...prev[provider] }; + + if (credentials.type === "api_key") { + updatedProvider.savedApiKeys = [ + ...updatedProvider.savedApiKeys, + credentials, + ]; + } else if (credentials.type === "oauth2") { + updatedProvider.savedOAuthCredentials = [ + ...updatedProvider.savedOAuthCredentials, + credentials, + ]; + } else if (credentials.type === "user_password") { + 
updatedProvider.savedUserPasswordCredentials = [ + ...updatedProvider.savedUserPasswordCredentials, + credentials, + ]; + } + + return { + ...prev, + [provider]: updatedProvider, + }; + }); + }, + [setProviders], + ); + + /** Wraps `BackendAPI.oAuthCallback`, and adds the result to the internal credentials store. */ + const oAuthCallback = useCallback( + async ( + provider: CredentialsProviderName, + code: string, + state_token: string, + ): Promise => { + const credsMeta = await api.oAuthCallback(provider, code, state_token); + addCredentials(provider, credsMeta); + return credsMeta; + }, + [api, addCredentials], + ); + + /** Wraps `BackendAPI.createAPIKeyCredentials`, and adds the result to the internal credentials store. */ + const createAPIKeyCredentials = useCallback( + async ( + provider: CredentialsProviderName, + credentials: APIKeyCredentialsCreatable, + ): Promise => { + const credsMeta = await api.createAPIKeyCredentials({ + provider, + ...credentials, + }); + addCredentials(provider, credsMeta); + return credsMeta; + }, + [api, addCredentials], + ); + + /** Wraps `BackendAPI.createUserPasswordCredentials`, and adds the result to the internal credentials store. */ + const createUserPasswordCredentials = useCallback( + async ( + provider: CredentialsProviderName, + credentials: UserPasswordCredentialsCreatable, + ): Promise => { + const credsMeta = await api.createUserPasswordCredentials({ + provider, + ...credentials, + }); + addCredentials(provider, credsMeta); + return credsMeta; + }, + [api, addCredentials], + ); + + /** Wraps `BackendAPI.deleteCredentials`, and removes the credentials from the internal store. */ + const deleteCredentials = useCallback( + async ( + provider: CredentialsProviderName, + id: string, + force: boolean = false, + ): Promise< + CredentialsDeleteResponse | CredentialsDeleteNeedConfirmationResponse + > => { + const result = await api.deleteCredentials(provider, id, force); + if (!result.deleted) { + return result; + } + setProviders((prev) => { + if (!prev || !prev[provider]) return prev; + + const updatedProvider = { ...prev[provider] }; + updatedProvider.savedApiKeys = updatedProvider.savedApiKeys.filter( + (cred) => cred.id !== id, + ); + updatedProvider.savedOAuthCredentials = + updatedProvider.savedOAuthCredentials.filter( + (cred) => cred.id !== id, + ); + updatedProvider.savedUserPasswordCredentials = + updatedProvider.savedUserPasswordCredentials.filter( + (cred) => cred.id !== id, + ); + return { + ...prev, + [provider]: updatedProvider, + }; + }); + return result; + }, + [api], + ); + + useEffect(() => { + api.isAuthenticated().then((isAuthenticated) => { + if (!isAuthenticated) return; + + api.listCredentials().then((response) => { + const credentialsByProvider = response.reduce( + (acc, cred) => { + if (!acc[cred.provider]) { + acc[cred.provider] = { + oauthCreds: [], + apiKeys: [], + userPasswordCreds: [], + }; + } + if (cred.type === "oauth2") { + acc[cred.provider].oauthCreds.push(cred); + } else if (cred.type === "api_key") { + acc[cred.provider].apiKeys.push(cred); + } else if (cred.type === "user_password") { + acc[cred.provider].userPasswordCreds.push(cred); + } + return acc; + }, + {} as Record< + CredentialsProviderName, + { + oauthCreds: CredentialsMetaResponse[]; + apiKeys: CredentialsMetaResponse[]; + userPasswordCreds: CredentialsMetaResponse[]; + } + >, + ); + + setProviders((prev) => ({ + ...prev, + ...Object.fromEntries( + CREDENTIALS_PROVIDER_NAMES.map((provider) => [ + provider, + { + provider, + providerName: + 
providerDisplayNames[provider as CredentialsProviderName], + savedApiKeys: credentialsByProvider[provider]?.apiKeys ?? [], + savedOAuthCredentials: + credentialsByProvider[provider]?.oauthCreds ?? [], + savedUserPasswordCredentials: + credentialsByProvider[provider]?.userPasswordCreds ?? [], + oAuthCallback: (code: string, state_token: string) => + oAuthCallback( + provider as CredentialsProviderName, + code, + state_token, + ), + createAPIKeyCredentials: ( + credentials: APIKeyCredentialsCreatable, + ) => + createAPIKeyCredentials( + provider as CredentialsProviderName, + credentials, + ), + createUserPasswordCredentials: ( + credentials: UserPasswordCredentialsCreatable, + ) => + createUserPasswordCredentials( + provider as CredentialsProviderName, + credentials, + ), + deleteCredentials: (id: string, force: boolean = false) => + deleteCredentials( + provider as CredentialsProviderName, + id, + force, + ), + }, + ]), + ), + })); + }); + }); + }, [ + api, + createAPIKeyCredentials, + createUserPasswordCredentials, + deleteCredentials, + oAuthCallback, + ]); + + return ( + + {children} + + ); +} diff --git a/autogpt_platform/frontend/src/components/marketplace/AgentDetailContent.tsx b/autogpt_platform/frontend/src/components/marketplace/AgentDetailContent.tsx new file mode 100644 index 000000000000..cbaa72e9906d --- /dev/null +++ b/autogpt_platform/frontend/src/components/marketplace/AgentDetailContent.tsx @@ -0,0 +1,96 @@ +// "use client"; +// import Link from "next/link"; +// import { ArrowLeft, Download, Calendar, Tag } from "lucide-react"; +// import { Button } from "@/components/ui/button"; +// import BackendAPI, { GraphCreatable } from "@/lib/autogpt-server-api"; +// import "@xyflow/react/dist/style.css"; +// import { useToast } from "../ui/use-toast"; + +// function AgentDetailContent({ agent }: { agent: GraphCreatable }) { +// const { toast } = useToast(); + +// // const downloadAgent = async (id: string): Promise => { +// // const api = new MarketplaceAPI(); +// // try { +// // const file = await api.downloadAgentFile(id); +// // console.debug(`Agent file downloaded:`, file); + +// // // Create a Blob from the file content +// // const blob = new Blob([file], { type: "application/json" }); + +// // // Create a temporary URL for the Blob +// // const url = window.URL.createObjectURL(blob); + +// // // Create a temporary anchor element +// // const a = document.createElement("a"); +// // a.href = url; +// // a.download = `agent_${id}.json`; // Set the filename + +// // // Append the anchor to the body, click it, and remove it +// // document.body.appendChild(a); +// // a.click(); +// // document.body.removeChild(a); + +// // // Revoke the temporary URL +// // window.URL.revokeObjectURL(url); +// // } catch (error) { +// // console.error(`Error downloading agent:`, error); +// // throw error; +// // } +// // }; + +// return ( +//
+//
+// +// +// Back to Marketplace +// +//
+// +//
+//
+//
+//
+//

{agent.name}

+//

+// {agent.description} +//

+//
+//
+//
+//
+//
+// +// Last Updated +//
+//
+// {new Date(agent.updatedAt).toLocaleDateString()} +//
+//
+//
+//
+// +// Categories +//
+//
+// {agent.categories.join(", ")} +//
+//
+//
+//
+//
+//
+// ); +// } + +// export default AgentDetailContent; diff --git a/autogpt_platform/frontend/src/components/marketplace/actions.ts b/autogpt_platform/frontend/src/components/marketplace/actions.ts new file mode 100644 index 000000000000..d6ebec32a0b7 --- /dev/null +++ b/autogpt_platform/frontend/src/components/marketplace/actions.ts @@ -0,0 +1,18 @@ +// "use server"; + +// import * as Sentry from "@sentry/nextjs"; +// import MarketplaceAPI, { AnalyticsEvent } from "@/lib/marketplace-api"; +// import { checkAuth } from "@/lib/supabase/server"; + +// export async function makeAnalyticsEvent(event: AnalyticsEvent) { +// return await Sentry.withServerActionInstrumentation( +// "makeAnalyticsEvent", +// {}, +// async () => { +// await checkAuth(); +// const apiUrl = process.env.AGPT_SERVER_API_URL; +// const api = new MarketplaceAPI(); +// await api.makeAnalyticsEvent(event); +// }, +// ); +// } diff --git a/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx b/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx new file mode 100644 index 000000000000..8a87af4c82fc --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/AgentFlowList.tsx @@ -0,0 +1,170 @@ +import BackendAPI, { + GraphExecution, + GraphMeta, +} from "@/lib/autogpt-server-api"; +import React, { useMemo } from "react"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Button } from "@/components/ui/button"; +import { TextRenderer } from "@/components/ui/render"; +import Link from "next/link"; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTrigger, +} from "@/components/ui/dialog"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { ChevronDownIcon, EnterIcon } from "@radix-ui/react-icons"; +import { AgentImportForm } from "@/components/agent-import-form"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import moment from "moment/moment"; +import { DialogTitle } from "@/components/ui/dialog"; + +export const AgentFlowList = ({ + flows, + executions, + selectedFlow, + onSelectFlow, + className, +}: { + flows: GraphMeta[]; + executions?: GraphExecution[]; + selectedFlow: GraphMeta | null; + onSelectFlow: (f: GraphMeta) => void; + className?: string; +}) => { + return ( + + + Agents + +
+ {/* Split "Create" button */} + + + {/* https://ui.shadcn.com/docs/components/dialog#notes */} + + + + + + + + + Import from file + + + + + + + + Import Agent +

+ Import an Agent from a file +

+
+ +
+
+
+
+ + + + + + Name + {/* Status */} + {/* Last updated */} + {executions && ( + + # of runs + + )} + {executions && Last run} + + + + {flows + .map((flow) => { + let runCount = 0, + lastRun: GraphExecution | null = null; + if (executions) { + const _flowRuns = executions.filter( + (r) => r.graph_id == flow.id, + ); + runCount = _flowRuns.length; + lastRun = + runCount == 0 + ? null + : _flowRuns.reduce((a, c) => + a.started_at > c.started_at ? a : c, + ); + } + return { flow, runCount, lastRun }; + }) + .sort((a, b) => { + if (!a.lastRun && !b.lastRun) return 0; + if (!a.lastRun) return 1; + if (!b.lastRun) return -1; + return b.lastRun.started_at - a.lastRun.started_at; + }) + .map(({ flow, runCount, lastRun }) => ( + onSelectFlow(flow)} + data-state={selectedFlow?.id == flow.id ? "selected" : null} + > + + + + {/* */} + {/* + {flow.updatedAt ?? "???"} + */} + {executions && ( + + {runCount} + + )} + {executions && + (!lastRun ? ( + + ) : ( + + {moment(lastRun.started_at).fromNow()} + + ))} + + ))} + +
+
+
+ ); +}; +export default AgentFlowList; diff --git a/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx b/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx new file mode 100644 index 000000000000..220a8e59e45d --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowInfo.tsx @@ -0,0 +1,327 @@ +import React, { useEffect, useState, useCallback } from "react"; +import { + GraphExecution, + Graph, + GraphMeta, + safeCopyGraph, + BlockUIType, + BlockIORootSchema, +} from "@/lib/autogpt-server-api"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuLabel, + DropdownMenuRadioGroup, + DropdownMenuRadioItem, + DropdownMenuSeparator, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { ClockIcon, ExitIcon, Pencil2Icon } from "@radix-ui/react-icons"; +import Link from "next/link"; +import { exportAsJSONFile, filterBlocksByType } from "@/lib/utils"; +import { FlowRunsStats } from "@/components/monitor/index"; +import { Trash2Icon } from "lucide-react"; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, + DialogDescription, + DialogFooter, +} from "@/components/ui/dialog"; +import { useToast } from "@/components/ui/use-toast"; +import { CronScheduler } from "@/components/cronScheduler"; +import RunnerInputUI from "@/components/runner-ui/RunnerInputUI"; +import useAgentGraph from "@/hooks/useAgentGraph"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +export const FlowInfo: React.FC< + React.HTMLAttributes & { + flow: GraphMeta; + executions: GraphExecution[]; + flowVersion?: number | "all"; + refresh: () => void; + } +> = ({ flow, executions, flowVersion, refresh, ...props }) => { + const { + agentName, + setAgentName, + agentDescription, + setAgentDescription, + savedAgent, + availableNodes, + availableFlows, + getOutputType, + requestSave, + requestSaveAndRun, + requestStopRun, + scheduleRunner, + isRunning, + isScheduling, + setIsScheduling, + nodes, + setNodes, + edges, + setEdges, + } = useAgentGraph(flow.id, false); + + const api = useBackendAPI(); + const { toast } = useToast(); + + const [flowVersions, setFlowVersions] = useState(null); + const [selectedVersion, setSelectedFlowVersion] = useState( + flowVersion ?? "all", + ); + const selectedFlowVersion: Graph | undefined = flowVersions?.find( + (v) => + v.version == (selectedVersion == "all" ? 
flow.version : selectedVersion), + ); + + const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false); + const [openCron, setOpenCron] = useState(false); + const [isRunnerInputOpen, setIsRunnerInputOpen] = useState(false); + const isDisabled = !selectedFlowVersion; + + const getBlockInputsAndOutputs = useCallback(() => { + const inputBlocks = filterBlocksByType( + nodes, + (node) => node.data.uiType === BlockUIType.INPUT, + ); + + const outputBlocks = filterBlocksByType( + nodes, + (node) => node.data.uiType === BlockUIType.OUTPUT, + ); + + const inputs = inputBlocks.map((node) => ({ + id: node.id, + type: "input" as const, + inputSchema: node.data.inputSchema as BlockIORootSchema, + hardcodedValues: { + name: (node.data.hardcodedValues as any).name || "", + description: (node.data.hardcodedValues as any).description || "", + value: (node.data.hardcodedValues as any).value, + placeholder_values: + (node.data.hardcodedValues as any).placeholder_values || [], + limit_to_placeholder_values: + (node.data.hardcodedValues as any).limit_to_placeholder_values || + false, + }, + })); + + const outputs = outputBlocks.map((node) => ({ + id: node.id, + type: "output" as const, + hardcodedValues: { + name: (node.data.hardcodedValues as any).name || "Output", + description: + (node.data.hardcodedValues as any).description || + "Output from the agent", + value: (node.data.hardcodedValues as any).value, + }, + result: (node.data.executionResults as any)?.at(-1)?.data?.output, + })); + + return { inputs, outputs }; + }, [nodes]); + + const handleScheduleButton = () => { + if (!selectedFlowVersion) { + toast({ + title: "Please select a flow version before scheduling", + duration: 2000, + }); + return; + } + setOpenCron(true); + }; + + useEffect(() => { + api.getGraphAllVersions(flow.id).then((result) => setFlowVersions(result)); + }, [flow.id, api]); + + const openRunnerInput = () => setIsRunnerInputOpen(true); + + const runOrOpenInput = () => { + const { inputs } = getBlockInputsAndOutputs(); + if (inputs.length > 0) { + openRunnerInput(); + } else { + requestSaveAndRun(); + } + }; + + const handleInputChange = useCallback( + (nodeId: string, field: string, value: string) => { + setNodes((nds) => + nds.map((node) => { + if (node.id === nodeId) { + return { + ...node, + data: { + ...node.data, + hardcodedValues: { + ...(node.data.hardcodedValues as any), + [field]: value, + }, + }, + }; + } + return node; + }), + ); + }, + [setNodes], + ); + + return ( + + +
+ + {flow.name} v{flow.version} + +
+
+ {(flowVersions?.length ?? 0) > 1 && ( + + + + + + Choose a version + + + setSelectedFlowVersion( + choice == "all" ? choice : Number(choice), + ) + } + > + + All versions + + {flowVersions?.map((v) => ( + + Version {v.version} + {v.is_active ? " (active)" : ""} + + ))} + + + + )} + + + Open in Builder + + + + +
+
+ + + execution.graph_id == flow.id && + (selectedVersion == "all" || + execution.graph_version == selectedVersion), + )} + /> + + + + + Delete Agent + + Are you sure you want to delete this agent?
+ This action cannot be undone. +
+
+ + + + +
+
+ setIsRunnerInputOpen(false)} + blockInputs={getBlockInputsAndOutputs().inputs} + onInputChange={handleInputChange} + onRun={() => { + setIsRunnerInputOpen(false); + requestSaveAndRun(); + }} + isRunning={isRunning} + scheduledInput={false} + isScheduling={false} + onSchedule={async () => {}} // Fixed type error by making async + /> +
+ ); +}; +export default FlowInfo; diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx new file mode 100644 index 000000000000..2d6dd41a97ed --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunInfo.tsx @@ -0,0 +1,153 @@ +import React, { useCallback, useEffect, useState } from "react"; +import { + GraphExecution, + GraphMeta, + NodeExecutionResult, + SpecialBlockID, +} from "@/lib/autogpt-server-api"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import Link from "next/link"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { IconSquare } from "@/components/ui/icons"; +import { ExitIcon, Pencil2Icon } from "@radix-ui/react-icons"; +import moment from "moment/moment"; +import { FlowRunStatusBadge } from "@/components/monitor/FlowRunStatusBadge"; +import RunnerOutputUI, { BlockOutput } from "../runner-ui/RunnerOutputUI"; +import { useBackendAPI } from "@/lib/autogpt-server-api/context"; + +export const FlowRunInfo: React.FC< + React.HTMLAttributes & { + flow: GraphMeta; + execution: GraphExecution; + } +> = ({ flow, execution, ...props }) => { + const [isOutputOpen, setIsOutputOpen] = useState(false); + const [blockOutputs, setBlockOutputs] = useState([]); + const api = useBackendAPI(); + + const fetchBlockResults = useCallback(async () => { + const executionResults = await api.getGraphExecutionInfo( + flow.id, + execution.execution_id, + ); + + // Create a map of the latest COMPLETED execution results of output nodes by node_id + const latestCompletedResults = executionResults + .filter( + (result) => + result.status === "COMPLETED" && + result.block_id === SpecialBlockID.OUTPUT, + ) + .reduce((acc, result) => { + const existing = acc.get(result.node_id); + + // Compare dates if there's an existing result + if (existing) { + const existingDate = existing.end_time || existing.add_time; + const currentDate = result.end_time || result.add_time; + + if (currentDate > existingDate) { + acc.set(result.node_id, result); + } + } else { + acc.set(result.node_id, result); + } + + return acc; + }, new Map()); + + // Transform results to BlockOutput format + setBlockOutputs( + Array.from(latestCompletedResults.values()).map((result) => ({ + id: result.node_id, + type: "output" as const, + hardcodedValues: { + name: result.input_data.name || "Output", + description: result.input_data.description || "Output from the agent", + value: result.input_data.value, + }, + // Change this line to extract the array directly + result: result.output_data?.output || undefined, + })), + ); + }, [api, flow.id, execution.execution_id]); + + // Fetch graph and execution data + useEffect(() => { + if (!isOutputOpen) return; + fetchBlockResults(); + }, [isOutputOpen, fetchBlockResults]); + + if (execution.graph_id != flow.id) { + throw new Error( + `FlowRunInfo can't be used with non-matching execution.graph_id and flow.id`, + ); + } + + const handleStopRun = useCallback(() => { + api.stopGraphExecution(flow.id, execution.execution_id); + }, [api, flow.id, execution.execution_id]); + + return ( + <> + + +
+ + {flow.name}{" "} + v{execution.graph_version} + +
+
+ {execution.status === "RUNNING" && ( + + )} + + + Open in Builder + +
+
+ +

+ Agent ID: {flow.id} +

+

+ Run ID: {execution.execution_id} +

+
+ Status:{" "} + +
+

+ Started:{" "} + {moment(execution.started_at).format("YYYY-MM-DD HH:mm:ss")} +

+

+ Finished:{" "} + {moment(execution.ended_at).format("YYYY-MM-DD HH:mm:ss")} +

+

+ Duration (run time):{" "} + {execution.duration.toFixed(1)} ( + {execution.total_run_time.toFixed(1)}) seconds +

+
+
+ setIsOutputOpen(false)} + blockOutputs={blockOutputs} + /> + + ); +}; + +export default FlowRunInfo; diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx new file mode 100644 index 000000000000..47381b0b830f --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunStatusBadge.tsx @@ -0,0 +1,25 @@ +import React from "react"; +import { Badge } from "@/components/ui/badge"; +import { cn } from "@/lib/utils"; +import { GraphExecution } from "@/lib/autogpt-server-api"; + +export const FlowRunStatusBadge: React.FC<{ + status: GraphExecution["status"]; + className?: string; +}> = ({ status, className }) => ( + + {status} + +); diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunsList.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunsList.tsx new file mode 100644 index 000000000000..b0b92d48e2e2 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunsList.tsx @@ -0,0 +1,80 @@ +import React from "react"; +import { GraphExecution, GraphMeta } from "@/lib/autogpt-server-api"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import moment from "moment/moment"; +import { FlowRunStatusBadge } from "@/components/monitor/FlowRunStatusBadge"; +import { TextRenderer } from "../ui/render"; + +export const FlowRunsList: React.FC<{ + flows: GraphMeta[]; + executions: GraphExecution[]; + className?: string; + selectedRun?: GraphExecution | null; + onSelectRun: (r: GraphExecution) => void; +}> = ({ flows, executions, selectedRun, onSelectRun, className }) => ( + + + Runs + + + + + + Agent + Started + Status + Duration + + + + {executions.map((execution) => ( + onSelectRun(execution)} + data-state={ + selectedRun?.execution_id == execution.execution_id + ? "selected" + : null + } + > + + f.id == execution.graph_id)?.name} + truncateLengthLimit={30} + /> + + + {moment(execution.started_at).format("HH:mm")} + + + + + {formatDuration(execution.duration)} + + ))} + +
+
+
+); + +function formatDuration(seconds: number): string { + return ( + (seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() + + "s" + ); +} + +export default FlowRunsList; diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx new file mode 100644 index 000000000000..5c638dead476 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunsStatus.tsx @@ -0,0 +1,118 @@ +import React, { useState } from "react"; +import { GraphExecution, GraphMeta } from "@/lib/autogpt-server-api"; +import { CardTitle } from "@/components/ui/card"; +import { Button } from "@/components/ui/button"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; +import { Calendar } from "@/components/ui/calendar"; +import { FlowRunsTimeline } from "@/components/monitor/FlowRunsTimeline"; + +export const FlowRunsStatus: React.FC<{ + flows: GraphMeta[]; + executions: GraphExecution[]; + title?: string; + className?: string; +}> = ({ flows, executions: executions, title, className }) => { + /* "dateMin": since the first flow in the dataset + * number > 0: custom date (unix timestamp) + * number < 0: offset relative to Date.now() (in seconds) */ + const [selected, setSelected] = useState(); + const [statsSince, setStatsSince] = useState(-24 * 3600); + const statsSinceTimestamp = // unix timestamp or null + typeof statsSince == "string" + ? null + : statsSince < 0 + ? Date.now() + statsSince * 1000 + : statsSince; + const filteredFlowRuns = + statsSinceTimestamp != null + ? executions.filter((fr) => fr.started_at > statsSinceTimestamp) + : executions; + + return ( +
+
+ {title || "Stats"} +
+ + + + + + + + + + { + setSelected(selectedDay); + setStatsSince(selectedDay.getTime()); + }} + /> + + + +
+
+ +
+
+

+ Total runs: {filteredFlowRuns.length} +

+

+ Total run time:{" "} + {filteredFlowRuns.reduce( + (total, run) => total + run.total_run_time, + 0, + )}{" "} + seconds +

+ {/*

Total cost: €1,23

*/} +
+
+ ); +}; +export default FlowRunsStatus; diff --git a/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx b/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx new file mode 100644 index 000000000000..8f70af3b24a8 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/FlowRunsTimeline.tsx @@ -0,0 +1,175 @@ +import { GraphExecution, GraphMeta } from "@/lib/autogpt-server-api"; +import { + ComposedChart, + DefaultLegendContentProps, + Legend, + Line, + ResponsiveContainer, + Scatter, + Tooltip, + XAxis, + YAxis, +} from "recharts"; +import moment from "moment/moment"; +import { Card } from "@/components/ui/card"; +import { cn, hashString } from "@/lib/utils"; +import React from "react"; +import { FlowRunStatusBadge } from "@/components/monitor/FlowRunStatusBadge"; + +export const FlowRunsTimeline = ({ + flows, + executions, + dataMin, + className, +}: { + flows: GraphMeta[]; + executions: GraphExecution[]; + dataMin: "dataMin" | number; + className?: string; +}) => ( + /* TODO: make logarithmic? */ + + + { + const now = moment(); + const time = moment(unixTime); + return now.diff(time, "hours") < 24 + ? time.format("HH:mm") + : time.format("YYYY-MM-DD HH:mm"); + }} + name="Time" + scale="time" + /> + (s > 90 ? `${Math.round(s / 60)}m` : `${s}s`)} + /> + { + if (payload && payload.length) { + const data: GraphExecution & { time: number; _duration: number } = + payload[0].payload; + const flow = flows.find((f) => f.id === data.graph_id); + return ( + +

+ Agent: {flow ? flow.name : "Unknown"} +

+
+ Status:  + +
+

+ Started:{" "} + {moment(data.started_at).format("YYYY-MM-DD HH:mm:ss")} +

+

+ Duration / run time:{" "} + {formatDuration(data.duration)} /{" "} + {formatDuration(data.total_run_time)} +

+
+ ); + } + return null; + }} + /> + {flows.map((flow) => ( + e.graph_id == flow.id) + .map((e) => ({ + ...e, + time: e.started_at + e.total_run_time * 1000, + _duration: e.total_run_time, + }))} + name={flow.name} + fill={`hsl(${(hashString(flow.id) * 137.5) % 360}, 70%, 50%)`} + /> + ))} + {executions.map((execution) => ( + + ))} + } + wrapperStyle={{ + bottom: 0, + left: 0, + right: 0, + width: "100%", + display: "flex", + justifyContent: "center", + }} + /> +
+
+); + +export default FlowRunsTimeline; + +const ScrollableLegend: React.FC< + DefaultLegendContentProps & { className?: string } +> = ({ payload, className }) => { + return ( +
+ {payload?.map((entry, index) => { + if (entry.type == "none") return; + return ( + + + {entry.value} + + ); + })} +
+ ); +}; + +function formatDuration(seconds: number): string { + return ( + (seconds < 100 ? seconds.toPrecision(2) : Math.round(seconds)).toString() + + "s" + ); +} diff --git a/autogpt_platform/frontend/src/components/monitor/index.ts b/autogpt_platform/frontend/src/components/monitor/index.ts new file mode 100644 index 000000000000..0f8f80287c06 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/index.ts @@ -0,0 +1,6 @@ +export { default as AgentFlowList } from "./AgentFlowList"; +export { default as FlowRunsList } from "./FlowRunsList"; +export { default as FlowInfo } from "./FlowInfo"; +export { default as FlowRunInfo } from "./FlowRunInfo"; +export { default as FlowRunsStats } from "./FlowRunsStatus"; +export { default as FlowRunsTimeline } from "./FlowRunsTimeline"; diff --git a/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx b/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx new file mode 100644 index 000000000000..85cffba4e61f --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/scheduleTable.tsx @@ -0,0 +1,228 @@ +import { Schedule } from "@/lib/autogpt-server-api"; +import { Button } from "@/components/ui/button"; +import { Card } from "@/components/ui/card"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; +import { Badge } from "@/components/ui/badge"; +import { ScrollArea } from "@/components/ui/scroll-area"; +import { ClockIcon, Loader2 } from "lucide-react"; +import { useToast } from "@/components/ui/use-toast"; +import { CronExpressionManager } from "@/lib/monitor/cronExpressionManager"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import { GraphMeta } from "@/lib/autogpt-server-api"; +import { useRouter } from "next/navigation"; +import { useState } from "react"; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTitle, +} from "@/components/ui/dialog"; +import { TextRenderer } from "../ui/render"; + +interface SchedulesTableProps { + schedules: Schedule[]; + agents: GraphMeta[]; + onRemoveSchedule: (scheduleId: string, enabled: boolean) => void; + sortColumn: keyof Schedule; + sortDirection: "asc" | "desc"; + onSort: (column: keyof Schedule) => void; +} + +export const SchedulesTable = ({ + schedules, + agents, + onRemoveSchedule, + sortColumn, + sortDirection, + onSort, +}: SchedulesTableProps) => { + const { toast } = useToast(); + const router = useRouter(); + const cron_manager = new CronExpressionManager(); + const [selectedAgent, setSelectedAgent] = useState(""); + const [isDialogOpen, setIsDialogOpen] = useState(false); + const [isLoading, setIsLoading] = useState(false); + const [selectedFilter, setSelectedFilter] = useState(""); + + const filteredAndSortedSchedules = [...schedules] + .filter( + (schedule) => !selectedFilter || schedule.graph_id === selectedFilter, + ) + .sort((a, b) => { + const aValue = a[sortColumn]; + const bValue = b[sortColumn]; + if (sortDirection === "asc") { + return String(aValue).localeCompare(String(bValue)); + } + return String(bValue).localeCompare(String(aValue)); + }); + + const handleToggleSchedule = (scheduleId: string, enabled: boolean) => { + onRemoveSchedule(scheduleId, enabled); + if (!enabled) { + toast({ + title: "Schedule Disabled", + description: "The schedule has been successfully disabled.", + }); + } + }; + + const handleNewSchedule = () => { + setIsDialogOpen(true); + }; + + const handleAgentSelect 
= (agentId: string) => { + setSelectedAgent(agentId); + }; + + const handleSchedule = async () => { + setIsLoading(true); + try { + await new Promise((resolve) => setTimeout(resolve, 100)); + router.push(`/build?flowID=${selectedAgent}&open_scheduling=true`); + } catch (error) { + console.error("Navigation error:", error); + } + }; + + return ( + + + + + Select Agent for New Schedule + + + + + + +
+

Schedules

+
+ + +
+
+ + + + + onSort("graph_id")} + className="cursor-pointer" + > + Graph Name + + onSort("next_run_time")} + className="cursor-pointer" + > + Next Execution + + onSort("cron")} + className="cursor-pointer" + > + Schedule + + + Actions + + + + {filteredAndSortedSchedules.length === 0 ? ( + + + No schedules are available + + + ) : ( + filteredAndSortedSchedules.map((schedule) => ( + + + {agents.find((a) => a.id === schedule.graph_id)?.name || + schedule.graph_id} + + + {new Date(schedule.next_run_time).toLocaleString()} + + + + {cron_manager.generateDescription(schedule.cron || "")} + + + + +
+ +
+
+
+ )) + )} +
+
+
+
+ ); +}; diff --git a/autogpt_platform/frontend/src/components/monitor/skeletons/AgentFlowListSkeleton.tsx b/autogpt_platform/frontend/src/components/monitor/skeletons/AgentFlowListSkeleton.tsx new file mode 100644 index 000000000000..58d8c6ca6fe1 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/skeletons/AgentFlowListSkeleton.tsx @@ -0,0 +1,24 @@ +export default function AgentsFlowListSkeleton() { + return ( +
+
+

Agents

+
+
+
+
+
Name
+
# of runs
+
Last run
+
+ {[...Array(3)].map((_, index) => ( +
+
+
+
+
+ ))} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsListSkeleton.tsx b/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsListSkeleton.tsx new file mode 100644 index 000000000000..6f884151d3a0 --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsListSkeleton.tsx @@ -0,0 +1,23 @@ +export default function FlowRunsListSkeleton() { + return ( +
+
+

Runs

+
+
Agent
+
Started
+
Status
+
Duration
+
+ {[...Array(4)].map((_, index) => ( +
+
+
+
+
+
+ ))} +
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsStatusSkeleton.tsx b/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsStatusSkeleton.tsx new file mode 100644 index 000000000000..40677c70cc7c --- /dev/null +++ b/autogpt_platform/frontend/src/components/monitor/skeletons/FlowRunsStatusSkeleton.tsx @@ -0,0 +1,28 @@ +export default function FlowRunsStatusSkeleton() { + return ( +
+
+
+

Stats

+
+ {["2h", "8h", "24h", "7d", "Custom", "All"].map((btn) => ( +
+ ))} +
+
+ + {/* Placeholder for the line chart */} +
+ + {/* Placeholders for total runs and total run time */} +
+
+
+
+
+
+ ); +} diff --git a/autogpt_platform/frontend/src/components/nav/CreditButton.tsx b/autogpt_platform/frontend/src/components/nav/CreditButton.tsx new file mode 100644 index 000000000000..aec84871bd50 --- /dev/null +++ b/autogpt_platform/frontend/src/components/nav/CreditButton.tsx @@ -0,0 +1,24 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { IconRefresh } from "@/components/ui/icons"; +import useCredits from "@/hooks/useCredits"; + +export default function CreditButton() { + const { credits, fetchCredits } = useCredits(); + + return ( + credits !== null && ( + + ) + ); +} diff --git a/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx b/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx new file mode 100644 index 000000000000..c04c32ecc3e2 --- /dev/null +++ b/autogpt_platform/frontend/src/components/nav/MarketPopup.tsx @@ -0,0 +1,35 @@ +import { ButtonHTMLAttributes } from "react"; +import React from "react"; + +interface MarketPopupProps extends ButtonHTMLAttributes { + marketplaceUrl?: string; +} + +export default function MarketPopup({ + className = "", + marketplaceUrl = (() => { + if (process.env.NEXT_PUBLIC_APP_ENV === "prod") { + return "https://production-marketplace-url.com"; + } else if (process.env.NEXT_PUBLIC_APP_ENV === "dev") { + return "https://dev-builder.agpt.co/marketplace"; + } else { + return "http://localhost:3000/marketplace"; + } + })(), + children, + ...props +}: MarketPopupProps) { + const openMarketplacePopup = () => { + window.open( + marketplaceUrl, + "popupWindow", + "width=600,height=400,toolbar=no,menubar=no,scrollbars=no", + ); + }; + + return ( + + ); +} diff --git a/autogpt_platform/frontend/src/components/nav/NavBarButtons.tsx b/autogpt_platform/frontend/src/components/nav/NavBarButtons.tsx new file mode 100644 index 000000000000..7852fb941e0b --- /dev/null +++ b/autogpt_platform/frontend/src/components/nav/NavBarButtons.tsx @@ -0,0 +1,83 @@ +"use client"; + +import React from "react"; +import Link from "next/link"; +import { BsBoxes } from "react-icons/bs"; +import { LuLaptop, LuShoppingCart } from "react-icons/lu"; +import { BehaveAs, cn } from "@/lib/utils"; +import { usePathname } from "next/navigation"; +import { getBehaveAs } from "@/lib/utils"; +import { IconMarketplace } from "@/components/ui/icons"; +import MarketPopup from "./MarketPopup"; + +export function NavBarButtons({ className }: { className?: string }) { + const pathname = usePathname(); + const buttons = [ + { + href: "/", + text: "Monitor", + icon: , + }, + { + href: "/build", + text: "Build", + icon: , + }, + { + href: "/marketplace", + text: "Marketplace", + icon: , + }, + ]; + + const isCloud = getBehaveAs() === BehaveAs.CLOUD; + + return ( + <> + {buttons.map((button) => { + const isActive = button.href === pathname; + return ( + + {button.icon} {button.text} + + ); + })} + {isCloud ? 
( + + Marketplace + + ) : ( + + Marketplace + + )} + + ); +} diff --git a/autogpt_platform/frontend/src/components/node-input-components.tsx b/autogpt_platform/frontend/src/components/node-input-components.tsx new file mode 100644 index 000000000000..e863bd8c82c9 --- /dev/null +++ b/autogpt_platform/frontend/src/components/node-input-components.tsx @@ -0,0 +1,1425 @@ +import { Calendar } from "@/components/ui/calendar"; +import { + Popover, + PopoverContent, + PopoverTrigger, +} from "@/components/ui/popover"; +import { format } from "date-fns"; +import { CalendarIcon, Clock } from "lucide-react"; +import { Cross2Icon, Pencil2Icon, PlusIcon } from "@radix-ui/react-icons"; +import { beautifyString, cn } from "@/lib/utils"; +import { + BlockIORootSchema, + BlockIOSubSchema, + BlockIOObjectSubSchema, + BlockIOKVSubSchema, + BlockIOArraySubSchema, + BlockIOStringSubSchema, + BlockIONumberSubSchema, + BlockIOBooleanSubSchema, + BlockIOSimpleTypeSubSchema, +} from "@/lib/autogpt-server-api/types"; +import React, { FC, useCallback, useEffect, useMemo, useState } from "react"; +import { Button } from "./ui/button"; +import { Switch } from "./ui/switch"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "./ui/select"; +import { + MultiSelector, + MultiSelectorContent, + MultiSelectorInput, + MultiSelectorItem, + MultiSelectorList, + MultiSelectorTrigger, +} from "./ui/multiselect"; +import { LocalValuedInput } from "./ui/input"; +import NodeHandle from "./NodeHandle"; +import { ConnectionData } from "./CustomNode"; +import { CredentialsInput } from "./integrations/credentials-input"; + +type NodeObjectInputTreeProps = { + nodeId: string; + selfKey?: string; + schema: BlockIORootSchema | BlockIOObjectSubSchema; + object?: { [key: string]: any }; + connections: ConnectionData; + handleInputClick: (key: string) => void; + handleInputChange: (key: string, value: any) => void; + errors: { [key: string]: string | undefined }; + className?: string; + displayName?: string; +}; + +const NodeObjectInputTree: FC = ({ + nodeId, + selfKey = "", + schema, + object, + connections, + handleInputClick, + handleInputChange, + errors, + className, + displayName, +}) => { + object ||= ("default" in schema ? schema.default : null) ?? {}; + return ( +
+ {Object.entries(schema.properties).map(([propKey, propSchema]) => { + const childKey = selfKey ? `${selfKey}.${propKey}` : propKey; + + return ( +
+ + {propSchema.title || beautifyString(propKey)} + + +
+ ); + })} +
+ ); +}; + +export default NodeObjectInputTree; + +const NodeImageInput: FC<{ + selfKey: string; + schema: BlockIOStringSubSchema; + value?: string; + error?: string; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + className?: string; + displayName: string; +}> = ({ + selfKey, + schema, + value = "", + error, + handleInputChange, + className, + displayName, +}) => { + const handleFileChange = useCallback( + async (event: React.ChangeEvent) => { + const file = event.target.files?.[0]; + if (!file) return; + + // Validate file type + if (!file.type.startsWith("image/")) { + console.error("Please upload an image file"); + return; + } + + // Convert to base64 + const reader = new FileReader(); + reader.onload = (e) => { + const base64String = (e.target?.result as string).split(",")[1]; + handleInputChange(selfKey, base64String); + }; + reader.readAsDataURL(file); + }, + [selfKey, handleInputChange], + ); + + return ( +
+
+
+ + {value && ( + + )} +
+ + + + {value && ( +
+ Preview +
+ )} +
+ {error && {error}} +
+ ); +}; + +const NodeDateTimeInput: FC<{ + selfKey: string; + schema: BlockIOStringSubSchema; + value?: string; + error?: string; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + className?: string; + displayName: string; +}> = ({ + selfKey, + schema, + value = "", + error, + handleInputChange, + className, + displayName, +}) => { + const date = value ? new Date(value) : new Date(); + const [timeInput, setTimeInput] = useState( + value ? format(date, "HH:mm") : "00:00", + ); + + const handleDateSelect = (newDate: Date | undefined) => { + if (!newDate) return; + + const [hours, minutes] = timeInput.split(":").map(Number); + newDate.setHours(hours, minutes); + handleInputChange(selfKey, newDate.toISOString()); + }; + + const handleTimeChange = (e: React.ChangeEvent) => { + const newTime = e.target.value; + setTimeInput(newTime); + + if (value) { + const [hours, minutes] = newTime.split(":").map(Number); + const newDate = new Date(value); + newDate.setHours(hours, minutes); + handleInputChange(selfKey, newDate.toISOString()); + } + }; + + return ( +
+ + + + + + + + + + {error && {error}} +
+ ); +}; + +export const NodeGenericInputField: FC<{ + nodeId: string; + propKey: string; + propSchema: BlockIOSubSchema; + currentValue?: any; + errors: NodeObjectInputTreeProps["errors"]; + connections: NodeObjectInputTreeProps["connections"]; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + handleInputClick: NodeObjectInputTreeProps["handleInputClick"]; + className?: string; + displayName?: string; +}> = ({ + nodeId, + propKey, + propSchema, + currentValue, + errors, + connections, + handleInputChange, + handleInputClick, + className, + displayName, +}) => { + className = cn(className); + displayName ||= propSchema.title || beautifyString(propKey); + + if ("allOf" in propSchema) { + // If this happens, that is because Pydantic wraps $refs in an allOf if the + // $ref has sibling schema properties (which isn't technically allowed), + // so there will only be one item in allOf[]. + // AFAIK this should NEVER happen though, as $refs are resolved server-side. + propSchema = propSchema.allOf[0]; + console.warn(`Unsupported 'allOf' in schema for '${propKey}'!`, propSchema); + } + + if ("credentials_provider" in propSchema) { + return ( + + ); + } + + if ("properties" in propSchema) { + // Render a multi-select for all-boolean sub-schemas with more than 3 properties + if ( + Object.values(propSchema.properties).every( + (subSchema) => "type" in subSchema && subSchema.type == "boolean", + ) && + Object.keys(propSchema.properties).length >= 3 + ) { + const options = Object.keys(propSchema.properties); + const selectedKeys = Object.entries(currentValue || {}) + .filter(([_, v]) => v) + .map(([k, _]) => k); + return ( + { + handleInputChange( + key, + Object.fromEntries( + options.map((option) => [option, selection.includes(option)]), + ), + ); + }} + /> + ); + } + + return ( + + ); + } + + if ("additionalProperties" in propSchema) { + return ( + + ); + } + + if ("anyOf" in propSchema) { + // Optional oneOf + if ( + "oneOf" in propSchema.anyOf[0] && + propSchema.anyOf[0].oneOf && + "discriminator" in propSchema.anyOf[0] && + propSchema.anyOf[0].discriminator + ) { + return ( + + ); + } + + // optional items + const types = propSchema.anyOf.map((s) => + "type" in s ? 
s.type : undefined, + ); + if (types.includes("string") && types.includes("null")) { + // optional string and datetime + + if ( + "format" in propSchema.anyOf[0] && + propSchema.anyOf[0].format === "date-time" + ) { + return ( + + ); + } + + return ( + + ); + } else if ( + (types.includes("integer") || types.includes("number")) && + types.includes("null") + ) { + return ( + + ); + } else if (types.includes("array") && types.includes("null")) { + return ( + + ); + } else if (types.includes("object") && types.includes("null")) { + // rendering optional mutliselect + if ( + Object.values( + (propSchema.anyOf[0] as BlockIOObjectSubSchema).properties, + ).every( + (subSchema) => "type" in subSchema && subSchema.type == "boolean", + ) && + Object.keys((propSchema.anyOf[0] as BlockIOObjectSubSchema).properties) + .length >= 1 + ) { + const options = Object.keys( + (propSchema.anyOf[0] as BlockIOObjectSubSchema).properties, + ); + const selectedKeys = Object.entries(currentValue || {}) + .filter(([_, v]) => v) + .map(([k, _]) => k); + return ( + { + handleInputChange( + key, + Object.fromEntries( + options.map((option) => [option, selection.includes(option)]), + ), + ); + }} + /> + ); + } + + return ( + + ); + } + } + + if ( + "oneOf" in propSchema && + propSchema.oneOf && + "discriminator" in propSchema && + propSchema.discriminator + ) { + return ( + + ); + } + + if (!("type" in propSchema)) { + return ( + + ); + } + + switch (propSchema.type) { + case "string": + if ("image_upload" in propSchema && propSchema.image_upload === true) { + return ( + + ); + } + if ("format" in propSchema && propSchema.format === "date-time") { + return ( + + ); + } + return ( + + ); + case "boolean": + return ( + + ); + case "number": + case "integer": + return ( + + ); + case "array": + return ( + + ); + default: + console.warn( + `Schema for '${propKey}' specifies unknown type:`, + propSchema, + ); + return ( + + ); + } +}; + +const NodeOneOfDiscriminatorField: FC<{ + nodeId: string; + propKey: string; + propSchema: any; + currentValue?: any; + defaultValue?: any; + errors: { [key: string]: string | undefined }; + connections: ConnectionData; + handleInputChange: (key: string, value: any) => void; + handleInputClick: (key: string) => void; + className?: string; + displayName?: string; +}> = ({ + nodeId, + propKey, + propSchema, + currentValue, + defaultValue, + errors, + connections, + handleInputChange, + handleInputClick, + className, +}) => { + const discriminator = propSchema.discriminator; + const discriminatorProperty = discriminator.propertyName; + + const variantOptions = useMemo(() => { + const oneOfVariants = propSchema.oneOf || []; + + return oneOfVariants + .map((variant: any) => { + const variantDiscValue = + variant.properties?.[discriminatorProperty]?.const; + + return { + value: variantDiscValue, + schema: variant as BlockIOSubSchema, + }; + }) + .filter((v: any) => v.value != null); + }, [discriminatorProperty, propSchema.oneOf]); + + const initialVariant = defaultValue + ? variantOptions.find( + (opt: any) => defaultValue[discriminatorProperty] === opt.value, + ) + : currentValue + ? 
variantOptions.find( + (opt: any) => currentValue[discriminatorProperty] === opt.value, + ) + : null; + + const [chosenType, setChosenType] = useState( + initialVariant?.value || "", + ); + + useEffect(() => { + if (initialVariant && !currentValue) { + handleInputChange( + propKey, + defaultValue || { + [discriminatorProperty]: initialVariant.value, + }, + ); + } + }, []); + + const handleVariantChange = (newType: string) => { + setChosenType(newType); + const chosenVariant = variantOptions.find( + (opt: any) => opt.value === newType, + ); + if (chosenVariant) { + const initialValue = { + [discriminatorProperty]: newType, + }; + handleInputChange(propKey, initialValue); + } + }; + + const chosenVariantSchema = variantOptions.find( + (opt: any) => opt.value === chosenType, + )?.schema; + + function getEntryKey(key: string): string { + // use someKey for handle purpose (not childKey) + return `${propKey}_#_${key}`; + } + + function isConnected(key: string): boolean { + return connections.some( + (c) => c.targetHandle === getEntryKey(key) && c.target === nodeId, + ); + } + + return ( +
+ + + {chosenVariantSchema && ( +
+ {Object.entries(chosenVariantSchema.properties).map( + ([someKey, childSchema]) => { + if (someKey === "discriminator") { + return null; + } + const childKey = propKey ? `${propKey}.${someKey}` : someKey; // for history redo/undo purpose + return ( +
+ + + {!isConnected(someKey) && ( + + )} +
+ ); + }, + )} +
+ )} +
+ ); +}; + +const NodeCredentialsInput: FC<{ + selfKey: string; + value: any; + errors: { [key: string]: string | undefined }; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + className?: string; +}> = ({ selfKey, value, errors, handleInputChange, className }) => { + return ( +
+ + handleInputChange(selfKey, credsMeta) + } + selectedCredentials={value} + /> + {errors[selfKey] && ( + {errors[selfKey]} + )} +
+ ); +}; + +const InputRef = (value: any): ((el: HTMLInputElement | null) => void) => { + return (el) => el && value != null && (el.value = value); +}; + +const NodeKeyValueInput: FC<{ + nodeId: string; + selfKey: string; + schema: BlockIOKVSubSchema; + entries?: { [key: string]: string } | { [key: string]: number }; + errors: { [key: string]: string | undefined }; + connections: NodeObjectInputTreeProps["connections"]; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + className?: string; + displayName?: string; +}> = ({ + nodeId, + selfKey, + entries, + schema, + connections, + handleInputChange, + errors, + className, + displayName, +}) => { + const getPairValues = useCallback(() => { + // Map will preserve the order of entries. + let inputEntries = entries ?? schema.default; + if (!inputEntries || typeof inputEntries !== "object") inputEntries = {}; + + const defaultEntries = new Map(Object.entries(inputEntries)); + const prefix = `${selfKey}_#_`; + connections + .filter((c) => c.targetHandle.startsWith(prefix) && c.target === nodeId) + .map((c) => c.targetHandle.slice(prefix.length)) + .forEach((k) => !defaultEntries.has(k) && defaultEntries.set(k, "")); + + return Array.from(defaultEntries, ([key, value]) => ({ key, value })); + }, [entries, schema.default, connections, nodeId, selfKey]); + + const [keyValuePairs, setKeyValuePairs] = useState< + { key: string; value: string | number | null }[] + >([]); + + useEffect( + () => setKeyValuePairs(getPairValues()), + [connections, entries, schema.default, getPairValues], + ); + + function updateKeyValuePairs(newPairs: typeof keyValuePairs) { + setKeyValuePairs(newPairs); + + handleInputChange( + selfKey, + newPairs.reduce((obj, { key, value }) => ({ ...obj, [key]: value }), {}), + ); + } + + function convertValueType(value: string): string | number | null { + if ( + !schema.additionalProperties || + schema.additionalProperties.type == "string" + ) + return value; + if (!value) return null; + return Number(value); + } + + function getEntryKey(key: string): string { + return `${selfKey}_#_${key}`; + } + function isConnected(key: string): boolean { + return connections.some( + (c) => c.targetHandle === getEntryKey(key) && c.target === nodeId, + ); + } + + return ( +
0 ? "flex flex-col" : "")} + > +
+ {keyValuePairs.map(({ key, value }, index) => ( + // The `index` is used as a DOM key instead of the actual `key` + // because the `key` can change with each input, causing the input to lose focus. +
+ + {!isConnected(key) && ( +
+ + updateKeyValuePairs( + keyValuePairs.toSpliced(index, 1, { + key: e.target.value, + value: value, + }), + ) + } + /> + + updateKeyValuePairs( + keyValuePairs.toSpliced(index, 1, { + key: key, + value: convertValueType(e.target.value), + }), + ) + } + /> + +
+ )} + {errors[`${selfKey}.${key}`] && ( + + {errors[`${selfKey}.${key}`]} + + )} +
+ ))} + +
+ {errors[selfKey] && ( + {errors[selfKey]} + )} +
+ ); +}; + +// Checking if schema is type of string +function isStringSubSchema( + schema: BlockIOSimpleTypeSubSchema, +): schema is BlockIOStringSubSchema { + return "type" in schema && schema.type === "string"; +} + +const NodeArrayInput: FC<{ + nodeId: string; + selfKey: string; + schema: BlockIOArraySubSchema; + entries?: string[]; + errors: { [key: string]: string | undefined }; + connections: NodeObjectInputTreeProps["connections"]; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + handleInputClick: NodeObjectInputTreeProps["handleInputClick"]; + className?: string; + displayName?: string; +}> = ({ + nodeId, + selfKey, + schema, + entries, + errors, + connections, + handleInputChange, + handleInputClick, + className, + displayName, +}) => { + entries ??= schema.default; + if (!entries || !Array.isArray(entries)) entries = []; + + const prefix = `${selfKey}_$_`; + connections + .filter((c) => c.targetHandle.startsWith(prefix) && c.target === nodeId) + .map((c) => parseInt(c.targetHandle.slice(prefix.length))) + .filter((c) => !isNaN(c)) + .forEach( + (c) => + entries.length <= c && + entries.push(...Array(c - entries.length + 1).fill("")), + ); + + const isItemObject = "items" in schema && "properties" in schema.items!; + const error = + typeof errors[selfKey] === "string" ? errors[selfKey] : undefined; + return ( +
+ {entries.map((entry: any, index: number) => { + const entryKey = `${selfKey}_$_${index}`; + const isConnected = + connections && + connections.some( + (c) => c.targetHandle === entryKey && c.target === nodeId, + ); + return ( +
+ +
+ {!isConnected && + (schema.items ? ( + + ) : ( + + ))} + {!isConnected && ( + + )} +
+ {errors[entryKey] && typeof errors[entryKey] === "string" && ( + {errors[entryKey]} + )} +
+ ); + })} + + {error && {error}} +
+ ); +}; + +const NodeMultiSelectInput: FC<{ + selfKey: string; + schema: BlockIOObjectSubSchema; // TODO: Support BlockIOArraySubSchema + selection?: string[]; + error?: string; + className?: string; + displayName?: string; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; +}> = ({ + selfKey, + schema, + selection = [], + error, + className, + displayName, + handleInputChange, +}) => { + const options = Object.keys(schema.properties); + + return ( +
+ handleInputChange(selfKey, v)} + > + + + + + + {options + .map((key) => ({ ...schema.properties[key], key })) + .map(({ key, title, description }) => ( + + {title ?? key} + + ))} + + + + {error && {error}} +
+ ); +}; + +const NodeStringInput: FC<{ + selfKey: string; + schema: BlockIOStringSubSchema; + value?: string; + error?: string; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + handleInputClick: NodeObjectInputTreeProps["handleInputClick"]; + className?: string; + displayName: string; +}> = ({ + selfKey, + schema, + value = "", + error, + handleInputChange, + handleInputClick, + className, + displayName, +}) => { + value ||= schema.default || ""; + return ( +
+ {schema.enum ? ( + + ) : ( +
handleInputClick(selfKey) : undefined} + > + handleInputChange(selfKey, e.target.value)} + readOnly={schema.secret} + placeholder={ + schema?.placeholder || `Enter ${beautifyString(displayName)}` + } + className="pr-8 read-only:cursor-pointer read-only:text-gray-500" + /> + +
+ )} + {error && {error}} +
+ ); +}; + +export const NodeTextBoxInput: FC<{ + selfKey: string; + schema: BlockIOStringSubSchema; + value?: string; + error?: string; + handleInputChange: NodeObjectInputTreeProps["handleInputChange"]; + handleInputClick: NodeObjectInputTreeProps["handleInputClick"]; + className?: string; + displayName: string; +}> = ({ + selfKey, + schema, + value = "", + error, + handleInputChange, + handleInputClick, + className, + displayName, +}) => { + value ||= schema.default || ""; + return ( +
+
handleInputClick(selfKey) : undefined} + > +