diff --git a/.github/workflows/docker-build-push-mainland.yml b/.github/workflows/docker-build-push-mainland.yml
index 6099a4817..279cdb68e 100644
--- a/.github/workflows/docker-build-push-mainland.yml
+++ b/.github/workflows/docker-build-push-mainland.yml
@@ -3,6 +3,15 @@ name: Docker Build and Push All Images to tencentyun
on:
workflow_dispatch:
inputs:
+ version:
+ description: 'Image version tag (e.g. v1.0.0 or latest)'
+ required: true
+ default: 'latest'
+ push_latest:
+ description: 'Also push latest tag'
+ required: false
+ default: false
+ type: boolean
runner_label_json:
description: 'runner array in json format (e.g. ["ubuntu-latest"] or ["self-hosted"])'
required: true
@@ -23,10 +32,16 @@ jobs:
uses: actions/checkout@v4
- name: Build main image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent:amd64 -f make/main/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
+ docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-amd64 -f make/main/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push main image (amd64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-amd64
+ - name: Tag main image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-amd64 ccr.ccs.tencentyun.com/nexent-hub/nexent:amd64
+ - name: Push latest main image (amd64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent:amd64
build-and-push-main-arm64:
@@ -43,10 +58,16 @@ jobs:
uses: actions/checkout@v4
- name: Build main image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent:arm64 -f make/main/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
+ docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-arm64 -f make/main/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push main image (arm64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-arm64
+ - name: Tag main image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-arm64 ccr.ccs.tencentyun.com/nexent-hub/nexent:arm64
+ - name: Push latest main image (arm64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent:arm64
build-and-push-data-process-amd64:
@@ -72,10 +93,16 @@ jobs:
rm -rf .git .gitattributes
- name: Build data process image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:amd64 -f make/data_process/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
+ docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-amd64 -f make/data_process/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push data process image (amd64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-amd64
+ - name: Tag data process image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-amd64 ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:amd64
+ - name: Push latest data process image (amd64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:amd64
build-and-push-data-process-arm64:
@@ -101,10 +128,16 @@ jobs:
rm -rf .git .gitattributes
- name: Build data process image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:arm64 -f make/data_process/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
+ docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-arm64 -f make/data_process/Dockerfile --build-arg MIRROR=https://pypi.tuna.tsinghua.edu.cn/simple --build-arg APT_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push data process image (arm64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-arm64
+ - name: Tag data process image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-arm64 ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:arm64
+ - name: Push latest data process image (arm64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:arm64
build-and-push-web-amd64:
@@ -121,10 +154,16 @@ jobs:
uses: actions/checkout@v4
- name: Build web image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-web:amd64 -f make/web/Dockerfile --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua .
+ docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-amd64 -f make/web/Dockerfile --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push web image (amd64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-amd64
+ - name: Tag web image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-amd64 ccr.ccs.tencentyun.com/nexent-hub/nexent-web:amd64
+ - name: Push latest web image (amd64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-web:amd64
build-and-push-web-arm64:
@@ -141,10 +180,16 @@ jobs:
uses: actions/checkout@v4
- name: Build web image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-web:arm64 -f make/web/Dockerfile --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua .
+ docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-arm64 -f make/web/Dockerfile --build-arg MIRROR=https://registry.npmmirror.com --build-arg APK_MIRROR=tsinghua .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push web image (arm64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-arm64
+ - name: Tag web image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-arm64 ccr.ccs.tencentyun.com/nexent-hub/nexent-web:arm64
+ - name: Push latest web image (arm64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-web:arm64
build-and-push-terminal-amd64:
@@ -161,10 +206,16 @@ jobs:
uses: actions/checkout@v4
- name: Build terminal image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:amd64 -f make/terminal/Dockerfile .
+ docker buildx build --platform linux/amd64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 -f make/terminal/Dockerfile .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push terminal image (amd64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-amd64
+ - name: Tag terminal image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:amd64
+ - name: Push latest terminal image (amd64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:amd64
build-and-push-terminal-arm64:
@@ -181,10 +232,16 @@ jobs:
uses: actions/checkout@v4
- name: Build terminal image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:arm64 -f make/terminal/Dockerfile .
+ docker buildx build --platform linux/arm64 --load -t ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-arm64 -f make/terminal/Dockerfile .
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Push terminal image (arm64) to Tencent Cloud
+ run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-arm64
+ - name: Tag terminal image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-arm64 ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:arm64
+ - name: Push latest terminal image (arm64) to Tencent Cloud
+ if: inputs.push_latest == true
run: docker push ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:arm64
manifest-push-main:
@@ -196,6 +253,13 @@ jobs:
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Create and push manifest for main (Tencent Cloud)
+ run: |
+ docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }} \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-amd64 \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}-arm64
+ docker manifest push ccr.ccs.tencentyun.com/nexent-hub/nexent:${{ inputs.version }}
+ - name: Create and push latest manifest for main (Tencent Cloud)
+ if: inputs.push_latest == true
run: |
docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent:latest \
ccr.ccs.tencentyun.com/nexent-hub/nexent:amd64 \
@@ -211,6 +275,13 @@ jobs:
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Create and push manifest for data-process (Tencent Cloud)
+ run: |
+ docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }} \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-amd64 \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}-arm64
+ docker manifest push ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:${{ inputs.version }}
+ - name: Create and push latest manifest for data-process (Tencent Cloud)
+ if: inputs.push_latest == true
run: |
docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:latest \
ccr.ccs.tencentyun.com/nexent-hub/nexent-data-process:amd64 \
@@ -226,6 +297,13 @@ jobs:
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Create and push manifest for web (Tencent Cloud)
+ run: |
+ docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }} \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-amd64 \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}-arm64
+ docker manifest push ccr.ccs.tencentyun.com/nexent-hub/nexent-web:${{ inputs.version }}
+ - name: Create and push latest manifest for web (Tencent Cloud)
+ if: inputs.push_latest == true
run: |
docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-web:latest \
ccr.ccs.tencentyun.com/nexent-hub/nexent-web:amd64 \
@@ -241,6 +319,13 @@ jobs:
- name: Login to Tencent Cloud
run: echo ${{ secrets.TCR_PASSWORD }} | docker login ccr.ccs.tencentyun.com --username=${{ secrets.TCR_USERNAME }} --password-stdin
- name: Create and push manifest for terminal (Tencent Cloud)
+ run: |
+ docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }} \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 \
+ ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}-arm64
+ docker manifest push ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:${{ inputs.version }}
+ - name: Create and push latest manifest for terminal (Tencent Cloud)
+ if: inputs.push_latest == true
run: |
docker manifest create ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:latest \
ccr.ccs.tencentyun.com/nexent-hub/nexent-ubuntu-terminal:amd64 \
diff --git a/.github/workflows/docker-build-push-overseas.yml b/.github/workflows/docker-build-push-overseas.yml
index e9c48d272..d0483c05c 100644
--- a/.github/workflows/docker-build-push-overseas.yml
+++ b/.github/workflows/docker-build-push-overseas.yml
@@ -3,6 +3,15 @@ name: Docker Build and Push All Images to DockerHub
on:
workflow_dispatch:
inputs:
+ version:
+ description: 'Image version tag (e.g. v1.0.0 or latest)'
+ required: true
+ default: 'latest'
+ push_latest:
+ description: 'Also push latest tag'
+ required: false
+ default: false
+ type: boolean
runner_label_json:
description: 'runner array in json format (e.g. ["ubuntu-latest"] or ["self-hosted"])'
required: true
@@ -23,10 +32,16 @@ jobs:
uses: actions/checkout@v4
- name: Build main image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 -t nexent/nexent:amd64 --load -f make/main/Dockerfile .
+ docker buildx build --platform linux/amd64 -t nexent/nexent:${{ inputs.version }}-amd64 --load -f make/main/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push main image (amd64) to DockerHub
+ run: docker push nexent/nexent:${{ inputs.version }}-amd64
+ - name: Tag main image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent:${{ inputs.version }}-amd64 nexent/nexent:amd64
+ - name: Push latest main image (amd64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent:amd64
build-and-push-main-arm64:
@@ -43,10 +58,16 @@ jobs:
uses: actions/checkout@v4
- name: Build main image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 -t nexent/nexent:arm64 --load -f make/main/Dockerfile .
+ docker buildx build --platform linux/arm64 -t nexent/nexent:${{ inputs.version }}-arm64 --load -f make/main/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push main image (arm64) to DockerHub
+ run: docker push nexent/nexent:${{ inputs.version }}-arm64
+ - name: Tag main image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent:${{ inputs.version }}-arm64 nexent/nexent:arm64
+ - name: Push latest main image (arm64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent:arm64
build-and-push-data-process-amd64:
@@ -72,10 +93,16 @@ jobs:
rm -rf .git .gitattributes
- name: Build data process image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 -t nexent/nexent-data-process:amd64 --load -f make/data_process/Dockerfile .
+ docker buildx build --platform linux/amd64 -t nexent/nexent-data-process:${{ inputs.version }}-amd64 --load -f make/data_process/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push data process image (amd64) to DockerHub
+ run: docker push nexent/nexent-data-process:${{ inputs.version }}-amd64
+ - name: Tag data process image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-data-process:${{ inputs.version }}-amd64 nexent/nexent-data-process:amd64
+ - name: Push latest data process image (amd64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-data-process:amd64
build-and-push-data-process-arm64:
@@ -101,10 +128,16 @@ jobs:
rm -rf .git .gitattributes
- name: Build data process image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 -t nexent/nexent-data-process:arm64 --load -f make/data_process/Dockerfile .
+ docker buildx build --platform linux/arm64 -t nexent/nexent-data-process:${{ inputs.version }}-arm64 --load -f make/data_process/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push data process image (arm64) to DockerHub
+ run: docker push nexent/nexent-data-process:${{ inputs.version }}-arm64
+ - name: Tag data process image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-data-process:${{ inputs.version }}-arm64 nexent/nexent-data-process:arm64
+ - name: Push latest data process image (arm64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-data-process:arm64
build-and-push-web-amd64:
@@ -121,10 +154,16 @@ jobs:
uses: actions/checkout@v4
- name: Build web image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 -t nexent/nexent-web:amd64 --load -f make/web/Dockerfile .
+ docker buildx build --platform linux/amd64 -t nexent/nexent-web:${{ inputs.version }}-amd64 --load -f make/web/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push web image (amd64) to DockerHub
+ run: docker push nexent/nexent-web:${{ inputs.version }}-amd64
+ - name: Tag web image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-web:${{ inputs.version }}-amd64 nexent/nexent-web:amd64
+ - name: Push latest web image (amd64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-web:amd64
build-and-push-web-arm64:
@@ -141,10 +180,16 @@ jobs:
uses: actions/checkout@v4
- name: Build web image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 -t nexent/nexent-web:arm64 --load -f make/web/Dockerfile .
+ docker buildx build --platform linux/arm64 -t nexent/nexent-web:${{ inputs.version }}-arm64 --load -f make/web/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push web image (arm64) to DockerHub
+ run: docker push nexent/nexent-web:${{ inputs.version }}-arm64
+ - name: Tag web image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-web:${{ inputs.version }}-arm64 nexent/nexent-web:arm64
+ - name: Push latest web image (arm64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-web:arm64
build-and-push-terminal-amd64:
@@ -161,10 +206,16 @@ jobs:
uses: actions/checkout@v4
- name: Build terminal image (amd64) and load locally
run: |
- docker buildx build --platform linux/amd64 -t nexent/nexent-ubuntu-terminal:amd64 --load -f make/terminal/Dockerfile .
+ docker buildx build --platform linux/amd64 -t nexent/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 --load -f make/terminal/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push terminal image (amd64) to DockerHub
+ run: docker push nexent/nexent-ubuntu-terminal:${{ inputs.version }}-amd64
+ - name: Tag terminal image (amd64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 nexent/nexent-ubuntu-terminal:amd64
+ - name: Push latest terminal image (amd64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-ubuntu-terminal:amd64
build-and-push-terminal-arm64:
@@ -181,10 +232,16 @@ jobs:
uses: actions/checkout@v4
- name: Build terminal image (arm64) and load locally
run: |
- docker buildx build --platform linux/arm64 -t nexent/nexent-ubuntu-terminal:arm64 --load -f make/terminal/Dockerfile .
+ docker buildx build --platform linux/arm64 -t nexent/nexent-ubuntu-terminal:${{ inputs.version }}-arm64 --load -f make/terminal/Dockerfile .
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Push terminal image (arm64) to DockerHub
+ run: docker push nexent/nexent-ubuntu-terminal:${{ inputs.version }}-arm64
+ - name: Tag terminal image (arm64) as latest
+ if: inputs.push_latest == true
+ run: docker tag nexent/nexent-ubuntu-terminal:${{ inputs.version }}-arm64 nexent/nexent-ubuntu-terminal:arm64
+ - name: Push latest terminal image (arm64) to DockerHub
+ if: inputs.push_latest == true
run: docker push nexent/nexent-ubuntu-terminal:arm64
manifest-push-main:
@@ -196,6 +253,13 @@ jobs:
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Create and push manifest for main (DockerHub)
+ run: |
+ docker manifest create nexent/nexent:${{ inputs.version }} \
+ nexent/nexent:${{ inputs.version }}-amd64 \
+ nexent/nexent:${{ inputs.version }}-arm64
+ docker manifest push nexent/nexent:${{ inputs.version }}
+ - name: Create and push latest manifest for main (DockerHub)
+ if: inputs.push_latest == true
run: |
docker manifest create nexent/nexent:latest \
nexent/nexent:amd64 \
@@ -211,6 +275,13 @@ jobs:
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Create and push manifest for data-process (DockerHub)
+ run: |
+ docker manifest create nexent/nexent-data-process:${{ inputs.version }} \
+ nexent/nexent-data-process:${{ inputs.version }}-amd64 \
+ nexent/nexent-data-process:${{ inputs.version }}-arm64
+ docker manifest push nexent/nexent-data-process:${{ inputs.version }}
+ - name: Create and push latest manifest for data-process (DockerHub)
+ if: inputs.push_latest == true
run: |
docker manifest create nexent/nexent-data-process:latest \
nexent/nexent-data-process:amd64 \
@@ -226,6 +297,13 @@ jobs:
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Create and push manifest for web (DockerHub)
+ run: |
+ docker manifest create nexent/nexent-web:${{ inputs.version }} \
+ nexent/nexent-web:${{ inputs.version }}-amd64 \
+ nexent/nexent-web:${{ inputs.version }}-arm64
+ docker manifest push nexent/nexent-web:${{ inputs.version }}
+ - name: Create and push latest manifest for web (DockerHub)
+ if: inputs.push_latest == true
run: |
docker manifest create nexent/nexent-web:latest \
nexent/nexent-web:amd64 \
@@ -241,6 +319,13 @@ jobs:
- name: Login to DockerHub
run: echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u nexent --password-stdin
- name: Create and push manifest for terminal (DockerHub)
+ run: |
+ docker manifest create nexent/nexent-ubuntu-terminal:${{ inputs.version }} \
+ nexent/nexent-ubuntu-terminal:${{ inputs.version }}-amd64 \
+ nexent/nexent-ubuntu-terminal:${{ inputs.version }}-arm64
+ docker manifest push nexent/nexent-ubuntu-terminal:${{ inputs.version }}
+ - name: Create and push latest manifest for terminal (DockerHub)
+ if: inputs.push_latest == true
run: |
docker manifest create nexent/nexent-ubuntu-terminal:latest \
nexent/nexent-ubuntu-terminal:amd64 \
diff --git a/backend/agents/create_agent_info.py b/backend/agents/create_agent_info.py
index ca801109f..6e8d17740 100644
--- a/backend/agents/create_agent_info.py
+++ b/backend/agents/create_agent_info.py
@@ -43,7 +43,8 @@ async def create_model_config_list(tenant_id):
model_repo=record["model_repo"],
model_name=record["model_name"],
),
- url=record["base_url"]))
+ url=record["base_url"],
+ ssl_verify=record.get("ssl_verify", True)))
# fit for old version, main_model and sub_model use default model
main_model_config = tenant_config_manager.get_model_config(
key=MODEL_CONFIG_MAPPING["llm"], tenant_id=tenant_id)
@@ -52,13 +53,15 @@ async def create_model_config_list(tenant_id):
api_key=main_model_config.get("api_key", ""),
model_name=get_model_name_from_config(main_model_config) if main_model_config.get(
"model_name") else "",
- url=main_model_config.get("base_url", "")))
+ url=main_model_config.get("base_url", ""),
+ ssl_verify=main_model_config.get("ssl_verify", True)))
model_list.append(
ModelConfig(cite_name="sub_model",
api_key=main_model_config.get("api_key", ""),
model_name=get_model_name_from_config(main_model_config) if main_model_config.get(
"model_name") else "",
- url=main_model_config.get("base_url", "")))
+ url=main_model_config.get("base_url", ""),
+ ssl_verify=main_model_config.get("ssl_verify", True)))
return model_list
diff --git a/backend/apps/file_management_app.py b/backend/apps/file_management_app.py
index 19e382ba1..4869ce440 100644
--- a/backend/apps/file_management_app.py
+++ b/backend/apps/file_management_app.py
@@ -1,7 +1,10 @@
import logging
+import re
from http import HTTPStatus
from typing import List, Optional
+from urllib.parse import urlparse, urlunparse, unquote, quote
+import httpx
from fastapi import APIRouter, Body, File, Form, Header, HTTPException, Path as PathParam, Query, UploadFile
from fastapi.responses import JSONResponse, RedirectResponse, StreamingResponse
@@ -12,6 +15,51 @@
logger = logging.getLogger("file_management_app")
+
+def build_content_disposition_header(filename: Optional[str]) -> str:
+ """
+ Build a Content-Disposition header that keeps the original filename.
+
+ - ASCII filenames are returned directly.
+ - Non-ASCII filenames include both an ASCII fallback and RFC 5987 encoded value
+ so modern browsers keep the original name.
+ """
+ safe_name = (filename or "download").strip() or "download"
+
+ def _sanitize_ascii(value: str) -> str:
+ # Replace problematic characters that break HTTP headers
+ # Remove control characters (newlines, carriage returns, tabs, etc.)
+ # Remove control characters (0x00-0x1F and 0x7F)
+ sanitized = re.sub(r'[\x00-\x1F\x7F]', '', value)
+ # Replace problematic characters that break HTTP headers
+ sanitized = sanitized.replace("\\", "_").replace('"', "_")
+ # Remove leading/trailing spaces and dots (Windows filename restrictions)
+ sanitized = sanitized.strip(' .')
+ return sanitized if sanitized else "download"
+
+ try:
+ safe_name.encode("ascii")
+ return f'attachment; filename="{_sanitize_ascii(safe_name)}"'
+ except UnicodeEncodeError:
+ try:
+ encoded = quote(safe_name, safe="")
+ except Exception:
+ # quote failure, fallback to sanitized ASCII only
+ logger.warning("Failed to encode filename '%s', using fallback", safe_name)
+ return f'attachment; filename="{_sanitize_ascii(safe_name)}"'
+
+ fallback = _sanitize_ascii(
+ safe_name.encode("ascii", "ignore").decode("ascii") or "download"
+ )
+ return f'attachment; filename="{fallback}"; filename*=UTF-8\'\'{encoded}'
+ except Exception as exc: # pragma: no cover
+ logger.warning(
+ "Failed to encode filename '%s': %s. Using fallback.",
+ safe_name,
+ exc,
+ )
+ return 'attachment; filename="download"'
+
# Create API router
file_management_runtime_router = APIRouter(prefix="/file")
file_management_config_router = APIRouter(prefix="/file")
@@ -98,6 +146,64 @@ async def process_files(
)
+@file_management_config_router.get("/download/{object_name:path}")
+async def get_storage_file(
+ object_name: str = PathParam(..., description="File object name"),
+ download: str = Query("ignore", description="How to get the file"),
+ expires: int = Query(3600, description="URL validity period (seconds)"),
+ filename: Optional[str] = Query(None, description="Original filename for download (optional)")
+):
+ """
+ Get information, download link, or file stream for a single file
+
+ - **object_name**: File object name
+ - **download**: Download mode: ignore (default, return file info), stream (return file stream), redirect (redirect to download URL)
+ - **expires**: URL validity period in seconds (default 3600)
+ - **filename**: Original filename for download (optional, if not provided, will use object_name)
+
+ Returns file information, download link, or file content
+ """
+ try:
+ logger.info(f"[get_storage_file] Route matched! object_name={object_name}, download={download}, filename={filename}")
+ if download == "redirect":
+ # return a redirect download URL
+ result = await get_file_url_impl(object_name=object_name, expires=expires)
+ return RedirectResponse(url=result["url"])
+ elif download == "stream":
+ # return a readable file stream
+ file_stream, content_type = await get_file_stream_impl(object_name=object_name)
+ logger.info(f"Streaming file: object_name={object_name}, content_type={content_type}")
+
+ # Use provided filename or extract from object_name
+ download_filename = filename
+ if not download_filename:
+ # Extract filename from object_name (get the last part after the last slash)
+ download_filename = object_name.split("/")[-1] if "/" in object_name else object_name
+
+ # Build Content-Disposition header with proper encoding for non-ASCII characters
+ content_disposition = build_content_disposition_header(download_filename)
+
+ return StreamingResponse(
+ file_stream,
+ media_type=content_type,
+ headers={
+ "Content-Disposition": content_disposition,
+ "Cache-Control": "public, max-age=3600",
+ "ETag": f'"{object_name}"',
+ }
+ )
+ else:
+ # return file metadata
+ return await get_file_url_impl(object_name=object_name, expires=expires)
+ except Exception as e:
+ logger.error(f"Failed to get file: object_name={object_name}, error={str(e)}")
+ raise HTTPException(
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ detail=f"Failed to get file information: {str(e)}"
+ )
+
+
+
@file_management_runtime_router.post("/storage")
async def storage_upload_files(
files: List[UploadFile] = File(..., description="List of files to upload"),
@@ -158,43 +264,204 @@ async def get_storage_files(
)
-@file_management_config_router.get("/storage/{path}/{object_name}")
-async def get_storage_file(
- object_name: str = PathParam(..., description="File object name"),
- download: str = Query("ignore", description="How to get the file"),
- expires: int = Query(3600, description="URL validity period (seconds)")
+def _ensure_http_scheme(raw_url: str) -> str:
+ """
+ Ensure the provided Datamate URL has an explicit HTTP or HTTPS scheme.
+ """
+ candidate = (raw_url or "").strip()
+ if not candidate:
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="URL cannot be empty"
+ )
+
+ parsed = urlparse(candidate)
+ if parsed.scheme:
+ if parsed.scheme not in ("http", "https"):
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="URL must start with http:// or https://"
+ )
+ return candidate
+
+ if candidate.startswith("//"):
+ return f"http:{candidate}"
+
+ return f"http://{candidate}"
+
+
+def _normalize_datamate_download_url(raw_url: str) -> str:
+ """
+ Normalize Datamate download URL to ensure it follows /data-management/datasets/{datasetId}/files/{fileId}/download
+ """
+ normalized_source = _ensure_http_scheme(raw_url)
+ parsed_url = urlparse(normalized_source)
+ path_segments = [segment for segment in parsed_url.path.split("/") if segment]
+
+ if "data-management" not in path_segments:
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="Invalid Datamate URL: missing 'data-management' segment"
+ )
+
+ try:
+ dm_index = path_segments.index("data-management")
+ datasets_index = path_segments.index("datasets", dm_index)
+ dataset_id = path_segments[datasets_index + 1]
+ files_index = path_segments.index("files", datasets_index)
+ file_id = path_segments[files_index + 1]
+ except (ValueError, IndexError):
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="Invalid Datamate URL: unable to parse dataset_id or file_id"
+ )
+
+ prefix_segments = path_segments[:dm_index]
+ prefix_path = "/" + "/".join(prefix_segments) if prefix_segments else ""
+ normalized_path = f"{prefix_path}/data-management/datasets/{dataset_id}/files/{file_id}/download"
+
+ normalized_url = urlunparse((
+ parsed_url.scheme,
+ parsed_url.netloc,
+ normalized_path,
+ "",
+ "",
+ ""
+ ))
+
+ return normalized_url
+
+
+def _build_datamate_url_from_parts(base_url: str, dataset_id: str, file_id: str) -> str:
+ """
+ Build Datamate download URL from individual parts
+ """
+ if not base_url:
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="base_url is required when dataset_id and file_id are provided"
+ )
+
+ base_with_scheme = _ensure_http_scheme(base_url)
+ parsed_base = urlparse(base_with_scheme)
+ base_prefix = parsed_base.path.rstrip("/")
+
+ if base_prefix and not base_prefix.endswith("/api"):
+ if base_prefix.endswith("/"):
+ base_prefix = f"{base_prefix}api"
+ else:
+ base_prefix = f"{base_prefix}/api"
+ elif not base_prefix:
+ base_prefix = "/api"
+
+ normalized_path = f"{base_prefix}/data-management/datasets/{dataset_id}/files/{file_id}/download"
+
+ return urlunparse((
+ parsed_base.scheme,
+ parsed_base.netloc,
+ normalized_path,
+ "",
+ "",
+ ""
+ ))
+
+
+@file_management_config_router.get("/datamate/download")
+async def download_datamate_file(
+ url: Optional[str] = Query(None, description="Datamate file URL to download"),
+ base_url: Optional[str] = Query(None, description="Datamate base server URL (e.g., host:port)"),
+ dataset_id: Optional[str] = Query(None, description="Datamate dataset ID"),
+ file_id: Optional[str] = Query(None, description="Datamate file ID"),
+ filename: Optional[str] = Query(None, description="Optional filename for download"),
+ authorization: Optional[str] = Header(None, alias="Authorization")
):
"""
- Get information, download link, or file stream for a single file
+ Download file from Datamate knowledge base via HTTP URL
- - **object_name**: File object name
- - **download**: Download mode: ignore (default, return file info), stream (return file stream), redirect (redirect to download URL)
- - **expires**: URL validity period in seconds (default 3600)
+ - **url**: Full HTTP URL of the file to download (optional)
+ - **base_url**: Base server URL (e.g., host:port)
+ - **dataset_id**: Datamate dataset ID
+ - **file_id**: Datamate file ID
+ - **filename**: Optional filename for the download (extracted automatically if not provided)
+ - **authorization**: Optional authorization header to pass to the target URL
- Returns file information, download link, or file content
+ Returns file stream for download
"""
try:
- if download == "redirect":
- # return a redirect download URL
- result = await get_file_url_impl(object_name=object_name, expires=expires)
- return RedirectResponse(url=result["url"])
- elif download == "stream":
- # return a readable file stream
- file_stream, content_type = await get_file_stream_impl(object_name=object_name)
+ if url:
+ logger.info(f"[download_datamate_file] Using full URL: {url}")
+ normalized_url = _normalize_datamate_download_url(url)
+ elif base_url and dataset_id and file_id:
+ logger.info(f"[download_datamate_file] Building URL from parts: base_url={base_url}, dataset_id={dataset_id}, file_id={file_id}")
+ normalized_url = _build_datamate_url_from_parts(base_url, dataset_id, file_id)
+ else:
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_REQUEST,
+ detail="Either url or (base_url, dataset_id, file_id) must be provided"
+ )
+
+ logger.info(f"[download_datamate_file] Normalized download URL: {normalized_url}")
+ logger.info(f"[download_datamate_file] Authorization header present: {authorization is not None}")
+
+ headers = {}
+ if authorization:
+ headers["Authorization"] = authorization
+ logger.debug(f"[download_datamate_file] Using authorization header: {authorization[:20]}...")
+ headers["User-Agent"] = "Nexent-File-Downloader/1.0"
+
+ logger.info(f"[download_datamate_file] Request headers: {list(headers.keys())}")
+
+ async with httpx.AsyncClient(timeout=30.0) as client:
+ response = await client.get(normalized_url, headers=headers, follow_redirects=True)
+ logger.info(f"[download_datamate_file] Response status: {response.status_code}")
+
+ if response.status_code == 404:
+ logger.error(f"[download_datamate_file] File not found at URL: {normalized_url}")
+ logger.error(f"[download_datamate_file] Response headers: {dict(response.headers)}")
+ raise HTTPException(
+ status_code=HTTPStatus.NOT_FOUND,
+ detail="File not found. Please verify dataset_id and file_id."
+ )
+
+ response.raise_for_status()
+
+ content_type = response.headers.get("Content-Type", "application/octet-stream")
+
+ download_filename = filename
+ if not download_filename:
+ content_disposition = response.headers.get("Content-Disposition", "")
+ if content_disposition:
+ filename_match = re.search(r'filename="?(.+?)"?$', content_disposition)
+ if filename_match:
+ download_filename = filename_match.group(1)
+
+ if not download_filename:
+ path = unquote(urlparse(normalized_url).path)
+ download_filename = path.split('/')[-1] or "download"
+
+ # Build Content-Disposition header with proper encoding for non-ASCII characters
+ content_disposition = build_content_disposition_header(download_filename)
+
return StreamingResponse(
- file_stream,
+ iter([response.content]),
media_type=content_type,
headers={
- "Content-Disposition": f'inline; filename="{object_name}"'
+ "Content-Disposition": content_disposition
}
)
- else:
- # return file metadata
- return await get_file_url_impl(object_name=object_name, expires=expires)
+ except httpx.HTTPError as e:
+ logger.error(f"Failed to download file from URL {url}: {str(e)}")
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_GATEWAY,
+ detail=f"Failed to download file from URL: {str(e)}"
+ )
+ except HTTPException:
+ raise
except Exception as e:
+ logger.error(f"Failed to download datamate file: {str(e)}")
raise HTTPException(
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- detail=f"Failed to get file information: {str(e)}"
+ detail=f"Failed to download file: {str(e)}"
)
diff --git a/backend/apps/image_app.py b/backend/apps/image_app.py
index 3024d4226..61eed2fcc 100644
--- a/backend/apps/image_app.py
+++ b/backend/apps/image_app.py
@@ -1,7 +1,11 @@
import logging
+import base64
from urllib.parse import unquote
+from io import BytesIO
-from fastapi import APIRouter
+from fastapi import APIRouter, HTTPException
+from fastapi.responses import StreamingResponse
+from http import HTTPStatus
from services.image_service import proxy_image_impl
@@ -12,23 +16,53 @@
logger = logging.getLogger("image_app")
-# TODO: To remove this proxy service after frontend uses image filter service as image provider
@router.get("/image")
-async def proxy_image(url: str):
+async def proxy_image(url: str, format: str = "json"):
"""
- Image proxy service that fetches remote images and returns base64 encoded data
-
+ Image proxy service that fetches remote images
+
Parameters:
url: Remote image URL
+ format: Response format - "json" (default, returns base64) or "stream" (returns image stream)
Returns:
- JSON object containing base64 encoded image
+ JSON object containing base64 encoded image (format=json) or image stream (format=stream)
"""
try:
# URL decode
decoded_url = unquote(url)
- return await proxy_image_impl(decoded_url)
+
+ if format == "stream":
+ # Return image as stream for direct use in <img> tags
+ result = await proxy_image_impl(decoded_url)
+ if not result.get("success"):
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_GATEWAY,
+ detail=result.get("error", "Failed to fetch image")
+ )
+
+ # Decode base64 to bytes
+ base64_data = result.get("base64", "")
+ content_type = result.get("content_type", "image/jpeg")
+ image_bytes = base64.b64decode(base64_data)
+
+ # Return as streaming response
+ return StreamingResponse(
+ BytesIO(image_bytes),
+ media_type=content_type,
+ headers={
+ "Cache-Control": "public, max-age=3600"
+ }
+ )
+ else:
+ # Return JSON with base64 (default behavior for backward compatibility)
+ return await proxy_image_impl(decoded_url)
except Exception as e:
logger.error(
f"Error occurred while proxying image: {str(e)}, URL: {url[:50]}...")
+ if format == "stream":
+ raise HTTPException(
+ status_code=HTTPStatus.BAD_GATEWAY,
+ detail=str(e)
+ )
return {"success": False, "error": str(e)}
\ No newline at end of file
diff --git a/backend/apps/me_model_managment_app.py b/backend/apps/me_model_managment_app.py
index 70c4cfab8..d7055474f 100644
--- a/backend/apps/me_model_managment_app.py
+++ b/backend/apps/me_model_managment_app.py
@@ -4,81 +4,44 @@
from fastapi import APIRouter, Query, HTTPException
from fastapi.responses import JSONResponse
-from consts.exceptions import TimeoutException, NotFoundException, MEConnectionException
-from services.me_model_management_service import get_me_models_impl, check_me_variable_set
-from services.model_health_service import check_me_connectivity_impl
+from consts.exceptions import MEConnectionException, TimeoutException
+from services.me_model_management_service import check_me_variable_set, check_me_connectivity
router = APIRouter(prefix="/me")
-@router.get("/model/list")
-async def get_me_models(
- type: str = Query(
- default="", description="Model type: embed/chat/rerank"),
- timeout: int = Query(
- default=2, description="Request timeout in seconds")
-):
- """
- Get list of models from model engine API
- """
- try:
- # Pre-check ME environment variables; return empty list if not configured
- if not await check_me_variable_set():
- return JSONResponse(
- status_code=HTTPStatus.OK,
- content={
- "message": "Retrieve skipped",
- "data": []
- }
- )
- filtered_result = await get_me_models_impl(timeout=timeout, type=type)
- return JSONResponse(
- status_code=HTTPStatus.OK,
- content={
- "message": "Successfully retrieved",
- "data": filtered_result
- }
- )
- except TimeoutException as e:
- logging.error(f"Request me model timeout: {str(e)}")
- raise HTTPException(status_code=HTTPStatus.REQUEST_TIMEOUT, detail="Failed to get ModelEngine model list: timeout")
- except NotFoundException as e:
- logging.error(f"Request me model not found: {str(e)}")
- raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail="ModelEngine model not found")
- except Exception as e:
- logging.error(f"Failed to get me model list: {str(e)}")
- raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Failed to get ModelEngine model list")
-
-
@router.get("/healthcheck")
-async def check_me_connectivity(timeout: int = Query(default=2, description="Timeout in seconds")):
+async def check_me_health(timeout: int = Query(default=30, description="Timeout in seconds")):
"""
- Health check from model engine API
+ Health check for ModelEngine platform by actually calling the API.
+ Returns connectivity status based on actual API response.
"""
try:
- # Pre-check ME environment variables; return not connected if not configured
+ # First check if environment variables are configured
if not await check_me_variable_set():
return JSONResponse(
status_code=HTTPStatus.OK,
content={
"connectivity": False,
- "message": "ModelEngine platform necessary environment variables not configured. Healthcheck skipped.",
+ "message": "ModelEngine platform environment variables not configured. Healthcheck skipped.",
}
)
- await check_me_connectivity_impl(timeout)
+
+ # Then check actual connectivity
+ await check_me_connectivity(timeout)
return JSONResponse(
status_code=HTTPStatus.OK,
content={
"connectivity": True,
- "message": "ModelEngine platform connect successfully.",
+ "message": "ModelEngine platform connected successfully.",
}
)
except MEConnectionException as e:
- logging.error(f"ModelEngine model healthcheck failed: {str(e)}")
- raise HTTPException(status_code=HTTPStatus.SERVICE_UNAVAILABLE, detail="ModelEngine model connect failed.")
+ logging.error(f"ModelEngine healthcheck failed: {str(e)}")
+ raise HTTPException(status_code=HTTPStatus.SERVICE_UNAVAILABLE, detail=f"ModelEngine connection failed: {str(e)}")
except TimeoutException as e:
- logging.error(f"ModelEngine model healthcheck timeout: {str(e)}")
- raise HTTPException(status_code=HTTPStatus.REQUEST_TIMEOUT, detail="ModelEngine model connect timeout.")
+ logging.error(f"ModelEngine healthcheck timeout: {str(e)}")
+ raise HTTPException(status_code=HTTPStatus.REQUEST_TIMEOUT, detail="ModelEngine connection timeout.")
except Exception as e:
- logging.error(f"ModelEngine model healthcheck failed with unknown error: {str(e)}.")
- raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="ModelEngine model connect failed.")
+ logging.error(f"ModelEngine healthcheck failed with unknown error: {str(e)}")
+ raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=f"ModelEngine healthcheck failed: {str(e)}")
diff --git a/backend/apps/model_managment_app.py b/backend/apps/model_managment_app.py
index 5383aeed8..0c3c7a8cf 100644
--- a/backend/apps/model_managment_app.py
+++ b/backend/apps/model_managment_app.py
@@ -157,25 +157,35 @@ async def get_provider_list(request: ProviderModelRequest, authorization: Option
@router.post("/update")
-async def update_single_model(request: dict, authorization: Optional[str] = Header(None)):
- """Update a single model by its `model_id`.
+async def update_single_model(
+ request: dict,
+ display_name: str = Query(..., description="Current display name of the model to update"),
+ authorization: Optional[str] = Header(None)
+):
+ """Update a single model by its current `display_name`.
- Performs a uniqueness check on `display_name` within the tenant and updates
- the record if valid.
+ The model is looked up using the `display_name` query parameter. The request
+ body contains the fields to update, which may include a new `display_name`.
Args:
- request: Arbitrary model fields with required `model_id`.
+ request: Arbitrary model fields to update (may include new display_name).
+ display_name: Current display name of the model (query parameter for lookup).
authorization: Bearer token header used to derive identity context.
Raises:
- HTTPException: 409 if `display_name` conflicts, 500 for unexpected errors.
+ HTTPException: 404 if model not found, 409 if new `display_name` conflicts,
+ 500 for unexpected errors.
"""
try:
user_id, tenant_id = get_current_user_id(authorization)
- await update_single_model_for_tenant(user_id, tenant_id, request)
+ await update_single_model_for_tenant(user_id, tenant_id, display_name, request)
return JSONResponse(status_code=HTTPStatus.OK, content={
"message": "Model updated successfully"
})
+ except LookupError as e:
+ logging.error(f"Failed to update model: {str(e)}")
+ raise HTTPException(status_code=HTTPStatus.NOT_FOUND,
+ detail=str(e))
except ValueError as e:
logging.error(f"Failed to update model: {str(e)}")
raise HTTPException(status_code=HTTPStatus.CONFLICT,
diff --git a/backend/consts/const.py b/backend/consts/const.py
index 754619d12..8e99ca84d 100644
--- a/backend/consts/const.py
+++ b/backend/consts/const.py
@@ -279,7 +279,7 @@ class VectorDatabaseType(str, Enum):
os.getenv("LLM_SLOW_TOKEN_RATE_THRESHOLD", "10.0")) # tokens per second
# APP Version
-APP_VERSION = "v1.7.7"
+APP_VERSION = "v1.7.7.1"
DEFAULT_ZH_TITLE = "新对话"
DEFAULT_EN_TITLE = "New Conversation"
diff --git a/backend/consts/provider.py b/backend/consts/provider.py
index f82a60e7f..7fd783015 100644
--- a/backend/consts/provider.py
+++ b/backend/consts/provider.py
@@ -5,8 +5,12 @@ class ProviderEnum(str, Enum):
"""Supported model providers"""
SILICON = "silicon"
OPENAI = "openai"
+ MODELENGINE = "modelengine"
# Silicon Flow
SILICON_BASE_URL = "https://api.siliconflow.cn/v1/"
SILICON_GET_URL = "https://api.siliconflow.cn/v1/models"
+
+# ModelEngine
+# Base URL and API key are loaded from environment variables at runtime
diff --git a/backend/database/db_models.py b/backend/database/db_models.py
index eeb9d1c34..a4201abad 100644
--- a/backend/database/db_models.py
+++ b/backend/database/db_models.py
@@ -160,6 +160,8 @@ class ModelRecord(TableBase):
Integer, doc="Expected chunk size for embedding models, used during document chunking")
maximum_chunk_size = Column(
Integer, doc="Maximum chunk size for embedding models, used during document chunking")
+ ssl_verify = Column(
+ Boolean, default=True, doc="Whether to verify SSL certificates when connecting to this model API. Default is true. Set to false for local services without SSL support.")
class ToolInfo(TableBase):
diff --git a/backend/database/model_management_db.py b/backend/database/model_management_db.py
index b5d0d5b1e..257320499 100644
--- a/backend/database/model_management_db.py
+++ b/backend/database/model_management_db.py
@@ -185,6 +185,21 @@ def get_model_by_display_name(display_name: str, tenant_id: str) -> Optional[Dic
return model
+def get_models_by_display_name(display_name: str, tenant_id: str) -> List[Dict[str, Any]]:
+ """
+ Get all model records by display name (for multi_embedding which creates two records)
+
+ Args:
+ display_name: Model display name
+ tenant_id: Tenant ID
+
+ Returns:
+ List[Dict[str, Any]]: List of model records with the same display_name
+ """
+ filters = {'display_name': display_name}
+ return get_model_records(filters, tenant_id)
+
+
def get_model_id_by_display_name(display_name: str, tenant_id: str) -> Optional[int]:
"""
Get a model ID by display name
@@ -252,3 +267,25 @@ def get_models_by_tenant_factory_type(tenant_id: str, model_factory: str, model_
"model_type": model_type
}
return get_model_records(filters, tenant_id)
+
+
+def get_model_by_name_factory(model_name: str, model_factory: str, tenant_id: str) -> Optional[Dict[str, Any]]:
+ """
+ Get a model record by model_name and model_factory for deduplication.
+
+ Args:
+ model_name: Model name (e.g., "deepseek-r1-distill-qwen-14b")
+ model_factory: Model factory (e.g., "ModelEngine")
+ tenant_id: Tenant ID
+
+ Returns:
+ Optional[Dict[str, Any]]: Model record if found, None otherwise
+ """
+ filters = {
+ 'model_name': model_name,
+ 'model_factory': model_factory
+ }
+ records = get_model_records(filters, tenant_id)
+ return records[0] if records else None
+
+
diff --git a/backend/services/agent_service.py b/backend/services/agent_service.py
index aa537bb25..5184b0e25 100644
--- a/backend/services/agent_service.py
+++ b/backend/services/agent_service.py
@@ -41,7 +41,7 @@
update_related_agents
)
from database.model_management_db import get_model_by_model_id, get_model_id_by_display_name
-from database.remote_mcp_db import check_mcp_name_exists, get_mcp_server_by_name_and_tenant
+from database.remote_mcp_db import get_mcp_server_by_name_and_tenant
from database.tool_db import (
check_tool_is_available,
create_or_update_tool_by_tool_info,
@@ -53,8 +53,6 @@
)
from services.conversation_management_service import save_conversation_assistant, save_conversation_user
from services.memory_config_service import build_memory_context
-from services.remote_mcp_service import add_remote_mcp_server_list
-from services.tool_configuration_service import update_tool_list
from utils.auth_utils import get_current_user_info, get_user_language
from utils.config_utils import tenant_config_manager
from utils.memory_utils import build_memory_config
@@ -573,6 +571,15 @@ async def get_agent_info_impl(agent_id: int, tenant_id: str):
elif "business_logic_model_name" not in agent_info:
agent_info["business_logic_model_name"] = None
+ # Check agent availability
+ is_available, unavailable_reasons = check_agent_availability(
+ agent_id=agent_id,
+ tenant_id=tenant_id,
+ agent_info=agent_info
+ )
+ agent_info["is_available"] = is_available
+ agent_info["unavailable_reasons"] = unavailable_reasons
+
return agent_info
@@ -890,52 +897,17 @@ async def import_agent_impl(
force_import: bool = False
):
"""
- Import agent using DFS
+ Import agent using DFS.
+
+ Note:
+ MCP server registration and tool list refresh are now handled
+ on the frontend / dedicated MCP configuration flows.
+ The backend import logic only consumes the tools that already
+ exist for the current tenant.
"""
user_id, tenant_id, _ = get_current_user_info(authorization)
agent_id = agent_info.agent_id
- # First, add MCP servers if any
- if agent_info.mcp_info:
- for mcp_info in agent_info.mcp_info:
- if mcp_info.mcp_server_name and mcp_info.mcp_url:
- try:
- # Check if MCP name already exists
- if check_mcp_name_exists(mcp_name=mcp_info.mcp_server_name, tenant_id=tenant_id):
- # Get existing MCP server info to compare URLs
- existing_mcp = get_mcp_server_by_name_and_tenant(mcp_name=mcp_info.mcp_server_name,
- tenant_id=tenant_id)
- if existing_mcp and existing_mcp == mcp_info.mcp_url:
- # Same name and URL, skip
- logger.info(
- f"MCP server {mcp_info.mcp_server_name} with same URL already exists, skipping")
- continue
- else:
- # Same name but different URL, add import prefix
- import_mcp_name = f"import_{mcp_info.mcp_server_name}"
- logger.info(
- f"MCP server {mcp_info.mcp_server_name} exists with different URL, using name: {import_mcp_name}")
- mcp_server_name = import_mcp_name
- else:
- # Name doesn't exist, use original name
- mcp_server_name = mcp_info.mcp_server_name
-
- await add_remote_mcp_server_list(
- tenant_id=tenant_id,
- user_id=user_id,
- remote_mcp_server=mcp_info.mcp_url,
- remote_mcp_server_name=mcp_server_name
- )
- except Exception as e:
- raise Exception(
- f"Failed to add MCP server {mcp_info.mcp_server_name}: {str(e)}")
-
- # Then, update tool list to include new MCP tools
- try:
- await update_tool_list(tenant_id=tenant_id, user_id=user_id)
- except Exception as e:
- raise Exception(f"Failed to update tool list: {str(e)}")
-
agent_stack = deque([agent_id])
agent_id_set = set()
mapping_agent_id = {}
@@ -1047,14 +1019,16 @@ async def import_agent_by_agent_id(
regeneration_model_id = business_logic_model_id or model_id
if regeneration_model_id:
try:
- agent_name = _regenerate_agent_name_with_llm(
+ # Offload blocking LLM regeneration to a thread to avoid blocking the event loop
+ agent_name = await asyncio.to_thread(
+ _regenerate_agent_name_with_llm,
original_name=agent_name,
existing_names=existing_names,
task_description=import_agent_info.business_description or import_agent_info.description or "",
model_id=regeneration_model_id,
tenant_id=tenant_id,
language=LANGUAGE["ZH"], # Default to Chinese, can be enhanced later
- agents_cache=all_agents
+ agents_cache=all_agents,
)
logger.info(f"Regenerated agent name: '{agent_name}'")
except Exception as e:
@@ -1079,14 +1053,16 @@ async def import_agent_by_agent_id(
regeneration_model_id = business_logic_model_id or model_id
if regeneration_model_id:
try:
- agent_display_name = _regenerate_agent_display_name_with_llm(
+ # Offload blocking LLM regeneration to a thread to avoid blocking the event loop
+ agent_display_name = await asyncio.to_thread(
+ _regenerate_agent_display_name_with_llm,
original_display_name=agent_display_name,
existing_display_names=existing_display_names,
task_description=import_agent_info.business_description or import_agent_info.description or "",
model_id=regeneration_model_id,
tenant_id=tenant_id,
language=LANGUAGE["ZH"], # Default to Chinese, can be enhanced later
- agents_cache=all_agents
+ agents_cache=all_agents,
)
logger.info(f"Regenerated agent display_name: '{agent_display_name}'")
except Exception as e:
@@ -1168,23 +1144,13 @@ async def list_all_agent_info_impl(tenant_id: str) -> list[dict]:
if not agent["enabled"]:
continue
- unavailable_reasons: list[str] = []
-
- tool_info = search_tools_for_sub_agent(
- agent_id=agent["agent_id"], tenant_id=tenant_id)
- tool_id_list = [tool["tool_id"]
- for tool in tool_info if tool.get("tool_id") is not None]
- if tool_id_list:
- tool_statuses = check_tool_is_available(tool_id_list)
- if not all(tool_statuses):
- unavailable_reasons.append("tool_unavailable")
-
- model_reasons = _collect_model_availability_reasons(
- agent=agent,
+ # Use shared availability check function
+ _, unavailable_reasons = check_agent_availability(
+ agent_id=agent["agent_id"],
tenant_id=tenant_id,
+ agent_info=agent,
model_cache=model_cache
)
- unavailable_reasons.extend(model_reasons)
# Preserve the raw data so we can adjust availability for duplicates
enriched_agents.append({
@@ -1295,6 +1261,56 @@ def _check_single_model_availability(
return []
+def check_agent_availability(
+ agent_id: int,
+ tenant_id: str,
+ agent_info: dict | None = None,
+ model_cache: Dict[int, Optional[dict]] | None = None
+) -> tuple[bool, list[str]]:
+ """
+ Check if an agent is available based on its tools and model configuration.
+
+ Args:
+ agent_id: The agent ID to check
+ tenant_id: The tenant ID
+ agent_info: Optional pre-fetched agent info (to avoid duplicate DB queries)
+ model_cache: Optional model cache for performance optimization
+
+ Returns:
+ tuple: (is_available: bool, unavailable_reasons: list[str])
+ """
+ unavailable_reasons: list[str] = []
+
+ if model_cache is None:
+ model_cache = {}
+
+ # Fetch agent info if not provided
+ if agent_info is None:
+ agent_info = search_agent_info_by_agent_id(agent_id, tenant_id)
+
+ if not agent_info:
+ return False, ["agent_not_found"]
+
+ # Check tool availability
+ tool_info = search_tools_for_sub_agent(agent_id=agent_id, tenant_id=tenant_id)
+ tool_id_list = [tool["tool_id"] for tool in tool_info if tool.get("tool_id") is not None]
+ if tool_id_list:
+ tool_statuses = check_tool_is_available(tool_id_list)
+ if not all(tool_statuses):
+ unavailable_reasons.append("tool_unavailable")
+
+ # Check model availability
+ model_reasons = _collect_model_availability_reasons(
+ agent=agent_info,
+ tenant_id=tenant_id,
+ model_cache=model_cache
+ )
+ unavailable_reasons.extend(model_reasons)
+
+ is_available = len(unavailable_reasons) == 0
+ return is_available, unavailable_reasons
+
+
def insert_related_agent_impl(parent_agent_id, child_agent_id, tenant_id):
# search the agent by bfs, check if there is a circular call
search_list = deque([child_agent_id])
diff --git a/backend/services/conversation_management_service.py b/backend/services/conversation_management_service.py
index a794598df..b14835d90 100644
--- a/backend/services/conversation_management_service.py
+++ b/backend/services/conversation_management_service.py
@@ -263,8 +263,13 @@ def call_llm_for_title(content: str, tenant_id: str, language: str = LANGUAGE["Z
key=MODEL_CONFIG_MAPPING["llm"], tenant_id=tenant_id)
# Create OpenAIServerModel instance
- llm = OpenAIServerModel(model_id=get_model_name_from_config(model_config) if model_config.get("model_name") else "", api_base=model_config.get("base_url", ""),
- api_key=model_config.get("api_key", ""), temperature=0.7, top_p=0.95)
+ llm = OpenAIServerModel(
+ model_id=get_model_name_from_config(model_config) if model_config.get("model_name") else "",
+ api_base=model_config.get("base_url", ""),
+ api_key=model_config.get("api_key", ""),
+ temperature=0.7,
+ top_p=0.95
+ )
# Build messages
user_prompt = Template(prompt_template["USER_PROMPT"], undefined=StrictUndefined).render({
@@ -276,7 +281,7 @@ def call_llm_for_title(content: str, tenant_id: str, language: str = LANGUAGE["Z
"content": user_prompt}]
# Call the model
- response = llm(messages, max_tokens=10)
+ response = llm.generate(messages)
if not response or not response.content or not response.content.strip():
return DEFAULT_EN_TITLE if language == LANGUAGE["EN"] else DEFAULT_ZH_TITLE
return remove_think_blocks(response.content.strip())
diff --git a/backend/services/me_model_management_service.py b/backend/services/me_model_management_service.py
index e44aab0d5..9860ffe5b 100644
--- a/backend/services/me_model_management_service.py
+++ b/backend/services/me_model_management_service.py
@@ -1,61 +1,55 @@
-import asyncio
-from typing import List
-
import aiohttp
+import asyncio
from consts.const import MODEL_ENGINE_APIKEY, MODEL_ENGINE_HOST
-from consts.exceptions import TimeoutException, NotFoundException
+from consts.exceptions import MEConnectionException, TimeoutException
+
+
+async def check_me_variable_set() -> bool:
+ """
+ Check if the ME environment variables are correctly set.
+ Returns:
+ bool: True if both MODEL_ENGINE_APIKEY and MODEL_ENGINE_HOST are set and non-empty, False otherwise.
+ """
+ return bool(MODEL_ENGINE_APIKEY and MODEL_ENGINE_HOST)
-async def get_me_models_impl(timeout: int = 2, type: str = "") -> List:
+async def check_me_connectivity(timeout: int = 30) -> bool:
"""
- Fetches a list of models from the model engine API with response formatting.
- Parameters:
- timeout (int): The total timeout for the request in seconds.
- type (str): The type of model to filter for. If empty, returns all models.
+ Check ModelEngine connectivity by actually calling the API.
+
+ Args:
+ timeout: Request timeout in seconds
+
Returns:
- - filtered_result: List of model data dictionaries
+ bool: True if connection successful, False otherwise
+
+ Raises:
+ MEConnectionException: If connection failed with specific error
+ TimeoutException: If request timed out
"""
+ if not await check_me_variable_set():
+ return False
+
try:
- headers = {
- 'Authorization': f'Bearer {MODEL_ENGINE_APIKEY}',
- }
+ headers = {"Authorization": f"Bearer {MODEL_ENGINE_APIKEY}"}
+
async with aiohttp.ClientSession(
- timeout=aiohttp.ClientTimeout(total=timeout),
- connector=aiohttp.TCPConnector(ssl=False)
+ timeout=aiohttp.ClientTimeout(total=timeout),
+ connector=aiohttp.TCPConnector(ssl=False)
) as session:
async with session.get(
- f"{MODEL_ENGINE_HOST}/open/router/v1/models",
- headers=headers
+ f"{MODEL_ENGINE_HOST}/open/router/v1/models",
+ headers=headers
) as response:
- response.raise_for_status()
- result_data = await response.json()
- result: list = result_data['data']
-
- # Type filtering
- filtered_result = []
- if type:
- for data in result:
- if data['type'] == type:
- filtered_result.append(data)
- if not filtered_result:
- result_types = set(data['type'] for data in result)
- raise NotFoundException(
- f"No models found with type '{type}'. Available types: {result_types}.")
- else:
- filtered_result = result
-
- return filtered_result
+ if response.status == 200:
+ return True
+ else:
+ raise MEConnectionException(
+ f"Connection failed, error code: {response.status}")
except asyncio.TimeoutError:
- raise TimeoutException("Request timeout.")
+ raise TimeoutException("Connection timed out")
+ except MEConnectionException:
+ raise
except Exception as e:
- raise Exception(f"Failed to get model list: {str(e)}.")
-
-
-async def check_me_variable_set() -> bool:
- """
- Check if the ME environment variables are correctly set.
- Returns:
- bool: True if both MODEL_ENGINE_APIKEY and MODEL_ENGINE_HOST are set and non-empty, False otherwise.
- """
- return bool(MODEL_ENGINE_APIKEY and MODEL_ENGINE_HOST)
+ raise Exception(f"Unknown error occurred: {str(e)}")
diff --git a/backend/services/model_health_service.py b/backend/services/model_health_service.py
index df98f508a..c6f426789 100644
--- a/backend/services/model_health_service.py
+++ b/backend/services/model_health_service.py
@@ -1,15 +1,11 @@
-import asyncio
import logging
-import aiohttp
-from http import HTTPStatus
from nexent.core import MessageObserver
from nexent.core.models import OpenAIModel, OpenAIVLModel
from nexent.core.models.embedding_model import JinaEmbedding, OpenAICompatibleEmbedding
from services.voice_service import get_voice_service
-from consts.const import MODEL_ENGINE_APIKEY, MODEL_ENGINE_HOST, LOCALHOST_IP, LOCALHOST_NAME, DOCKER_INTERNAL_HOST
-from consts.exceptions import MEConnectionException, TimeoutException
+from consts.const import LOCALHOST_IP, LOCALHOST_NAME, DOCKER_INTERNAL_HOST
from consts.model import ModelConnectStatusEnum
from database.model_management_db import get_model_by_display_name, update_model_record
from utils.config_utils import get_model_name_from_config
@@ -57,6 +53,7 @@ async def _perform_connectivity_check(
model_type: str,
model_base_url: str,
model_api_key: str,
+ ssl_verify: bool = True,
) -> bool:
"""
Perform specific model connectivity check
@@ -65,6 +62,7 @@ async def _perform_connectivity_check(
model_type: Model type
model_base_url: Model base URL
model_api_key: API key
+ ssl_verify: Whether to verify SSL certificates (default: True)
Returns:
bool: Connectivity check result
"""
@@ -95,7 +93,8 @@ async def _perform_connectivity_check(
observer,
model_id=model_name,
api_base=model_base_url,
- api_key=model_api_key
+ api_key=model_api_key,
+ ssl_verify=ssl_verify
).check_connectivity()
elif model_type == "rerank":
connectivity = False
@@ -135,11 +134,12 @@ async def check_model_connectivity(display_name: str, tenant_id: str) -> dict:
model_type = model["model_type"]
model_base_url = model["base_url"]
model_api_key = model["api_key"]
+ ssl_verify = model.get("ssl_verify", True) # Default to True if not present
try:
# Use the common connectivity check function
connectivity = await _perform_connectivity_check(
- model_name, model_type, model_base_url, model_api_key
+ model_name, model_type, model_base_url, model_api_key, ssl_verify
)
except Exception as e:
update_data = {"connect_status": ModelConnectStatusEnum.UNAVAILABLE.value}
@@ -167,32 +167,6 @@ async def check_model_connectivity(display_name: str, tenant_id: str) -> dict:
raise e
-async def check_me_connectivity_impl(timeout: int):
- """
- Check ME connectivity and return structured response data
- Args:
- timeout: Request timeout in seconds
- """
- try:
- headers = {'Authorization': f'Bearer {MODEL_ENGINE_APIKEY}'}
-
- async with aiohttp.ClientSession(
- timeout=aiohttp.ClientTimeout(total=timeout),
- connector=aiohttp.TCPConnector(ssl=False)
- ) as session:
- async with session.get(
- f"{MODEL_ENGINE_HOST}/open/router/v1/models",
- headers=headers
- ) as response:
- if response.status == HTTPStatus.OK:
- return
- else:
- raise MEConnectionException(
- f"Connection failed, error code: {response.status}")
- except asyncio.TimeoutError:
- raise TimeoutException("Connection timed out")
- except Exception as e:
- raise Exception(f"Unknown error occurred: {str(e)}")
async def verify_model_config_connectivity(model_config: dict):
@@ -208,11 +182,12 @@ async def verify_model_config_connectivity(model_config: dict):
model_type = model_config["model_type"]
model_base_url = model_config["base_url"]
model_api_key = model_config["api_key"]
+ ssl_verify = model_config.get("ssl_verify", True) # Default to True if not present
try:
# Use the common connectivity check function
connectivity = await _perform_connectivity_check(
- model_name, model_type, model_base_url, model_api_key
+ model_name, model_type, model_base_url, model_api_key, ssl_verify
)
if not connectivity:
diff --git a/backend/services/model_management_service.py b/backend/services/model_management_service.py
index 84936f393..7fe1a86b6 100644
--- a/backend/services/model_management_service.py
+++ b/backend/services/model_management_service.py
@@ -9,6 +9,7 @@
create_model_record,
delete_model_record,
get_model_by_display_name,
+ get_models_by_display_name,
get_model_records,
get_models_by_tenant_factory_type,
update_model_record,
@@ -133,6 +134,9 @@ async def batch_create_models_for_tenant(user_id: str, tenant_id: str, batch_pay
if provider == ProviderEnum.SILICON.value:
model_url = SILICON_BASE_URL
+ elif provider == ProviderEnum.MODELENGINE.value:
+ # ModelEngine models carry their own base_url in each model dict
+ model_url = ""
else:
model_url = ""
@@ -195,20 +199,63 @@ async def list_provider_models_for_tenant(tenant_id: str, provider: str, model_t
raise Exception(f"Failed to list provider models: {str(e)}")
-async def update_single_model_for_tenant(user_id: str, tenant_id: str, model_data: Dict[str, Any]):
- """Update a single model by its model_id, ensuring display_name uniqueness."""
+async def update_single_model_for_tenant(
+ user_id: str,
+ tenant_id: str,
+ current_display_name: str,
+ model_data: Dict[str, Any]
+):
+ """Update model(s) by current display_name. If embedding/multi_embedding, update both types.
+
+ Args:
+ user_id: The user performing the update.
+ tenant_id: The tenant context.
+ current_display_name: The current display_name used to look up the model(s).
+ model_data: The fields to update, which may include a new display_name.
+
+ Raises:
+ LookupError: If no model is found with the current_display_name.
+ ValueError: If a new display_name conflicts with an existing model.
+ """
try:
- existing_model_by_display = get_model_by_display_name(model_data["display_name"], tenant_id)
- current_model_id = int(model_data["model_id"])
- existing_model_id = existing_model_by_display["model_id"] if existing_model_by_display else None
-
- if existing_model_by_display and existing_model_id != current_model_id:
- raise ValueError(
- f"Name {model_data['display_name']} is already in use, please choose another display name")
+ # Get all models with the current display_name (may be 1 or 2 for embedding types)
+ existing_models = get_models_by_display_name(current_display_name, tenant_id)
- update_model_record(current_model_id, model_data, user_id)
- logging.debug(
- f"Model {model_data['display_name']} updated successfully")
+ if not existing_models:
+ raise LookupError(f"Model not found: {current_display_name}")
+
+ # Check if a new display_name is being set and if it conflicts
+ new_display_name = model_data.get("display_name")
+ if new_display_name and new_display_name != current_display_name:
+ conflict_models = get_models_by_display_name(new_display_name, tenant_id)
+ if conflict_models:
+ raise ValueError(
+ f"Name {new_display_name} is already in use, please choose another display name"
+ )
+
+ # Check if any of the existing models is multi_embedding
+ has_multi_embedding = any(
+ m.get("model_type") == "multi_embedding" for m in existing_models
+ )
+
+ if has_multi_embedding:
+ # Update both embedding and multi_embedding records
+ for model in existing_models:
+ # Prepare update data, excluding model_type to preserve original type
+ update_data = {k: v for k, v in model_data.items() if k not in ["model_id", "model_type"]}
+ update_model_record(model["model_id"], update_data, user_id)
+ logging.debug(
+ f"Model {current_display_name} (embedding + multi_embedding) updated successfully")
+ else:
+ # Single model update
+ current_model_id = existing_models[0]["model_id"]
+ update_data = {k: v for k, v in model_data.items() if k != "model_id"}
+ update_model_record(current_model_id, update_data, user_id)
+ logging.debug(f"Model {current_display_name} updated successfully")
+ except LookupError:
+ raise
+ except ValueError:
+ raise
except Exception as e:
logging.error(f"Failed to update model: {str(e)}")
raise Exception(f"Failed to update model: {str(e)}")
@@ -218,7 +265,7 @@ async def batch_update_models_for_tenant(user_id: str, tenant_id: str, model_lis
"""Batch update models for a tenant."""
try:
for model in model_list:
- update_model_record(model["model_id"], model, user_id)
+ update_model_record(model["model_id"], model, user_id, tenant_id)
logging.debug("Batch update models successfully")
except Exception as e:
@@ -229,24 +276,24 @@ async def batch_update_models_for_tenant(user_id: str, tenant_id: str, model_lis
async def delete_model_for_tenant(user_id: str, tenant_id: str, display_name: str):
"""Delete model(s) by display_name. If embedding/multi_embedding, delete both types."""
try:
- model = get_model_by_display_name(display_name, tenant_id)
- if not model:
+ # Get all models with this display_name (may be 1 or 2 for embedding types)
+ models = get_models_by_display_name(display_name, tenant_id)
+ if not models:
raise LookupError(f"Model not found: {display_name}")
deleted_types: List[str] = []
- if model.get("model_type") in ["embedding", "multi_embedding"]:
- # Fetch both variants once to avoid repeated lookups
- models_by_type: Dict[str, Dict[str, Any]] = {}
- for t in ["embedding", "multi_embedding"]:
- m = get_model_by_display_name(display_name, tenant_id)
- if m and m.get("model_type") == t:
- models_by_type[t] = m
-
- # Best-effort memory cleanup using the fetched variants
+
+ # Check if any of the models is multi_embedding (which means we have both types)
+ has_multi_embedding = any(
+ m.get("model_type") == "multi_embedding" for m in models
+ )
+
+ if has_multi_embedding:
+ # Best-effort memory cleanup for embedding models
try:
vdb_core = get_vector_db_core()
base_memory_config = build_memory_config_for_tenant(tenant_id)
- for t, m in models_by_type.items():
+ for m in models:
try:
await clear_model_memories(
vdb_core=vdb_core,
@@ -267,17 +314,21 @@ async def delete_model_for_tenant(user_id: str, tenant_id: str, display_name: st
logger.warning(
"Memory cleanup preparation failed: %s", outer_cleanup_exc)
- # Delete the fetched variants
- for t, m in models_by_type.items():
+ # Delete all records with the same display_name
+ for m in models:
delete_model_record(m["model_id"], user_id, tenant_id)
- deleted_types.append(t)
+ deleted_types.append(m.get("model_type", "unknown"))
else:
+ # Single model delete
+ model = models[0]
delete_model_record(model["model_id"], user_id, tenant_id)
deleted_types.append(model.get("model_type", "unknown"))
logging.debug(
f"Successfully deleted model(s) in types: {', '.join(deleted_types)}")
return display_name
+ except LookupError:
+ raise
except Exception as e:
logging.error(f"Failed to delete model: {str(e)}")
raise Exception(f"Failed to delete model: {str(e)}")
@@ -288,6 +339,12 @@ async def list_models_for_tenant(tenant_id: str):
try:
records = get_model_records(None, tenant_id)
result: List[Dict[str, Any]] = []
+
+ # Type mapping for backwards compatibility (chat -> llm for frontend)
+ type_map = {
+ "chat": "llm",
+ }
+
for record in records:
record["model_name"] = add_repo_to_name(
model_repo=record["model_repo"],
@@ -295,6 +352,11 @@ async def list_models_for_tenant(tenant_id: str):
)
record["connect_status"] = ModelConnectStatusEnum.get_value(
record.get("connect_status"))
+
+ # Map model_type if necessary (for ModelEngine compatibility)
+ if record.get("model_type") in type_map:
+ record["model_type"] = type_map[record["model_type"]]
+
result.append(record)
logging.debug("Successfully retrieved model list")
diff --git a/backend/services/model_provider_service.py b/backend/services/model_provider_service.py
index ecde87321..271ad7f99 100644
--- a/backend/services/model_provider_service.py
+++ b/backend/services/model_provider_service.py
@@ -3,14 +3,18 @@
from typing import Dict, List
import httpx
+import aiohttp
from consts.const import (
DEFAULT_LLM_MAX_TOKENS,
DEFAULT_EXPECTED_CHUNK_SIZE,
- DEFAULT_MAXIMUM_CHUNK_SIZE
+ DEFAULT_MAXIMUM_CHUNK_SIZE,
+ MODEL_ENGINE_HOST,
+ MODEL_ENGINE_APIKEY,
)
from consts.model import ModelConnectStatusEnum, ModelRequest
from consts.provider import SILICON_GET_URL, ProviderEnum
+from consts.exceptions import TimeoutException
from database.model_management_db import get_models_by_tenant_factory_type
from services.model_health_service import embedding_dimension_check
from utils.model_name_utils import split_repo_name, add_repo_to_name
@@ -67,6 +71,76 @@ async def get_models(self, provider_config: Dict) -> List[Dict]:
return []
+class ModelEngineProvider(AbstractModelProvider):
+ """Concrete implementation for ModelEngine provider."""
+
+ async def get_models(self, provider_config: Dict) -> List[Dict]:
+ """
+ Fetch models from ModelEngine API.
+
+ Args:
+ provider_config: Configuration dict containing model_type
+
+ Returns:
+ List of models with canonical fields
+ """
+ try:
+ if not MODEL_ENGINE_HOST or not MODEL_ENGINE_APIKEY:
+ logger.warning("ModelEngine environment variables not configured")
+ return []
+
+ model_type: str = provider_config.get("model_type", "")
+ headers = {"Authorization": f"Bearer {MODEL_ENGINE_APIKEY}"}
+
+ async with aiohttp.ClientSession(
+ timeout=aiohttp.ClientTimeout(total=30),
+ connector=aiohttp.TCPConnector(ssl=False)
+ ) as session:
+ async with session.get(
+ f"{MODEL_ENGINE_HOST}/open/router/v1/models",
+ headers=headers
+ ) as response:
+ response.raise_for_status()
+ data = await response.json()
+ all_models = data.get("data", [])
+
+ # Type mapping from ModelEngine to internal types
+ type_map = {
+ "embed": "embedding",
+ "chat": "llm",
+ "asr": "stt",
+ "tts": "tts",
+ "rerank": "rerank",
+ "vlm": "vlm",
+ }
+
+ # Filter models by type if specified
+ filtered_models = []
+ for model in all_models:
+ me_type = model.get("type", "")
+ internal_type = type_map.get(me_type)
+
+ # If model_type filter is provided, only include matching models
+ if model_type and internal_type != model_type:
+ continue
+
+ if internal_type:
+ filtered_models.append({
+ "id": model.get("id", ""),
+ "model_type": internal_type,
+ "model_tag": me_type,
+ "max_tokens": DEFAULT_LLM_MAX_TOKENS if internal_type in ("llm", "vlm") else 0,
+ # ModelEngine models will get base_url and api_key from environment
+ "base_url": MODEL_ENGINE_HOST,
+ "api_key": MODEL_ENGINE_APIKEY,
+ })
+
+ return filtered_models
+ except Exception as e:
+ logger.error(f"Error getting models from ModelEngine: {e}")
+ return []
+
+
async def prepare_model_dict(provider: str, model: dict, model_url: str, model_api_key: str) -> dict:
"""
Construct a model configuration dictionary that is ready to be stored in the
@@ -75,11 +149,10 @@ async def prepare_model_dict(provider: str, model: dict, model_url: str, model_a
the router implementation concise.
Args:
- provider: Name of the model provider (e.g. "silicon", "openai").
+ provider: Name of the model provider (e.g. "silicon", "openai", "modelengine").
model: A single model item coming from the provider list.
model_url: Base URL for the provider API.
model_api_key: API key that should be saved together with the model.
- max_tokens: User-supplied max token / embedding dimension upper-bound.
Returns:
A dictionary ready to be passed to *create_model_record*.
@@ -98,6 +171,18 @@ async def prepare_model_dict(provider: str, model: dict, model_url: str, model_a
expected_chunk_size = model.get("expected_chunk_size", DEFAULT_EXPECTED_CHUNK_SIZE)
maximum_chunk_size = model.get("maximum_chunk_size", DEFAULT_MAXIMUM_CHUNK_SIZE)
+ # For ModelEngine provider, extract the host from model's base_url
+ # We'll append the correct path later
+ if provider == ProviderEnum.MODELENGINE.value:
+ # Get the raw host URL from model (e.g., "https://120.253.225.102:50001")
+ raw_model_url = model.get("base_url", "")
+ # Strip any existing path to get just the host
+ if raw_model_url:
+ # Remove any trailing /open/router/v1 or similar paths to get base host
+ raw_model_url = raw_model_url.split("/open/")[0] if "/open/" in raw_model_url else raw_model_url
+ model_url = raw_model_url
+ model_api_key = model.get("api_key", model_api_key)
+
# Build the canonical representation using the existing Pydantic schema for
# consistency of validation and default handling.
model_obj = ModelRequest(
@@ -117,11 +202,24 @@ async def prepare_model_dict(provider: str, model: dict, model_url: str, model_a
# Determine the correct base_url and, for embeddings, update the actual
# dimension by performing a real connectivity check.
if model["model_type"] in ["embedding", "multi_embedding"]:
- model_dict["base_url"] = f"{model_url}embeddings"
+ if provider != ProviderEnum.MODELENGINE.value:
+ model_dict["base_url"] = f"{model_url}embeddings"
+ else:
+ # For ModelEngine embedding models, append the embeddings path
+ model_dict["base_url"] = f"{model_url.rstrip('/')}/open/router/v1/embeddings"
# The embedding dimension might differ from the provided max_tokens.
model_dict["max_tokens"] = await embedding_dimension_check(model_dict)
else:
- model_dict["base_url"] = model_url
+ # For non-embedding models
+ if provider == ProviderEnum.MODELENGINE.value:
+ # Ensure ModelEngine models have the full API path
+ model_dict["base_url"] = f"{model_url.rstrip('/')}/open/router/v1"
+ else:
+ model_dict["base_url"] = model_url
+
+ # ModelEngine models don't support SSL verification
+ if provider == ProviderEnum.MODELENGINE.value:
+ model_dict["ssl_verify"] = False
# All newly created models start in NOT_DETECTED status.
model_dict["connect_status"] = ModelConnectStatusEnum.NOT_DETECTED.value
@@ -182,5 +280,8 @@ async def get_provider_models(model_data: dict) -> List[dict]:
if model_data["provider"] == ProviderEnum.SILICON.value:
provider = SiliconModelProvider()
model_list = await provider.get_models(model_data)
+ elif model_data["provider"] == ProviderEnum.MODELENGINE.value:
+ provider = ModelEngineProvider()
+ model_list = await provider.get_models(model_data)
return model_list
diff --git a/backend/services/vectordatabase_service.py b/backend/services/vectordatabase_service.py
index 55d2a5e4a..e72b3f9f3 100644
--- a/backend/services/vectordatabase_service.py
+++ b/backend/services/vectordatabase_service.py
@@ -15,12 +15,11 @@
import time
import uuid
from datetime import datetime, timezone
-from typing import Any, Dict, Generator, List, Optional
+from typing import Any, Dict, List, Optional
from fastapi import Body, Depends, Path, Query
from fastapi.responses import StreamingResponse
from nexent.core.models.embedding_model import OpenAICompatibleEmbedding, JinaEmbedding, BaseEmbedding
-from nexent.core.nlp.tokenizer import calculate_term_weights
from nexent.vector_database.base import VectorDatabaseCore
from nexent.vector_database.elasticsearch_core import ElasticSearchCore
diff --git a/backend/services/voice_service.py b/backend/services/voice_service.py
index a66f7c15d..0bffec895 100644
--- a/backend/services/voice_service.py
+++ b/backend/services/voice_service.py
@@ -1,6 +1,6 @@
import asyncio
import logging
-from typing import Dict, Any, Optional
+from typing import Any, Optional
from nexent.core.models.stt_model import STTConfig, STTModel
from nexent.core.models.tts_model import TTSConfig, TTSModel
diff --git a/doc/docs/en/sdk/core/multimodal.md b/doc/docs/en/sdk/core/multimodal.md
new file mode 100644
index 000000000..e83f83416
--- /dev/null
+++ b/doc/docs/en/sdk/core/multimodal.md
@@ -0,0 +1,327 @@
+# Multimodal Module
+
+This module provides a native multimodal data processing bus designed for agents. With the `@load_object` and `@save_object` decorators, it supports real-time transmission and processing of text, images, audio, video, and other data formats, enabling seamless cross-modal data flow.
+
+## 📋 Table of Contents
+
+- [LoadSaveObjectManager Initialization](#loadsaveobjectmanager-initialization)
+- [@load_object Decorator](#load_object-decorator)
+- [@save_object Decorator](#save_object-decorator)
+- [Combined Usage Example](#combined-usage-example)
+
+## LoadSaveObjectManager Initialization
+
+Before using the decorators, you need to initialize a `LoadSaveObjectManager` instance and pass in a storage client (for example, a MinIO client):
+
+```python
+from nexent.multi_modal.load_save_object import LoadSaveObjectManager
+from database.client import minio_client
+
+
+# Create manager instance
+Multimodal = LoadSaveObjectManager(storage_client=minio_client)
+```
+
+You can also implement your own storage client based on the `StorageClient` base class in `sdk.nexent.storage.storage_client_base`.
+The storage client must implement:
+
+- `get_file_stream(object_name, bucket)`: get a file stream from storage (for download)
+- `upload_fileobj(file_obj, object_name, bucket)`: upload a file-like object to storage (for save)
+
+## @load_object Decorator
+
+The `@load_object` decorator downloads files from URLs (S3 / HTTP / HTTPS) **before** the wrapped function is executed, and passes the file content (or transformed data) into the wrapped function.
+
+### Features
+
+- **Automatic download**: Automatically detect and download files pointed to by S3, HTTP, or HTTPS URLs.
+- **Data transformation**: Use custom transformer functions to convert downloaded bytes into types required by the wrapped function (for example, `PIL.Image`, text, etc.).
+- **Batch processing**: Support a single URL or a list of URLs.
+
+### Parameters
+
+- `input_names` (`List[str]`): names of function parameters to transform.
+- `input_data_transformer` (`Optional[List[Callable[[bytes], Any]]]`): optional list of transformers; each transformer converts raw `bytes` into the target type for the corresponding parameter.
+
+### Supported URL Formats
+
+The decorator supports:
+
+- **S3 URLs**
+ - `s3://bucket-name/object/file.jpg`
+ - `/bucket-name/object/file.jpg` (short form)
+- **HTTP / HTTPS URLs**
+ - `http://example.com/file.jpg`
+ - `https://example.com/file.jpg`
+
+URL type detection:
+
+- Starts with `http://` → HTTP URL
+- Starts with `https://` → HTTPS URL
+- Starts with `s3://` or looks like `/bucket/object` → S3 URL
+
+### Examples
+
+#### Basic: download as bytes
+
+```python
+@Multimodal.load_object(input_names=["image_url"])
+def process_image(image_url: bytes):
+ """image_url will be replaced with downloaded bytes."""
+ print(f"File size: {len(image_url)} bytes")
+ return image_url
+
+
+# Call process_image
+result = process_image(image_url="http://example.com/pic.PNG")
+```
+
+#### Advanced: convert bytes to PIL Image
+
+If the function parameter is not `bytes` (for example, it expects `PIL.Image.Image`), define a converter (such as `bytes_to_pil`) and pass it to the decorator.
+
+```python
+import io
+from PIL import Image
+
+
+def bytes_to_pil(binary_data: bytes) -> Image.Image:
+ image_stream = io.BytesIO(binary_data)
+ img = Image.open(image_stream)
+ return img
+
+
+@Multimodal.load_object(
+ input_names=["image_url"],
+ input_data_transformer=[bytes_to_pil],
+)
+def process_image(image_url: Image.Image) -> Image.Image:
+ """image_url will be converted into a PIL Image object."""
+ resized = image_url.resize((800, 600))
+ return resized
+
+
+result = process_image(image_url="http://example.com/pic.PNG")
+```
+
+#### Multiple inputs
+
+```python
+from PIL import Image
+
+
+@Multimodal.load_object(
+ input_names=["image_url1", "image_url2"],
+ input_data_transformer=[bytes_to_pil, bytes_to_pil],
+)
+def process_two_images(image_url1: Image.Image, image_url2: Image.Image) -> Image.Image:
+ """Both image URLs will be downloaded and converted into PIL Images."""
+ combined = Image.new("RGB", (1600, 600))
+ combined.paste(image_url1, (0, 0))
+ combined.paste(image_url2, (800, 0))
+ return combined
+
+
+result = process_two_images(
+ image_url1="http://example.com/pic1.PNG",
+ image_url2="http://example.com/pic2.PNG",
+)
+```
+
+#### List of URLs
+
+```python
+from typing import List
+from PIL import Image
+
+
+@Multimodal.load_object(
+ input_names=["image_urls"],
+ input_data_transformer=[bytes_to_pil],
+)
+def process_image_list(image_urls: List[Image.Image]) -> List[Image.Image]:
+ """Support a list of URLs, each will be downloaded and converted."""
+ results: List[Image.Image] = []
+ for img in image_urls:
+ results.append(img.resize((200, 200)))
+ return results
+
+
+result = process_image_list(
+ image_urls=[
+ "http://example.com/pic1.PNG",
+ "http://example.com/pic2.PNG",
+ ]
+)
+```
+
+## @save_object Decorator
+
+The `@save_object` decorator uploads return values to storage (MinIO) **after** the wrapped function finishes, and returns S3 URLs.
+
+### Features
+
+- **Automatic upload**: Automatically upload function return values to MinIO.
+- **Data transformation**: Use transformers to convert return values into `bytes` (for example, `PIL.Image` → `bytes`).
+- **Batch processing**: Support a single return value or multiple values (tuple).
+- **URL return**: Return S3 URLs of the form `s3://bucket/object_name`.
+
+### Parameters
+
+- `output_names` (`List[str]`): logical names for each return value.
+- `output_transformers` (`Optional[List[Callable[[Any], bytes]]]`): transformers that convert each return value into `bytes`.
+- `bucket` (`str`): target bucket name, default `"nexent"`.
+
+### Examples
+
+#### Basic: save raw bytes
+
+```python
+@Multimodal.save_object(
+ output_names=["content"],
+)
+def generate_file() -> bytes:
+ """Returned bytes will be uploaded to MinIO automatically."""
+ content = b"Hello, World!"
+ return content
+```
+
+#### Advanced: convert PIL Image to bytes before upload
+
+If the function does not return `bytes` (for example, it returns `PIL.Image.Image`), define a converter such as `pil_to_bytes` and pass it to the decorator.
+
+```python
+import io
+from typing import Optional
+from PIL import Image, ImageFilter
+
+
+def pil_to_bytes(img: Image.Image, format: Optional[str] = None) -> bytes:
+ """
+ Convert a PIL Image to binary data (bytes).
+ """
+ if img is None:
+ raise ValueError("Input image cannot be None")
+
+ buffer = io.BytesIO()
+
+ # Decide which format to use
+ if format is None:
+ # Use original format if available, otherwise default to PNG
+ format = img.format if img.format else "PNG"
+
+ # For JPEG, ensure RGB (no alpha channel)
+ if format.upper() == "JPEG" and img.mode in ("RGBA", "LA", "P"):
+ rgb_img = Image.new("RGB", img.size, (255, 255, 255))
+ if img.mode == "P":
+ img = img.convert("RGBA")
+ rgb_img.paste(
+ img,
+ mask=img.split()[-1] if img.mode in ("RGBA", "LA") else None,
+ )
+ rgb_img.save(buffer, format=format)
+ else:
+ img.save(buffer, format=format)
+
+ data = buffer.getvalue()
+ buffer.close()
+ return data
+
+
+@Multimodal.save_object(
+ output_names=["processed_image"],
+ output_transformers=[pil_to_bytes],
+)
+def process_image(image: Image.Image) -> Image.Image:
+ """Returned PIL Image will be converted to bytes and uploaded."""
+ blurred = image.filter(ImageFilter.GaussianBlur(radius=5))
+ return blurred
+```
+
+#### Multiple files
+
+```python
+from typing import Tuple
+
+
+@Multimodal.save_object(
+ output_names=["resized1", "resized2"],
+ output_transformers=[pil_to_bytes, pil_to_bytes],
+)
+def process_two_images(
+ img1: Image.Image,
+ img2: Image.Image,
+) -> Tuple[Image.Image, Image.Image]:
+ """Both returned images will be uploaded and return corresponding S3 URLs."""
+ resized1 = img1.resize((800, 600))
+ resized2 = img2.resize((800, 600))
+ return resized1, resized2
+```
+
+### Return Format
+
+- **Single return value**: a single S3 URL string, `s3://bucket/object_name`.
+- **Multiple return values (tuple)**: a tuple where each element is the corresponding S3 URL.
+
+### Notes
+
+- If you do **not** provide a transformer, the function return value must be `bytes`.
+- If you provide a transformer, the transformer **must** return `bytes`.
+- The number of return values must match the length of `output_names`.
+
+## Combined Usage Example
+
+In practice, `@load_object` and `@save_object` are often used together to build a full **download → process → upload** pipeline:
+
+```python
+from typing import Union, List
+from PIL import Image, ImageFilter
+
+from database.client import minio_client
+from nexent.multi_modal.load_save_object import LoadSaveObjectManager
+
+
+Multimodal = LoadSaveObjectManager(storage_client=minio_client)
+
+
+@Multimodal.load_object(
+ input_names=["image_url"],
+ input_data_transformer=[bytes_to_pil],
+)
+@Multimodal.save_object(
+ output_names=["blurred_image"],
+ output_transformers=[pil_to_bytes],
+)
+def blur_image_tool(
+ image_url: Union[str, List[str]],
+ blur_radius: int = 5,
+) -> Image.Image:
+ """
+ Apply a Gaussian blur filter to an image.
+
+ Args:
+ image_url: S3 URL or HTTP/HTTPS URL of the image.
+ blur_radius: Blur radius (default 5, valid range 1–50).
+
+ Returns:
+ Processed PIL Image object (it will be uploaded and returned as an S3 URL).
+ """
+ # At this point, image_url has already been converted to a PIL Image
+ if image_url is None:
+ raise ValueError("Failed to load image")
+
+ # Clamp blur radius
+ blur_radius = max(1, min(50, blur_radius))
+
+ # Apply blur
+ blurred_image = image_url.filter(ImageFilter.GaussianBlur(radius=blur_radius))
+ return blurred_image
+
+
+# Example usage
+result_url = blur_image_tool(
+ image_url="s3://nexent/images/input.png",
+ blur_radius=10,
+)
+# result_url is something like "s3://nexent/attachments/xxx.png"
+```
\ No newline at end of file
diff --git a/doc/docs/en/user-guide/agent-development.md b/doc/docs/en/user-guide/agent-development.md
index 54970e5c2..538041ab1 100644
--- a/doc/docs/en/user-guide/agent-development.md
+++ b/doc/docs/en/user-guide/agent-development.md
@@ -15,6 +15,14 @@ If you have an existing agent configuration, you can also import it:
+> ⚠️ **Note:** If you import an agent with a duplicate name, a prompt dialog will appear. You can choose:
+> - **Import anyway**: Keep the duplicate name; the imported agent will be in an unavailable state and requires manual modification of the Agent name and variable name before it can be used
+> - **Regenerate and import**: The system will call the LLM to rename the Agent, which will consume a certain amount of model tokens and may take longer
+
+
+

+
+
## 👥 Configure Collaborative Agents/Tools
You can configure other collaborative agents for your created agent, as well as assign available tools to empower the agent to complete complex tasks.
diff --git a/doc/docs/en/user-guide/assets/agent-development/duplicated_import.png b/doc/docs/en/user-guide/assets/agent-development/duplicated_import.png
new file mode 100644
index 000000000..3d7e0e6bc
Binary files /dev/null and b/doc/docs/en/user-guide/assets/agent-development/duplicated_import.png differ
diff --git a/doc/docs/zh/opensource-memorial-wall.md b/doc/docs/zh/opensource-memorial-wall.md
index d1c9a3ccc..41fdb6ee8 100644
--- a/doc/docs/zh/opensource-memorial-wall.md
+++ b/doc/docs/zh/opensource-memorial-wall.md
@@ -24,6 +24,10 @@
第一次玩开源项目,nexent真的挺好用的!用自然语言就能搞智能体,比我想象的简单多了
:::
+::: info codingcat99 - 2025-05-15
+大家一起加油
+:::
+
::: tip bytedancer2023 - 2025-05-18
我们小公司想做客服机器人,之前技术门槛太高了。nexent的多文件格式支持真的帮了大忙,产品经理现在也能自己调智能体了哈哈
:::
@@ -517,15 +521,15 @@ nexent智能体帮助我学到更多的东西,赞!
第一次使用nexent,想借此更快入手ai应用开发呀!
:::
-:::info user - 2025-11-26
+::: info user - 2025-11-26
Nexent开发者加油
:::
-:::info NOSN - 2025-11-27
+::: info NOSN - 2025-11-27
Nexent越做越强大!
:::
-:::info Chenpi-Sakura - 2025-11-27
+::: info Chenpi-Sakura - 2025-11-27
开源共创未来!
:::
@@ -533,11 +537,11 @@ Nexent越做越强大!
Nexent加油
:::
-:::info AstreoX - 2025-11-27
+::: info AstreoX - 2025-11-27
感谢Nexent为智能体开发提出了更多可能!
:::
-:::info user - 2025-11-26
+::: info user - 2025-11-26
祝nexent平台越做越胡奥
:::
@@ -552,3 +556,54 @@ Nexent加油
::: info kj - 2025-11-27
祝越来越好
:::
+
+::: info aaa - 2025-11-28
+祝nexent平台越来越好
+:::
+
+::: info hanyuan5888-beep - 2025-11-29
+通过华为ICT大赛接触到的这个平台,前端做的非常好看,并且功能很全面。
+:::
+
+::: info user - 2025-11-29
+感谢 Nexent 让我踏上了开源之旅!这个项目的文档真的很棒,帮助我快速上手。
+:::
+
+::: info G-oeX - 2025-11-30
+感谢 Nexent 让我第一次感受到智能体 希望参加ICT比赛过程中可以学到更多知识 能够对该领域有更多的了解和认识!star!!!
+:::
+
+::: tip peri1506 - 2025-11-30
+感谢 Nexent 让我踏上了开源之旅!这个项目的文档真的很棒,帮助我快速上手。
+:::
+
+::: tip kissmekm - 2025-12-01
+感谢 Nexent 让我踏上了开源之旅!希望能使用nexent开发智能体
+:::
+
+::: info luna - 2025-12-01
+感谢nexent,祝平台越做越强大
+:::
+
+::: info sbwrn - 2025-12-02
+祝越来越好
+:::
+
+::: info sbwrn - 2025-12-02
+祝nexent平台越来越好
+:::
+::: tip 开源新手 - 2025-12-02
+感谢 Nexent 让我踏上了开源之旅!这个项目的文档真的很棒,帮助我快速上手。
+:::
+
+::: info sbwrn - 2025-12-02
+祝nexent平台越来越好
+:::
+
+::: info dengpeiying - 2025-12-02
+Nexent开发者加油
+:::
+
+::: info jinhb - 2025-12-03
+祝nexent平台越来越好
+:::
diff --git a/doc/docs/zh/sdk/core/multimodal.md b/doc/docs/zh/sdk/core/multimodal.md
new file mode 100644
index 000000000..eec6c66cb
--- /dev/null
+++ b/doc/docs/zh/sdk/core/multimodal.md
@@ -0,0 +1,312 @@
+# 多模态模块
+
+本模块提供专为智能体设计的原生多模态数据处理总线,通过 `@load_object`、 `@save_object` 装饰器,支持文本、图像、音频、视频等多种数据格式的实时传输和处理,实现跨模态的无缝数据流转。
+
+## 📋 目录
+
+- [LoadSaveObjectManager 初始化](#loadsaveobjectmanager-初始化)
+- [@load_object装饰器](#load_object装饰器)
+- [@save_object装饰器](#save_object装饰器)
+- [组合使用示例](#组合使用示例)
+
+
+## LoadSaveObjectManager 初始化
+
+在使用装饰器之前,需要先初始化 `LoadSaveObjectManager` 实例,并传入存储客户端(如 MinIO 客户端):
+
+```python
+from nexent.multi_modal.load_save_object import LoadSaveObjectManager
+from database.client import minio_client
+
+
+# 创建管理器实例
+Multimodal = LoadSaveObjectManager(storage_client=minio_client)
+```
+
+存储客户端也可以通过`sdk.nexent.storage.storage_client_base`中的`StorageClient`基类,实现自己的存储客户端。存储客户端需要实现以下方法:
+- `get_file_stream(object_name, bucket)`: 从存储中获取文件流(用于下载)
+- `upload_fileobj(file_obj, object_name, bucket)`: 上传文件对象到存储(用于保存)
+
+
+## @load_object装饰器
+
+`@load_object` 装饰器用于在被装饰函数执行前自动从 URL(S3、HTTP、HTTPS)下载文件,并将文件内容(或转换后的数据)传递给被装饰函数。
+
+### 功能特性
+
+- **自动下载**: 自动识别并下载 S3、HTTP、HTTPS URL 指向的文件
+- **数据转换**: 支持通过自定义转换器将下载的字节数据转换为被装饰函数所需格式(如 PIL Image、文本等)
+- **批量处理**: 支持处理单个 URL 或 URL 列表
+
+
+### 参数说明
+
+- `input_names` (List[str]): 需要处理的函数参数名称列表
+- `input_data_transformer` (Optional[List[Callable[[bytes], Any]]]): 可选的数据转换器列表,用于将下载的字节数据转换为所需格式
+
+### 支持的URL格式
+
+装饰器支持以下 URL 格式:
+
+- S3 URL
+ - `s3://bucket-name/object/file.jpg`
+ - `/bucket-name/object/file.jpg`(简化格式)
+- HTTP/HTTPS URL
+ - `http://example.com/file.jpg`
+ - `https://example.com/file.jpg`
+
+
+系统会自动检测 URL 类型:
+- 以 `http://` 开头 → HTTP URL
+- 以 `https://` 开头 → HTTPS URL
+- 以 `s3://` 开头或符合 `/bucket/object` 格式 → S3 URL
+
+### 使用示例
+
+#### 基础用法:下载为字节数据
+
+```python
+@Multimodal.load_object(input_names=["image_url"])
+def process_image(image_url: bytes):
+ """file_url 参数会被自动替换为从 URL 下载的字节数据"""
+ print(f"文件大小: {len(image_url)} bytes")
+ return image_url
+
+# 调用 process_image 方法
+result = process_image(image_url=f"http://example/pic.PNG")
+```
+
+#### 进阶用法:使用转换器将字节数据转换为所需格式
+
+若被装饰函数的入参不是字节数据,而是其他数据类型(如 PIL Image),可以定义一个数据转换函数(如 bytes_to_pil)并将函数名作为入参传给装饰器。
+
+```python
+import io
+import PIL
+from PIL import Image
+
+def bytes_to_pil(binary_data):
+ image_stream = io.BytesIO(binary_data)
+ img = Image.open(image_stream)
+ return img
+
+@Multimodal.load_object(
+ input_names=["image_url"],
+ input_data_transformer=[bytes_to_pil]
+)
+def process_image(image_url: Image.Image):
+ """image_url 参数会被自动转换为 PIL Image 对象"""
+ resized = image_url.resize((800, 600))
+ return resized
+
+# 调用 process_image 方法
+result = process_image(image_url=f"http://example/pic.PNG")
+```
+
+#### 处理多个输入
+
+```python
+@Multimodal.load_object(
+ input_names=["image_url1", "image_url2"],
+ input_data_transformer=[bytes_to_pil, bytes_to_pil]
+)
+def process_two_images(image_url1: Image.Image, image_url2: Image.Image):
+ """两个图片 URL 都会被下载并转换为 PIL Image"""
+ combined = Image.new('RGB', (1600, 600))
+ combined.paste(image_url1, (0, 0))
+ combined.paste(image_url2, (800, 0))
+ return combined
+
+# 调用 process_two_images 方法
+result = process_two_images(image_url1=f"http://example/pic1.PNG", image_url2=f"http://example/pic2.PNG")
+```
+
+#### 处理 URL 列表
+
+```python
+@Multimodal.load_object(
+ input_names=["image_urls"],
+ input_data_transformer=[bytes_to_pil]
+)
+def process_image_list(image_urls: List[Image.Image]):
+ """支持传入 URL 列表,每个 URL 都会被下载并转换"""
+ results = []
+ for img in image_urls:
+ results.append(img.resize((200, 200)))
+ return results
+
+# 调用 process_image_list 方法
+result = process_image_list(image_urls=["http://example/pic1.PNG", "http://example/pic2.PNG"])
+```
+
+
+## @save_object装饰器
+
+`@save_object` 装饰器用于在被装饰函数执行后自动将返回值上传到存储(MinIO),并返回 S3 URL。
+
+### 功能特性
+
+- **自动上传**: 自动将被装饰函数返回值上传到 MinIO 存储
+- **数据转换**: 支持通过转换器将返回值转换为字节数据(如 PIL Image 转 bytes)
+- **批量处理**: 支持处理单个返回值或多个返回值(tuple)
+- **URL 返回**: 返回 S3 URL 格式(`s3://bucket/object_name`)
+
+### 参数说明
+
+- `output_names` (List[str]): 被装饰函数的输出参数名称列表
+- `output_transformers` (Optional[List[Callable[[Any], bytes]]]): 可选的数据转换器列表,用于将返回值转换为字节数据
+- `bucket` (str): 存储桶名称,默认为 `"nexent"`
+
+### 使用示例
+
+#### 基础用法:直接保存字节数据
+
+```python
+@Multimodal.save_object(
+ output_names=["content"]
+)
+def generate_file() -> bytes:
+ """返回的字节数据会被自动上传到 MinIO"""
+ content = b"Hello, World!"
+ return content
+```
+
+#### 进阶用法: 使用转换器将函数返回值转换为字节数据
+
+若被装饰函数的出参不是字节数据,而是其他数据类型(如 PIL Image),可以定义一个数据转换函数(如 pil_to_bytes)并将函数名作为入参传给装饰器。
+
+
+```python
+# 定义将PIL对象转换为Bytes的转换器函数
+def pil_to_bytes(img, format=None):
+ """
+ Convert PIL Image to binary data (bytes)
+
+ Args:
+ img: PIL.Image object
+ format: Output format ('JPEG', 'PNG', 'BMP', 'WEBP', etc.).
+ If None, uses the image's original format or defaults to PNG.
+
+ Returns:
+ bytes: Binary data of the image
+ """
+ if img is None:
+ raise ValueError("Input image cannot be None")
+
+ # Create memory buffer
+ buffer = io.BytesIO()
+
+ # Determine format to use
+ if format is None:
+ # Use image's original format if available, otherwise default to PNG
+ format = img.format if img.format else 'PNG'
+
+ # Save image to buffer with specified format
+ # For JPEG, ensure RGB mode (no transparency)
+ if format.upper() == 'JPEG' and img.mode in ('RGBA', 'LA', 'P'):
+ # Convert to RGB for JPEG compatibility
+ rgb_img = Image.new('RGB', img.size, (255, 255, 255))
+ if img.mode == 'P':
+ img = img.convert('RGBA')
+ rgb_img.paste(img, mask=img.split()[-1] if img.mode in ('RGBA', 'LA') else None)
+ rgb_img.save(buffer, format=format)
+ else:
+ img.save(buffer, format=format)
+
+ # Get binary data
+ binary_data = buffer.getvalue()
+ buffer.close()
+
+ return binary_data
+
+
+@Multimodal.save_object(
+ output_names=["processed_image"],
+ output_transformers=[pil_to_bytes]
+)
+def process_image(image: Image.Image) -> Image.Image:
+ """返回的 PIL Image 会被转换为字节并上传"""
+ blurred = image.filter(ImageFilter.GaussianBlur(radius=5))
+ return blurred
+```
+
+#### 返回多个文件
+
+```python
+@Multimodal.save_object(
+ output_names=["resized1", "resized2"],
+ output_transformers=[pil_to_bytes, pil_to_bytes]
+)
+def process_two_images(img1: Image.Image, img2: Image.Image) -> Tuple[Image.Image, Image.Image]:
+ """返回两个图片,都会被上传并返回对应的 S3 URL"""
+ resized1 = img1.resize((800, 600))
+ resized2 = img2.resize((800, 600))
+ return resized1, resized2
+```
+
+### 返回值格式
+
+- 单个返回值:返回单个 S3 URL 字符串,格式为 `s3://bucket/object_name`
+- 多个返回值(tuple):返回 tuple,每个元素是对应的 S3 URL
+
+### 注意事项
+
+- 如果没有提供转换器,被装饰函数的返回值必须是 `bytes` 类型
+- 如果提供了转换器,转换器必须返回 `bytes` 类型
+- 返回值的数量必须与 `output_names` 的长度一致
+
+
+## 组合使用示例
+
+在实际应用中,通常会将 `@load_object` 和 `@save_object` 组合使用,实现完整的"下载-处理-上传"流程:
+
+```python
+from PIL import Image, ImageFilter
+from typing import Union, List
+from database.client import minio_client
+from multi_modal.load_save_object import LoadSaveObjectManager
+
+Multimodal = LoadSaveObjectManager(storage_client=minio_client)
+
+@Multimodal.load_object(
+ input_names=["image_url"],
+ input_data_transformer=[bytes_to_pil]
+)
+@Multimodal.save_object(
+ output_names=["blurred_image"],
+ output_transformers=[pil_to_bytes]
+)
+def blur_image_tool(
+ image_url: Union[str, List[str]],
+ blur_radius: int = 5
+) -> Image.Image:
+ """
+ 对图片应用高斯模糊滤镜
+
+ Args:
+ image_url: 图片的 S3 URL 或 HTTP/HTTPS URL
+ blur_radius: 模糊半径(默认 5,范围 1-50)
+
+ Returns:
+ 处理后的 PIL Image 对象(会被自动上传并返回 S3 URL)
+ """
+ # 此时 image_url 已经是 PIL Image 对象
+ if image_url is None:
+ raise ValueError("Failed to load image")
+
+ # 验证并限制模糊半径
+ blur_radius = max(1, min(50, blur_radius))
+
+ # 应用模糊滤镜
+ blurred_image = image_url.filter(ImageFilter.GaussianBlur(radius=blur_radius))
+
+ # 返回 PIL Image(会被 @save_object 自动上传)
+ return blurred_image
+
+# 使用示例
+result_url = blur_image_tool(
+ image_url="s3://nexent/images/input.png",
+ blur_radius=10
+)
+# result_url 是 "s3://nexent/attachments/xxx.png"
+```
\ No newline at end of file
diff --git a/doc/docs/zh/user-guide/agent-development.md b/doc/docs/zh/user-guide/agent-development.md
index 5f9eb0646..ff4c7c943 100644
--- a/doc/docs/zh/user-guide/agent-development.md
+++ b/doc/docs/zh/user-guide/agent-development.md
@@ -15,6 +15,14 @@
+> ⚠️ **提示**:如果导入了重名的智能体,系统会弹出提示弹窗。您可以选择:
+> - **直接导入**:保留重复名称,导入后的智能体会处于不可用状态,需手动修改 Agent 名称和变量名后才能使用
+> - **重新生成并导入**:系统将调用 LLM 对 Agent 进行重命名,会消耗一定的模型 token 数,可能耗时较长
+
+
+

+
+
## 👥 配置协作智能体/工具
您可以为创建的智能体配置其他协作智能体,也可以为它配置可使用的工具,以赋予智能体能力完成复杂任务。
diff --git a/doc/docs/zh/user-guide/assets/agent-development/duplicated_import.png b/doc/docs/zh/user-guide/assets/agent-development/duplicated_import.png
new file mode 100644
index 000000000..e4d51cad5
Binary files /dev/null and b/doc/docs/zh/user-guide/assets/agent-development/duplicated_import.png differ
diff --git a/docker/.env.example b/docker/.env.example
index e770040e7..1018228ac 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -153,4 +153,4 @@ LLM_SLOW_REQUEST_THRESHOLD_SECONDS=5.0
LLM_SLOW_TOKEN_RATE_THRESHOLD=10.0
# Market Backend Address
-MARKET_BACKEND=http://localhost:8010
\ No newline at end of file
+MARKET_BACKEND=https://market.nexent.tech
diff --git a/docker/deploy.sh b/docker/deploy.sh
index 2b3ea9618..dc39eaf6a 100755
--- a/docker/deploy.sh
+++ b/docker/deploy.sh
@@ -1,10 +1,13 @@
#!/bin/bash
+# Ensure the script is executed with bash (required for arrays and [[ ]])
+if [ -z "$BASH_VERSION" ]; then
+ echo "❌ This script must be run with bash. Please use: bash deploy.sh or ./deploy.sh"
+ exit 0
+fi
+
# Exit immediately if a command exits with a non-zero status
set -e
-
-ERROR_OCCURRED=0
-
set -a
source .env
@@ -51,6 +54,173 @@ sanitize_input() {
printf "%s" "$input" | tr -d '\r'
}
+is_windows_env() {
+ # Detect Windows Git Bash / MSYS / MINGW environment
+ local os_name
+ os_name=$(uname -s 2>/dev/null | tr '[:upper:]' '[:lower:]')
+ if [[ "$os_name" == mingw* || "$os_name" == msys* ]]; then
+ return 0
+ fi
+ return 1
+}
+
+is_port_in_use() {
+ # Check if a TCP port is already in use (Linux/macOS/Windows Git Bash)
+ local port="$1"
+
+ # Prefer lsof when available (typically on Linux/macOS)
+ if command -v lsof >/dev/null 2>&1 && ! is_windows_env; then
+ if lsof -iTCP:"$port" -sTCP:LISTEN -P -n >/dev/null 2>&1; then
+ return 0
+ fi
+ return 1
+ fi
+
+ # Fallback to ss if available
+ if command -v ss >/dev/null 2>&1; then
+ if ss -ltn 2>/dev/null | awk '{print $4}' | grep -qE "[:\.]${port}$"; then
+ return 0
+ fi
+ return 1
+ fi
+
+ # Fallback to netstat (works on Windows and many Linux distributions)
+ if command -v netstat >/dev/null 2>&1; then
+ if netstat -an 2>/dev/null | grep -qE "[:\.]${port}[[:space:]]"; then
+ return 0
+ fi
+ return 1
+ fi
+
+ # If no inspection tool is available, assume the port is free
+ return 1
+}
+
+add_port_if_new() {
+ # Helper to add a port to global arrays only if not already present
+ local port="$1"
+ local source="$2"
+ local existing_port
+
+ for existing_port in "${PORTS_TO_CHECK[@]}"; do
+ if [ "$existing_port" = "$port" ]; then
+ return 0
+ fi
+ done
+
+ PORTS_TO_CHECK+=("$port")
+ PORT_SOURCES+=("$source")
+}
+
+collect_ports_from_env_file() {
+ # Collect ports from a single env file, based on addresses and *_PORT style variables
+ local env_file="$1"
+
+ if [ ! -f "$env_file" ]; then
+ return 0
+ fi
+
+ # 1) Address-style values containing :PORT (for example http://host:3000)
+ # We only care about the numeric port part.
+ while IFS= read -r match; do
+ local port="${match#:}"
+ port=$(echo "$port" | tr -d '[:space:]')
+ if [[ "$port" =~ ^[0-9]{2,5}$ ]]; then
+ add_port_if_new "$port" "$env_file (address)"
+ fi
+ done < <(grep -Eo ':[0-9]{2,5}' "$env_file" 2>/dev/null | sort -u)
+
+ # 2) Variables that explicitly define a port, for example FOO_PORT=3000
+ while IFS= read -r line; do
+ # Strip inline comments
+ line="${line%%#*}"
+ # Extract value part after '='
+ local value="${line#*=}"
+ value=$(echo "$value" | tr -d '[:space:]"'\''')
+ if [[ "$value" =~ ^[0-9]{2,5}$ ]]; then
+ add_port_if_new "$value" "$env_file (PORT variable)"
+ fi
+ done < <(grep -E '^[A-Za-z_][A-Za-z0-9_]*_PORT *= *[0-9]{2,5}' "$env_file" 2>/dev/null)
+}
+
+check_ports_in_env_files() {
+ # Preflight check: ensure all ports referenced in env files are free
+ PORTS_TO_CHECK=()
+ PORT_SOURCES=()
+
+ # Always include the main .env if present, plus any .env.* files
+ local env_files=()
+ if [ -f ".env" ]; then
+ env_files+=(".env")
+ fi
+
+ # Include additional env variants such as .env.general and .env.mainland
+ local f
+ for f in .env.*; do
+ if [ -f "$f" ]; then
+ env_files+=("$f")
+ fi
+ done
+
+ # Collect ports from all discovered env files
+ for f in "${env_files[@]}"; do
+ collect_ports_from_env_file "$f"
+ done
+
+ if [ ${#PORTS_TO_CHECK[@]} -eq 0 ]; then
+ echo "🔍 No port definitions found in environment files, skipping port availability check."
+ echo ""
+ echo "--------------------------------"
+ echo ""
+ return 0
+ fi
+
+ echo "🔍 Checking port availability defined in environment files..."
+ local occupied_ports=()
+ local occupied_sources=()
+
+ local idx
+ for idx in "${!PORTS_TO_CHECK[@]}"; do
+ local port="${PORTS_TO_CHECK[$idx]}"
+ local source="${PORT_SOURCES[$idx]}"
+
+ if is_port_in_use "$port"; then
+ occupied_ports+=("$port")
+ occupied_sources+=("$source")
+ echo " ❌ Port $port is already in use."
+ else
+ echo " ✅ Port $port is free."
+ fi
+ done
+
+ if [ ${#occupied_ports[@]} -gt 0 ]; then
+ echo ""
+ echo "❌ Port conflict detected. The following ports required by Nexent are already in use:"
+ local i
+ for i in "${!occupied_ports[@]}"; do
+ echo " - Port ${occupied_ports[$i]}"
+ done
+ echo ""
+ echo "Please free these ports or update the corresponding .env files."
+ echo ""
+
+ # Ask user whether to continue deployment even if some ports are occupied
+ local confirm_continue
+ read -p "👉 Do you still want to continue deployment even though some ports are in use? [y/N]: " confirm_continue
+ confirm_continue=$(sanitize_input "$confirm_continue")
+ if ! [[ "$confirm_continue" =~ ^[Yy]$ ]]; then
+ echo "🚫 Deployment aborted due to port conflicts."
+ exit 0
+ fi
+
+ echo "⚠️ Continuing deployment even though some required ports are already in use."
+ fi
+
+ echo ""
+ echo "--------------------------------"
+ echo ""
+}
+
generate_minio_ak_sk() {
echo "🔑 Generating MinIO keys..."
@@ -69,7 +239,6 @@ generate_minio_ak_sk() {
if [ -z "$ACCESS_KEY" ] || [ -z "$SECRET_KEY" ]; then
echo " ❌ ERROR Failed to generate MinIO access keys"
- ERROR_OCCURRED=1
return 1
fi
@@ -130,7 +299,7 @@ generate_supabase_keys() {
generate_elasticsearch_api_key() {
# Function to generate Elasticsearch API key
- wait_for_elasticsearch_healthy || { echo " ❌ Elasticsearch health check failed"; exit 1; }
+ wait_for_elasticsearch_healthy || { echo " ❌ Elasticsearch health check failed"; return 0; }
# Generate API key
echo "🔑 Generating ELASTICSEARCH_API_KEY..."
@@ -203,7 +372,7 @@ get_compose_version() {
fi
echo "unknown"
- return 1
+ return 0
}
disable_dashboard() {
@@ -325,7 +494,6 @@ create_dir_with_permission() {
# Check if parameters are provided
if [ -z "$dir_path" ] || [ -z "$permission" ]; then
echo " ❌ ERROR Directory path and permission parameters are required." >&2
- ERROR_OCCURRED=1
return 1
fi
@@ -334,7 +502,6 @@ create_dir_with_permission() {
mkdir -p "$dir_path"
if [ $? -ne 0 ]; then
echo " ❌ ERROR Failed to create directory $dir_path." >&2
- ERROR_OCCURRED=1
return 1
fi
fi
@@ -377,7 +544,7 @@ deploy_core_services() {
echo "👀 Starting core services..."
if ! ${docker_compose_command} -p nexent -f "docker-compose${COMPOSE_FILE_SUFFIX}" up -d nexent-config nexent-runtime nexent-mcp nexent-northbound nexent-web nexent-data-process; then
echo " ❌ ERROR Failed to start core services"
- exit 1
+ return 0
fi
}
@@ -394,7 +561,7 @@ deploy_infrastructure() {
if ! ${docker_compose_command} -p nexent -f "docker-compose${COMPOSE_FILE_SUFFIX}" up -d $INFRA_SERVICES; then
echo " ❌ ERROR Failed to start infrastructure services"
- exit 1
+ return 0
fi
if [ "$ENABLE_TERMINAL_TOOL_CONTAINER" = "true" ]; then
@@ -408,14 +575,12 @@ deploy_infrastructure() {
# Check if the supabase compose file exists
if [ ! -f "docker-compose-supabase${COMPOSE_FILE_SUFFIX}" ]; then
echo " ❌ ERROR Supabase compose file not found: docker-compose-supabase${COMPOSE_FILE_SUFFIX}"
- ERROR_OCCURRED=1
return 1
fi
# Start Supabase services
if ! $docker_compose_command -p nexent -f "docker-compose-supabase${COMPOSE_FILE_SUFFIX}" up -d; then
echo " ❌ ERROR Failed to start supabase services"
- ERROR_OCCURRED=1
return 1
fi
@@ -488,8 +653,7 @@ setup_package_install_script() {
echo " ✅ Package installation script created/updated"
else
echo " ❌ ERROR openssh-install-script.sh not found"
- ERROR_OCCURRED=1
- return 1
+ return 0
fi
}
@@ -506,7 +670,7 @@ wait_for_elasticsearch_healthy() {
if [ $retries -eq $max_retries ]; then
echo " ⚠️ Warning: Elasticsearch did not become healthy within expected time"
echo " You may need to check the container logs and try again"
- return 1
+ return 0
else
echo " ✅ Elasticsearch is now healthy!"
return 0
@@ -580,7 +744,6 @@ select_terminal_tool() {
echo ""
if [ -z "$input_password" ]; then
echo "❌ SSH password cannot be empty"
- ERROR_OCCURRED=1
return 1
fi
SSH_PASSWORD="$input_password"
@@ -589,7 +752,6 @@ select_terminal_tool() {
# Validate credentials
if [ -z "$SSH_USERNAME" ] || [ -z "$SSH_PASSWORD" ]; then
echo "❌ Both username and password are required"
- ERROR_OCCURRED=1
return 1
fi
@@ -671,25 +833,28 @@ main_deploy() {
echo "--------------------------------"
echo ""
+ # Check all relevant ports from environment files before starting deployment
+ check_ports_in_env_files
+
# Select deployment version, mode and image source
- select_deployment_version || { echo "❌ Deployment version selection failed"; exit 1; }
- select_deployment_mode || { echo "❌ Deployment mode selection failed"; exit 1; }
- select_terminal_tool || { echo "❌ Terminal tool container configuration failed"; exit 1; }
- choose_image_env || { echo "❌ Image environment setup failed"; exit 1; }
+ select_deployment_version || { echo "❌ Deployment version selection failed"; exit 0; }
+ select_deployment_mode || { echo "❌ Deployment mode selection failed"; exit 0; }
+ select_terminal_tool || { echo "❌ Terminal tool container configuration failed"; exit 0; }
+ choose_image_env || { echo "❌ Image environment setup failed"; exit 0; }
# Add permission
- prepare_directory_and_data || { echo "❌ Permission setup failed"; exit 1; }
- generate_minio_ak_sk || { echo "❌ MinIO key generation failed"; exit 1; }
+ prepare_directory_and_data || { echo "❌ Permission setup failed"; exit 0; }
+ generate_minio_ak_sk || { echo "❌ MinIO key generation failed"; exit 0; }
# Generate Supabase secrets
- generate_supabase_keys || { echo "❌ Supabase secrets generation failed"; exit 1; }
+ generate_supabase_keys || { echo "❌ Supabase secrets generation failed"; exit 0; }
# Deploy infrastructure services
- deploy_infrastructure || { echo "❌ Infrastructure deployment failed"; exit 1; }
+ deploy_infrastructure || { echo "❌ Infrastructure deployment failed"; exit 0; }
# Generate Elasticsearch API key
- generate_elasticsearch_api_key || { echo "❌ Elasticsearch API key generation failed"; exit 1; }
+ generate_elasticsearch_api_key || { echo "❌ Elasticsearch API key generation failed"; exit 0; }
echo ""
echo "--------------------------------"
@@ -697,7 +862,7 @@ main_deploy() {
# Special handling for infrastructure mode
if [ "$DEPLOYMENT_MODE" = "infrastructure" ]; then
- generate_env_for_infrastructure || { echo "❌ Environment generation failed"; exit 1; }
+ generate_env_for_infrastructure || { echo "❌ Environment generation failed"; exit 0; }
echo "🎉 Infrastructure deployment completed successfully!"
echo " You can now start the core services manually using dev containers"
echo " Environment file available at: $(cd .. && pwd)/.env"
@@ -706,7 +871,7 @@ main_deploy() {
fi
# Start core services
- deploy_core_services || { echo "❌ Core services deployment failed"; exit 1; }
+ deploy_core_services || { echo "❌ Core services deployment failed"; exit 0; }
echo " ✅ Core services started successfully"
echo ""
@@ -715,7 +880,7 @@ main_deploy() {
# Create default admin user
if [ "$DEPLOYMENT_VERSION" = "full" ]; then
- create_default_admin_user || { echo "❌ Default admin user creation failed"; exit 1; }
+ create_default_admin_user || { echo "❌ Default admin user creation failed"; exit 0; }
fi
echo "🎉 Deployment completed successfully!"
@@ -726,7 +891,7 @@ main_deploy() {
version_info=$(get_compose_version)
if [[ $version_info == "unknown" ]]; then
echo "Error: Docker Compose not found or version detection failed"
- exit 1
+ exit 0
fi
# extract version
@@ -741,7 +906,7 @@ case $version_type in
# The version v1.28.0 is the minimum requirement in Docker Compose v1 that explicitly supports interpolation syntax with default values like ${VAR:-default}
if [[ $version_number < "1.28.0" ]]; then
echo "Warning: V1 version is too old, consider upgrading to V2"
- exit 1
+ exit 0
fi
docker_compose_command="docker-compose"
;;
@@ -751,14 +916,14 @@ case $version_type in
;;
*)
echo "Error: Unknown docker compose version type."
- exit 1
+ exit 0
;;
esac
# Execute main deployment with error handling
if ! main_deploy; then
echo "❌ Deployment failed. Please check the error messages above and try again."
- exit 1
+ exit 0
fi
clean
diff --git a/docker/docker-compose.prod.yml b/docker/docker-compose.prod.yml
index 41b0f0c1c..71042c3ef 100644
--- a/docker/docker-compose.prod.yml
+++ b/docker/docker-compose.prod.yml
@@ -186,6 +186,7 @@ services:
- WS_BACKEND=ws://nexent-runtime:5014
- RUNTIME_HTTP_BACKEND=http://nexent-runtime:5014
- MINIO_ENDPOINT=http://nexent-minio:9000
+ - MARKET_BACKEND=https://market.nexent.tech
logging:
driver: "json-file"
options:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index f59430d24..7898ddf61 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -199,6 +199,7 @@ services:
- WS_BACKEND=ws://nexent-runtime:5014
- RUNTIME_HTTP_BACKEND=http://nexent-runtime:5014
- MINIO_ENDPOINT=http://nexent-minio:9000
+ - MARKET_BACKEND=https://market.nexent.tech
logging:
driver: "json-file"
options:
diff --git a/docker/init.sql b/docker/init.sql
index a8af3d190..1181c8237 100644
--- a/docker/init.sql
+++ b/docker/init.sql
@@ -167,6 +167,7 @@ CREATE TABLE IF NOT EXISTS "model_record_t" (
"maximum_chunk_size" int4,
"display_name" varchar(100) COLLATE "pg_catalog"."default",
"connect_status" varchar(100) COLLATE "pg_catalog"."default",
+ "ssl_verify" boolean DEFAULT true,
"create_time" timestamp(0) DEFAULT CURRENT_TIMESTAMP,
"delete_flag" varchar(1) COLLATE "pg_catalog"."default" DEFAULT 'N'::character varying,
"update_time" timestamp(0) DEFAULT CURRENT_TIMESTAMP,
@@ -189,6 +190,7 @@ COMMENT ON COLUMN "model_record_t".expected_chunk_size IS 'Expected chunk size f
COMMENT ON COLUMN "model_record_t".maximum_chunk_size IS 'Maximum chunk size for embedding models, used during document chunking';
COMMENT ON COLUMN "model_record_t"."display_name" IS 'Model name displayed directly in frontend, customized by user';
COMMENT ON COLUMN "model_record_t"."connect_status" IS 'Model connectivity status from last check, optional values: "检测中"、"可用"、"不可用"';
+COMMENT ON COLUMN "model_record_t"."ssl_verify" IS 'Whether to verify SSL certificates when connecting to this model API. Default is true. Set to false for local services without SSL support.';
COMMENT ON COLUMN "model_record_t"."create_time" IS 'Creation time, audit field';
COMMENT ON COLUMN "model_record_t"."delete_flag" IS 'When deleted by user frontend, delete flag will be set to true, achieving soft delete effect. Optional values Y/N';
COMMENT ON COLUMN "model_record_t"."update_time" IS 'Update time, audit field';
diff --git a/docker/sql/1129_add_ssl_verify_to_model_record_t.sql b/docker/sql/1129_add_ssl_verify_to_model_record_t.sql
new file mode 100644
index 000000000..aa2c9d9c9
--- /dev/null
+++ b/docker/sql/1129_add_ssl_verify_to_model_record_t.sql
@@ -0,0 +1,5 @@
+ALTER TABLE nexent.model_record_t
+ADD COLUMN ssl_verify BOOLEAN DEFAULT TRUE;
+
+COMMENT ON COLUMN nexent.model_record_t.ssl_verify IS 'Whether to verify SSL certificates when connecting to this model API. Default is true. Set to false for local services without SSL support.';
+
diff --git a/frontend/app/[locale]/agents/AgentsContent.tsx b/frontend/app/[locale]/agents/AgentsContent.tsx
index af658c0a4..72d5e66ed 100644
--- a/frontend/app/[locale]/agents/AgentsContent.tsx
+++ b/frontend/app/[locale]/agents/AgentsContent.tsx
@@ -74,9 +74,11 @@ export default function AgentsContent({
transition={pageTransition}
style={{width: "100%", height: "100%"}}
>
- {canAccessProtectedData ? (
-
- ) : null}
+
+ {canAccessProtectedData ? (
+
+ ) : null}
+
(null);
+
+ // Agent import wizard states
+ const [importWizardVisible, setImportWizardVisible] = useState(false);
+ const [importWizardData, setImportWizardData] = useState(null);
// Use generation state passed from parent component, not local state
// Delete confirmation popup status
@@ -1589,7 +1594,7 @@ export default function AgentSetupOrchestrator({
[runNormalImport, runForceImport]
);
- // Handle importing agent
+ // Handle importing agent - use AgentImportWizard for ExportAndImportDataFormat
const handleImportAgent = (t: TFunction) => {
// Create a hidden file input element
const fileInput = document.createElement("input");
@@ -1618,6 +1623,20 @@ export default function AgentSetupOrchestrator({
return;
}
+ // Check if it's ExportAndImportDataFormat (has agent_id and agent_info)
+ if (agentInfo.agent_id && agentInfo.agent_info && typeof agentInfo.agent_info === "object") {
+ // Use AgentImportWizard for full agent import with configuration
+ const importData: ImportAgentData = {
+ agent_id: agentInfo.agent_id,
+ agent_info: agentInfo.agent_info,
+ mcp_info: agentInfo.mcp_info || [],
+ };
+ setImportWizardData(importData);
+ setImportWizardVisible(true);
+ return;
+ }
+
+ // Fallback to legacy import logic for other formats
const normalizeValue = (value?: string | null) =>
typeof value === "string" ? value.trim() : "";
@@ -1700,6 +1719,13 @@ export default function AgentSetupOrchestrator({
fileInput.click();
};
+ // Handle import completion from wizard
+ const handleImportComplete = () => {
+ refreshAgentList(t, false);
+ setImportWizardVisible(false);
+ setImportWizardData(null);
+ };
+
const handleConfirmedDuplicateImport = useCallback(async () => {
if (!pendingImportData) {
return;
@@ -2256,6 +2282,23 @@ export default function AgentSetupOrchestrator({
{t("businessLogic.config.import.duplicateDescription")}
+ {/* Agent Import Wizard */}
+ {
+ setImportWizardVisible(false);
+ setImportWizardData(null);
+ }}
+ initialData={importWizardData}
+ onImportComplete={handleImportComplete}
+ title={undefined} // Use default title
+ agentDisplayName={
+ importWizardData?.agent_info?.[String(importWizardData.agent_id)]?.display_name
+ }
+ agentDescription={
+ importWizardData?.agent_info?.[String(importWizardData.agent_id)]?.description
+ }
+ />
{/* Auto unselect knowledge_base_search notice when embedding not configured */}
{
+ log.error("Failed to refresh tools and agents after deletion:", error);
+ });
} else {
message.error(result.message);
+ // Throw error to prevent modal from closing
+ throw new Error(result.message);
}
} catch (error) {
message.error(t("mcpConfig.message.deleteServerFailed"));
+ // Throw error to prevent modal from closing
+ throw error;
}
},
});
diff --git a/frontend/app/[locale]/agents/components/agent/SubAgentPool.tsx b/frontend/app/[locale]/agents/components/agent/SubAgentPool.tsx
index f74c46040..88b8594bb 100644
--- a/frontend/app/[locale]/agents/components/agent/SubAgentPool.tsx
+++ b/frontend/app/[locale]/agents/components/agent/SubAgentPool.tsx
@@ -288,6 +288,7 @@ export default function SubAgentPool({
const isCurrentlyEditing =
editingAgent &&
String(editingAgent.id) === String(agent.id); // Ensure type matching
+ const displayName = agent.display_name || agent.name;
const agentItem = (
-
+
{!isAvailable && (
)}
- {agent.display_name && (
-
- {agent.display_name}
+ {displayName && (
+
+ {displayName}
)}
-
- {agent.name}
-
{unsavedAgentId !== null &&
String(unsavedAgentId) === String(agent.id) && (
>
) : (
@@ -650,15 +648,19 @@ function ToolPool({
{t("toolPool.tooltip.functionGuide")}
}
- overlayInnerStyle={{
- backgroundColor: "#ffffff",
- color: "#374151",
- border: "1px solid #e5e7eb",
- borderRadius: "6px",
- boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)",
- padding: "12px",
- maxWidth: "600px",
- minWidth: "400px",
+ color="#ffffff"
+ styles={{
+ body: {
+ backgroundColor: "#ffffff",
+ color: "#374151",
+ border: "1px solid #e5e7eb",
+ borderRadius: "6px",
+ boxShadow: "0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)",
+ padding: "12px",
+ maxWidth: "800px",
+ minWidth: "700px",
+ width: "fit-content",
+ },
}}
>
diff --git a/frontend/app/[locale]/chat/components/chatRightPanel.tsx b/frontend/app/[locale]/chat/components/chatRightPanel.tsx
index 874dee081..80792db0a 100644
--- a/frontend/app/[locale]/chat/components/chatRightPanel.tsx
+++ b/frontend/app/[locale]/chat/components/chatRightPanel.tsx
@@ -1,6 +1,6 @@
import { useState, useEffect, useRef, useCallback } from "react";
import { useTranslation } from "react-i18next";
-import { ExternalLink, Database, X } from "lucide-react";
+import { ExternalLink, Database, X, Server } from "lucide-react";
import { Button } from "@/components/ui/button";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
@@ -8,6 +8,8 @@ import { StaticScrollArea } from "@/components/ui/scrollArea";
import { ImageItem, ChatRightPanelProps, SearchResult } from "@/types/chat";
import { API_ENDPOINTS } from "@/services/api";
import { formatDate, formatUrl } from "@/lib/utils";
+import { convertImageUrlToApiUrl, extractObjectNameFromUrl, storageService } from "@/services/storageService";
+import { message } from "antd";
import log from "@/lib/logger";
@@ -92,30 +94,48 @@ export function ChatRightPanel({
}));
try {
- // Use the proxy service to get the image
- const response = await fetch(API_ENDPOINTS.proxy.image(imageUrl));
- const data = await response.json();
+ // Convert image URL to backend API URL
+ const apiUrl = convertImageUrlToApiUrl(imageUrl);
+
+ // Use backend API to get the image
+ const response = await fetch(apiUrl);
+
+ if (!response.ok) {
+ throw new Error(`Failed to load image: ${response.statusText}`);
+ }
- if (data.success) {
+ // Get image as blob and convert to base64
+ const blob = await response.blob();
+ const reader = new FileReader();
+
+ reader.onloadend = () => {
+ const base64Data = reader.result as string;
+ // Remove data URL prefix (e.g., "data:image/png;base64,")
+ const base64 = base64Data.split(',')[1] || base64Data;
+
setImageData((prev) => ({
...prev,
[imageUrl]: {
- base64Data: data.base64,
- contentType: data.content_type || "image/jpeg",
+ base64Data: base64,
+ contentType: blob.type || "image/jpeg",
isLoading: false,
loadAttempts: currentAttempts + 1,
},
}));
- } else {
- // If loading fails, remove it directly from the list
+ loadingImages.current.delete(imageUrl);
+ };
+
+ reader.onerror = () => {
+ log.error("Failed to read image blob");
handleImageLoadFail(imageUrl);
- }
+ loadingImages.current.delete(imageUrl);
+ };
+
+ reader.readAsDataURL(blob);
} catch (error) {
log.error(t("chatRightPanel.imageProxyError"), error);
// If loading fails, remove it directly from the list
handleImageLoadFail(imageUrl);
- } finally {
- // Whether successful or not, remove the loading mark
loadingImages.current.delete(imageUrl);
}
@@ -200,11 +220,71 @@ export function ChatRightPanel({
// Search result item component
const SearchResultItem = ({ result }: { result: SearchResult }) => {
const [isExpanded, setIsExpanded] = useState(false);
+ const [isDownloading, setIsDownloading] = useState(false);
const title = result.title || t("chatRightPanel.unknownTitle");
const url = result.url || "#";
const text = result.text || t("chatRightPanel.noContentDescription");
const published_date = result.published_date || "";
const source_type = result.source_type || "url";
+ const filename = result.filename || result.title || "";
+ const datamateDatasetId = result.score_details?.datamate_dataset_id;
+ const datamateFileId = result.score_details?.datamate_file_id;
+ const datamateBaseUrl = result.score_details?.datamate_base_url;
+
+ // Handle file download
+ const handleFileDownload = async (e: React.MouseEvent) => {
+ e.preventDefault();
+ e.stopPropagation();
+
+ if (!filename && !url) {
+ message.error(t("chatRightPanel.fileDownloadError", "File name or URL is missing"));
+ return;
+ }
+
+ setIsDownloading(true);
+ try {
+ // Handle datamate source type
+ if (source_type === "datamate") {
+ if (!datamateDatasetId || !datamateFileId || !datamateBaseUrl) {
+ if (!url || url === "#") {
+ message.error(t("chatRightPanel.fileDownloadError", "Missing Datamate dataset or file information"));
+ return;
+ }
+ }
+ await storageService.downloadDatamateFile({
+ url: url !== "#" ? url : undefined,
+ baseUrl: datamateBaseUrl,
+ datasetId: datamateDatasetId,
+ fileId: datamateFileId,
+ filename: filename || undefined,
+ });
+ message.success(t("chatRightPanel.fileDownloadSuccess", "File download started"));
+ return;
+ }
+
+ // Handle regular file source type (source_type === "file")
+ // For knowledge base files, backend stores the MinIO object_name in path_or_url,
+ // so we should always try to extract it from the URL and avoid guessing from filename.
+ let objectName: string | undefined = undefined;
+
+ if (url && url !== "#") {
+ objectName = extractObjectNameFromUrl(url) || undefined;
+ }
+
+ if (!objectName) {
+ message.error(t("chatRightPanel.fileDownloadError", "Cannot determine file object name"));
+ return;
+ }
+
+ await storageService.downloadFile(objectName, filename || "download");
+ message.success(t("chatRightPanel.fileDownloadSuccess", "File download started"));
+ } catch (error) {
+ log.error("Failed to download file:", error);
+ message.error(t("chatRightPanel.fileDownloadError", "Failed to download file. Please try again."));
+ } finally {
+ setIsDownloading(false);
+ }
+ };
return (
@@ -227,6 +307,29 @@ export function ChatRightPanel({
>
{title}
+ ) : source_type === "file" || source_type === "datamate" ? (
+
+ {isDownloading ? (
+
+ ⏳
+ {t("chatRightPanel.downloading", "Downloading...")}
+
+ ) : (
+ title
+ )}
+
) : (
-
- {source_type === "url" ? (
-
- ) : source_type === "file" ? (
-
- ) : null}
-
-
- {formatUrl(result)}
-
+ {source_type === "file" || source_type === "datamate" ? (
+ <>
+
+
+
+
+
+
+ {source_type === "datamate"
+ ? t("chatRightPanel.source.datamate", "Source: Datamate")
+ : source_type === "file"
+ ? t("chatRightPanel.source.nexent", "Source: Nexent")
+ : ""}
+
+
+ >
+ ) : (
+
+
+
+
+
+ {formatUrl(result)}
+
+
+ )}
{text.length > 150 && (
diff --git a/frontend/app/[locale]/chat/internal/chatAttachment.tsx b/frontend/app/[locale]/chat/internal/chatAttachment.tsx
index ff7b5ceb8..c08ece8f7 100644
--- a/frontend/app/[locale]/chat/internal/chatAttachment.tsx
+++ b/frontend/app/[locale]/chat/internal/chatAttachment.tsx
@@ -2,6 +2,9 @@ import { chatConfig } from "@/const/chatConfig";
import { useState } from "react";
import { useTranslation } from "react-i18next";
import { ExternalLink } from "lucide-react";
+import { storageService, convertImageUrlToApiUrl, extractObjectNameFromUrl } from "@/services/storageService";
+import { message } from "antd";
+import log from "@/lib/logger";
import {
AiFillFileImage,
AiFillFilePdf,
@@ -37,6 +40,9 @@ const ImageViewer = ({
}) => {
if (!isOpen) return null;
const { t } = useTranslation("common");
+
+ // Convert image URL to backend API URL
+ const imageUrl = convertImageUrlToApiUrl(url);
return (
@@ -56,13 +62,15 @@ const ImageViewer = ({
// File viewer component
const FileViewer = ({
+ objectName,
url,
name,
contentType,
isOpen,
onClose,
}: {
- url: string;
+ objectName?: string;
+ url?: string;
name: string;
contentType?: string;
isOpen: boolean;
@@ -70,6 +78,109 @@ const FileViewer = ({
}) => {
if (!isOpen) return null;
const { t } = useTranslation("common");
+ const [isDownloading, setIsDownloading] = useState(false);
+
+
+ // Handle file download
+ const handleDownload = async (e: React.MouseEvent) => {
+ // Prevent dialog from closing immediately
+ e.preventDefault();
+ e.stopPropagation();
+
+ // Check if URL is a direct http/https URL that can be accessed directly
+ // Exclude backend API endpoints (containing /api/file/download/)
+ if (
+ url &&
+ (url.startsWith("http://") || url.startsWith("https://")) &&
+ !url.includes("/api/file/download/")
+ ) {
+ // Direct download from HTTP/HTTPS URL without backend
+ const link = document.createElement("a");
+ link.href = url;
+ link.download = name;
+ link.style.display = "none";
+ document.body.appendChild(link);
+ link.click();
+ setTimeout(() => {
+ document.body.removeChild(link);
+ }, 100);
+ message.success(t("chatAttachment.downloadSuccess", "File download started"));
+ setTimeout(() => {
+ onClose();
+ }, 500);
+ return;
+ }
+
+ // Try to get object_name from props or extract from URL
+ let finalObjectName: string | undefined = objectName;
+
+ if (!finalObjectName && url) {
+ finalObjectName = extractObjectNameFromUrl(url) || undefined;
+ }
+
+ if (!finalObjectName) {
+ // If we still don't have object_name, fall back to direct URL download
+ if (url) {
+ // Create a temporary link to download from URL
+ const link = document.createElement("a");
+ link.href = url;
+ link.download = name;
+ link.style.display = "none";
+ document.body.appendChild(link);
+ link.click();
+ setTimeout(() => {
+ document.body.removeChild(link);
+ }, 100);
+ message.success(t("chatAttachment.downloadSuccess", "File download started"));
+ return;
+ } else {
+ message.error(t("chatAttachment.downloadError", "File object name or URL is missing"));
+ return;
+ }
+ }
+
+ setIsDownloading(true);
+ try {
+ // Start download (non-blocking, browser handles it)
+ await storageService.downloadFile(finalObjectName, name);
+ // Show success message immediately after triggering download
+ message.success(t("chatAttachment.downloadSuccess", "File download started"));
+ // Keep dialog open for a moment to show the message, then close
+ setTimeout(() => {
+ setIsDownloading(false);
+ onClose();
+ }, 500);
+ } catch (error) {
+ log.error("Failed to download file:", error);
+ setIsDownloading(false);
+ // If backend download fails and we have URL, try direct download as fallback
+ if (url) {
+ try {
+ const link = document.createElement("a");
+ link.href = url;
+ link.download = name;
+ link.style.display = "none";
+ document.body.appendChild(link);
+ link.click();
+ setTimeout(() => {
+ document.body.removeChild(link);
+ }, 100);
+ message.success(t("chatAttachment.downloadSuccess", "File download started"));
+ setTimeout(() => {
+ onClose();
+ }, 500);
+ } catch (fallbackError) {
+ message.error(
+ t("chatAttachment.downloadError", "Failed to download file. Please try again.")
+ );
+ }
+ } else {
+ message.error(
+ t("chatAttachment.downloadError", "Failed to download file. Please try again.")
+ );
+ }
+ }
+ };
return (
@@ -183,7 +296,8 @@ export function ChatAttachment({
}: ChatAttachmentProps) {
const [selectedImage, setSelectedImage] = useState
(null);
const [selectedFile, setSelectedFile] = useState<{
- url: string;
+ objectName?: string;
+ url?: string;
name: string;
contentType?: string;
} | null>(null);
@@ -218,6 +332,7 @@ export function ChatAttachment({
} else {
// For files, use internal preview
setSelectedFile({
+ objectName: attachment.object_name,
url: attachment.url,
name: attachment.name,
contentType: attachment.contentType,
@@ -252,7 +367,7 @@ export function ChatAttachment({
{attachment.url && (

e.stopPropagation()}
>

{
diff --git a/frontend/app/[locale]/chat/streaming/taskWindow.tsx b/frontend/app/[locale]/chat/streaming/taskWindow.tsx
index 0180c5b7c..cb1d1cc94 100644
--- a/frontend/app/[locale]/chat/streaming/taskWindow.tsx
+++ b/frontend/app/[locale]/chat/streaming/taskWindow.tsx
@@ -17,6 +17,7 @@ import { MarkdownRenderer } from "@/components/ui/markdownRenderer";
import { chatConfig } from "@/const/chatConfig";
import { ChatMessageType, TaskMessageType, CardItem, MessageHandler } from "@/types/chat";
import { useChatTaskMessage } from "@/hooks/useChatTaskMessage";
+import { storageService, extractObjectNameFromUrl } from "@/services/storageService";
import log from "@/lib/logger";
// Icon mapping dictionary - map strings to corresponding icon components
@@ -31,6 +32,23 @@ const iconMap: Record
= {
default: , // Default icon
};
+type KnowledgeSiteInfo = {
+ key: string;
+ domain: string;
+ displayName: string;
+ faviconUrl: string;
+ useDefaultIcon: boolean;
+ isKnowledgeBase: boolean;
+ sourceType: string;
+ url: string;
+ filename: string;
+ datamateDatasetId?: string;
+ datamateFileId?: string;
+ datamateBaseUrl?: string;
+ objectName?: string;
+ canOpenWeb: boolean;
+};
+
// Define the handlers for different types of messages to improve extensibility
const messageHandlers: MessageHandler[] = [
// Preprocess type processor - handles contents array logic
@@ -126,77 +144,188 @@ const messageHandlers: MessageHandler[] = [
}
);
- // Process website information for display
- const siteInfos = uniqueSearchResults.map((result: any) => {
- const pageUrl = result.url || "";
- const filename = result.filename || "";
- const sourceType = result.source_type || "";
- let domain = t("taskWindow.unknownSource");
- let displayName = t("taskWindow.unknownSource");
- let baseUrl = "";
- let faviconUrl = "";
- let useDefaultIcon = false;
- let isKnowledgeBase = false;
- let canClick = true; // whether to allow click to jump
+ // Process website / knowledge base information for display
+ const siteInfos: KnowledgeSiteInfo[] = uniqueSearchResults.map(
+ (result: any, index: number) => {
+ const pageUrl = result.url || "";
+ const filename = result.filename || result.title || "";
+ const sourceType = result.source_type || (filename ? "file" : "url");
+ const scoreDetails = result.score_details || {};
+ const datamateDatasetId =
+ scoreDetails?.datamate_dataset_id || scoreDetails?.dataset_id;
+ const datamateFileId =
+ scoreDetails?.datamate_file_id || scoreDetails?.file_id;
+ const datamateBaseUrl =
+ scoreDetails?.datamate_base_url ||
+ scoreDetails?.datamate_baseUrl ||
+ scoreDetails?.base_url;
+ const objectName =
+ result.object_name ||
+ scoreDetails?.object_name ||
+ scoreDetails?.minio_object_name;
+
+ let domain = t("taskWindow.unknownSource");
+ let displayName = t("taskWindow.unknownSource");
+ let baseUrl = "";
+ let faviconUrl = "";
+ let useDefaultIcon = false;
+ let isKnowledgeBase =
+ sourceType === "file" ||
+ sourceType === "datamate" ||
+ (!sourceType && !!filename);
+ let canOpenWeb = false;
+
+ if (isKnowledgeBase) {
+ displayName =
+ filename || result.title || t("taskWindow.knowledgeFile");
+ domain =
+ datamateBaseUrl ||
+ (pageUrl && pageUrl !== "#"
+ ? (() => {
+ try {
+ return new URL(pageUrl).hostname;
+ } catch {
+ return t("taskWindow.unknownSource");
+ }
+ })()
+ : t("taskWindow.unknownSource"));
+ useDefaultIcon = true;
+ } else if (pageUrl && pageUrl !== "#") {
+ try {
+ const parsedUrl = new URL(pageUrl);
+ baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}`;
+ domain = parsedUrl.hostname;
+
+ displayName = domain
+ .replace(/^www\./, "")
+ .replace(
+ /\.(com|cn|org|net|io|gov|edu|co|info|biz|xyz)(\.[a-z]{2})?$/,
+ ""
+ );
+ if (!displayName) {
+ displayName = domain;
+ }
+
+ faviconUrl = `${baseUrl}/favicon.ico`;
+ canOpenWeb = true;
+ } catch (e) {
+ log.error(t("taskWindow.urlParseError"), e);
+ useDefaultIcon = true;
+ canOpenWeb = false;
+ }
+ } else {
+ useDefaultIcon = true;
+ canOpenWeb = false;
+ }
- // first judge based on source_type
- if (sourceType === "file") {
- isKnowledgeBase = true;
- displayName =
- filename || result.title || t("taskWindow.knowledgeFile");
- useDefaultIcon = true;
- canClick = false; // file type does not allow jump
- }
- // if there is no source_type, judge based on filename (compatibility processing)
- else if (filename) {
- isKnowledgeBase = true;
- displayName = filename;
- useDefaultIcon = true;
- canClick = false; // file type does not allow jump
+ return {
+ key: `site-${index}-${result.cite_index ?? ""}-${filename ?? ""}`,
+ domain,
+ displayName,
+ faviconUrl,
+ url: pageUrl,
+ useDefaultIcon,
+ isKnowledgeBase,
+ filename,
+ sourceType,
+ datamateDatasetId,
+ datamateFileId,
+ datamateBaseUrl,
+ objectName,
+ canOpenWeb,
+ };
}
- // handle webpage link
- else if (pageUrl && pageUrl !== "#") {
- try {
- const parsedUrl = new URL(pageUrl);
- baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}`;
- domain = parsedUrl.hostname;
+ );
- // Process the domain, remove the www prefix and com/cn etc. suffix
- displayName = domain
- .replace(/^www\./, "") // Remove the www. prefix
- .replace(
- /\.(com|cn|org|net|io|gov|edu|co|info|biz|xyz)(\.[a-z]{2})?$/,
- ""
- ); // Remove common suffixes
+ const handleKnowledgeFileDownload = async (
+ site: KnowledgeSiteInfo
+ ): Promise => {
+ try {
+ if (site.sourceType === "datamate") {
+ if (
+ !site.datamateDatasetId &&
+ !site.datamateFileId &&
+ (!site.url || site.url === "#")
+ ) {
+ message.error(
+ t(
+ "taskWindow.downloadError",
+ "Missing Datamate dataset or file information"
+ )
+ );
+ return;
+ }
- // If the processing is empty, use the original domain
- if (!displayName) {
- displayName = domain;
+ await storageService.downloadDatamateFile({
+ url: site.url && site.url !== "#" ? site.url : undefined,
+ baseUrl: site.datamateBaseUrl,
+ datasetId: site.datamateDatasetId,
+ fileId: site.datamateFileId,
+ filename: site.filename || undefined,
+ });
+ } else {
+ // Check if URL is a direct http/https URL that can be accessed directly
+ // Exclude backend API endpoints (containing /api/file/download/)
+ if (
+ site.url &&
+ site.url !== "#" &&
+ (site.url.startsWith("http://") || site.url.startsWith("https://")) &&
+ !site.url.includes("/api/file/download/")
+ ) {
+ // Direct download from HTTP/HTTPS URL without backend
+ const link = document.createElement("a");
+ link.href = site.url;
+ link.download = site.filename || "download";
+ link.style.display = "none";
+ document.body.appendChild(link);
+ link.click();
+ setTimeout(() => {
+ document.body.removeChild(link);
+ }, 100);
+ message.success(
+ t("taskWindow.downloadSuccess", "File download started")
+ );
+ return;
}
- faviconUrl = `${baseUrl}/favicon.ico`;
- canClick = true;
- } catch (e) {
- log.error(t("taskWindow.urlParseError"), e);
- useDefaultIcon = true;
- canClick = false;
+ let objectName = site.objectName;
+ if (!objectName && site.url) {
+ objectName =
+ extractObjectNameFromUrl(site.url) || undefined;
+ }
+ if (!objectName && site.filename) {
+ objectName = site.filename.includes("/")
+ ? site.filename
+ : `attachments/${site.filename}`;
+ }
+ if (!objectName) {
+ message.error(
+ t(
+ "taskWindow.downloadError",
+ "Failed to download file. Please try again."
+ )
+ );
+ return;
+ }
+ await storageService.downloadFile(
+ objectName,
+ site.filename || undefined
+ );
}
- } else {
- useDefaultIcon = true;
- canClick = false;
- }
- return {
- domain,
- displayName,
- faviconUrl,
- url: pageUrl,
- useDefaultIcon,
- isKnowledgeBase,
- filename,
- canClick,
- };
- });
+ message.success(
+ t("taskWindow.downloadSuccess", "File download started")
+ );
+ } catch (error) {
+ log.error("Failed to download knowledge file:", error);
+ message.error(
+ t(
+ "taskWindow.downloadError",
+ "Failed to download file. Please try again."
+ )
+ );
+ }
+ };
// Render the search result information bar
return (
@@ -237,9 +366,11 @@ const messageHandlers: MessageHandler[] = [
gap: "0.5rem",
}}
>
- {siteInfos.map((site: any, index: number) => (
+ {siteInfos.map((site) => {
+ const isClickable = site.isKnowledgeBase || site.canOpenWeb;
+ return (
{
- if (site.canClick && site.url) {
+ if (site.isKnowledgeBase) {
+ handleKnowledgeFileDownload(site);
+ } else if (site.canOpenWeb && site.url) {
window.open(site.url, "_blank", "noopener,noreferrer");
}
}}
onMouseEnter={(e) => {
- if (site.canClick) {
+ if (isClickable) {
e.currentTarget.style.backgroundColor = "#f3f4f6";
}
}}
onMouseLeave={(e) => {
- if (site.canClick) {
+ if (isClickable) {
e.currentTarget.style.backgroundColor = "#f9fafb";
}
}}
title={
- site.canClick
+ site.isKnowledgeBase
+ ? t("taskWindow.downloadFile", {
+ name: site.filename || site.displayName,
+ })
+ : site.canOpenWeb
? t("taskWindow.visit", { domain: site.domain })
: site.filename || site.displayName
}
@@ -314,9 +449,26 @@ const messageHandlers: MessageHandler[] = [
}}
/>
)}
- {site.displayName}
+
+ {site.displayName}
+ {site.isKnowledgeBase && (
+
+ )}
+
- ))}
+ );
+ })}
@@ -967,6 +1119,19 @@ export function TaskWindow({ messages, isStreaming = false }: TaskWindowProps) {
);
};
+ // Error messages that should be completely hidden (including the node)
+ const suppressedErrorMessages = [
+ "Model is interrupted by stop event",
+ "Agent execution interrupted by external stop signal",
+ ];
+
+ // Check if a message should be suppressed (not displayed at all)
+ const shouldSuppressMessage = (message: any) => {
+ if (message.type !== "error") return false;
+ const content = message.content || "";
+ return suppressedErrorMessages.some((errText) => content.includes(errText));
+ };
+
// Check if it is the last message
const isLastMessage = (index: number, messages: any[]) => {
return index === messages.length - 1;
@@ -996,15 +1161,20 @@ export function TaskWindow({ messages, isStreaming = false }: TaskWindowProps) {
);
}
+ // Filter out messages that should be suppressed
+ const filteredGroupedMessages = groupedMessages.filter(
+ (group) => !shouldSuppressMessage(group.message)
+ );
+
return (
- {groupedMessages.map((group, groupIndex) => {
+ {filteredGroupedMessages.map((group, groupIndex) => {
const message = group.message;
const isBlinking = shouldBlinkDot(
groupIndex,
- groupedMessages.map((g) => g.message)
+ filteredGroupedMessages.map((g) => g.message)
);
return (
diff --git a/frontend/app/[locale]/knowledges/KnowledgeBaseConfiguration.tsx b/frontend/app/[locale]/knowledges/KnowledgeBaseConfiguration.tsx
index b2f7b4f24..995eea580 100644
--- a/frontend/app/[locale]/knowledges/KnowledgeBaseConfiguration.tsx
+++ b/frontend/app/[locale]/knowledges/KnowledgeBaseConfiguration.tsx
@@ -4,7 +4,7 @@ import type React from "react";
import { useState, useEffect, useRef, useLayoutEffect } from "react";
import { useTranslation } from "react-i18next";
-import { App, Modal } from "antd";
+import { App, Modal, Row, Col } from "antd";
import { InfoCircleFilled, WarningFilled } from "@ant-design/icons";
import {
DOCUMENT_ACTION_TYPES,
@@ -19,7 +19,7 @@ import { KnowledgeBase } from "@/types/knowledgeBase";
import { useConfig } from "@/hooks/useConfig";
import {
SETUP_PAGE_CONTAINER,
- FLEX_TWO_COLUMN_LAYOUT,
+ TWO_COLUMN_LAYOUT,
STANDARD_CARD,
} from "@/const/layoutConstants";
@@ -757,6 +757,40 @@ function DataConfig({ isActive }: DataConfigProps) {
setNewKbName(name);
};
+ // If Embedding model is not configured, show warning container instead of content
+ if (showEmbeddingWarning) {
+ return (
+
+
+
+
+
+ {t("embedding.knowledgeBaseDisabledWarningModal.title")}
+
+
+
+
+ );
+ }
+
return (
<>
- {showEmbeddingWarning && (
-
- )}
-
contentRef.current || document.body}
- styles={{ body: { padding: 0 } }}
- rootClassName="kb-embedding-warning"
- >
-
-
-
-
-
- {t("embedding.knowledgeBaseDisabledWarningModal.title")}
-
-
-
-
-
-
- {/* Left knowledge base list - occupies 1/3 space */}
-
+
+
{}} // No need to trigger repeatedly here as it's already handled in handleKnowledgeBaseClick
/>
-
-
- {/* Right content area - occupies 2/3 space, now unified with config.tsx style */}
-
+
+
+
{isCreatingMode ? (
)}
-
-
+
+
>
);
diff --git a/frontend/app/[locale]/knowledges/KnowledgesContent.tsx b/frontend/app/[locale]/knowledges/KnowledgesContent.tsx
index cb8f0731b..779ab47dc 100644
--- a/frontend/app/[locale]/knowledges/KnowledgesContent.tsx
+++ b/frontend/app/[locale]/knowledges/KnowledgesContent.tsx
@@ -99,7 +99,9 @@ export default function KnowledgesContent({
transition={pageTransition}
style={{width: "100%", height: "100%"}}
>
-
+
+
+
) : null}
>
diff --git a/frontend/app/[locale]/knowledges/components/document/DocumentChunk.tsx b/frontend/app/[locale]/knowledges/components/document/DocumentChunk.tsx
index b6e1d118b..0022eec70 100644
--- a/frontend/app/[locale]/knowledges/components/document/DocumentChunk.tsx
+++ b/frontend/app/[locale]/knowledges/components/document/DocumentChunk.tsx
@@ -26,6 +26,8 @@ import {
FilePlus2,
Goal,
X,
+ Server,
+ Database,
} from "lucide-react";
import { FieldNumberOutlined } from "@ant-design/icons";
import knowledgeBaseService from "@/services/knowledgeBaseService";
@@ -47,6 +49,7 @@ interface Chunk {
filename?: string;
create_time?: string;
score?: number; // Search score (0-1 range) - only present in search results
+ source_type?: string; // Source type: "file" (nexent) or "datamate"
}
interface ChunkFormValues {
@@ -289,6 +292,7 @@ const DocumentChunk: React.FC = ({
filename: item.filename,
create_time: item.create_time,
score: item.score, // Preserve search score for display
+ source_type: item.source_type, // Preserve source type for display
};
});
@@ -657,6 +661,37 @@ const DocumentChunk: React.FC = ({
}
>
+ {/* Display filename and source type if available */}
+ {chunk.filename && (
+
+
+
+
+
+
+
+ {chunk.filename}
+
+
+ {chunk.source_type && (
+
+
+
+
+
+ {chunk.source_type === "datamate"
+ ? t("document.chunk.source.datamate", "来源: Datamate")
+ : chunk.source_type === "file" ||
+ chunk.source_type === "minio" ||
+ chunk.source_type === "local"
+ ? t("document.chunk.source.nexent", "来源: Nexent")
+ : ""}
+
+
+ )}
+
+
+ )}
{chunk.content || ""}
diff --git a/frontend/app/[locale]/layout.tsx b/frontend/app/[locale]/layout.tsx
index 0a5822d8f..26a6f9d6b 100644
--- a/frontend/app/[locale]/layout.tsx
+++ b/frontend/app/[locale]/layout.tsx
@@ -16,25 +16,28 @@ import log from "@/lib/logger";
const inter = Inter({ subsets: ["latin"] });
export async function generateMetadata(props: {
- params: Promise<{ locale: string }>;
+ params: Promise<{ locale?: string }>;
}): Promise {
const { locale } = await props.params;
+ const resolvedLocale = (["zh", "en"].includes(locale ?? "")
+ ? locale
+ : "zh") as "zh" | "en";
let messages: any = {};
- if (["zh", "en"].includes(locale)) {
+ if (["zh", "en"].includes(resolvedLocale)) {
try {
const filePath = path.join(
process.cwd(),
"public",
"locales",
- locale,
+ resolvedLocale,
"common.json"
);
const fileContent = await fs.readFile(filePath, "utf8");
messages = JSON.parse(fileContent);
} catch (error) {
log.error(
- `Failed to load i18n messages for locale: ${locale}`,
+ `Failed to load i18n messages for locale: ${resolvedLocale}`,
error
);
}
@@ -54,15 +57,20 @@ export async function generateMetadata(props: {
};
}
-export default async function RootLayout(props: {
+export default async function RootLayout({
+ children,
+ params,
+}: {
children: ReactNode;
- params: Promise<{ locale: string }>;
+ params: Promise<{ locale?: string }>;
}) {
- const { children, params } = props;
const { locale } = await params;
+ const resolvedLocale = (["zh", "en"].includes(locale ?? "")
+ ? locale
+ : "zh") as "zh" | "en";
return (
-
+
void;
}
-interface ConfigField {
- fieldPath: string; // e.g., "duty_prompt", "tools[0].params.api_key"
- fieldLabel: string; // User-friendly label
- promptHint?: string; // Hint from
- currentValue: string;
-}
-
-interface McpServerToInstall {
- mcp_server_name: string;
- mcp_url: string;
- isInstalled: boolean;
- isUrlEditable: boolean; // true if url is
- editedUrl?: string;
-}
-
-const needsConfig = (value: any): boolean => {
- if (typeof value === "string") {
- return value.trim() === "" || value.trim().startsWith(" {
- if (typeof value !== "string") return undefined;
- const match = value.trim().match(/^$/);
- return match ? match[1] : undefined;
-};
-
export default function AgentInstallModal({
visible,
onCancel,
agentDetails,
onInstallComplete,
}: AgentInstallModalProps) {
- const { t, i18n } = useTranslation("common");
- const isZh = i18n.language === "zh" || i18n.language === "zh-CN";
- const { message } = App.useApp();
-
- // Use unified import hook
- const { importFromData, isImporting: isInstallingAgent } = useAgentImport({
- onSuccess: () => {
- onInstallComplete?.();
- },
- onError: (error) => {
- message.error(error.message || t("market.install.error.installFailed", "Failed to install agent"));
- },
- });
-
- const [currentStep, setCurrentStep] = useState(0);
- const [llmModels, setLlmModels] = useState([]);
- const [loadingModels, setLoadingModels] = useState(false);
- const [selectedModelId, setSelectedModelId] = useState(null);
- const [selectedModelName, setSelectedModelName] = useState("");
-
- const [configFields, setConfigFields] = useState([]);
- const [configValues, setConfigValues] = useState>({});
-
- const [mcpServers, setMcpServers] = useState([]);
- const [existingMcpServers, setExistingMcpServers] = useState([]);
- const [loadingMcpServers, setLoadingMcpServers] = useState(false);
- const [installingMcp, setInstallingMcp] = useState>({});
-
- // Load LLM models
- useEffect(() => {
- if (visible) {
- loadLLMModels();
- }
- }, [visible]);
-
- // Parse agent details for config fields and MCP servers
- useEffect(() => {
- if (visible && agentDetails) {
- parseConfigFields();
- parseMcpServers();
- }
- }, [visible, agentDetails]);
-
- const loadLLMModels = async () => {
- setLoadingModels(true);
- try {
- const models = await modelService.getLLMModels();
- setLlmModels(models.filter(m => m.connect_status === "available"));
-
- // Auto-select first available model
- if (models.length > 0 && models[0].connect_status === "available") {
- setSelectedModelId(models[0].id);
- setSelectedModelName(models[0].displayName);
- }
- } catch (error) {
- log.error("Failed to load LLM models:", error);
- message.error(t("market.install.error.loadModels", "Failed to load models"));
- } finally {
- setLoadingModels(false);
- }
- };
-
- const parseConfigFields = () => {
- if (!agentDetails) return;
-
- const fields: ConfigField[] = [];
-
- // Check basic fields (excluding MCP-related fields)
- const basicFields: Array<{ key: keyof MarketAgentDetail; label: string }> = [
- { key: "description", label: t("market.detail.description", "Description") },
- { key: "business_description", label: t("market.detail.businessDescription", "Business Description") },
- { key: "duty_prompt", label: t("market.detail.dutyPrompt", "Duty Prompt") },
- { key: "constraint_prompt", label: t("market.detail.constraintPrompt", "Constraint Prompt") },
- { key: "few_shots_prompt", label: t("market.detail.fewShotsPrompt", "Few Shots Prompt") },
- ];
-
- basicFields.forEach(({ key, label }) => {
- const value = agentDetails[key];
- if (needsConfig(value)) {
- fields.push({
- fieldPath: key,
- fieldLabel: label,
- promptHint: extractPromptHint(value as string),
- currentValue: value as string,
- });
- }
- });
-
- // Check tool params (excluding MCP server names/urls)
- agentDetails.tools?.forEach((tool, toolIndex) => {
- if (tool.params && typeof tool.params === "object") {
- Object.entries(tool.params).forEach(([paramKey, paramValue]) => {
- if (needsConfig(paramValue)) {
- fields.push({
- fieldPath: `tools[${toolIndex}].params.${paramKey}`,
- fieldLabel: `${tool.name || tool.class_name} - ${paramKey}`,
- promptHint: extractPromptHint(paramValue as string),
- currentValue: paramValue as string,
- });
- }
- });
- }
- });
-
- setConfigFields(fields);
-
- // Initialize config values
- const initialValues: Record = {};
- fields.forEach(field => {
- initialValues[field.fieldPath] = "";
- });
- setConfigValues(initialValues);
- };
-
- const parseMcpServers = async () => {
- if (!agentDetails?.mcp_servers || agentDetails.mcp_servers.length === 0) {
- setMcpServers([]);
- return;
- }
-
- setLoadingMcpServers(true);
- try {
- // Load existing MCP servers from system
- const result = await getMcpServerList();
- const existing = result.success ? result.data : [];
- setExistingMcpServers(existing);
-
- // Check each MCP server
- const serversToInstall: McpServerToInstall[] = agentDetails.mcp_servers.map(mcp => {
- const isUrlConfigNeeded = needsConfig(mcp.mcp_url);
-
- // Check if already installed (match by both name and url)
- const isInstalled = !isUrlConfigNeeded && existing.some(
- (existingMcp: McpServer) =>
- existingMcp.service_name === mcp.mcp_server_name &&
- existingMcp.mcp_url === mcp.mcp_url
- );
-
- return {
- mcp_server_name: mcp.mcp_server_name,
- mcp_url: mcp.mcp_url,
- isInstalled,
- isUrlEditable: isUrlConfigNeeded,
- editedUrl: isUrlConfigNeeded ? "" : mcp.mcp_url,
- };
- });
-
- setMcpServers(serversToInstall);
- } catch (error) {
- log.error("Failed to check MCP servers:", error);
- message.error(t("market.install.error.checkMcp", "Failed to check MCP servers"));
- } finally {
- setLoadingMcpServers(false);
- }
- };
-
- const handleMcpUrlChange = (index: number, newUrl: string) => {
- setMcpServers(prev => {
- const updated = [...prev];
- updated[index].editedUrl = newUrl;
- return updated;
- });
- };
-
- const handleInstallMcp = async (index: number) => {
- const mcp = mcpServers[index];
- const urlToUse = mcp.editedUrl || mcp.mcp_url;
-
- if (!urlToUse || urlToUse.trim() === "") {
- message.error(t("market.install.error.mcpUrlRequired", "MCP URL is required"));
- return;
- }
-
- const key = `${index}`;
- setInstallingMcp(prev => ({ ...prev, [key]: true }));
-
- try {
- const result = await addMcpServer(urlToUse, mcp.mcp_server_name);
- if (result.success) {
- message.success(t("market.install.success.mcpInstalled", "MCP server installed successfully"));
- // Mark as installed - update state directly without re-fetching
- setMcpServers(prev => {
- const updated = [...prev];
- updated[index].isInstalled = true;
- updated[index].editedUrl = urlToUse;
- return updated;
- });
- } else {
- message.error(result.message || t("market.install.error.mcpInstall", "Failed to install MCP server"));
+ // Convert MarketAgentDetail to ImportAgentData format
+ const importData: ImportAgentData | null = agentDetails?.agent_json
+ ? {
+ agent_id: agentDetails.agent_id,
+ agent_info: agentDetails.agent_json.agent_info,
+ mcp_info: agentDetails.agent_json.mcp_info,
}
- } catch (error) {
- log.error("Failed to install MCP server:", error);
- message.error(t("market.install.error.mcpInstall", "Failed to install MCP server"));
- } finally {
- setInstallingMcp(prev => ({ ...prev, [key]: false }));
- }
- };
-
- const handleNext = () => {
- if (currentStep === 0) {
- // Step 1: Model selection validation
- if (!selectedModelId || !selectedModelName) {
- message.error(t("market.install.error.modelRequired", "Please select a model"));
- return;
- }
- } else if (currentStep === 1) {
- // Step 2: Config fields validation
- const emptyFields = configFields.filter(field => !configValues[field.fieldPath]?.trim());
- if (emptyFields.length > 0) {
- message.error(t("market.install.error.configRequired", "Please fill in all required fields"));
- return;
- }
- }
-
- setCurrentStep(prev => prev + 1);
- };
-
- const handlePrevious = () => {
- setCurrentStep(prev => prev - 1);
- };
-
- const handleInstall = async () => {
- try {
- // Prepare the data structure for import
- const importData = prepareImportData();
-
- if (!importData) {
- message.error(t("market.install.error.invalidData", "Invalid agent data"));
- return;
- }
-
- log.info("Importing agent with data:", importData);
-
- // Import using unified hook
- await importFromData(importData);
-
- // Success message will be shown by onSuccess callback
- message.success(t("market.install.success", "Agent installed successfully!"));
- } catch (error) {
- // Error message will be shown by onError callback
- log.error("Failed to install agent:", error);
- }
- };
-
- const prepareImportData = () => {
- if (!agentDetails) return null;
-
- // Clone agent_json structure
- const agentJson = JSON.parse(JSON.stringify(agentDetails.agent_json));
-
- // Update model information
- const agentInfo = agentJson.agent_info[String(agentDetails.agent_id)];
- if (agentInfo) {
- agentInfo.model_id = selectedModelId;
- agentInfo.model_name = selectedModelName;
-
- // Clear business logic model fields
- agentInfo.business_logic_model_id = null;
- agentInfo.business_logic_model_name = null;
-
- // Update config fields
- configFields.forEach(field => {
- const value = configValues[field.fieldPath];
- if (field.fieldPath.includes("tools[")) {
- // Handle tool params
- const match = field.fieldPath.match(/tools\[(\d+)\]\.params\.(.+)/);
- if (match && agentInfo.tools) {
- const toolIndex = parseInt(match[1]);
- const paramKey = match[2];
- if (agentInfo.tools[toolIndex]) {
- agentInfo.tools[toolIndex].params[paramKey] = value;
- }
- }
- } else {
- // Handle basic fields
- agentInfo[field.fieldPath] = value;
- }
- });
-
- // Update MCP info
- if (agentJson.mcp_info) {
- agentJson.mcp_info = agentJson.mcp_info.map((mcp: any) => {
- const matchingServer = mcpServers.find(
- s => s.mcp_server_name === mcp.mcp_server_name
- );
- if (matchingServer && matchingServer.editedUrl) {
- return {
- ...mcp,
- mcp_url: matchingServer.editedUrl,
- };
- }
- return mcp;
- });
- }
- }
-
- return agentJson;
- };
-
- const handleCancel = () => {
- // Reset state
- setCurrentStep(0);
- setSelectedModelId(null);
- setSelectedModelName("");
- setConfigFields([]);
- setConfigValues({});
- setMcpServers([]);
- onCancel();
- };
-
- // Filter only required steps for navigation
- const steps = [
- {
- key: "model",
- title: t("market.install.step.model", "Select Model"),
- },
- configFields.length > 0 && {
- key: "config",
- title: t("market.install.step.config", "Configure Fields"),
- },
- mcpServers.length > 0 && {
- key: "mcp",
- title: t("market.install.step.mcp", "MCP Servers"),
- },
- ].filter(Boolean) as Array<{ key: string; title: string }>;
-
- // Check if can proceed to next step
- const canProceed = () => {
- const currentStepKey = steps[currentStep]?.key;
-
- if (currentStepKey === "model") {
- return selectedModelId !== null && selectedModelName !== "";
- } else if (currentStepKey === "config") {
- return configFields.every(field => configValues[field.fieldPath]?.trim());
- } else if (currentStepKey === "mcp") {
- // All non-editable MCPs should be installed or have edited URLs
- return mcpServers.every(mcp =>
- mcp.isInstalled ||
- (mcp.isUrlEditable && mcp.editedUrl && mcp.editedUrl.trim() !== "") ||
- (!mcp.isUrlEditable && mcp.mcp_url && mcp.mcp_url.trim() !== "")
- );
- }
-
- return true;
- };
-
- const renderStepContent = () => {
- const currentStepKey = steps[currentStep]?.key;
-
- if (currentStepKey === "model") {
- return (
-
- {/* Agent Info - Title and Description Style */}
- {agentDetails && (
-
-
- {agentDetails.display_name}
-
-
- {agentDetails.description}
-
-
- )}
-
-
-
- {t("market.install.model.description", "Select a model from your configured models to use for this agent.")}
-
-
-
-
-
- {loadingModels ? (
-
- ) : (
-
- )}
-
-
-
- {llmModels.length === 0 && !loadingModels && (
-
- {t("market.install.model.noModels", "No available models. Please configure models first.")}
-
- )}
-
-
- );
- } else if (currentStepKey === "config") {
- return (
-
-
- {t("market.install.config.description", "Please configure the following required fields for this agent.")}
-
-
-
- {field.fieldLabel}
- *
-
- }
- required={false}
- >
- {
- setConfigValues(prev => ({
- ...prev,
- [field.fieldPath]: e.target.value,
- }));
- }}
- placeholder={field.promptHint || t("market.install.config.placeholder", "Enter configuration value")}
- rows={3}
- size="large"
- />
-
- ))}
-
-
- );
- } else if (currentStepKey === "mcp") {
- return (
-
-
- {t("market.install.mcp.description", "This agent requires the following MCP servers. Please install or configure them.")}
-
-
- {loadingMcpServers ? (
-
-
-
- ) : (
-
- {mcpServers.map((mcp, index) => (
-
-
-
-
-
- {mcp.mcp_server_name}
-
- {mcp.isInstalled ? (
- } color="success" className="text-sm">
- {t("market.install.mcp.installed", "Installed")}
-
- ) : (
- } color="default" className="text-sm">
- {t("market.install.mcp.notInstalled", "Not Installed")}
-
- )}
-
-
-
-
- MCP URL:
-
- {(mcp.isUrlEditable || !mcp.isInstalled) ? (
- handleMcpUrlChange(index, e.target.value)}
- placeholder={mcp.isUrlEditable
- ? t("market.install.mcp.urlPlaceholder", "Enter MCP server URL")
- : mcp.mcp_url
- }
- size="middle"
- disabled={mcp.isInstalled}
- style={{ maxWidth: "400px" }}
- />
- ) : (
-
- {mcp.editedUrl || mcp.mcp_url}
-
- )}
-
-
-
- {!mcp.isInstalled && (
-
}
- onClick={() => handleInstallMcp(index)}
- loading={installingMcp[String(index)]}
- disabled={!mcp.editedUrl || mcp.editedUrl.trim() === ""}
- className="flex-shrink-0"
- >
- {t("market.install.mcp.install", "Install")}
-
- )}
-
-
- ))}
-
- )}
-
- );
- }
-
- return null;
- };
-
- const isLastStep = currentStep === steps.length - 1;
+ : null;
return (
-
-
- {t("market.install.title", "Install Agent")}
-
- }
- open={visible}
- onCancel={handleCancel}
- width={800}
- footer={
-
-
-
- {currentStep > 0 && (
-
- )}
- {!isLastStep && (
-
- )}
- {isLastStep && (
- }
- >
- {isInstallingAgent
- ? t("market.install.button.installing", "Installing...")
- : t("market.install.button.install", "Install")}
-
- )}
-
-
- }
- >
-
-
({
- title: step.title,
- }))}
- className="mb-6"
- />
-
-
- {renderStepContent()}
-
-
-
+
);
}
-
diff --git a/frontend/app/[locale]/mcp-tools/McpToolsContent.tsx b/frontend/app/[locale]/mcp-tools/McpToolsContent.tsx
new file mode 100644
index 000000000..89c6c03d4
--- /dev/null
+++ b/frontend/app/[locale]/mcp-tools/McpToolsContent.tsx
@@ -0,0 +1,128 @@
+"use client";
+
+import React from "react";
+import { motion } from "framer-motion";
+import { useTranslation } from "react-i18next";
+import { Puzzle } from "lucide-react";
+
+import { useSetupFlow } from "@/hooks/useSetupFlow";
+import { ConnectionStatus } from "@/const/modelConfig";
+
+interface McpToolsContentProps {
+ /** Connection status */
+ connectionStatus?: ConnectionStatus;
+ /** Is checking connection */
+ isCheckingConnection?: boolean;
+ /** Check connection callback */
+ onCheckConnection?: () => void;
+ /** Callback to expose connection status */
+ onConnectionStatusChange?: (status: ConnectionStatus) => void;
+}
+
+/**
+ * McpToolsContent - MCP tools management coming soon page
+ * This will allow admins to manage MCP servers and tools
+ */
+export default function McpToolsContent({
+ connectionStatus: externalConnectionStatus,
+ isCheckingConnection: externalIsCheckingConnection,
+ onCheckConnection: externalOnCheckConnection,
+ onConnectionStatusChange,
+}: McpToolsContentProps) {
+ const { t } = useTranslation("common");
+
+ // Use custom hook for common setup flow logic
+ const { canAccessProtectedData, pageVariants, pageTransition } = useSetupFlow({
+ requireAdmin: true,
+ externalConnectionStatus,
+ externalIsCheckingConnection,
+ onCheckConnection: externalOnCheckConnection,
+ onConnectionStatusChange,
+ });
+
+ return (
+ <>
+ {canAccessProtectedData ? (
+
+
+ {/* Icon */}
+
+
+
+
+ {/* Title */}
+
+ {t("mcpTools.comingSoon.title")}
+
+
+ {/* Description */}
+
+ {t("mcpTools.comingSoon.description")}
+
+
+ {/* Feature list */}
+
+
+ ✓
+
+ {t("mcpTools.comingSoon.feature1")}
+
+
+
+ ✓
+
+ {t("mcpTools.comingSoon.feature2")}
+
+
+
+ ✓
+
+ {t("mcpTools.comingSoon.feature3")}
+
+
+
+
+ {/* Coming soon badge */}
+
+ {t("mcpTools.comingSoon.badge")}
+
+
+
+ ) : null}
+ >
+ );
+}
+
+
diff --git a/frontend/app/[locale]/memory/MemoryContent.tsx b/frontend/app/[locale]/memory/MemoryContent.tsx
index e294074c6..dd35eab82 100644
--- a/frontend/app/[locale]/memory/MemoryContent.tsx
+++ b/frontend/app/[locale]/memory/MemoryContent.tsx
@@ -349,29 +349,31 @@ export default function MemoryContent({ onNavigate }: MemoryContentProps) {
style={{ width: "100%", height: "100%" }}
>
{canAccessProtectedData ? (
-
+
-
memory.setActiveTabKey(key)}
- tabBarStyle={{
- marginBottom: "16px",
+
+ >
+ memory.setActiveTabKey(key)}
+ tabBarStyle={{
+ marginBottom: "16px",
+ }}
+ />
+
) : null}
diff --git a/frontend/app/[locale]/models/ModelsContent.tsx b/frontend/app/[locale]/models/ModelsContent.tsx
index e53fead10..6b48e8dac 100644
--- a/frontend/app/[locale]/models/ModelsContent.tsx
+++ b/frontend/app/[locale]/models/ModelsContent.tsx
@@ -130,16 +130,18 @@ export default function ModelsContent({
transition={pageTransition}
style={{width: "100%", height: "100%"}}
>
- {canAccessProtectedData ? (
-
- setLiveSelectedModels(selected)
- }
- onEmbeddingConnectivityChange={() => {}}
- forwardedRef={modelConfigSectionRef}
- canAccessProtectedData={canAccessProtectedData}
- />
- ) : null}
+
+ {canAccessProtectedData ? (
+
+ setLiveSelectedModels(selected)
+ }
+ onEmbeddingConnectivityChange={() => {}}
+ forwardedRef={modelConfigSectionRef}
+ canAccessProtectedData={canAccessProtectedData}
+ />
+ ) : null}
+
void;
onSuccess: (model?: AddedModel) => Promise;
+ defaultProvider?: string; // Default provider to select when dialog opens
}
// Connectivity status type comes from utils
@@ -129,6 +130,7 @@ export const ModelAddDialog = ({
isOpen,
onClose,
onSuccess,
+ defaultProvider,
}: ModelAddDialogProps) => {
const { t } = useTranslation();
const { message } = App.useApp();
@@ -166,7 +168,7 @@ export const ModelAddDialog = ({
isMultimodal: false,
// Whether to import multiple models at once
isBatchImport: false,
- provider: "silicon",
+ provider: "modelengine",
vectorDimension: "1024",
// Default chunk size range for embedding models
chunkSizeRange: [
@@ -219,6 +221,17 @@ export const ModelAddDialog = ({
setLoadingModelList,
});
+ // Handle default provider when dialog opens
+ useEffect(() => {
+ if (isOpen && defaultProvider) {
+ setForm((prev) => ({
+ ...prev,
+ provider: defaultProvider,
+ isBatchImport: true,
+ }));
+ }
+ }, [isOpen, defaultProvider]);
+
const parseModelName = (name: string): string => {
if (!name) return "";
const parts = name.split("/");
@@ -638,6 +651,7 @@ export const ModelAddDialog = ({
value={form.provider}
onChange={(value) => handleFormChange("provider", value)}
>
+
@@ -1058,6 +1072,13 @@ export const ModelAddDialog = ({
{t("model.dialog.label.currentlySupported")}
+
+
+
{form.isBatchImport && (
void;
onSuccess: () => Promise;
- customModels: ModelOption[];
+ models: ModelOption[];
}
export const ModelDeleteDialog = ({
isOpen,
onClose,
onSuccess,
- customModels,
+ models,
}: ModelDeleteDialogProps) => {
const { t } = useTranslation();
const { message } = App.useApp();
@@ -167,6 +167,8 @@ export const ModelDeleteDialog = ({
return t("model.source.openai");
case MODEL_SOURCES.SILICON:
return t("model.source.silicon");
+ case MODEL_SOURCES.MODELENGINE:
+ return t("model.source.modelEngine");
case MODEL_SOURCES.OPENAI_API_COMPATIBLE:
return t("model.source.custom");
default:
@@ -185,6 +187,12 @@ export const ModelDeleteDialog = ({
text: "text-purple-600",
border: "border-purple-100",
};
+ case MODEL_SOURCES.MODELENGINE:
+ return {
+ bg: "bg-blue-50",
+ text: "text-blue-600",
+ border: "border-blue-100",
+ };
case MODEL_SOURCES.OPENAI:
return {
bg: "bg-indigo-50",
@@ -217,6 +225,14 @@ export const ModelDeleteDialog = ({
className="w-5 h-5"
/>
);
+ case MODEL_SOURCES.MODELENGINE:
+ return (
+
+ );
case MODEL_SOURCES.OPENAI:
return (
@@ -242,12 +258,12 @@ export const ModelDeleteDialog = ({
const getApiKeyByType = (type: ModelType | null): string => {
if (!type) return "";
// Prioritize silicon models of the current type
- const byType = customModels.find(
+ const byType = models.find(
(m) => m.source === MODEL_SOURCES.SILICON && m.type === type && m.apiKey
);
if (byType?.apiKey) return byType.apiKey;
// Fall back to any available silicon model
- const anySilicon = customModels.find(
+ const anySilicon = models.find(
(m) => m.source === MODEL_SOURCES.SILICON && m.apiKey
);
return anySilicon?.apiKey || "";
@@ -266,9 +282,9 @@ export const ModelDeleteDialog = ({
apiKey: apiKey && apiKey.trim() !== "" ? apiKey : "sk-no-api-key",
});
setProviderModels(result || []);
- // Initialize pending selected switch states (based on current customModels status)
+ // Initialize pending selected switch states (based on current models status)
const currentIds = new Set(
- customModels
+ models
.filter(
(m) => m.type === modelType && m.source === MODEL_SOURCES.SILICON
)
@@ -379,7 +395,7 @@ export const ModelDeleteDialog = ({
// Adjust hierarchical navigation based on remaining count after deletion
if (deletingModelType) {
- const remainingByTypeAndSource = customModels.filter(
+ const remainingByTypeAndSource = models.filter(
(model) =>
model.type === deletingModelType &&
(!selectedSource || model.source === selectedSource) &&
@@ -389,7 +405,7 @@ export const ModelDeleteDialog = ({
// No models under current source, return to source selection
setSelectedSource(null);
}
- const remainingByType = customModels.filter(
+ const remainingByType = models.filter(
(model) =>
model.type === deletingModelType &&
model.displayName !== displayName
@@ -452,7 +468,7 @@ export const ModelDeleteDialog = ({
if (selectedSource === MODEL_SOURCES.SILICON && deletingModelType) {
try {
const currentIds = new Set(
- customModels
+ models
.filter(
(m) =>
m.type === deletingModelType &&
@@ -462,7 +478,7 @@ export const ModelDeleteDialog = ({
);
// Build payload items for the current silicon models in required format
- const currentModelPayloads = customModels
+ const currentModelPayloads = models
.filter(
(m) =>
m.type === deletingModelType &&
@@ -630,12 +646,12 @@ export const ModelDeleteDialog = ({
MODEL_TYPES.TTS,
] as ModelType[]
).map((type) => {
- const customModelsByType = customModels.filter(
+ const modelsByType = models.filter(
(model) => model.type === type
);
const colorScheme = getModelColorScheme(type);
- if (customModelsByType.length === 0) return null;
+ if (modelsByType.length === 0) return null;
return (
{t("model.dialog.delete.customModelCount", {
- count: customModelsByType.length,
+ count: modelsByType.length,
})}
{(type === MODEL_TYPES.STT ||
type === MODEL_TYPES.TTS) &&
@@ -685,7 +701,7 @@ export const ModelDeleteDialog = ({
})}
- {customModels.length === 0 && (
+ {models.length === 0 && (
{t("model.dialog.delete.noModels")}
@@ -717,12 +733,13 @@ export const ModelDeleteDialog = ({
{(
[
+ MODEL_SOURCES.MODELENGINE,
MODEL_SOURCES.OPENAI,
MODEL_SOURCES.SILICON,
MODEL_SOURCES.OPENAI_API_COMPATIBLE,
] as ModelSource[]
).map((source) => {
- const modelsOfSource = customModels.filter(
+ const modelsOfSource = models.filter(
(model) =>
model.type === deletingModelType && model.source === source
);
@@ -918,7 +935,7 @@ export const ModelDeleteDialog = ({
) : (
- {customModels
+ {models
.filter(
(model) =>
model.type === deletingModelType &&
@@ -994,7 +1011,7 @@ export const ModelDeleteDialog = ({
))}
- {customModels.filter(
+ {models.filter(
(model) =>
model.type === deletingModelType &&
model.source === selectedSource
@@ -1045,7 +1062,7 @@ export const ModelDeleteDialog = ({
onClose={() => setIsProviderConfigOpen(false)}
initialApiKey={getApiKeyByType(deletingModelType)}
initialMaxTokens={(
- customModels.find(
+ models.find(
(m) => m.type === deletingModelType && m.source === "silicon"
)?.maxTokens || 4096
).toString()}
diff --git a/frontend/app/[locale]/models/components/model/ModelEditDialog.tsx b/frontend/app/[locale]/models/components/model/ModelEditDialog.tsx
index c7005206a..feaeacad8 100644
--- a/frontend/app/[locale]/models/components/model/ModelEditDialog.tsx
+++ b/frontend/app/[locale]/models/components/model/ModelEditDialog.tsx
@@ -153,9 +153,16 @@ export const ModelEditDialog = ({
let maxTokensValue = parseInt(form.maxTokens);
if (isEmbeddingModel) maxTokensValue = 0;
+ // Use original displayName for lookup, pass new displayName in body if changed
+ const originalDisplayName = model.displayName || model.name;
+ const newDisplayName = form.displayName;
+
await modelService.updateSingleModel({
- model_id: model.id.toString(),
- displayName: form.displayName,
+ currentDisplayName: originalDisplayName,
+ // Only send displayName if it changed
+ ...(newDisplayName !== originalDisplayName
+ ? { displayName: newDisplayName }
+ : {}),
url: form.url,
apiKey: form.apiKey.trim() === "" ? "sk-no-api-key" : form.apiKey,
...(maxTokensValue !== 0 ? { maxTokens: maxTokensValue } : {}),
@@ -210,6 +217,7 @@ export const ModelEditDialog = ({
message.error(t("model.dialog.error.serverError"));
} else {
message.error(t("model.dialog.error.editFailed"));
+ console.error(error);
}
} finally {
setLoading(false);
diff --git a/frontend/app/[locale]/models/components/model/ModelListCard.tsx b/frontend/app/[locale]/models/components/model/ModelListCard.tsx
index 83d6ccf53..daf7b8610 100644
--- a/frontend/app/[locale]/models/components/model/ModelListCard.tsx
+++ b/frontend/app/[locale]/models/components/model/ModelListCard.tsx
@@ -13,7 +13,6 @@ import {
import {
ModelConnectStatus,
ModelOption,
- ModelSource,
ModelType,
} from "@/types/modelConfig";
import log from "@/lib/logger";
@@ -119,10 +118,9 @@ interface ModelListCardProps {
modelTypeName: string;
selectedModel: string;
onModelChange: (value: string) => void;
- officialModels: ModelOption[];
- customModels: ModelOption[];
- onVerifyModel?: (modelName: string, modelType: ModelType) => void; // New callback for verifying models
- errorFields?: { [key: string]: boolean }; // New error field state
+ models: ModelOption[];
+ onVerifyModel?: (modelName: string, modelType: ModelType) => void;
+ errorFields?: { [key: string]: boolean };
}
export const ModelListCard = ({
@@ -131,18 +129,14 @@ export const ModelListCard = ({
modelTypeName,
selectedModel,
onModelChange,
- officialModels,
- customModels,
+ models,
onVerifyModel,
errorFields,
}: ModelListCardProps) => {
const { t } = useTranslation();
// Add model list state for updates
- const [modelsData, setModelsData] = useState({
- official: [...officialModels],
- custom: [...customModels],
- });
+ const [modelsData, setModelsData] = useState([...models]);
// Create a style element in the component containing animation definitions
useEffect(() => {
@@ -158,110 +152,44 @@ export const ModelListCard = ({
};
}, []);
- // When getting model list, need to consider specific option type
- const getModelsBySource = (): {
- official: ModelOption[];
- custom: ModelOption[];
- } => {
- // Each type only shows models of corresponding type
- return {
- official: modelsData.official.filter((model) => model.type === type),
- custom: modelsData.custom.filter((model) => model.type === type),
- };
+ // Get filtered models by type
+ const getFilteredModels = (): ModelOption[] => {
+ return modelsData.filter((model) => model.type === type);
};
- // Get model source
+ // Get model source label based on source field
const getModelSource = (displayName: string): string => {
- if (
- type === MODEL_TYPES.TTS ||
- type === MODEL_TYPES.STT ||
- type === MODEL_TYPES.VLM
- ) {
- const modelOfType = modelsData.custom.find(
- (m) => m.type === type && m.displayName === displayName
- );
- if (modelOfType) return t("model.source.custom");
- }
-
- const officialModel = modelsData.official.find(
- (m) => m.type === type && m.name === displayName
- );
- if (officialModel) return t("model.source.modelEngine");
-
- const customModel = modelsData.custom.find(
+ const model = modelsData.find(
(m) => m.type === type && m.displayName === displayName
);
- return customModel ? t("model.source.custom") : t("model.source.unknown");
+
+ if (!model) return t("model.source.unknown");
+
+ // Return source label based on model.source
+ if (model.source === "modelengine") {
+ return t("model.source.modelEngine");
+ } else if (model.source === "silicon") {
+ return t("model.source.silicon");
+ } else if (model.source === "OpenAI-API-Compatible") {
+ return t("model.source.custom");
+ }
+
+ return t("model.source.unknown");
};
- const modelsBySource = getModelsBySource();
-
- // Local update model status
- const updateLocalModelStatus = (
- displayName: string,
- status: ModelConnectStatus
- ) => {
- setModelsData((prevData) => {
- // Find model to update
- const modelToUpdate = prevData.custom.find(
- (m) => m.displayName === displayName && m.type === type
- );
-
- if (!modelToUpdate) {
- log.warn(t("model.warning.updateNotFound", { displayName, type }));
- return prevData;
- }
-
- const updatedCustomModels = prevData.custom.map((model) => {
- if (model.displayName === displayName && model.type === type) {
- return {
- ...model,
- connect_status: status,
- };
- }
- return model;
- });
-
- return {
- official: prevData.official,
- custom: updatedCustomModels,
- };
- });
+ const filteredModels = getFilteredModels();
+
+ // Group models by source for display
+ const groupedModels = {
+ modelengine: filteredModels.filter((m) => m.source === "modelengine"),
+ silicon: filteredModels.filter((m) => m.source === "silicon"),
+ custom: filteredModels.filter((m) => m.source === "OpenAI-API-Compatible"),
};
// When parent component's model list updates, update local state
useEffect(() => {
- // Update local state but don't trigger fetchModelsStatus
- setModelsData((prevData) => {
- const updatedOfficialModels = officialModels.map((model) => {
- // Preserve existing connect_status if it exists
- const existingModel = prevData.official.find(
- (m) => m.name === model.name && m.type === model.type
- );
- return {
- ...model,
- connect_status:
- existingModel?.connect_status ||
- (MODEL_STATUS.AVAILABLE as ModelConnectStatus),
- };
- });
-
- const updatedCustomModels = customModels.map((model) => {
- // Prioritize using newly passed status to reflect latest backend state
- return {
- ...model,
- connect_status:
- model.connect_status ||
- (MODEL_STATUS.UNCHECKED as ModelConnectStatus),
- };
- });
-
- return {
- official: updatedOfficialModels,
- custom: updatedCustomModels,
- };
- });
- }, [officialModels, customModels, type, modelId]);
+ setModelsData(models);
+ }, [models]);
// Handle status indicator click event
const handleStatusClick = (e: React.MouseEvent, displayName: string) => {
@@ -270,9 +198,7 @@ export const ModelListCard = ({
e.nativeEvent.stopImmediatePropagation(); // Prevent all sibling event handlers
if (onVerifyModel && displayName) {
- // First update local state to "checking"
- updateLocalModelStatus(displayName, MODEL_STATUS.CHECKING);
- // Then call verification function
+ // Call verification function (parent component will update status)
onVerifyModel(displayName, type);
}
@@ -317,35 +243,105 @@ export const ModelListCard = ({
errorFields && errorFields[`${type}.${modelId}`] ? "error-select" : ""
}
>
- {modelsBySource.official.length > 0 && (
+ {groupedModels.modelengine.length > 0 && (
- {modelsBySource.official.map((model) => (
+ {groupedModels.modelengine.map((model) => (