Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 8 additions & 8 deletions config-auth.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,9 @@ GOPIE_POSTGRES_SSLMODE=disable
GOPIE_AIAGENT_URL=http://chat-server:8000
GOPIE_ENCRYPTION_KEY=E5B8A0F3C1D9E7B2A5F0C3D8E6B1A4F2
GOPIE_ENABLED_SERVERS=api
GOPIE_S3_ACCESS_KEY=minioadmin
GOPIE_S3_SECRET_KEY=minioadmin
GOPIE_S3_ENDPOINT=http://minio:9000
GOPIE_S3_ACCESS_KEY=rustfsadmin
GOPIE_S3_SECRET_KEY=rustfsadmin
GOPIE_S3_ENDPOINT=http://rustfs:9000
GOPIE_S3_SSL=false
GOPIE_S3_REGION=us-east-1
GOPIE_DOWNLOADS_S3_BUCKET=downloads
Expand Down Expand Up @@ -89,8 +89,8 @@ NEXT_PUBLIC_DISABLE_SECURE_COOKIES="true"
COMPANION_AWS_ENDPOINT=http://localhost:9000
COMPANION_AWS_REGION=us-east-1
COMPANION_AWS_BUCKET=gopie
COMPANION_AWS_KEY=minioadmin
COMPANION_AWS_SECRET=minioadmin
COMPANION_AWS_KEY=rustfsadmin
COMPANION_AWS_SECRET=rustfsadmin
COMPANION_DOMAIN=localhost:3020
COMPANION_PROTOCOL=http
COMPANION_DATADIR=/
Expand Down Expand Up @@ -209,10 +209,10 @@ CHAT_DEFAULT_EMBEDDING_MODEL="text-embedding-3-large"
# ==================================
# S3 Storage (Chat Server)
# ==================================
CHAT_INTERNAL_S3_HOST="http://minio:9000"
CHAT_INTERNAL_S3_HOST="http://rustfs:9000"
CHAT_EXTERNAL_S3_HOST="http://localhost:9000"
CHAT_S3_ACCESS_KEY="minioadmin"
CHAT_S3_SECRET_KEY="minioadmin"
CHAT_S3_ACCESS_KEY="rustfsadmin"
CHAT_S3_SECRET_KEY="rustfsadmin"
CHAT_S3_BUCKET="gopie"
CHAT_S3_REGION="us-east-1"

Expand Down
16 changes: 8 additions & 8 deletions config-noauth.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ GOPIE_POSTGRES_SSLMODE=disable
GOPIE_AIAGENT_URL=http://chat-server:8000
GOPIE_ENCRYPTION_KEY=E5B8A0F3C1D9E7B2A5F0C3D8E6B1A4F2
GOPIE_ENABLED_SERVERS=api
GOPIE_S3_ACCESS_KEY=minioadmin
GOPIE_S3_SECRET_KEY=minioadmin
GOPIE_S3_ENDPOINT=http://minio:9000
GOPIE_S3_ACCESS_KEY=rustfsadmin
GOPIE_S3_SECRET_KEY=rustfsadmin
GOPIE_S3_ENDPOINT=http://rustfs:9000
GOPIE_S3_SSL=false
GOPIE_S3_REGION=us-east-1
GOPIE_DOWNLOADS_S3_BUCKET=downloads
Expand All @@ -41,8 +41,8 @@ GOPIE_CORS_HANDLED_BY_INGRESS=false
COMPANION_AWS_ENDPOINT=http://localhost:9000
COMPANION_AWS_REGION=us-east-1
COMPANION_AWS_BUCKET=gopie
COMPANION_AWS_KEY=minioadmin
COMPANION_AWS_SECRET=minioadmin
COMPANION_AWS_KEY=rustfsadmin
COMPANION_AWS_SECRET=rustfsadmin
COMPANION_DOMAIN=localhost:3020
COMPANION_PROTOCOL=http
COMPANION_DATADIR=/
Expand Down Expand Up @@ -169,10 +169,10 @@ CHAT_DEFAULT_EMBEDDING_MODEL="text-embedding-3-large"
# ==================================
# S3 Storage (Chat Server)
# ==================================
CHAT_INTERNAL_S3_HOST="http://minio:9000"
CHAT_INTERNAL_S3_HOST="http://rustfs:9000"
CHAT_EXTERNAL_S3_HOST="http://localhost:9000"
CHAT_S3_ACCESS_KEY="minioadmin"
CHAT_S3_SECRET_KEY="minioadmin"
CHAT_S3_ACCESS_KEY="rustfsadmin"
CHAT_S3_SECRET_KEY="rustfsadmin"
CHAT_S3_BUCKET="gopie"
CHAT_S3_REGION="us-east-1"

Expand Down
51 changes: 25 additions & 26 deletions docker-compose-auth.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -100,42 +100,41 @@ services:
- gopie

###########################################################################
####### MINIO #######
####### RUSTFS #######
###########################################################################
minio:
image: minio/minio
rustfs:
image: rustfs/rustfs:latest
restart: always
ports:
- "9000:9000"
- "9002:9002"
volumes:
- ./volumes/minio/data:/export
- ./volumes/minio/config:/root/.minio
- "9000:9000" # S3 API
- "9001:9001" # RustFS Console
environment:
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- RUSTFS_ACCESS_KEY=rustfsadmin
- RUSTFS_SECRET_KEY=rustfsadmin
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_ADDRESS=:9000
volumes:
- ./volumes/rustfs/data:/data
networks:
- gopie
command: server -console-address :9002 /export
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

Comment on lines +105 to 120
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fix RUSTFS_ADDRESS format to include IP binding.

The RUSTFS_ADDRESS environment variable should be 0.0.0.0:9000 instead of :9000 to properly bind to all interfaces. Additionally, add RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001 to explicitly configure the console binding. The official RustFS configuration uses RUSTFS_ADDRESS=0.0.0.0:9000 and RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001.

Also consider adding a healthcheck: "curl -f http://localhost:9000/health && curl -f http://localhost:9001/rustfs/console/health" to ensure proper service readiness before bucket creation attempts. The default RUSTFS_ACCESS_KEY and RUSTFS_SECRET_KEY values are intended for development only — rotate these credentials for production.

🤖 Prompt for AI Agents
In docker-compose-auth.yaml around lines 105 to 120, update the rustfs service
environment and readiness: change RUSTFS_ADDRESS from ":9000" to "0.0.0.0:9000",
add RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001" to bind the console, and add a
healthcheck that probes both endpoints (e.g. curl -f
http://localhost:9000/health && curl -f
http://localhost:9001/rustfs/console/health) so other services wait for
readiness; also note that RUSTFS_ACCESS_KEY/RUSTFS_SECRET_KEY are development
defaults and should be rotated for production.

createbuckets:
image: minio/mc
create-buckets:
image: amazon/aws-cli:latest
depends_on:
minio:
condition: service_healthy
- rustfs
environment:
- AWS_ACCESS_KEY_ID=rustfsadmin
- AWS_SECRET_ACCESS_KEY=rustfsadmin
- AWS_DEFAULT_REGION=us-east-1
networks:
- gopie
entrypoint: >
/bin/sh -c "
echo 'Minio is up. Creating required buckets...' &&
/usr/bin/mc alias set myminio http://minio:9000 minioadmin minioadmin &&
/usr/bin/mc mb -p myminio/gopie &&
/usr/bin/mc mb -p myminio/downloads &&
/usr/bin/mc anonymous set public myminio/gopie/visualizations
sleep 5;
aws s3 mb s3://gopie --endpoint-url http://rustfs:9000 || true;
aws s3 mb s3://downloads --endpoint-url http://rustfs:9000 || true;
echo 'Buckets created successfully';
exit 0;
"

#########################################################################
Expand All @@ -150,7 +149,7 @@ services:
networks:
- gopie
depends_on:
- minio
- rustfs

#########################################################################
##### CHAT SERVER SERVICES #####
Expand Down
49 changes: 24 additions & 25 deletions docker-compose-noauth.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ services:
networks:
- gopie
depends_on:
- minio
- rustfs

gopie-migrate:
build:
Expand Down Expand Up @@ -75,40 +75,39 @@ services:
chmod -R 755 /home/gopie/dataful &&
su -s /bin/sh gopie -c 'cd /home/gopie && gopie serve'"

minio:
image: minio/minio
rustfs:
image: rustfs/rustfs:latest
restart: always
ports:
- "9000:9000"
- "9002:9002"
volumes:
- ./volumes/minio/data:/export
- ./volumes/minio/config:/root/.minio
- "9000:9000" # S3 API
- "9001:9001" # RustFS Console
environment:
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- RUSTFS_ACCESS_KEY=rustfsadmin
- RUSTFS_SECRET_KEY=rustfsadmin
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_ADDRESS=:9000
volumes:
- ./volumes/rustfs/data:/data
networks:
- gopie
command: server -console-address :9002 /export
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

createbuckets:
image: minio/mc
create-buckets:
image: amazon/aws-cli:latest
depends_on:
minio:
condition: service_healthy
- rustfs
environment:
- AWS_ACCESS_KEY_ID=rustfsadmin
- AWS_SECRET_ACCESS_KEY=rustfsadmin
- AWS_DEFAULT_REGION=us-east-1
networks:
- gopie
entrypoint: >
/bin/sh -c "
echo 'Minio is up. Creating required buckets...' &&
/usr/bin/mc alias set myminio http://minio:9000 minioadmin minioadmin &&
/usr/bin/mc mb -p myminio/gopie &&
/usr/bin/mc mb -p myminio/downloads &&
/usr/bin/mc anonymous set public myminio/gopie/visualizations
sleep 5;
aws s3 mb s3://gopie --endpoint-url http://rustfs:9000 || true;
aws s3 mb s3://downloads --endpoint-url http://rustfs:9000 || true;
echo 'Buckets created successfully';
exit 0;
"

include:
Expand Down
65 changes: 32 additions & 33 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,9 @@ services:
environment:
- GOPIE_SERVE_HOST=${GOPIE_SERVE_HOST}
- GOPIE_SERVE_PORT=${GOPIE_SERVE_PORT}
- GOPIE_S3_ACCESS_KEY=${MINIO_ACCESS_KEY}
- GOPIE_S3_SECRET_KEY=${MINIO_SECRET_KEY}
- GOPIE_S3_REGION=${MINIO_REGION}
- GOPIE_S3_ACCESS_KEY=${RUSTFS_ACCESS_KEY}
- GOPIE_S3_SECRET_KEY=${RUSTFS_SECRET_KEY}
- GOPIE_S3_REGION=${RUSTFS_REGION}
- GOPIE_S3_ENDPOINT=${GOPIE_S3_ENDPOINT}
- GOPIE_LOGGER_LEVEL=${GOPIE_LOGGER_LEVEL}
- GOPIE_LOGGER_FILE=${GOPIE_LOGGER_FILE}
Expand Down Expand Up @@ -158,41 +158,40 @@ services:
- gopie

#########################################################################
##### MINIO #######
##### RUSTFS #######
#########################################################################
minio:
image: minio/minio
rustfs:
image: rustfs/rustfs:latest
restart: always
ports:
- "9000:9000"
- "9002:9002"
volumes:
- ./volumes/minio/data:/export
- ./volumes/minio/config:/root/.minio
- "9000:9000" # S3 API
- "9001:9001" # RustFS Console
environment:
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- RUSTFS_ACCESS_KEY=rustfsadmin
- RUSTFS_SECRET_KEY=rustfsadmin
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_ADDRESS=:9000
volumes:
- ./volumes/rustfs/data:/data
networks:
- gopie
command: server -console-address :9002 /export
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

createbuckets:
image: minio/mc
create-buckets:
image: amazon/aws-cli:latest
depends_on:
minio:
condition: service_healthy
- rustfs
environment:
- AWS_ACCESS_KEY_ID=rustfsadmin
- AWS_SECRET_ACCESS_KEY=rustfsadmin
- AWS_DEFAULT_REGION=us-east-1
networks:
- gopie
entrypoint: >
/bin/sh -c "
echo 'Minio is up. Creating the bucket!!' &&
/usr/bin/mc alias set myminio http://minio:9000 minioadmin minioadmin &&
/usr/bin/mc mb -p myminio/gopie &&
/usr/bin/mc anonymous set public myminio/gopie
sleep 5;
aws s3 mb s3://gopie --endpoint-url http://rustfs:9000 || true;
echo 'Buckets created successfully';
exit 0;
"
Comment on lines +179 to 195
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Inconsistent bucket creation and missing healthcheck.

Two issues:

  1. This compose file only creates the gopie bucket (line 192), while docker-compose-auth.yaml and docker-compose-noauth.yaml create both gopie and downloads buckets. Based on config-auth.env.example line 58 (GOPIE_DOWNLOADS_S3_BUCKET=downloads), the downloads bucket appears to be required.

  2. Similar to other files, consider adding a healthcheck instead of relying on sleep 5.

🔎 Add the missing downloads bucket:
  entrypoint: >
    /bin/sh -c "
    sleep 5;
    aws s3 mb s3://gopie --endpoint-url http://rustfs:9000 || true;
+   aws s3 mb s3://downloads --endpoint-url http://rustfs:9000 || true;
    echo 'Buckets created successfully';
    exit 0;
    "
🤖 Prompt for AI Agents
In docker-compose.yaml around lines 179 to 195, the create-buckets service only
creates the gopie bucket and uses a hardcoded sleep rather than a healthcheck;
update it to also create the downloads bucket (aws s3 mb s3://downloads
--endpoint-url http://rustfs:9000) so it matches docker-compose-auth.yaml/noauth
and config-auth.env.example, and replace the sleep hack by adding a proper
healthcheck for the rustfs service (or for this container) that waits for the S3
endpoint on rustfs:9000 to be ready before running the bucket creation commands,
then run both aws s3 mb commands (each tolerant of existing buckets) and exit
successfully.


gopie-reindex:
Expand All @@ -211,11 +210,11 @@ services:
ports:
- "3020:3020"
environment:
- COMPANION_AWS_ENDPOINT=${MINIO_ENDPOINT}
- COMPANION_AWS_REGION=${MINIO_REGION}
- COMPANION_AWS_BUCKET=${MINIO_BUCKET}
- COMPANION_AWS_KEY=${MINIO_ACCESS_KEY}
- COMPANION_AWS_SECRET=${MINIO_SECRET_KEY}
- COMPANION_AWS_ENDPOINT=${RUSTFS_ENDPOINT}
- COMPANION_AWS_REGION=${RUSTFS_REGION}
- COMPANION_AWS_BUCKET=${RUSTFS_BUCKET}
- COMPANION_AWS_KEY=${RUSTFS_ACCESS_KEY}
- COMPANION_AWS_SECRET=${RUSTFS_SECRET_KEY}
- COMPANION_DOMAIN=${COMPANION_DOMAIN}
- COMPANION_PROTOCOL=${COMPANION_PROTOCOL}
- COMPANION_DATADIR=${COMPANION_DATADIR}
Expand All @@ -224,7 +223,7 @@ services:
networks:
- gopie
depends_on:
- minio
- rustfs

#########################################################################
##### CHAT SERVER SERVICES #####
Expand Down
12 changes: 8 additions & 4 deletions web/src/components/dataset/dataset-upload-wizard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -378,16 +378,20 @@ export function DatasetUploadWizard({ projectId }: DatasetUploadWizardProps) {
s3Url = `s3://${bucket}/${key}`;
}
} else {
// Non-S3 URL, might be MinIO, presigned URL, or proxy
// For localhost:9000 (MinIO) or similar, the format is usually: http://localhost:9000/bucket/key
// Try to extract path assuming format: /bucket/key
// Non-S3 URL, might be RustFS, presigned URL, or proxy
// For localhost:9000 (RustFS) or similar, the format is usually: http://localhost:9000/bucket/key
// Try to extract path assuming format: /bucket/key
if (pathParts.length >= 2) {
const bucket = pathParts[0];
const key = pathParts.slice(1).join("/");
s3Url = `s3://${bucket}/${key}`;
} else if (pathParts.length === 1) {
// Only one path part, assume it's just the bucket
s3Url = `s3://${pathParts[0]}`;
const uploadResponse = useUploadStore.getState()
.uploadResponse as { body?: { key?: string } } | undefined;

const key = uploadResponse?.body?.key;
s3Url = `${key ? `s3://${pathParts[0]}/${key}` : `s3://${pathParts[0]}`}`;
Comment on lines +381 to +394
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Validate that the s3:// URL includes a key and consider refactoring duplicated logic.

This code has the same issue as in file-refresh-wizard.tsx: when pathParts.length === 1 and uploadResponse?.body?.key is undefined or empty, it constructs an incomplete s3 URL (s3://bucket). This will likely cause the dataset creation API call at line 425 to fail.

Additionally, this URL parsing logic is duplicated across multiple upload components, which increases maintenance burden.

🔎 Proposed fix with validation
          } else {
            // Non-S3 URL, might be RUSTFS, presigned URL, or proxy
            // For localhost:9000 (RUSTFS) or similar, the format is usually: http://localhost:9000/bucket
            // Try to extract path assuming format: /bucket
            if (pathParts.length >= 2) {
              const bucket = pathParts[0];
              const key = pathParts.slice(1).join("/");
              s3Url = `s3://${bucket}/${key}`;
            } else if (pathParts.length === 1) {
-             // Only one path part, assume it's just the bucket
              const uploadResponse = useUploadStore.getState()
                .uploadResponse as { body?: { key?: string } } | undefined;

              const key = uploadResponse?.body?.key;
-             s3Url = `${key ? `s3://${pathParts[0]}/${key}` : `s3://${pathParts[0]}`}`;
+             if (!key) {
+               throw new Error("Upload response missing object key for single-path URL");
+             }
+             s3Url = `s3://${pathParts[0]}/${key}`;
            } else {

Refactoring suggestion: Consider extracting the S3 URL parsing logic into a shared utility function (e.g., lib/utils/s3-url-parser.ts) to avoid duplication:

// lib/utils/s3-url-parser.ts
export function parseUploadUrlToS3(
  uploadURL: string, 
  uploadResponse?: { body?: { key?: string } }
): string {
  // Consolidated parsing logic here
}

Then use it in both files:

s3Url = parseUploadUrlToS3(uploadURL, useUploadStore.getState().uploadResponse);
🤖 Prompt for AI Agents
In web/src/components/dataset/dataset-upload-wizard.tsx around lines 381 to 394,
the code constructs s3://bucket when pathParts.length === 1 and
uploadResponse?.body?.key is missing, producing an incomplete S3 URL that will
fail later; update the logic to validate that a key exists before forming an
s3://bucket/key URL (if no key, fail early or return an explicit error/undefined
instead of s3://bucket), and refactor this parsing into a shared utility (e.g.,
lib/utils/s3-url-parser.ts with signature parseUploadUrlToS3(uploadURL,
uploadResponse)) so both this file and file-refresh-wizard.tsx call the same
function and avoid duplicated logic.

} else {
// No path parts, this shouldn't happen
throw new Error("No path found in upload URL");
Expand Down
8 changes: 6 additions & 2 deletions web/src/components/dataset/file-refresh-wizard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -171,13 +171,17 @@ export function FileRefreshWizard({
s3Url = `s3://${bucket}/${key}`;
}
} else {
// --- Handle MinIO / localhost / generic object storage ---
// --- Handle RustFS / localhost / generic object storage ---
if (pathParts.length >= 2) {
const bucket = pathParts[0];
const key = pathParts.slice(1).join("/");
s3Url = `s3://${bucket}/${key}`;
} else if (pathParts.length === 1) {
s3Url = `s3://${pathParts[0]}`;
const uploadResponse = useUploadStore.getState().uploadResponse as
| { body?: { key?: string } }
| undefined;
const key = uploadResponse?.body?.key;
s3Url = `${key ? `s3://${pathParts[0]}/${key}` : `s3://${pathParts[0]}`}`;
} else {
throw new Error("No path found in upload URL");
}
Expand Down