81 changes: 0 additions & 81 deletions infra/prover-cluster/components/DataServicesComponent.ts
@@ -8,20 +8,15 @@ export interface DataServicesComponentConfig extends BaseComponentConfig {
taskDBPassword: string;
securityGroupId: pulumi.Output<string>;
rdsInstanceClass?: string;
redisNodeType?: string;
}

export class DataServicesComponent extends BaseComponent {
public readonly rdsInstance: aws.rds.Instance;
public readonly rdsEndpoint: pulumi.Output<string>;
public readonly redisCluster: aws.elasticache.ReplicationGroup;
public readonly redisEndpoint: pulumi.Output<string>;
public readonly s3Bucket: aws.s3.Bucket;
public readonly s3BucketName: pulumi.Output<string>;
public readonly dbSubnetGroup: aws.rds.SubnetGroup;
public readonly rdsSecurityGroup: aws.ec2.SecurityGroup;
public readonly redisSecurityGroup: aws.ec2.SecurityGroup;
public readonly redisSubnetGroup: aws.elasticache.SubnetGroup;

constructor(config: DataServicesComponentConfig) {
super(config, "boundless-bento");
@@ -63,51 +58,6 @@ export class DataServicesComponent extends BaseComponent {
},
});

// Create Redis subnet group
// ElastiCache subnet group names must be unique across account and max 40 chars
// Use format: bb-redis-{stackName} (truncated to fit 40 chars)
const prefix = "bb-redis-";
const maxStackNameLength = 40 - prefix.length;
const truncatedStackName = config.stackName.length > maxStackNameLength
? config.stackName.substring(0, maxStackNameLength)
: config.stackName;
const redisSubnetGroupName = `${prefix}${truncatedStackName}`;
this.redisSubnetGroup = new aws.elasticache.SubnetGroup(`${config.stackName}-redis-subnet-group`, {
name: redisSubnetGroupName,
subnetIds: config.privateSubnetIds,
tags: {
Environment: config.environment,
Stack: config.stackName,
Component: "data-services",
},
});

// Create Redis security group
this.redisSecurityGroup = new aws.ec2.SecurityGroup(`${config.stackName}-redis`, {
name: config.stackName,
vpcId: config.vpcId,
description: "Security group for ElastiCache Redis",
ingress: [{
protocol: "tcp",
fromPort: 6379,
toPort: 6379,
securityGroups: [config.securityGroupId],
description: "Redis access from cluster instances",
}],
egress: [{
protocol: "-1",
fromPort: 0,
toPort: 0,
cidrBlocks: ["0.0.0.0/0"],
description: "All outbound traffic",
}],
tags: {
Environment: config.environment,
Stack: config.stackName,
Component: "redis",
},
});

// Create RDS PostgreSQL instance
this.rdsInstance = new aws.rds.Instance(`${config.stackName}`, {
identifier: this.generateName("postgres"),
@@ -137,37 +87,6 @@ export class DataServicesComponent extends BaseComponent {
// RDS endpoint is just the hostname, need to append port
this.rdsEndpoint = pulumi.interpolate`${this.rdsInstance.endpoint}:${this.rdsInstance.port}`;

// Create ElastiCache Redis replication group
// ElastiCache replication group IDs must be 1-40 characters
// Use a shorter name: stackName-redis (max 40 chars)
const redisGroupId = config.stackName.length > 35
? `${config.stackName.substring(0, 34)}-redis`
: `${config.stackName}-redis`;
this.redisCluster = new aws.elasticache.ReplicationGroup(`${config.stackName}`, {
replicationGroupId: redisGroupId,
description: `Redis cluster for ${config.stackName}`,
engine: "redis",
engineVersion: "7.1",
nodeType: config.redisNodeType || "cache.t4g.micro",
port: 6379,
parameterGroupName: "default.redis7",
numCacheClusters: 1,
subnetGroupName: this.redisSubnetGroup.name,
securityGroupIds: [this.redisSecurityGroup.id],
atRestEncryptionEnabled: true,
transitEncryptionEnabled: false,
automaticFailoverEnabled: false,
tags: {
Name: config.stackName,
Environment: config.environment,
Stack: config.stackName,
Component: "redis",
},
});

// Redis endpoint is just the hostname, need to append port
this.redisEndpoint = pulumi.interpolate`${this.redisCluster.primaryEndpointAddress}:${this.redisCluster.port}`;

// Create S3 bucket for workflow storage
const bucketName = this.generateName("bento-storage");
this.s3Bucket = new aws.s3.Bucket(`${config.stackName}-storage`, {
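With the ElastiCache resources removed, DataServicesComponent is left managing only RDS and S3. A minimal sketch of the remaining public surface, read off the unchanged lines of this diff (not a full implementation):

```typescript
// Sketch only: the component's public members after this PR, inferred from the
// retained lines above. redisCluster, redisEndpoint, redisSecurityGroup and
// redisSubnetGroup no longer exist; Redis/Valkey moves onto the manager instance
// (see LaunchTemplateComponent below). Constructor omitted; members are assigned
// there as in the original file.
export class DataServicesComponent extends BaseComponent {
    public readonly rdsInstance: aws.rds.Instance;
    public readonly rdsEndpoint: pulumi.Output<string>; // "host:port", built via pulumi.interpolate
    public readonly s3Bucket: aws.s3.Bucket;
    public readonly s3BucketName: pulumi.Output<string>;
    public readonly dbSubnetGroup: aws.rds.SubnetGroup;
    public readonly rdsSecurityGroup: aws.ec2.SecurityGroup;
}
```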
42 changes: 31 additions & 11 deletions infra/prover-cluster/components/LaunchTemplateComponent.ts
@@ -22,7 +22,6 @@ export interface LaunchTemplateConfig extends BaseComponentConfig {
componentType: "manager" | "prover" | "execution" | "aux";
volumeSize?: number;
rdsEndpoint?: pulumi.Output<string>;
redisEndpoint?: pulumi.Output<string>;
s3BucketName?: pulumi.Output<string>;
s3AccessKeyId?: pulumi.Output<string>;
s3SecretAccessKey?: pulumi.Output<string>;
@@ -112,7 +111,6 @@ export class LaunchTemplateComponent extends BaseComponent {
this.config.stackName,
config.componentType,
config.rdsEndpoint!,
config.redisEndpoint!,
config.s3BucketName!,
config.s3AccessKeyId!,
config.s3SecretAccessKey!,
@@ -133,14 +131,14 @@
config.maxFetchRetries || 3,
config.allowClientAddresses || "",
config.lockinPriorityGas || "0",
]).apply(([dbName, dbUser, dbPass, rpcUrl, privKey, orderStreamUrl, verifierAddress, boundlessMarketAddress, setVerifierAddress, collateralTokenAddress, chainId, stackName, componentType, rdsEndpoint, redisEndpoint, s3BucketName, s3AccessKeyId, s3SecretAccessKey, mcyclePrice, peakProveKhz, minDeadline, lookbackBlocks, maxCollateral, maxFileSize, maxMcycleLimit, maxConcurrentProofs, balanceWarnThreshold, balanceErrorThreshold, collateralBalanceWarnThreshold, collateralBalanceErrorThreshold, priorityRequestorAddresses, denyRequestorAddresses, maxFetchRetries, allowClientAddresses, lockinPriorityGas]) => {
]).apply(([dbName, dbUser, dbPass, rpcUrl, privKey, orderStreamUrl, verifierAddress, boundlessMarketAddress, setVerifierAddress, collateralTokenAddress, chainId, stackName, componentType, rdsEndpoint, s3BucketName, s3AccessKeyId, s3SecretAccessKey, mcyclePrice, peakProveKhz, minDeadline, lookbackBlocks, maxCollateral, maxFileSize, maxMcycleLimit, maxConcurrentProofs, balanceWarnThreshold, balanceErrorThreshold, collateralBalanceWarnThreshold, collateralBalanceErrorThreshold, priorityRequestorAddresses, denyRequestorAddresses, maxFetchRetries, allowClientAddresses, lockinPriorityGas]) => {
// Extract host from endpoints (format: host:port)
const rdsEndpointStr = String(rdsEndpoint);
const redisEndpointStr = String(redisEndpoint);
const rdsHost = rdsEndpointStr.split(':')[0];
const rdsPort = rdsEndpointStr.split(':')[1] || '5432';
const redisHost = redisEndpointStr.split(':')[0];
const redisPort = redisEndpointStr.split(':')[1] || '6379';
// Manager runs Redis/Valkey locally, so use localhost
const redisHost = "127.0.0.1";
const redisPort = "6379";

const brokerTomlContent = `[market]
mcycle_price = "${mcyclePrice}"
@@ -234,6 +232,29 @@ ${aggregationDimensionsJson.split('\n').map(line => ` ${line}`).join('\n')}

runcmd:
- |
# Install Valkey (Redis fork) as a systemd service
apt-get update
apt-get install -y software-properties-common
add-apt-repository -y ppa:valkey/valkey
apt-get update
apt-get install -y valkey-server
- |
# Configure Valkey to listen on all interfaces (for worker nodes to connect)
cat > /etc/valkey/valkey.conf << 'VALKEYEOF'
bind 0.0.0.0
port 6379
protected-mode no
maxmemory 12gb
maxmemory-policy allkeys-lru
save ""
VALKEYEOF
- |
# Enable and start Valkey service
systemctl enable valkey-server
systemctl restart valkey-server
- |
# Allow Redis/Valkey connections from worker nodes in the security group
# (Security group rules are managed by Pulumi, but we ensure the service is ready)
cat /etc/environment.d/bento.conf >> /etc/environment
- |
/usr/bin/sed -i 's|group_name: "/boundless/bent.*"|group_name: "/boundless/bento/${stackName}/${componentType}"|g' /etc/vector/vector.yaml
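The final runcmd comment above notes that network reachability is left to Pulumi-managed security group rules rather than anything done in cloud-init. A hedged sketch of what such a rule could look like; the resource name, the use of the shared cluster security group, and the self-referencing source are assumptions, not part of this diff:

```typescript
// Assumed sketch (not in this PR): let instances in the shared cluster security
// group reach Valkey on the manager over TCP 6379. Both the rule name and the
// idea that config.securityGroupId is the shared cluster group are assumptions.
const valkeyIngress = new aws.ec2.SecurityGroupRule(`${config.stackName}-valkey-ingress`, {
    type: "ingress",
    protocol: "tcp",
    fromPort: 6379,
    toPort: 6379,
    securityGroupId: config.securityGroupId,       // group attached to the manager (assumed)
    sourceSecurityGroupId: config.securityGroupId, // workers sit in the same group (assumed)
    description: "Valkey access from cluster instances",
});
```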
@@ -269,18 +290,17 @@ runcmd:
this.config.stackName,
config.componentType,
config.rdsEndpoint!,
config.redisEndpoint!,
config.s3BucketName!,
config.s3AccessKeyId!,
config.s3SecretAccessKey!
]).apply(([managerIp, dbName, dbUser, dbPass, stackName, componentType, rdsEndpoint, redisEndpoint, s3BucketName, s3AccessKeyId, s3SecretAccessKey]) => {
]).apply(([managerIp, dbName, dbUser, dbPass, stackName, componentType, rdsEndpoint, s3BucketName, s3AccessKeyId, s3SecretAccessKey]) => {
// Extract host from endpoints (format: host:port)
const rdsEndpointStr = String(rdsEndpoint);
const redisEndpointStr = String(redisEndpoint);
const rdsHost = rdsEndpointStr.split(':')[0];
const rdsPort = rdsEndpointStr.split(':')[1] || '5432';
const redisHost = redisEndpointStr.split(':')[0];
const redisPort = redisEndpointStr.split(':')[1] || '6379';
// Workers connect to Redis/Valkey on the manager node
const redisHost = managerIp;
const redisPort = "6379";
const commonEnvVars = this.generateCommonEnvVars(managerIp, dbName, dbUser, dbPass, stackName, componentType, rdsHost, rdsPort, redisHost, redisPort, s3BucketName, s3AccessKeyId, s3SecretAccessKey);

let componentSpecificVars = "";
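Taken together, the two changed callbacks give each node class a fixed Redis/Valkey target: the manager uses loopback, while prover/execution/aux workers use the manager's IP. A condensed sketch of that selection; the helper name is illustrative, not from the diff:

```typescript
// Sketch only: host selection as implemented in the two apply() callbacks above,
// pulled into a hypothetical helper for clarity.
function resolveRedisTarget(componentType: "manager" | "prover" | "execution" | "aux",
                            managerIp: string): { host: string; port: string } {
    if (componentType === "manager") {
        // The manager installs and runs Valkey locally via cloud-init.
        return { host: "127.0.0.1", port: "6379" };
    }
    // Workers connect to Valkey on the manager node.
    return { host: managerIp, port: "6379" };
}
```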
1 change: 0 additions & 1 deletion infra/prover-cluster/components/ManagerComponent.ts
@@ -22,7 +22,6 @@ export interface ManagerComponentConfig extends BaseComponentConfig {
chainId: string;
alertsTopicArns: string[];
rdsEndpoint: pulumi.Output<string>;
redisEndpoint: pulumi.Output<string>;
s3BucketName: pulumi.Output<string>;
s3AccessKeyId: pulumi.Output<string>;
s3SecretAccessKey: pulumi.Output<string>;