Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -209,9 +209,14 @@ go test -race -v -run TestCreateSandbox ./internal/handlers
- After editing proto files, run `make generate/orchestrator` and `make generate/shared`

### Database Migrations
- Migrations: `packages/db/migrations/`
- **Main migrations**: `packages/db/migrations/` — run via `make migrate` (requires POSTGRES_CONNECTION_STRING)
- **Dashboard migrations**: `packages/db/pkg/dashboard/migrations/` — run separately:
```
GOOSE_DRIVER=postgres GOOSE_DBSTRING="$POSTGRES_CONNECTION_STRING" \
go tool goose -table "_dashboard_migrations" -dir "pkg/dashboard/migrations" up
```
These are required when `DASHBOARD_API_COUNT > 0`. They use a separate goose table (`_dashboard_migrations`).
- Create: Add new `XXXXXX_name.sql` file
- Apply: `make migrate` (requires POSTGRES_CONNECTION_STRING)
- Code generation: `make generate/db` (regenerates sqlc code)

### Environment Variables
Expand Down
2 changes: 1 addition & 1 deletion iac/modules/job-dashboard-api/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,6 @@ resource "nomad_job" "dashboard_api" {
subdomain = "dashboard-api"

otel_collector_grpc_endpoint = "localhost:${var.otel_collector_grpc_port}"
logs_collector_address = "http://localhost:${var.logs_proxy_port.port}"
logs_collector_address = var.logs_collector_address
})
}
7 changes: 2 additions & 5 deletions iac/modules/job-dashboard-api/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -49,9 +49,6 @@ variable "otel_collector_grpc_port" {
default = 4317
}

variable "logs_proxy_port" {
type = object({
name = string
port = number
})
# Full URL of the logs collector endpoint (e.g. "http://localhost:<port>").
# Replaces the former logs_proxy_port object so callers pass the complete
# address directly instead of the module assembling it from a port.
variable "logs_collector_address" {
type = string
}
1 change: 1 addition & 0 deletions iac/provider-aws/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ tf_vars := AWS_PROFILE=$(AWS_PROFILE) AWS_REGION=$(AWS_REGION) \
$(call tfvar, API_SERVER_MACHINE_TYPE) \
$(call tfvar, CLIENT_PROXY_COUNT) \
$(call tfvar, INGRESS_COUNT) \
$(call tfvar, DASHBOARD_API_COUNT) \
$(call tfvar, BUILD_CLUSTER_SIZE) \
$(call tfvar, BUILD_SERVER_MACHINE_TYPE) \
$(call tfvar, BUILD_SERVER_NESTED_VIRTUALIZATION) \
Expand Down
4 changes: 4 additions & 0 deletions iac/provider-aws/init/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ output "db_migrator_repository_name" {
value = aws_ecr_repository.db_migrator.name
}

// Name of the ECR repository holding the dashboard API container image.
// Consumed by the nomad module (dashboard_api_repository_name input) to
// build the full image URI for the dashboard-api Nomad job.
output "dashboard_api_repository_name" {
value = aws_ecr_repository.dashboard_api.name
}

// ---
// Cloudflare
// ---
Expand Down
3 changes: 3 additions & 0 deletions iac/provider-aws/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -200,6 +200,9 @@ module "nomad" {
client_proxy_count = var.client_proxy_count
client_proxy_repository_name = module.init.client_proxy_repository_name

dashboard_api_count = var.dashboard_api_count
dashboard_api_repository_name = module.init.dashboard_api_repository_name

orchestrator_node_pool = local.client_pool_name
allow_sandbox_internet = var.allow_sandbox_internet
orchestrator_port = var.orchestrator_port
Expand Down
21 changes: 21 additions & 0 deletions iac/provider-aws/nomad/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,27 @@ module "api" {
}
}

# Dashboard API Nomad job (optional; backs the self-hosted E2B dashboard).
# The module itself is gated on dashboard_api_count > 0, while
# count_instances controls how many allocations the job runs.
module "dashboard_api" {
source = "../../modules/job-dashboard-api"
count = var.dashboard_api_count > 0 ? 1 : 0

count_instances = var.dashboard_api_count
# Co-located on the API node pool rather than a dedicated pool.
node_pool = var.api_node_pool
# Rolling-update stanza only makes sense with more than one allocation.
update_stanza = var.dashboard_api_count > 1
environment = var.environment

# Image comes from the account-local ECR repo (provisioned in the init
# module — see dashboard_api_repository_name output) and always tracks :latest.
image = "${var.aws_account_id}.dkr.ecr.${var.aws_region}.amazonaws.com/${var.dashboard_api_repository_name}:latest"

# The dashboard auth DB reuses the main Postgres connection; no read
# replica is configured (empty string).
postgres_connection_string = var.postgres_connection_string
auth_db_connection_string = var.postgres_connection_string
auth_db_read_replica_connection_string = ""
clickhouse_connection_string = local.clickhouse_connection_string
supabase_jwt_secrets = var.supabase_jwt_secrets

otel_collector_grpc_port = var.otel_collector_grpc_port
# NOTE(review): var.logs_proxy_port is interpolated directly here, but the
# GCP deployment passes var.logs_proxy_port.port (an object attribute) —
# confirm the AWS variable is declared as a plain number, not an object.
logs_collector_address = "http://localhost:${var.logs_proxy_port}"
}

data "aws_s3_object" "orchestrator" {
bucket = var.fc_env_pipeline_bucket_name
key = "orchestrator"
Expand Down
11 changes: 11 additions & 0 deletions iac/provider-aws/nomad/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -314,6 +314,17 @@ variable "additional_traefik_arguments" {
default = []
}

# Dashboard API
# Number of dashboard API allocations to run. The default of 0 disables the
# job entirely (see the count gate on module "dashboard_api" in main.tf).
variable "dashboard_api_count" {
type = number
default = 0
}

# ECR repository name for the dashboard API image. May be left as the empty
# default when the job is disabled (dashboard_api_count = 0), since the
# module referencing it is not instantiated in that case.
variable "dashboard_api_repository_name" {
type = string
default = ""
}

variable "db_max_open_connections" {
type = number
}
Expand Down
5 changes: 5 additions & 0 deletions iac/provider-aws/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,11 @@ variable "control_server_cluster_size" {
default = 3
}

# Number of dashboard API instances to deploy. Defaults to 0 so the optional
# E2B dashboard stays disabled unless explicitly enabled (set
# DASHBOARD_API_COUNT in .env — see self-host.md).
variable "dashboard_api_count" {
type = number
default = 0
}

variable "additional_traefik_arguments" {
type = list(string)
default = []
Expand Down
2 changes: 1 addition & 1 deletion iac/provider-gcp/nomad/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ module "dashboard_api" {
supabase_jwt_secrets = trimspace(data.google_secret_manager_secret_version.supabase_jwt_secrets.secret_data)

otel_collector_grpc_port = var.otel_collector_grpc_port
logs_proxy_port = var.logs_proxy_port
logs_collector_address = "http://localhost:${var.logs_proxy_port.port}"
}

module "redis" {
Expand Down
26 changes: 20 additions & 6 deletions self-host.md
Original file line number Diff line number Diff line change
Expand Up @@ -80,9 +80,16 @@ Check if you can use config for terraform state management
- e2b-supabase-jwt-secrets (optional / required to self-host the [E2B dashboard](https://github.com/e2b-dev/dashboard))
> Get Supabase JWT Secret: go to the [Supabase dashboard](https://supabase.com/dashboard) -> Select your Project -> Project Settings -> Data API -> JWT Settings
- e2b-posthog-api-key (optional, for monitoring)
9. Run `make plan-without-jobs` and then `make apply`
10. Run `make plan` and then `make apply`. Note: This will only work after the TLS certificates have been issued. Issuing can take some time; you can check the status in the Google Cloud Console. Database migrations run automatically via the API's db-migrator task.
11. Set up data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template.
9. (Optional) To enable the [E2B dashboard](https://github.com/e2b-dev/dashboard), set `DASHBOARD_API_COUNT=1` in your `.env` file and ensure `e2b-supabase-jwt-secrets` is populated. You also need to run the dashboard-specific database migrations:
```sh
cd packages/db
GOOSE_DRIVER=postgres GOOSE_DBSTRING="$POSTGRES_CONNECTION_STRING" \
go tool goose -table "_dashboard_migrations" -dir "pkg/dashboard/migrations" up
```
The dashboard API will be accessible at `dashboard-api.<your-domain>`.
10. Run `make plan-without-jobs` and then `make apply`
11. Run `make plan` and then `make apply`. Note: This will only work after the TLS certificates have been issued. Issuing can take some time; you can check the status in the Google Cloud Console. Database migrations run automatically via the API's db-migrator task.
12. Set up data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template.
- You can also run `make seed-db` in `packages/db` to create more users and teams.

### GCP Troubleshooting
Expand Down Expand Up @@ -149,16 +156,23 @@ Now, you should see the right quota options in `All Quotas` and be able to reque
7. Run `make build-and-upload` to build and push container images and binaries
8. Run `make copy-public-builds` to copy Firecracker kernels and rootfs to your S3 buckets
9. Run `make plan-without-jobs` and then `make apply` to provision the cluster infrastructure
10. Run `make plan` and then `make apply` to deploy all Nomad jobs (this also runs database migrations automatically via the API's db-migrator task)
11. Setup data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template
10. (Optional) To enable the [E2B dashboard](https://github.com/e2b-dev/dashboard), set `DASHBOARD_API_COUNT=1` in your `.env` file and ensure `{prefix}supabase-jwt-secrets` is populated. You also need to run the dashboard-specific database migrations:
```sh
cd packages/db
GOOSE_DRIVER=postgres GOOSE_DBSTRING="$POSTGRES_CONNECTION_STRING" \
go tool goose -table "_dashboard_migrations" -dir "pkg/dashboard/migrations" up
```
The dashboard API will be accessible at `dashboard-api.<your-domain>`.
11. Run `make plan` and then `make apply` to deploy all Nomad jobs (this also runs database migrations automatically via the API's db-migrator task)
12. Set up data in the cluster by running `make prep-cluster` in `packages/shared` to create an initial user, team, and build a base template

### AWS Architecture

The AWS deployment provisions the following:

**Node Pools (EC2 Auto Scaling Groups):**
- **Control Server** - Nomad/Consul servers (default: 3x `t3.medium`)
- **API** - API server, ingress, client proxy, otel, loki, logs collector (default: `t3.xlarge`)
- **API** - API server, ingress, client proxy, dashboard API (optional), otel, loki, logs collector (default: `t3.xlarge`)
- **Client** - Firecracker orchestrator nodes with nested virtualization (default: `m8i.4xlarge`)
- **Build** - Template manager for building sandbox templates (default: `m8i.2xlarge`)
- **ClickHouse** - Analytics database (default: `t3.xlarge`)
Expand Down