-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathconfig.toml.example
More file actions
83 lines (68 loc) · 2.26 KB
/
config.toml.example
File metadata and controls
83 lines (68 loc) · 2.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
# Open FinOps Stack Configuration
# Copy this file to config.toml and update with your values
[project]
# Project-wide defaults shared by every pipeline.
name = "open-finops-stack"
# Where DuckDB files and other local artifacts are written.
data_dir = "./data"

[database]
# Selects the warehouse backend the stack writes to.
# Supported values: "duckdb", "snowflake", "bigquery", "postgresql"
backend = "duckdb"

# DuckDB settings (the default, file-based backend).
[database.duckdb]
database_path = "./data/finops.duckdb"

# Snowflake settings — uncomment and fill in to enable.
# [database.snowflake]
# account = "your-account.snowflakecomputing.com"
# warehouse = "FINOPS_WH"
# database = "FINOPS_DB"
# schema = "AWS_BILLING"
# user = "finops_user"
# role = "FINOPS_ROLE"
# # password via SNOWFLAKE_PASSWORD environment variable

# BigQuery settings — uncomment and fill in to enable.
# [database.bigquery]
# project_id = "your-gcp-project"
# dataset = "finops_data"
# location = "US"
# # credentials via GOOGLE_APPLICATION_CREDENTIALS environment variable

# PostgreSQL settings — uncomment and fill in to enable.
# [database.postgresql]
# host = "localhost"
# port = 5432
# database = "finops"
# schema = "aws_billing"
# user = "finops_user"
# # password via POSTGRESQL_PASSWORD environment variable

[aws]
# AWS Cost and Usage Report (CUR) pipeline settings.
# Every value here can also be supplied as a command-line flag.

# Dataset/schema name that AWS billing data is loaded into.
dataset_name = "aws_billing"

# Required settings (no defaults) — uncomment and fill in.
# bucket = "your-cur-bucket-name"
# prefix = "your-cur-prefix"
# export_name = "your-cur-export-name"

# Optional settings with defaults.
cur_version = "v1"     # "v1" or "v2"
export_format = "csv"  # "csv" or "parquet"; comment out to auto-detect

# Date range filters; omit both to import all available data.
# start_date = "2024-01"  # YYYY-MM
# end_date = "2024-12"    # YYYY-MM

# Pipeline behavior.
reset = false  # when true, drop existing tables before import

# AWS credentials; omit to fall back to the AWS SDK credential chain.
# access_key_id = ""
# secret_access_key = ""
# region = "us-east-1"

[azure]
# Azure billing pipeline settings (not yet implemented).
# storage_account = ""
# container = ""
# export_name = ""

[gcp]
# GCP billing pipeline settings (not yet implemented).
# project_id = ""
# dataset_id = ""
# table_id = ""