All Options
Provided below are working, documented YAML configs for each BuildBuddy binary, containing every option that binary accepts, each set to its default value. Any option that can be specified in the YAML config can also be passed on the command line. For nested options, be sure to write out the full YAML path, with a . separating each part.
For example:
storage:
  disk:
    root_directory: /tmp/buildbuddy
becomes:
buildbuddy -storage.disk.root_directory="/tmp/buildbuddy"
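The same dotted-path rule applies at any nesting depth and to any scalar value type. As an additional illustrative example, using the cache.disk.use_v2_layout option documented below:
cache:
  disk:
    use_v2_layout: true
becomes:
buildbuddy -cache.disk.use_v2_layout=true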
To specify a list of structures using flags on the command line, pass the JSON representation of either the element you wish to append or the list you wish to concatenate onto the end:
For example, given the following schema:
cache:
  disk:
    partitions: [] # type: []disk.Partition
    # e.g.:
    # - id: "" # type: string
    #   max_size_bytes: 0 # type: int
We see that cache.disk.partitions is configured as a list of disk.Partition. In YAML, we'd normally configure it like this:
cache:
  disk:
    partitions:
      - id: "1GB"
        max_size_bytes: 1073741824
      - id: "2GB"
        max_size_bytes: 2147483648
The flag equivalent of this example would be:
buildbuddy -cache.disk.partitions='{"id": "1GB", "max_size_bytes": 1073741824}' -cache.disk.partitions='{"id": "2GB", "max_size_bytes": 2147483648}'
or
buildbuddy -cache.disk.partitions='[{"id": "1GB", "max_size_bytes": 1073741824}, {"id": "2GB", "max_size_bytes": 2147483648}]'
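Lists of plain strings (such as build_event_proxy.hosts below) can presumably be built up the same way, repeating the flag once per element; the following is a sketch that assumes slice-valued flags share the append behavior described above:
build_event_proxy:
  hosts:
    - "grpc://host1:1985"
    - "grpc://host2:1985"
would correspond to:
buildbuddy -build_event_proxy.hosts="grpc://host1:1985" -build_event_proxy.hosts="grpc://host2:1985"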
BuildBuddy Server (FOSS)
# Unstructured settings
# app_directory (string): the directory containing app binary files to host
app_directory: ""
# auto_migrate_db (bool): If true, attempt to automigrate the db when
# connecting
auto_migrate_db: true
# auto_migrate_db_and_exit (bool): If true, attempt to automigrate the db when
# connecting, then exit the program.
auto_migrate_db_and_exit: false
# block_profile_rate (int): The fraction of goroutine blocking events
# reported. (1/rate, 0 disables)
block_profile_rate: 0
# cache_stats_finalization_delay (time.Duration): The time allowed for all
# metrics collectors across all apps to flush their local cache stats to the
# backing storage, before finalizing stats in the DB.
cache_stats_finalization_delay: 500ms
# cleanup_interval (time.Duration): How often the janitor cleanup tasks will
# run
cleanup_interval: 10m0s
# cleanup_workers (int): How many cleanup tasks to run
cleanup_workers: 1
# disable_ga (bool): If true; ga will be disabled
disable_ga: false
# disable_telemetry (bool): If true; telemetry will be disabled
disable_telemetry: false
# drop_invocation_pk_cols (bool): If true, attempt to drop invocation PK cols
drop_invocation_pk_cols: false
# exit_when_ready (bool): If set, the app will exit as soon as it becomes
# ready (useful for migrations)
exit_when_ready: false
# grpc_client_origin_header (string): Header value to set for
# x-buildbuddy-origin.
grpc_client_origin_header: ""
# grpc_max_recv_msg_size_bytes (int): Configures the max GRPC receive message
# size [bytes]
grpc_max_recv_msg_size_bytes: 50000000
# grpc_port (int): The port to listen for gRPC traffic on
grpc_port: 1985
# grpcs_port (int): The port to listen for gRPCS traffic on
grpcs_port: 1986
# internal_grpc_port (int): The port to listen for internal gRPC traffic on
internal_grpc_port: 1987
# internal_grpcs_port (int): The port to listen for internal gRPCS traffic on
internal_grpcs_port: 1988
# internal_http_port (int): The port to listen for internal HTTP traffic
internal_http_port: 0
# js_entry_point_path (string): Absolute URL path of the app JS entry point
js_entry_point_path: /app/app_bundle/app.js?hash={APP_BUNDLE_HASH}
# listen (string): The interface to listen on (default: 0.0.0.0)
listen: 0.0.0.0
# log_deletion_errors (bool): If true; log errors when ttl-deleting expired
# data
log_deletion_errors: false
# log_goroutine_profile_on_shutdown (bool): Whether to log all goroutine stack
# traces on shutdown.
log_goroutine_profile_on_shutdown: false
# max_shutdown_duration (time.Duration): Time to wait for shutdown
max_shutdown_duration: 25s
# max_threads (int): The maximum number of threads to allow before panicking.
# If unset, the golang default will be used (currently 10,000).
max_threads: 0
# migrate_disk_cache_to_v2_and_exit (bool): If true, attempt to migrate disk
# cache to v2 layout.
migrate_disk_cache_to_v2_and_exit: false
# monitoring_port (int): The port to listen for monitoring traffic on
monitoring_port: 9090
# mutex_profile_fraction (int): The fraction of mutex contention events
# reported. (1/rate, 0 disables)
mutex_profile_fraction: 0
# port (int): The port to listen for HTTP traffic on
port: 8080
# regions ([]region.Region): A list of regions that executors might be
# connected to.
regions: []
# For example:
# - name: "" # The user-friendly name of this region. Ex: Europe (type: string)
# server: "" # The http endpoint for this server, with the protocol. Ex: https://app.europe.buildbuddy.io (type: string)
# subdomains: "" # The format for subdomain urls with a single * wildcard. Ex: https://*.europe.buildbuddy.io (type: string)
# report_not_ready (bool): If set to true, the app will always report as being
# unready.
report_not_ready: false
# server_type (string): The server type to match on health checks
server_type: buildbuddy-server
# shutdown_lameduck_duration (time.Duration): If set, the server will be
# marked unready but not run shutdown functions until this period passes.
shutdown_lameduck_duration: 0s
# ssl_port (int): The port to listen for HTTPS traffic on
ssl_port: 8081
# static_directory (string): the directory containing static files to host
static_directory: ""
# telemetry_endpoint (string): The telemetry endpoint to use
telemetry_endpoint: grpcs://t.buildbuddy.io:443
# telemetry_interval (time.Duration): How often telemetry data will be
# reported
telemetry_interval: 24h0m0s
# verbose_telemetry_client (bool): If true; print telemetry client information
verbose_telemetry_client: false
# Structured settings
api:
# api.api_key (string): The default API key to use for on-prem enterprise
# deploys with a single organization/group. **DEPRECATED** Manual API key
# specification is no longer supported; to retrieve specific API keys
# programmatically, please use the API key table. This field will still
# specify an API key to redact in case a manual API key was specified when
# buildbuddy was first set up.
api_key: ""
app:
# app.admin_only_create_group (bool): If true, only admins of an existing
# group can create new groups.
admin_only_create_group: false
# app.audit_logs_ui_enabled (bool): If set, the audit logs UI will be
# accessible from the sidebar.
audit_logs_ui_enabled: false
# app.bazel_buttons_enabled (bool): If set, show remote bazel buttons in
# the UI.
bazel_buttons_enabled: false
# app.build_buddy_url (URL): The external URL where your BuildBuddy
# instance can be found.
build_buddy_url: http://localhost:8080
# app.cache_api_url (URL): Overrides the default remote cache protocol
# gRPC address shown by BuildBuddy on the configuration screen.
cache_api_url: ""
# app.code_editor_enabled (bool): If set, code editor functionality will
# be enabled.
code_editor_enabled: false
# app.code_editor_v2_enabled (bool): If set, show v2 of code editor that
# stores state on server instead of local storage.
code_editor_v2_enabled: false
# app.code_review_enabled (bool): If set, show the code review UI.
code_review_enabled: false
# app.codesearch_enabled (bool): If set, show the code search UI.
codesearch_enabled: false
# app.community_links_enabled (bool): If set, show links to BuildBuddy
# community in the UI.
community_links_enabled: true
# app.customer_managed_encryption_keys_enabled (bool): If set, show
# customer-managed encryption configuration UI.
customer_managed_encryption_keys_enabled: false
# app.default_login_slug (string): If set, the login page will default to
# using this slug.
default_login_slug: ""
# app.default_subdomains ([]string): List of subdomains that should not be
# handled as user-owned subdomains.
default_subdomains: []
# app.default_to_dense_mode (bool): Enables the dense UI mode by default.
default_to_dense_mode: false
# app.deprecate_anonymous_access (bool): If true, log a warning in the
# bazel console when clients are unauthenticated
deprecate_anonymous_access: false
# app.disable_cert_config (bool): If true, the certificate based auth
# option will not be shown in the config widget.
disable_cert_config: false
# app.enable_canaries (bool): If true, enable slow function canaries
enable_canaries: true
# app.enable_grpc_metrics_by_group_id (bool): If enabled, grpc metrics by
# group ID will be recorded
enable_grpc_metrics_by_group_id: false
# app.enable_prometheus_histograms (bool): If true, collect prometheus
# histograms for all RPCs
enable_prometheus_histograms: true
# app.enable_read_target_statuses_from_olap_db (bool): If enabled, read
# target statuses from OLAP DB
enable_read_target_statuses_from_olap_db: false
# app.enable_structured_logging (bool): If true, log messages will be
# json-formatted.
enable_structured_logging: false
# app.enable_subdomain_matching (bool): If true, request subdomain will be
# taken into account when determining what request restrictions should be
# applied.
enable_subdomain_matching: false
# app.enable_target_tracking (bool): Cloud-Only
enable_target_tracking: false
# app.enable_write_executions_to_olap_db (bool): If enabled, complete
# Executions will be flushed to OLAP DB
enable_write_executions_to_olap_db: false
# app.enable_write_test_target_statuses_to_olap_db (bool): If enabled,
# test target statuses will be flushed to OLAP DB
enable_write_test_target_statuses_to_olap_db: false
# app.enable_write_to_olap_db (bool): If enabled, complete invocations
# will be flushed to OLAP DB
enable_write_to_olap_db: true
# app.events_api_url (URL): Overrides the default build event protocol
# gRPC address shown by BuildBuddy on the configuration screen.
events_api_url: ""
# app.execution_search_enabled (bool): If set, fetch lists of executions
# from the OLAP DB in the trends UI.
execution_search_enabled: true
# app.expanded_suggestions_enabled (bool): If set, enable more build
# suggestions in the UI.
expanded_suggestions_enabled: false
# app.grpc_max_recv_msg_size_bytes (int): DEPRECATED: use
# --grpc_max_recv_msg_size_bytes instead
grpc_max_recv_msg_size_bytes: 50000000
# app.grpc_over_http_port_enabled (bool): Enables grpc traffic to be
# served over the http port.
grpc_over_http_port_enabled: true
# app.ignore_forced_tracing_header (bool): If set, we will not honor the
# forced tracing header.
ignore_forced_tracing_header: false
# app.invocation_log_streaming_enabled (bool): If set, the UI will stream
# invocation logs instead of polling.
invocation_log_streaming_enabled: false
# app.ip_rules_ui_enabled (bool): If set, show the IP rules tab in
# settings page.
ip_rules_ui_enabled: false
# app.log_enable_gcp_logging_format (bool): If true, the output structured
# logs will be compatible with format expected by GCP Logging.
log_enable_gcp_logging_format: false
# app.log_enable_grpc_request (bool): If true, log grpc request when log
# level is default
log_enable_grpc_request: true
# app.log_error_stack_traces (bool): If true, stack traces will be printed
# for errors that have them.
log_error_stack_traces: false
# app.log_gcp_log_id (string): The log ID to log to in GCP (if any).
log_gcp_log_id: ""
# app.log_gcp_project_id (string): The project ID to log to in GCP (if
# any).
log_gcp_project_id: ""
# app.log_include_short_file_name (bool): If true, log messages will
# include shortened originating file name.
log_include_short_file_name: false
# app.log_level (string): The desired log level. Logs with a level >= this
# level will be emitted. One of {'fatal', 'error', 'warn', 'info',
# 'debug'}
log_level: info
# app.new_trends_ui_enabled (bool): DEPRECATED: If set, show a new trends
# UI with a bit more organization.
new_trends_ui_enabled: false
# app.org_admin_api_key_creation_enabled (bool): If set, SCIM API keys
# will be able to be created in the UI.
org_admin_api_key_creation_enabled: false
# app.paginate_invocations (bool): If true, paginate invocations returned
# to the UI.
paginate_invocations: true
# app.pattern_filter_enabled (bool): If set, allow filtering by pattern in
# the client.
pattern_filter_enabled: true
# app.popup_auth_enabled (bool): Whether popup windows should be used for
# authentication.
popup_auth_enabled: false
# app.proxy_targets ([]grpc_forward.proxyPair)
proxy_targets: []
# For example:
# - prefix: "" # The gRPC method prefix to match. (type: string)
# target: "" # The gRPC target to forward requests to. (type: string)
# app.reader_writer_roles_enabled (bool): If set, Reader/Writer roles will
# be enabled in the user management UI.
reader_writer_roles_enabled: true
# app.remote_execution_api_url (URL): Overrides the default remote
# execution protocol gRPC address shown by BuildBuddy on the configuration
# screen.
remote_execution_api_url: ""
# app.restrict_bytestream_dialing (bool): If true, only allow dialing
# localhost or the configured cache backend for bytestream requests.
restrict_bytestream_dialing: false
# app.streaming_http_enabled (bool): Whether to support server-streaming
# http requests between server and web UI.
streaming_http_enabled: false
# app.strict_csp_enabled (bool): If set, set a strict CSP header.
# Violations are logged at warning level.
strict_csp_enabled: false
# app.tags_enabled (bool): Enable setting tags on invocations via
# build_metadata
tags_enabled: false
# app.tags_ui_enabled (bool): If set, expose tags data and let users
# filter by tag.
tags_ui_enabled: false
# app.target_flakes_ui_enabled (bool): If set, show some fancy new
# features for analyzing flakes.
target_flakes_ui_enabled: false
# app.test_grid_v2_enabled (bool): Whether to enable test grid V2
test_grid_v2_enabled: true
# app.test_output_manifests_enabled (bool): If set, the target page will
# render the contents of test output zips.
test_output_manifests_enabled: true
# app.timeseries_charts_in_timing_profile_enabled (bool): If set, charts
# with sampled time series data (such as CPU and memory usage) will be
# shown
timeseries_charts_in_timing_profile_enabled: true
# app.trace_fraction (float64): Fraction of requests to sample for
# tracing.
trace_fraction: 0
# app.trace_fraction_overrides ([]string): Tracing fraction override based
# on name in format name=fraction.
trace_fraction_overrides: []
# app.trace_jaeger_collector (string): Address of the Jaeger collector
# endpoint where traces will be sent.
trace_jaeger_collector: ""
# app.trace_project_id (string): Optional GCP project ID to export traces
# to. If not specified, determined from default credentials or metadata
# server if running on GCP.
trace_project_id: ""
# app.trace_service_name (string): Name of the service to associate with
# traces.
trace_service_name: ""
# app.trace_viewer_enabled (bool): Whether the new trace viewer is
# enabled.
trace_viewer_enabled: false
# app.trends_heatmap_enabled (bool): If set, enable a fancy heatmap UI for
# exploring build trends.
trends_heatmap_enabled: true
# app.trends_range_selection (bool): If set, let users drag to select time
# ranges in the trends UI.
trends_range_selection: true
# app.trends_summary_enabled (bool): If set, show the new 'summary'
# section at the top of the trends UI.
trends_summary_enabled: false
# app.usage_enabled (bool): If set, the usage page will be enabled in the
# UI.
usage_enabled: false
# app.user_management_enabled (bool): If set, the user management page
# will be enabled in the UI. **DEPRECATED** This flag has no effect and
# will be removed in the future.
user_management_enabled: true
auth:
# auth.domain_wide_cookies (bool): If true, cookies will have domain set
# so that they are accessible on domain and all subdomains.
domain_wide_cookies: false
# auth.https_only_cookies (bool): If true, cookies will only be set over
# https connections.
https_only_cookies: false
# auth.jwt_claims_cache_ttl (time.Duration): TTL for JWT string to parsed
# claims caching. Set to '0' to disable cache.
jwt_claims_cache_ttl: 15s
# auth.jwt_duration (time.Duration): Maximum lifetime of the generated
# JWT.
jwt_duration: 6h0m0s
# auth.jwt_key (string): The key to use when signing JWT tokens.
jwt_key: set_the_jwt_in_config
# auth.new_jwt_key (string): If set, JWT verifications will try both this
# and the old JWT key.
new_jwt_key: ""
# auth.sign_using_new_jwt_key (bool): If true, new JWTs will be signed
# using the new JWT key.
sign_using_new_jwt_key: false
# auth.trust_xforwardedfor_header (bool): If true, client IP information
# will be retrieved from the X-Forwarded-For header. Should only be
# enabled if the BuildBuddy server is only accessible behind a trusted
# proxy.
trust_xforwardedfor_header: false
build_event_proxy:
# build_event_proxy.buffer_size (int): The number of build events to
# buffer locally when proxying build events.
buffer_size: 100
# build_event_proxy.hosts ([]string): The list of hosts to pass build
# events onto.
hosts: []
cache:
client:
# cache.client.ac_rpc_timeout (time.Duration): Maximum time a single
# Action Cache RPC can take.
ac_rpc_timeout: 15s
# cache.client.cas_rpc_timeout (time.Duration): Maximum time a single
# batch RPC or a single ByteStream chunk read can take.
cas_rpc_timeout: 1m0s
# cache.client.enable_upload_compression (bool): If true, enable
# compression of uploads to remote caches
enable_upload_compression: true
# cache.count_ttl (time.Duration): How long to go without receiving any
# cache requests for an invocation before deleting the invocation's counts
# from the metrics collector.
count_ttl: 24h0m0s
# cache.detailed_stats_enabled (bool): Whether to enable detailed stats
# recording for all cache requests.
detailed_stats_enabled: false
# cache.detailed_stats_ttl (time.Duration): How long to go without
# receiving any cache requests for an invocation before deleting the
# invocation's detailed results from the metrics collector. Has no effect
# if cache.detailed_stats_enabled is not set.
detailed_stats_ttl: 3h0m0s
# cache.directory_sizes_enabled (bool): If true, enable an RPC that
# computes the cumulative size of directories stored in the cache.
directory_sizes_enabled: false
disk:
# cache.disk.partition_mappings ([]disk.PartitionMapping)
partition_mappings: []
# For example:
# - group_id: "" # The Group ID to which this mapping applies. (type: string)
# prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# cache.disk.partitions ([]disk.Partition)
partitions: []
# For example:
# - id: "" # The ID of the partition. (type: string)
# max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# cache.disk.root_directory (string): The root directory to store all
# blobs in, if using disk based storage.
root_directory: ""
# cache.disk.use_v2_layout (bool): If enabled, files will be stored
# using the v2 layout. See disk_cache.MigrateToV2Layout for a
# description.
use_v2_layout: false
# cache.enable_tree_caching (bool): If true, cache GetTree responses (full
# and partial)
enable_tree_caching: true
# cache.in_memory (bool): Whether or not to use the in_memory cache.
in_memory: false
# cache.max_direct_write_size_bytes (int64): For bytestream requests
# smaller than this size, write straight to the cache without checking if
# the entry already exists.
max_direct_write_size_bytes: 16384
# cache.max_size_bytes (int64): How big to allow the cache to be (in
# bytes).
max_size_bytes: 10000000000
# cache.max_tree_cache_set_duration (time.Duration): The max amount of
# time to wait for unfinished tree cache entries to be set.
max_tree_cache_set_duration: 1s
# cache.tree_cache_min_descendents (int): The min number of descendents a
# node must parent in order to be cached
tree_cache_min_descendents: 3
# cache.tree_cache_min_level (int): The min level at which the tree may be
# cached. 0 is the root
tree_cache_min_level: 2
# cache.tree_cache_seed (string): If set, hash this with digests before
# caching / reading from tree cache
tree_cache_seed: treecache-09032024
# cache.tree_cache_splitting (bool): If true, try to split up TreeCache
# entries to save space.
tree_cache_splitting: false
# cache.tree_cache_splitting_min_size (int): Minimum number of files in a
# subtree before we'll split it in the treecache.
tree_cache_splitting_min_size: 10000
# cache.tree_cache_write_probability (float64): Write to the tree cache
# with this probability
tree_cache_write_probability: 0.1
# cache.zstd_transcoding_enabled (bool): Whether to accept requests to
# read/write zstd-compressed blobs, compressing/decompressing
# outgoing/incoming blobs on the fly.
zstd_transcoding_enabled: true
database:
# database.advanced_data_source (db.AdvancedConfig): Alternative to the
# database.data_source flag that allows finer control over database
# settings as well as allowing use of AWS IAM credentials. For most users,
# database.data_source is a simpler configuration method.
advanced_data_source:
driver: "" # The driver to use: one of sqlite3, mysql, or postgresql. (type: string)
endpoint: "" # Typically the host:port combination of the database server. (type: string)
username: "" # Username to use when connecting. (type: string)
password: "" # Password to use when connecting. Not used if AWS IAM is enabled. (type: string)
db_name: "" # The name of the database to use for BuildBuddy data. (type: string)
region: "" # Region of the database instance. Required if AWS IAM is enabled. (type: string)
use_aws_iam: false # If enabled, AWS IAM authentication is used instead of fixed credentials. Make sure the endpoint includes the port, otherwise IAM-based auth will fail. (type: bool)
params: "" # Optional parameters to pass to the database driver (in format key1=val1&key2=val2) (type: string)
# database.advanced_read_replica (db.AdvancedConfig): Advanced alternative
# to database.read_replica. Refer to database.advanced for more
# information.
advanced_read_replica:
driver: "" # The driver to use: one of sqlite3, mysql, or postgresql. (type: string)
endpoint: "" # Typically the host:port combination of the database server. (type: string)
username: "" # Username to use when connecting. (type: string)
password: "" # Password to use when connecting. Not used if AWS IAM is enabled. (type: string)
db_name: "" # The name of the database to use for BuildBuddy data. (type: string)
region: "" # Region of the database instance. Required if AWS IAM is enabled. (type: string)
use_aws_iam: false # If enabled, AWS IAM authentication is used instead of fixed credentials. Make sure the endpoint includes the port, otherwise IAM-based auth will fail. (type: bool)
params: "" # Optional parameters to pass to the database driver (in format key1=val1&key2=val2) (type: string)
# database.conn_max_lifetime_seconds (int): The maximum lifetime of a
# connection to the db
conn_max_lifetime_seconds: 0
# database.data_source (string): The SQL database to connect to, specified
# as a connection string.
data_source: sqlite3:///tmp/buildbuddy.db
# database.log_queries (bool): If true, log all queries
log_queries: false
# database.max_idle_conns (int): The maximum number of idle connections to
# maintain to the db
max_idle_conns: 0
# database.max_open_conns (int): The maximum number of open connections to
# maintain to the db
max_open_conns: 0
# database.print_schema_changes_and_exit (bool): If set, print schema
# changes from auto-migration, then exit the program.
print_schema_changes_and_exit: false
# database.read_replica (string): A secondary, read-only SQL database to
# connect to, specified as a connection string.
read_replica: ""
# database.slow_query_threshold (time.Duration): Queries longer than this
# duration will be logged with a 'Slow SQL' warning.
slow_query_threshold: 500ms
# database.stats_poll_interval (time.Duration): How often to poll the DB
# client for connection stats (default: '5s').
stats_poll_interval: 5s
executor:
# executor.host_id (string): Optional: Allows for manual specification of
# an executor's host id. If not set, a random UUID will be used.
host_id: ""
github:
# github.access_token (string): The GitHub access token used to post
# GitHub commit statuses. ** Enterprise only **
access_token: ""
# github.client_id (string): The client ID of your GitHub Oauth App. **
# Enterprise only **
client_id: ""
# github.client_secret (string): The client secret of your GitHub Oauth
# App. ** Enterprise only **
client_secret: ""
# github.enterprise_host (string): The Github enterprise hostname to use
# if using GitHub enterprise server, not including https:// and no
# trailing slash.
enterprise_host: ""
# github.jwt_key (string): The key to use when signing JWT tokens for
# github auth.
jwt_key: ""
# github.status_name_suffix (string): Suffix to be appended to all
# reported GitHub status names. Useful for differentiating BuildBuddy
# deployments. For example: '(dev)' ** Enterprise only **
status_name_suffix: ""
# github.status_per_test_target (bool): If true, report status per test
# target. ** Enterprise only **
status_per_test_target: false
gossip:
# gossip.join ([]string): The nodes to join/gossip with. Ex.
# '1.2.3.4:1991,2.3.4.5:1991...'
join: []
# gossip.listen_addr (string): The address to listen for gossip traffic
# on. Ex. 'localhost:1991'
listen_addr: ""
# gossip.node_name (string): The gossip node's name. If empty, will default
# to host_id.
node_name: ""
# gossip.secret_key (string): The value should be either 16, 24, or 32
# bytes.
secret_key: ""
grpc_client:
# grpc_client.enable_pool_cache (bool): Whether or not to enable the
# connection pool cache.
enable_pool_cache: false
# grpc_client.pool_size (int): Number of connections to create to each
# target.
pool_size: 15
integrations:
invocation_upload:
# integrations.invocation_upload.aws_credentials (string): Credentials
# CSV file for Amazon s3 invocation upload webhook. ** Enterprise only
# **
aws_credentials: ""
# integrations.invocation_upload.enabled (bool): Whether to upload
# webhook data to the webhook URL configured per-Group. ** Enterprise
# only **
enabled: false
# integrations.invocation_upload.gcs_credentials (string): Credentials
# JSON for the Google service account used to authenticate when GCS is
# used as the invocation upload target. ** Enterprise only **
gcs_credentials: ""
slack:
# integrations.slack.webhook_url (string): A Slack webhook url to post
# build update messages to.
webhook_url: ""
monitoring:
basic_auth:
# monitoring.basic_auth.password (string): Optional password for basic
# auth on the monitoring port.
password: ""
# monitoring.basic_auth.username (string): Optional username for basic
# auth on the monitoring port.
username: ""
olap_database:
# olap_database.cluster_name (string): The cluster name of the database
cluster_name: '{cluster}'
# olap_database.enable_data_replication (bool): If true, data replication
# is enabled.
enable_data_replication: false
# olap_database.replica_name (string): The replica name of the table in
# zookeeper
replica_name: '{replica}'
# olap_database.zoo_path (string): The path to the table name in
# zookeeper, used to set up data replication
zoo_path: /clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}
remote_execution:
# remote_execution.enable_executor_key_creation (bool): If enabled, UI
# will allow executor keys to be created.
enable_executor_key_creation: false
# remote_execution.enable_remote_exec (bool): If true, enable remote-exec.
# ** Enterprise only **
enable_remote_exec: true
# remote_execution.enable_user_owned_executors (bool): If enabled, users
# can register their own executors with the scheduler.
enable_user_owned_executors: false
# remote_execution.enable_workflows (bool): Whether to enable BuildBuddy
# workflows.
enable_workflows: false
# remote_execution.force_user_owned_darwin_executors (bool): If enabled,
# darwin actions will always run on user-owned executors.
force_user_owned_darwin_executors: false
ssl:
# ssl.cert_file (string): Path to a PEM encoded certificate file to use
# for TLS if not using ACME.
cert_file: ""
# ssl.client_ca_cert (string): PEM encoded certificate authority used to
# issue client certificates for mTLS auth.
client_ca_cert: ""
# ssl.client_ca_cert_file (string): Path to a PEM encoded certificate
# authority file used to issue client certificates for mTLS auth.
client_ca_cert_file: ""
# ssl.client_ca_key (string): PEM encoded certificate authority key used
# to issue client certificates for mTLS auth.
client_ca_key: ""
# ssl.client_ca_key_file (string): Path to a PEM encoded certificate
# authority key file used to issue client certificates for mTLS auth.
client_ca_key_file: ""
# ssl.client_cert_lifespan (time.Duration): The duration client
# certificates are valid for. Ex: '730h' for one month. If not set,
# defaults to 100 years.
client_cert_lifespan: 876000h0m0s
# ssl.default_host (string): Host name to use for ACME generated cert if
# TLS request does not contain SNI.
default_host: ""
# ssl.enable_ssl (bool): Whether or not to enable SSL/TLS on gRPC
# connections (gRPCS).
enable_ssl: false
# ssl.host_whitelist ([]string): Cloud-Only
host_whitelist: []
# ssl.key_file (string): Path to a PEM encoded key file to use for TLS if
# not using ACME.
key_file: ""
# ssl.self_signed (bool): If true, a self-signed cert will be generated
# for TLS termination.
self_signed: false
# ssl.upgrade_insecure (bool): True if http requests should be redirected
# to https. Assumes http traffic is served on port 80 and https traffic is
# served on port 443 (typically via an ingress / load balancer).
upgrade_insecure: false
# ssl.use_acme (bool): Whether or not to automatically configure SSL certs
# using ACME. If ACME is enabled, cert_file and key_file should not be
# set.
use_acme: false
storage:
aws_s3:
# storage.aws_s3.bucket (string): The AWS S3 bucket to store files in.
bucket: ""
# storage.aws_s3.credentials_profile (string): A custom credentials
# profile to use.
credentials_profile: ""
# storage.aws_s3.disable_ssl (bool): Disables the use of SSL, useful
# for configuring the use of MinIO. **DEPRECATED** Specify a non-HTTPS
# endpoint instead.
disable_ssl: false
# storage.aws_s3.endpoint (string): The AWS endpoint to use, useful
# for configuring the use of MinIO.
endpoint: ""
# storage.aws_s3.region (string): The AWS region.
region: ""
# storage.aws_s3.role_arn (string): The role ARN to use for web
# identity auth.
role_arn: ""
# storage.aws_s3.role_session_name (string): The role session name to
# use for web identity auth.
role_session_name: ""
# storage.aws_s3.s3_force_path_style (bool): Force path style urls for
# objects, useful for configuring the use of MinIO.
s3_force_path_style: false
# storage.aws_s3.static_credentials_id (string): Static credentials ID
# to use, useful for configuring the use of MinIO.
static_credentials_id: ""
# storage.aws_s3.static_credentials_secret (string): Static
# credentials secret to use, useful for configuring the use of MinIO.
static_credentials_secret: ""
# storage.aws_s3.static_credentials_token (string): Static credentials
# token to use, useful for configuring the use of MinIO.
static_credentials_token: ""
# storage.aws_s3.web_identity_token_file (string): The file path to
# the web identity token file.
web_identity_token_file: ""
azure:
# storage.azure.account_key (string): The key for the Azure storage
# account
account_key: ""
# storage.azure.account_name (string): The name of the Azure storage
# account
account_name: ""
# storage.azure.container_name (string): The name of the Azure storage
# container
container_name: ""
# storage.chunk_file_size_bytes (int): How many bytes to buffer in memory
# before flushing a chunk of build protocol data to disk.
chunk_file_size_bytes: 3000000
# storage.cleanup_batch_size (int): How many invocations to delete in each
# janitor cleanup task
cleanup_batch_size: 10
# storage.disable_persist_cache_artifacts (bool): If disabled, buildbuddy
# will not persist cache artifacts in the blobstore. This may make older
# invocations not display properly.
disable_persist_cache_artifacts: false
disk:
# storage.disk.root_directory (string): The root directory to store
# all blobs in, if using disk based storage.
root_directory: /tmp/buildbuddy
# storage.disk.use_v2_layout (bool): If enabled, files will be stored
# using the v2 layout. See disk_cache.MigrateToV2Layout for a
# description.
use_v2_layout: false
# storage.enable_chunked_event_logs (bool): If true, Event logs will be
# stored separately from the invocation proto in chunks.
enable_chunked_event_logs: true
execution:
# storage.execution.cleanup_batch_size (int): How many invocations to
# delete in each janitor cleanup task
cleanup_batch_size: 200
# storage.execution.cleanup_interval (time.Duration): How often the
# janitor cleanup tasks will run
cleanup_interval: 5m0s
# storage.execution.cleanup_workers (int): How many cleanup tasks to
# run
cleanup_workers: 1
# storage.execution.ttl (time.Duration): The time, in seconds, to keep
# invocations before deletion. 0 disables invocation deletion.
ttl: 0s
gcs:
# storage.gcs.bucket (string): The name of the GCS bucket to store
# build artifact files in.
bucket: ""
# storage.gcs.credentials (string): Credentials in JSON format that
# will be used to authenticate to GCS.
credentials: ""
# storage.gcs.credentials_file (string): A path to a JSON credentials
# file that will be used to authenticate to GCS.
credentials_file: ""
# storage.gcs.project_id (string): The Google Cloud project ID of the
# project owning the above credentials and GCS bucket.
project_id: ""
# storage.path_prefix (string): The prefix directory to store all blobs in
path_prefix: ""
# storage.tempdir (string): Root directory for temporary files. Defaults
# to the OS-specific temp dir.
tempdir: /tmp
# storage.ttl_seconds (int): The time, in seconds, to keep invocations
# before deletion. 0 disables invocation deletion.
ttl_seconds: 0
BuildBuddy Server (Enterprise)
# Unstructured settings
# app_directory (string): the directory containing app binary files to host
app_directory: ""
# auto_migrate_db (bool): If true, attempt to automigrate the db when
# connecting
auto_migrate_db: true
# auto_migrate_db_and_exit (bool): If true, attempt to automigrate the db when
# connecting, then exit the program.
auto_migrate_db_and_exit: false
# block_profile_rate (int): The fraction of goroutine blocking events
# reported. (1/rate, 0 disables)
block_profile_rate: 0
# cache_stats_finalization_delay (time.Duration): The time allowed for all
# metrics collectors across all apps to flush their local cache stats to the
# backing storage, before finalizing stats in the DB.
cache_stats_finalization_delay: 500ms
# cleanup_interval (time.Duration): How often the janitor cleanup tasks will
# run
cleanup_interval: 10m0s
# cleanup_workers (int): How many cleanup tasks to run
cleanup_workers: 1
# debug_enable_anonymous_runner_recycling (bool): Whether to enable runner
# recycling for unauthenticated requests. For debugging purposes only - do not
# use in production.
debug_enable_anonymous_runner_recycling: false
# debug_use_local_images_only (bool): Do not pull OCI images and only use
# locally cached images. This can be set to test local image builds during
# development without needing to push to a container registry. Not intended
# for production use.
debug_use_local_images_only: false
# disable_ga (bool): If true; ga will be disabled
disable_ga: false
# disable_telemetry (bool): If true; telemetry will be disabled
disable_telemetry: false
# drop_invocation_pk_cols (bool): If true, attempt to drop invocation PK cols
drop_invocation_pk_cols: false
# enable_cache_delete_api (bool): If true, enable access to cache delete API.
enable_cache_delete_api: false
# exit_when_ready (bool): If set, the app will exit as soon as it becomes
# ready (useful for migrations)
exit_when_ready: false
# grpc_client_origin_header (string): Header value to set for
# x-buildbuddy-origin.
grpc_client_origin_header: ""
# grpc_max_recv_msg_size_bytes (int): Configures the max GRPC receive message
# size [bytes]
grpc_max_recv_msg_size_bytes: 50000000
# grpc_port (int): The port to listen for gRPC traffic on
grpc_port: 1985
# grpcs_port (int): The port to listen for gRPCS traffic on
grpcs_port: 1986
# internal_grpc_port (int): The port to listen for internal gRPC traffic on
internal_grpc_port: 1987
# internal_grpcs_port (int): The port to listen for internal gRPCS traffic on
internal_grpcs_port: 1988
# internal_http_port (int): The port to listen for internal HTTP traffic
internal_http_port: 0
# js_entry_point_path (string): Absolute URL path of the app JS entry point
js_entry_point_path: /app/app_bundle/app.js?hash={APP_BUNDLE_HASH}
# listen (string): The interface to listen on (default: 0.0.0.0)
listen: 0.0.0.0
# log_deletion_errors (bool): If true; log errors when ttl-deleting expired
# data
log_deletion_errors: false
# log_goroutine_profile_on_shutdown (bool): Whether to log all goroutine stack
# traces on shutdown.
log_goroutine_profile_on_shutdown: false
# max_shutdown_duration (time.Duration): Time to wait for shutdown
max_shutdown_duration: 25s
# max_threads (int): The maximum number of threads to allow before panicking.
# If unset, the golang default will be used (currently 10,000).
max_threads: 0
# migrate_disk_cache_to_v2_and_exit (bool): If true, attempt to migrate disk
# cache to v2 layout.
migrate_disk_cache_to_v2_and_exit: false
# monitoring_port (int): The port to listen for monitoring traffic on
monitoring_port: 9090
# mutex_profile_fraction (int): The fraction of mutex contention events
# reported. (1/rate, 0 disables)
mutex_profile_fraction: 0
# port (int): The port to listen for HTTP traffic on
port: 8080
# redis_command_buffer_flush_period (time.Duration): How long to wait between
# flushing buffered redis commands. Setting this to 0 will disable buffering
# at the cost of higher redis QPS.
redis_command_buffer_flush_period: 250ms
# regions ([]region.Region): A list of regions that executors might be
# connected to.
regions: []
# For example:
# - name: "" # The user-friendly name of this region. Ex: Europe (type: string)
# server: "" # The http endpoint for this server, with the protocol. Ex: https://app.europe.buildbuddy.io (type: string)
# subdomains: "" # The format for subdomain urls with a single * wildcard. Ex: https://*.europe.buildbuddy.io (type: string)
# report_not_ready (bool): If set to true, the app will always report as being
# unready.
report_not_ready: false
# server_type (string): The server type to match on health checks
server_type: buildbuddy-server
# shutdown_lameduck_duration (time.Duration): If set, the server will be
# marked unready but not run shutdown functions until this period passes.
shutdown_lameduck_duration: 0s
# ssl_port (int): The port to listen for HTTPS traffic on
ssl_port: 8081
# static_directory (string): the directory containing static files to host
static_directory: ""
# telemetry_endpoint (string): The telemetry endpoint to use
telemetry_endpoint: grpcs://t.buildbuddy.io:443
# telemetry_interval (time.Duration): How often telemetry data will be
# reported
telemetry_interval: 24h0m0s
# telemetry_port (int): The port on which to listen for telemetry events
telemetry_port: 9099
# verbose_telemetry_client (bool): If true; print telemetry client information
verbose_telemetry_client: false
# verbose_telemetry_server (bool): If true; print telemetry server information
verbose_telemetry_server: false
# zone_override (string): A value that will override the auto-detected zone.
# Ignored if empty
zone_override: ""
# Structured settings
api:
# api.api_key (string): The default API key to use for on-prem enterprise
# deploys with a single organization/group. **DEPRECATED** Manual API key
# specification is no longer supported; to retrieve specific API keys
# programmatically, please use the API key table. This field will still
# specify an API key to redact in case a manual API key was specified when
# buildbuddy was first set up.
api_key: ""
# api.enable_api (bool): Whether or not to enable the BuildBuddy API.
enable_api: true
# api.enable_cache (bool): Whether or not to enable the API cache.
enable_cache: false
# api.enable_metrics_api (bool): If true, enable access to metrics API.
enable_metrics_api: false
app:
# app.add_user_to_domain_group (bool): Cloud-Only
add_user_to_domain_group: false
# app.admin_only_create_group (bool): If true, only admins of an existing
# group can create new groups.
admin_only_create_group: false
# app.audit_logs_enabled (bool): Whether to log administrative events to
# an audit log. Requires OLAP database to be configured.
audit_logs_enabled: false
# app.audit_logs_ui_enabled (bool): If set, the audit logs UI will be
# accessible from the sidebar.
audit_logs_ui_enabled: false
# app.bazel_buttons_enabled (bool): If set, show remote bazel buttons in
# the UI.
bazel_buttons_enabled: false
# app.blended_invocation_search_enabled (bool): If true,
# InvocationSearchService will query clickhouse for all searches, filling
# in in-progress invocations from the regular DB.
blended_invocation_search_enabled: false
# app.build_buddy_url (URL): The external URL where your BuildBuddy
# instance can be found.
build_buddy_url: http://localhost:8080
# app.cache_api_url (URL): Overrides the default remote cache protocol
# gRPC address shown by BuildBuddy on the configuration screen.
cache_api_url: ""
client_identity:
# app.client_identity.client (string): The client identifier to place
# in the identity header.
client: ""
# app.client_identity.key (string): The key used to sign and verify
# identity JWTs.
key: ""
# app.client_identity.origin (string): The origin identifier to place
# in the identity header.
origin: ""
# app.code_editor_enabled (bool): If set, code editor functionality will
# be enabled.
code_editor_enabled: false
# app.code_editor_v2_enabled (bool): If set, show v2 of code editor that
# stores state on server instead of local storage.
code_editor_v2_enabled: false
# app.code_review_enabled (bool): If set, show the code review UI.
code_review_enabled: false
# app.codesearch_backend (string): Address and port to connect to
codesearch_backend: ""
# app.codesearch_enabled (bool): If set, show the code search UI.
codesearch_enabled: false
# app.community_links_enabled (bool): If set, show links to BuildBuddy
# community in the UI.
community_links_enabled: true
# app.create_group_per_user (bool): Cloud-Only
create_group_per_user: false
# app.customer_managed_encryption_keys_enabled (bool): If set, show
# customer-managed encryption configuration UI.
customer_managed_encryption_keys_enabled: false
# app.default_login_slug (string): If set, the login page will default to
# using this slug.
default_login_slug: ""
# app.default_redis_target (string): A Redis target for storing remote
# shared state. To ease migration, the redis target from the remote
# execution config will be used if this value is not specified.
default_redis_target: ""
default_sharded_redis:
# app.default_sharded_redis.password (string): Redis password
password: ""
# app.default_sharded_redis.shards ([]string): Ordered list of Redis
# shard addresses.
shards: []
# app.default_sharded_redis.username (string): Redis username
username: ""
# app.default_subdomains ([]string): List of subdomains that should not be
# handled as user-owned subdomains.
default_subdomains: []
# app.default_to_dense_mode (bool): Enables the dense UI mode by default.
default_to_dense_mode: false
# app.deprecate_anonymous_access (bool): If true, log a warning in the
# bazel console when clients are unauthenticated
deprecate_anonymous_access: false
# app.disable_cert_config (bool): If true, the certificate based auth
# option will not be shown in the config widget.
disable_cert_config: false
# app.enable_canaries (bool): If true, enable slow function canaries
enable_canaries: true
# app.enable_execution_trends (bool): If enabled, fill execution trend
# stats in GetTrendResponse
enable_execution_trends: true
# app.enable_grpc_metrics_by_group_id (bool): If enabled, grpc metrics by
# group ID will be recorded
enable_grpc_metrics_by_group_id: false
# app.enable_invocation_stat_percentiles (bool): If enabled, provide
# percentile breakdowns for invocation stats in GetTrendResponse
enable_invocation_stat_percentiles: true
# app.enable_prometheus_histograms (bool): If true, collect prometheus
# histograms for all RPCs
enable_prometheus_histograms: true
# app.enable_quota_management (bool): If set, quota management will be
# enabled
enable_quota_management: false
# app.enable_read_from_olap_db (bool): If enabled, read from OLAP DB
enable_read_from_olap_db: true
# app.enable_read_target_statuses_from_olap_db (bool): If enabled, read
# target statuses from OLAP DB
enable_read_target_statuses_from_olap_db: false
# app.enable_secret_service (bool): If set, secret service will be enabled
enable_secret_service: false
# app.enable_structured_logging (bool): If true, log messages will be
# json-formatted.
enable_structured_logging: false
# app.enable_subdomain_matching (bool): If true, request subdomain will be
# taken into account when determining what request restrictions should be
# applied.
enable_subdomain_matching: false
# app.enable_target_tracking (bool): Cloud-Only
enable_target_tracking: false
# app.enable_write_executions_to_olap_db (bool): If enabled, complete
# Executions will be flushed to OLAP DB
enable_write_executions_to_olap_db: false
# app.enable_write_test_target_statuses_to_olap_db (bool): If enabled,
# test target statuses will be flushed to OLAP DB
enable_write_test_target_statuses_to_olap_db: false
# app.enable_write_to_olap_db (bool): If enabled, complete invocations
# will be flushed to OLAP DB
enable_write_to_olap_db: true
# app.events_api_url (URL): Overrides the default build event protocol
# gRPC address shown by BuildBuddy on the configuration screen.
events_api_url: ""
# app.execution_search_enabled (bool): If set, fetch lists of executions
# from the OLAP DB in the trends UI.
execution_search_enabled: true
# app.expanded_suggestions_enabled (bool): If set, enable more build
# suggestions in the UI.
expanded_suggestions_enabled: false
# app.fetch_tags_drilldown_data (bool): If enabled,
# DrilldownType_TAG_DRILLDOWN_TYPE can be returned in
# GetStatDrilldownRequests
fetch_tags_drilldown_data: true
# app.finer_time_buckets (bool): If enabled, split trends and drilldowns
# into smaller time buckets when the user has a smaller date range
# selected.
finer_time_buckets: false
# app.grpc_max_recv_msg_size_bytes (int): DEPRECATED: use
# --grpc_max_recv_msg_size_bytes instead
grpc_max_recv_msg_size_bytes: 50000000
# app.grpc_over_http_port_enabled (bool): Enables grpc traffic to be
# served over the http port.
grpc_over_http_port_enabled: true
# app.ignore_forced_tracing_header (bool): If set, we will not honor the
# forced tracing header.
ignore_forced_tracing_header: false
# app.invocation_log_streaming_enabled (bool): If set, the UI will stream
# invocation logs instead of polling.
invocation_log_streaming_enabled: false
# app.invocation_summary_available_usec (int64): The timestamp when the
# invocation summary is available in the DB
invocation_summary_available_usec: 0
# app.ip_rules_ui_enabled (bool): If set, show the IP rules tab in
# settings page.
ip_rules_ui_enabled: false
# app.log_enable_gcp_logging_format (bool): If true, the output structured
# logs will be compatible with format expected by GCP Logging.
log_enable_gcp_logging_format: false
# app.log_enable_grpc_request (bool): If true, log grpc request when log
# level is default
log_enable_grpc_request: true
# app.log_error_stack_traces (bool): If true, stack traces will be printed
# for errors that have them.
log_error_stack_traces: false
# app.log_gcp_log_id (string): The log ID to log to in GCP (if any).
log_gcp_log_id: ""
# app.log_gcp_project_id (string): The project ID to log to in GCP (if
# any).
log_gcp_project_id: ""
# app.log_include_short_file_name (bool): If true, log messages will
# include shortened originating file name.
log_include_short_file_name: false
# app.log_level (string): The desired log level. Logs with a level >= this
# level will be emitted. One of {'fatal', 'error', 'warn', 'info',
# 'debug'}
log_level: info
# app.new_trends_ui_enabled (bool): DEPRECATED: If set, show a new trends
# UI with a bit more organization.
new_trends_ui_enabled: false
# app.no_default_user_group (bool): Cloud-Only
no_default_user_group: false
# app.olap_invocation_search_enabled (bool): If true,
# InvocationSearchService will query clickhouse for a few impossibly slow
# queries (i.e., tags), but mostly use the regular DB.
olap_invocation_search_enabled: true
# app.org_admin_api_key_creation_enabled (bool): If set, SCIM API keys
# will be able to be created in the UI.
org_admin_api_key_creation_enabled: false
# app.paginate_invocations (bool): If true, paginate invocations returned
# to the UI.
paginate_invocations: true
# app.pattern_filter_enabled (bool): If set, allow filtering by pattern in
# the client.
pattern_filter_enabled: true
# app.popup_auth_enabled (bool): Whether popup windows should be used for
# authentication.
popup_auth_enabled: false
# app.proxy_targets ([]grpc_forward.proxyPair)
proxy_targets: []
# For example:
# - prefix: "" # The gRPC method prefix to match. (type: string)
# target: "" # The gRPC target to forward requests to. (type: string)
# app.reader_writer_roles_enabled (bool): If set, Reader/Writer roles will
# be enabled in the user management UI.
reader_writer_roles_enabled: true
# app.region (string): The region in which the app is running.
region: ""
# app.remote_execution_api_url (URL): Overrides the default remote
# execution protocol gRPC address shown by BuildBuddy on the configuration
# screen.
remote_execution_api_url: ""
# app.restrict_bytestream_dialing (bool): If true, only allow dialing
# localhost or the configured cache backend for bytestream requests.
restrict_bytestream_dialing: false
# app.streaming_http_enabled (bool): Whether to support server-streaming
# http requests between server and web UI.
streaming_http_enabled: false
# app.strict_csp_enabled (bool): If set, set a strict CSP header.
# Violations are logged at warning level.
strict_csp_enabled: false
# app.tags_enabled (bool): Enable setting tags on invocations via
# build_metadata
tags_enabled: false
# app.tags_ui_enabled (bool): If set, expose tags data and let users
# filter by tag.
tags_ui_enabled: false
# app.target_flakes_ui_enabled (bool): If set, show some fancy new
# features for analyzing flakes.
target_flakes_ui_enabled: false
# app.test_grid_v2_enabled (bool): Whether to enable test grid V2
test_grid_v2_enabled: true
# app.test_output_manifests_enabled (bool): If set, the target page will
# render the contents of test output zips.
test_output_manifests_enabled: true
# app.timeseries_charts_in_timing_profile_enabled (bool): If set, charts
# with sampled time series data (such as CPU and memory usage) will be
# shown
timeseries_charts_in_timing_profile_enabled: true
# app.trace_fraction (float64): Fraction of requests to sample for
# tracing.
trace_fraction: 0
# app.trace_fraction_overrides ([]string): Tracing fraction override based
# on name in format name=fraction.
trace_fraction_overrides: []
# app.trace_jaeger_collector (string): Address of the Jaeger collector
# endpoint where traces will be sent.
trace_jaeger_collector: ""
# app.trace_project_id (string): Optional GCP project ID to export traces
# to. If not specified, determined from default credentials or metadata
# server if running on GCP.
trace_project_id: ""
# app.trace_service_name (string): Name of the service to associate with
# traces.
trace_service_name: ""
# app.trace_viewer_enabled (bool): Whether the new trace viewer is
# enabled.
trace_viewer_enabled: false
# app.trends_heatmap_enabled (bool): If set, enable a fancy heatmap UI for
# exploring build trends.
trends_heatmap_enabled: true
# app.trends_range_selection (bool): If set, let users drag to select time
# ranges in the trends UI.
trends_range_selection: true
# app.trends_summary_enabled (bool): If set, show the new 'summary'
# section at the top of the trends UI.
trends_summary_enabled: false
# app.usage_enabled (bool): If set, the usage page will be enabled in the
# UI.
usage_enabled: false
# app.usage_start_date (string): If set, usage data will only be viewable
# on or after this timestamp. Specified in RFC3339 format, like
# 2021-10-01T00:00:00Z
usage_start_date: ""
# app.usage_tracking_enabled (bool): If set, enable usage data collection.
usage_tracking_enabled: false
# app.use_timezone_in_heatmap_queries (bool): If enabled, use timezone
# instead of 'timezone offset' to compute day boundaries in heatmap
# queries.
use_timezone_in_heatmap_queries: true
# app.user_management_enabled (bool): If set, the user management page
# will be enabled in the UI. **DEPRECATED** This flag has no effect and
# will be removed in the future.
user_management_enabled: true
# app.user_owned_keys_enabled (bool): If true, enable user-owned API keys.
user_owned_keys_enabled: false
auth:
# auth.admin_group_id (string): ID of a group whose members can perform
# actions only accessible to server admins.
admin_group_id: ""
api_key_encryption:
# auth.api_key_encryption.encrypt_new_keys (bool): If enabled, all new
# API keys will be written in an encrypted format.
encrypt_new_keys: false
# auth.api_key_encryption.encrypt_old_keys (bool): If enabled, all
# existing unencrypted keys will be encrypted on startup. The
# unencrypted keys will remain in the database and will need to be
# cleared manually after verifying the success of the migration.
encrypt_old_keys: false
# auth.api_key_encryption.key (string): Base64-encoded 256-bit
# encryption key for API keys.
key: ""
# auth.api_key_group_cache_ttl (time.Duration): TTL for API Key to Group
# caching. Set to '0' to disable cache.
api_key_group_cache_ttl: 5m0s
# auth.disable_refresh_token (bool): If true, the offline_access scope
# which requests refresh tokens will not be requested.
disable_refresh_token: false
# auth.domain_wide_cookies (bool): If true, cookies will have domain set
# so that they are accessible on domain and all subdomains.
domain_wide_cookies: false
# auth.enable_anonymous_usage (bool): If true, unauthenticated build
# uploads will still be allowed but won't be associated with your
# organization.
enable_anonymous_usage: false
# auth.enable_scim (bool): Whether or not to enable SCIM.
enable_scim: false
# auth.enable_self_auth (bool): If true, enables a single user login via
# an oauth provider on the buildbuddy server. Recommended for use only when
# the server is behind a firewall; this option may grant anyone with access
# to the webpage admin rights to your buildbuddy installation. ** Enterprise
# only **
enable_self_auth: false
# auth.force_approval (bool): If true, when a user doesn't have a session
# (first time logging in, or manually logged out) force the auth provider
# to show the consent screen allowing the user to select an account if
# they have multiple. This isn't supported by all auth providers.
force_approval: false
# auth.https_only_cookies (bool): If true, cookies will only be set over
# https connections.
https_only_cookies: false
ip_rules:
# auth.ip_rules.allow_ipv6 (bool): If true, IPv6 rules will be
# allowed.
allow_ipv6: false
# auth.ip_rules.cache_ttl (time.Duration): Duration of time IP rules
# will be cached in memory.
cache_ttl: 5m0s
# auth.ip_rules.enable (bool): If true, IP rules will be checked
# during auth.
enable: false
# auth.jwt_claims_cache_ttl (time.Duration): TTL for JWT string to parsed
# claims caching. Set to '0' to disable cache.
jwt_claims_cache_ttl: 15s
# auth.jwt_duration (time.Duration): Maximum lifetime of the generated
# JWT.
jwt_duration: 6h0m0s
# auth.jwt_key (string): The key to use when signing JWT tokens.
jwt_key: set_the_jwt_in_config
# auth.new_jwt_key (string): If set, JWT verifications will try both this
# and the old JWT key.
new_jwt_key: ""
# auth.oauth_providers ([]oidc.OauthProvider): The list of oauth providers
# to use to authenticate.
oauth_providers: []
# For example:
# - issuer_url: "" # The issuer URL of this OIDC Provider. (type: string)
# client_id: "" # The oauth client ID. (type: string)
# client_secret: "" # The oauth client secret. (type: string)
# slug: "" # The slug of this OIDC Provider. (type: string)
saml:
# auth.saml.cert (string): PEM encoded certificate used for SAML auth.
cert: ""
# auth.saml.cert_file (string): Path to a PEM encoded certificate file
# used for SAML auth.
cert_file: ""
# auth.saml.key (string): PEM encoded certificate key used for SAML
# auth.
key: ""
# auth.saml.key_file (string): Path to a PEM encoded certificate key
# file used for SAML auth.
key_file: ""
# auth.saml.trusted_idp_cert_files ([]string): List of PEM-encoded
# trusted IDP certificates. Intended for testing and development only.
trusted_idp_cert_files: []
# auth.sign_using_new_jwt_key (bool): If true, new JWTs will be signed
# using the new JWT key.
sign_using_new_jwt_key: false
# auth.trust_xforwardedfor_header (bool): If true, client IP information
# will be retrieved from the X-Forwarded-For header. Should only be
# enabled if the BuildBuddy server is only accessible behind a trusted
# proxy.
trust_xforwardedfor_header: false
build_event_proxy:
# build_event_proxy.buffer_size (int): The number of build events to
# buffer locally when proxying build events.
buffer_size: 100
# build_event_proxy.hosts ([]string): The list of hosts to pass build
# events onto.
hosts: []
cache:
client:
# cache.client.ac_rpc_timeout (time.Duration): Maximum time a single
# Action Cache RPC can take.
ac_rpc_timeout: 15s
# cache.client.cas_rpc_timeout (time.Duration): Maximum time a single
# batch RPC or a single ByteStream chunk read can take.
cas_rpc_timeout: 1m0s
# cache.client.enable_upload_compression (bool): If true, enable
# compression of uploads to remote caches
enable_upload_compression: true
# cache.count_ttl (time.Duration): How long to go without receiving any
# cache requests for an invocation before deleting the invocation's counts
# from the metrics collector.
count_ttl: 24h0m0s
# cache.detailed_stats_enabled (bool): Whether to enable detailed stats
# recording for all cache requests.
detailed_stats_enabled: false
# cache.detailed_stats_ttl (time.Duration): How long to go without
# receiving any cache requests for an invocation before deleting the
# invocation's detailed results from the metrics collector. Has no effect
# if cache.detailed_stats_enabled is not set.
detailed_stats_ttl: 3h0m0s
# cache.directory_sizes_enabled (bool): If true, enable an RPC that
# computes the cumulative size of directories stored in the cache.
directory_sizes_enabled: false
disk:
# cache.disk.partition_mappings ([]disk.PartitionMapping)
partition_mappings: []
# For example:
# - group_id: "" # The Group ID to which this mapping applies. (type: string)
# prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
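# A hypothetical mapping that routes one group's artifacts to a dedicated
# partition (the group and partition IDs are placeholders, not defaults):
# - group_id: "GR0123456789"
# prefix: ""
# partition_id: "my-team-partition"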
# cache.disk.partitions ([]disk.Partition)
partitions: []
# For example:
# - id: "" # The ID of the partition. (type: string)
# max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# cache.disk.root_directory (string): The root directory to store all
# blobs in, if using disk based storage.
root_directory: ""
# cache.disk.use_v2_layout (bool): If enabled, files will be stored
# using the v2 layout. See disk_cache.MigrateToV2Layout for a
# description.
use_v2_layout: false
distributed_cache:
# cache.distributed_cache.cluster_size (int): The total number of
# nodes in this cluster. Required for health checking. ** Enterprise
# only **
cluster_size: 0
# cache.distributed_cache.consistent_hash_function (string): A
# consistent hash function to use when hashing data. CRC32 or SHA256
consistent_hash_function: CRC32
# cache.distributed_cache.consistent_hash_vnodes (int): The number of
# copies (virtual nodes) of each peer on the consistent hash ring
consistent_hash_vnodes: 100
# cache.distributed_cache.enable_backfill (bool): If enabled, digests
# written to avoid unavailable nodes will be backfilled when those
# nodes return
enable_backfill: true
# cache.distributed_cache.enable_local_compression_lookup (bool): If
# enabled, checks the local cache for compression support. If not set,
# distributed compression defaults to off.
enable_local_compression_lookup: true
# cache.distributed_cache.enable_local_writes (bool): If enabled,
# shortcuts distributed writes that belong to the local shard to local
# cache instead of making an RPC.
enable_local_writes: false
# cache.distributed_cache.group_name (string): A unique name for this
# distributed cache group. ** Enterprise only **
group_name: ""
# cache.distributed_cache.listen_addr (string): The address to listen
# for local BuildBuddy distributed cache traffic on.
listen_addr: ""
# cache.distributed_cache.lookaside_cache_size_bytes (int64): If > 0, the
# lookaside cache will be enabled
lookaside_cache_size_bytes: 0
# cache.distributed_cache.lookaside_cache_ttl (time.Duration): How long to
# hold entries in the lookaside cache. Should be much shorter than
# atime_update_threshold
lookaside_cache_ttl: 1m0s
# cache.distributed_cache.max_hinted_handoffs_per_peer (int64): The
# maximum number of hinted handoffs to keep in memory. Each hinted
# handoff is a digest (~64 bytes), prefix, and peer (40 bytes). So
# keeping around 100000 of these means an extra 10MB per peer.
max_hinted_handoffs_per_peer: 100000
# cache.distributed_cache.max_lookaside_entry_bytes (int64): The
# biggest allowed entry size in the lookaside cache.
max_lookaside_entry_bytes: 10000
# cache.distributed_cache.new_consistent_hash_function (string): A
# consistent hash function to use when hashing data. CRC32 or SHA256
new_consistent_hash_function: CRC32
# cache.distributed_cache.new_consistent_hash_vnodes (int): The number
# of copies of each peer on the new consistent hash ring
new_consistent_hash_vnodes: 100
# cache.distributed_cache.new_nodes ([]string): The new nodeset to add
# data to. Useful for migrations. ** Enterprise only **
new_nodes: []
# cache.distributed_cache.new_nodes_read_only (bool): If true, only
# attempt to read from the newNodes set; do not write to them yet
new_nodes_read_only: false
# cache.distributed_cache.nodes ([]string): The hardcoded list of peer
# distributed cache nodes. If this is set, redis_target will be
# ignored. ** Enterprise only **
nodes: []
# cache.distributed_cache.redis_target (string): Redis target used for
# discovering distributed cache replicas. Target can be provided
# as either a redis connection URI or a host:port pair. URI schemas
# supported: redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE]
# or unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] **
# Enterprise only **
redis_target: ""
# cache.distributed_cache.replication_factor (int): How many total
# servers the data should be replicated to. Must be >= 1. **
# Enterprise only **
replication_factor: 1
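# A minimal sketch of a three-node distributed cache, assuming each app runs
# its own local cache; the addresses and port below are placeholders, not
# defaults (** Enterprise only **). Within this section it might look like:
# listen_addr: "0.0.0.0:2973"
# cluster_size: 3
# replication_factor: 2
# nodes:
# - "cache-0.example.internal:2973"
# - "cache-1.example.internal:2973"
# - "cache-2.example.internal:2973"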
# cache.enable_tree_caching (bool): If true, cache GetTree responses (full
# and partial)
enable_tree_caching: true
gcs:
# cache.gcs.bucket (string): The name of the GCS bucket to store cache
# files in.
bucket: ""
# cache.gcs.credentials_file (string): A path to a JSON credentials
# file that will be used to authenticate to GCS.
credentials_file: ""
# cache.gcs.project_id (string): The Google Cloud project ID of the
# project owning the above credentials and GCS bucket.
project_id: ""
# cache.gcs.ttl_days (int64): The period after which cache files
# should be TTLd. Disabled if 0.
ttl_days: 0
# cache.in_memory (bool): Whether or not to use the in_memory cache.
in_memory: false
# cache.max_direct_write_size_bytes (int64): For bytestream requests
# smaller than this size, write straight to the cache without checking if
# the entry already exists.
max_direct_write_size_bytes: 16384
# cache.max_size_bytes (int64): How big to allow the cache to be (in
# bytes).
max_size_bytes: 10000000000
# cache.max_tree_cache_set_duration (time.Duration): The max amount of
# time to wait for unfinished tree cache entries to be set.
max_tree_cache_set_duration: 1s
# cache.memcache_targets ([]string): Deprecated. Use Redis Target instead.
memcache_targets: []
# cache.migration (migration_cache.MigrationConfig): Config to specify the
# details of a cache migration
migration:
src: null # (type: migration_cache.CacheConfig)
# For example:
# disk: null # (type: migration_cache.DiskCacheConfig)
# # For example:
# # root_directory: "" # (type: string)
# # partitions: [] # (type: []disk.Partition)
# # # For example:
# # # - id: "" # The ID of the partition. (type: string)
# # # max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# # # encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# #
# # partition_mappings: [] # (type: []disk.PartitionMapping)
# # # For example:
# # # - group_id: "" # The Group ID to which this mapping applies. (type: string)
# # # prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# # # partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# #
# # use_v2_layout: false # (type: bool)
# #
#
# pebble: null # (type: migration_cache.PebbleCacheConfig)
# # For example:
# # name: "" # (type: string)
# # root_directory: "" # (type: string)
# # partitions: [] # (type: []disk.Partition)
# # # For example:
# # # - id: "" # The ID of the partition. (type: string)
# # # max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# # # encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# #
# # partition_mappings: [] # (type: []disk.PartitionMapping)
# # # For example:
# # # - group_id: "" # The Group ID to which this mapping applies. (type: string)
# # # prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# # # partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# #
# # max_size_bytes: 0 # (type: int64)
# # block_cache_size_bytes: 0 # (type: int64)
# # max_inline_file_size_bytes: 0 # (type: int64)
# # atime_update_threshold: null # (type: time.Duration)
# # atime_buffer_size: null # (type: int)
# # min_eviction_age: null # (type: time.Duration)
# # min_bytes_auto_zstd_compression: 0 # (type: int64)
# # average_chunk_size_bytes: 0 # (type: int)
# # clear_cache_on_startup: false # (type: bool)
# # active_key_version: null # (type: int64)
# #
#
dest: null # (type: migration_cache.CacheConfig)
# For example:
# disk: null # (type: migration_cache.DiskCacheConfig)
# # For example:
# # root_directory: "" # (type: string)
# # partitions: [] # (type: []disk.Partition)
# # # For example:
# # # - id: "" # The ID of the partition. (type: string)
# # # max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# # # encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# #
# # partition_mappings: [] # (type: []disk.PartitionMapping)
# # # For example:
# # # - group_id: "" # The Group ID to which this mapping applies. (type: string)
# # # prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# # # partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# #
# # use_v2_layout: false # (type: bool)
# #
#
# pebble: null # (type: migration_cache.PebbleCacheConfig)
# # For example:
# # name: "" # (type: string)
# # root_directory: "" # (type: string)
# # partitions: [] # (type: []disk.Partition)
# # # For example:
# # # - id: "" # The ID of the partition. (type: string)
# # # max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# # # encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# #
# # partition_mappings: [] # (type: []disk.PartitionMapping)
# # # For example:
# # # - group_id: "" # The Group ID to which this mapping applies. (type: string)
# # # prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# # # partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# #
# # max_size_bytes: 0 # (type: int64)
# # block_cache_size_bytes: 0 # (type: int64)
# # max_inline_file_size_bytes: 0 # (type: int64)
# # atime_update_threshold: null # (type: time.Duration)
# # atime_buffer_size: null # (type: int)
# # min_eviction_age: null # (type: time.Duration)
# # min_bytes_auto_zstd_compression: 0 # (type: int64)
# # average_chunk_size_bytes: 0 # (type: int)
# # clear_cache_on_startup: false # (type: bool)
# # active_key_version: null # (type: int64)
# #
#
double_read_percentage: 0 # (type: float64)
decompress_percentage: 0 # (type: float64)
log_not_found_errors: false # (type: bool)
copy_chan_buffer_size: 0 # (type: int)
copy_chan_full_warning_interval_min: 0 # (type: int64)
max_copies_per_sec: 0 # (type: int)
num_copy_workers: 0 # (type: int)
async_dest_writes: false # (type: bool)
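# A hypothetical migration from an existing disk cache to a new pebble cache
# (directories and tuning values below are placeholders, not defaults):
# migration:
# src:
# disk:
# root_directory: "/data/disk_cache"
# dest:
# pebble:
# root_directory: "/data/pebble_cache"
# double_read_percentage: 0.05
# log_not_found_errors: true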
pebble:
# cache.pebble.active_key_version (int64): The key version new data
# will be written with. If negative, will write to the highest
# existing version in the database, or the highest known version if a
# new database is created.
active_key_version: -1
# cache.pebble.atime_buffer_size (int): Buffer up to this many atime
# updates in a channel before dropping atime updates
atime_buffer_size: 100000
# cache.pebble.atime_update_threshold (time.Duration): Don't update
# atime if it was updated more recently than this
atime_update_threshold: 10m0s
# cache.pebble.average_chunk_size_bytes (int): Average size of chunks
# stored in the cache. Disabled if 0.
average_chunk_size_bytes: 0
# cache.pebble.background_repair_frequency (time.Duration): How
# frequently to run period background repair tasks.
background_repair_frequency: 24h0m0s
# cache.pebble.background_repair_qps_limit (int): QPS limit for
# background repair modifications.
background_repair_qps_limit: 100
# cache.pebble.block_cache_size_bytes (int64): How much ram to give
# the block cache
block_cache_size_bytes: 1000000000
# cache.pebble.copy_partition_data (string): If set, all data will be
# copied from the source partition to the destination partition on
# startup. The cache will not serve data while the copy is in
# progress. Specified in the format
# source_partition_id:destination_partition_id.
copy_partition_data: ""
# cache.pebble.delete_buffer_size (int): Buffer up to this many
# samples for eviction
delete_buffer_size: 20
# cache.pebble.deletes_per_eviction (int): Maximum number of keys to
# delete in one eviction attempt before resampling.
deletes_per_eviction: 5
# cache.pebble.dir_deletion_delay (time.Duration): How old directories
# must be before being eligible for deletion when empty
dir_deletion_delay: 1h0m0s
# cache.pebble.enable_table_bloom_filter (bool): If true, write bloom
# filter data with pebble SSTables.
enable_table_bloom_filter: false
# cache.pebble.eviction_rate_limit (int): Maximum number of entries to
# evict per second (per partition).
eviction_rate_limit: 300
# cache.pebble.force_calculate_metadata (bool): If set, partition size
# and counts will be calculated even if cached information is
# available.
force_calculate_metadata: false
# cache.pebble.force_compaction (bool): If set, compact the DB when
# it's created
force_compaction: false
# cache.pebble.include_metadata_size (bool): If true, include metadata
# size
include_metadata_size: false
# cache.pebble.max_inline_file_size_bytes (int64): Files smaller than
# this may be inlined directly into pebble
max_inline_file_size_bytes: 1024
# cache.pebble.migration_qps_limit (int): QPS limit for data version
# migration
migration_qps_limit: 50
# cache.pebble.min_bytes_auto_zstd_compression (int64): Blobs larger
# than this will be zstd compressed before written to disk.
min_bytes_auto_zstd_compression: 100
# cache.pebble.min_eviction_age (time.Duration): Don't evict anything
# unless it's been idle for at least this long
min_eviction_age: 6h0m0s
# cache.pebble.name (string): The name used in reporting cache metrics
# and status.
name: pebble_cache
# cache.pebble.num_delete_workers (int): Number of deletes in parallel
num_delete_workers: 2
# cache.pebble.orphan_delete_dry_run (bool): If set, log orphaned
# files instead of deleting them
orphan_delete_dry_run: true
# cache.pebble.partition_mappings ([]disk.PartitionMapping)
partition_mappings: []
# For example:
# - group_id: "" # The Group ID to which this mapping applies. (type: string)
# prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# cache.pebble.partitions ([]disk.Partition)
partitions: []
# For example:
# - id: "" # The ID of the partition. (type: string)
# max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# cache.pebble.root_directory (string): The root directory to store
# the database in.
root_directory: ""
# cache.pebble.sample_buffer_size (int): Buffer up to this many
# samples for eviction sampling
sample_buffer_size: 8000
# cache.pebble.sample_pool_size (int): How many deletion candidates to
# maintain between evictions
sample_pool_size: 500
# cache.pebble.sampler_iter_refresh_peroid (time.Duration): How often
# we refresh the iterator in the sampler
sampler_iter_refresh_peroid: 5m0s
# cache.pebble.samples_per_batch (int): How many keys we read forward
# every time we get a random key.
samples_per_batch: 10000
# cache.pebble.samples_per_eviction (int): How many records to sample
# on each eviction
samples_per_eviction: 20
# cache.pebble.scan_for_missing_files (bool): If set, scan all keys
# and check if external files are missing on disk. Deletes keys with
# missing files.
scan_for_missing_files: false
# cache.pebble.scan_for_orphaned_files (bool): If true, scan for
# orphaned files
scan_for_orphaned_files: false
# cache.pebble.warn_about_leaks (bool): If set, warn about leaked DB
# handles
warn_about_leaks: true
raft:
# cache.raft.atime_buffer_size (int): Buffer up to this many atime
# updates in a channel before dropping atime updates
atime_buffer_size: 100000
# cache.raft.atime_update_threshold (time.Duration): Don't update
# atime if it was updated more recently than this
atime_update_threshold: 3h0m0s
# cache.raft.atime_write_batch_size (int): Buffer this many writes
# before writing atime data
atime_write_batch_size: 100
# cache.raft.clear_cache_on_startup (bool): If set, remove all raft +
# cache data on start
clear_cache_on_startup: false
# cache.raft.clear_prev_cache_on_startup (bool): If set, remove all
# raft + cache data from previous run on start
clear_prev_cache_on_startup: false
# cache.raft.client_session_lifetime (time.Duration): The duration of
# a client session before it's reset
client_session_lifetime: 1h0m0s
# cache.raft.client_session_ttl (time.Duration): The duration we keep
# the sessions stored.
client_session_ttl: 24h0m0s
# cache.raft.dead_store_timeout (time.Duration): The amount of time without
# receiving an alive status from a node before the store is considered
# dead
dead_store_timeout: 5m0s
# cache.raft.delete_buffer_size (int): Buffer up to this many samples
# for eviction
delete_buffer_size: 20
# cache.raft.deletes_per_eviction (int): Maximum number of keys to delete
# in one eviction attempt before resampling.
deletes_per_eviction: 5
# cache.raft.enable_driver (bool): If true, enable placement driver
enable_driver: true
# cache.raft.enable_txn_cleanup (bool): If true, clean up stuck
# transactions periodically
enable_txn_cleanup: true
# cache.raft.entries_between_usage_checks (int): Re-check usage after
# this many updates
entries_between_usage_checks: 1000
# cache.raft.eviction_batch_size (int): Buffer this many writes before
# delete
eviction_batch_size: 100
# cache.raft.eviction_rate_limit (int): Maximum number of entries to
# evict per second (per partition).
eviction_rate_limit: 300
# cache.raft.grpc_addr (string): The address to listen for internal
# API traffic on. Ex. '1993'
grpc_addr: ""
# cache.raft.http_addr (string): The address to listen for HTTP raft
# traffic. Ex. '1992'
http_addr: ""
# cache.raft.leader_updated_chan_size (int64): The length of the
# leader updated channel. Should be greater than the max number of
# ranges on a node.
leader_updated_chan_size: 10000
# cache.raft.local_size_update_period (time.Duration): How often we
# update local size information.
local_size_update_period: 10s
# cache.raft.max_range_size_bytes (int64): If set to a value greater
# than 0, ranges will be split until smaller than this size
max_range_size_bytes: 100000000
# cache.raft.min_eviction_age (time.Duration): Don't evict anything
# unless it's been idle for at least this long
min_eviction_age: 6h0m0s
# cache.raft.min_meta_range_replicas (int): The minimum number of
# replicas each range for meta range
min_meta_range_replicas: 5
# cache.raft.min_replicas_per_range (int): The minimum number of
# replicas each range should have
min_replicas_per_range: 3
# cache.raft.new_replica_grace_period (time.Duration): The amount of
# time we allow for a new replica to catch up to the leader before we
# start to consider it behind.
new_replica_grace_period: 5m0s
# cache.raft.node_ready_chan_size (int64): The length of the node
# ready channel
node_ready_chan_size: 10000
# cache.raft.partition_mappings ([]disk.PartitionMapping)
partition_mappings: []
# For example:
# - group_id: "" # The Group ID to which this mapping applies. (type: string)
# prefix: "" # The remote instance name prefix used to select this partition. (type: string)
# partition_id: "" # The partition to use if the Group ID and prefix match. (type: string)
# cache.raft.partition_usage_delta_bytes_threshold (int): Gossip
# partition usage information if it has changed by more than this
# amount since the last gossip.
partition_usage_delta_bytes_threshold: 100000000
# cache.raft.partitions ([]disk.Partition)
partitions: []
# For example:
# - id: "" # The ID of the partition. (type: string)
# max_size_bytes: 0 # Maximum size of the partition. (type: int64)
# encryption_supported: false # Whether encrypted data can be stored on this partition. (type: bool)
# cache.raft.replica_scan_interval (time.Duration): The interval we
# wait to check if the replicas need to be queued for replication
replica_scan_interval: 1m0s
# cache.raft.root_directory (string): The root directory to use for
# storing cached data.
root_directory: ""
# cache.raft.sample_buffer_size (int): Buffer up to this many samples
# for eviction sampling
sample_buffer_size: 100
# cache.raft.sample_pool_size (int): How many deletion candidates to
# maintain between evictions
sample_pool_size: 500
# cache.raft.sampler_iter_refresh_peroid (time.Duration): How often we
# refresh the iterator in the sampler
sampler_iter_refresh_peroid: 5m0s
# cache.raft.samples_per_batch (int): How many keys we read forward
# every time we get a random key.
samples_per_batch: 10000
# cache.raft.samples_per_eviction (int): How many records to sample on
# each eviction
samples_per_eviction: 20
# cache.raft.suspect_store_duration (time.Duration): The amount of
# time we consider a node suspect after it becomes unavailable
suspect_store_duration: 30s
# cache.raft.zombie_min_duration (time.Duration): The minimum duration
# a replica must remain in a zombie state to be considered a zombie.
zombie_min_duration: 1m0s
# cache.raft.zombie_node_scan_interval (time.Duration): How often to check
# whether a replica is a zombie. Set to 0 to disable.
zombie_node_scan_interval: 10s
redis:
# cache.redis.max_value_size_bytes (int64): The maximum value size to
# cache in redis (in bytes).
max_value_size_bytes: 10000000
# cache.redis.redis_target (string): A redis target for improved
# Caching/RBE performance. Target can be provided as either a redis
# connection URI or a host:port pair. URI schemas supported:
# redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or
# unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise
# only **
redis_target: ""
sharded:
# cache.redis.sharded.password (string): Redis password
password: ""
# cache.redis.sharded.shards ([]string): Ordered list of Redis
# shard addresses.
shards: []
# cache.redis.sharded.username (string): Redis username
username: ""
# cache.redis_target (string): A redis target for improved Caching/RBE
# performance. Target can be provided as either a redis connection URI or
# a host:port pair. URI schemas supported:
# redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or
# unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise only
# **
redis_target: ""
s3:
# cache.s3.bucket (string): The AWS S3 bucket to store files in.
bucket: ""
# cache.s3.credentials_profile (string): A custom credentials profile
# to use.
credentials_profile: ""
# cache.s3.disable_ssl (bool): Disables the use of SSL, useful for
# configuring the use of MinIO. **DEPRECATED** Specify a non-HTTPS
# endpoint instead.
disable_ssl: false
# cache.s3.endpoint (string): The AWS endpoint to use, useful for
# configuring the use of MinIO.
endpoint: ""
# cache.s3.path_prefix (string): Prefix inside the AWS S3 bucket to
# store files
path_prefix: ""
# cache.s3.region (string): The AWS region.
region: ""
# cache.s3.role_arn (string): The role ARN to use for web identity
# auth.
role_arn: ""
# cache.s3.role_session_name (string): The role session name to use
# for web identity auth.
role_session_name: ""
# cache.s3.s3_force_path_style (bool): Force path style urls for
# objects, useful for configuring the use of MinIO.
s3_force_path_style: false
# cache.s3.static_credentials_id (string): Static credentials ID to
# use, useful for configuring the use of MinIO.
static_credentials_id: ""
# cache.s3.static_credentials_secret (string): Static credentials
# secret to use, useful for configuring the use of MinIO.
static_credentials_secret: ""
# cache.s3.static_credentials_token (string): Static credentials token
# to use, useful for configuring the use of MinIO.
static_credentials_token: ""
# cache.s3.ttl_days (int): The period after which cache files should
# be TTLd. Disabled if 0.
ttl_days: 0
# cache.s3.web_identity_token_file (string): The file path to the web
# identity token file.
web_identity_token_file: ""
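# For example, a hypothetical MinIO-backed cache (endpoint, bucket, and
# credentials are placeholders, not defaults) might set, within this
# section:
# endpoint: "http://minio.example.internal:9000"
# bucket: "buildbuddy-cache"
# region: "us-east-1"
# s3_force_path_style: true
# static_credentials_id: "MINIO_ACCESS_KEY"
# static_credentials_secret: "MINIO_SECRET_KEY"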
# cache.tree_cache_min_descendents (int): The min number of descendents a
# node must have in order to be cached
tree_cache_min_descendents: 3
# cache.tree_cache_min_level (int): The min level at which the tree may be
# cached. 0 is the root
tree_cache_min_level: 2
# cache.tree_cache_seed (string): If set, hash this with digests before
# caching / reading from tree cache
tree_cache_seed: treecache-09032024
# cache.tree_cache_splitting (bool): If true, try to split up TreeCache
# entries to save space.
tree_cache_splitting: false
# cache.tree_cache_splitting_min_size (int): Minimum number of files in a
# subtree before we'll split it in the treecache.
tree_cache_splitting_min_size: 10000
# cache.tree_cache_write_probability (float64): Write to the tree cache
# with this probability
tree_cache_write_probability: 0.1
# cache.zstd_transcoding_enabled (bool): Whether to accept requests to
# read/write zstd-compressed blobs, compressing/decompressing
# outgoing/incoming blobs on the fly.
zstd_transcoding_enabled: true
crypter:
# crypter.key_reencrypt_interval (time.Duration): How frequently keys will
# be re-encrypted (to support key rotation).
key_reencrypt_interval: 6h0m0s
# crypter.key_ttl (time.Duration): The maximum amount of time a key can be
# cached without being re-verified before it is considered invalid.
key_ttl: 10m0s
database:
# database.advanced_data_source (db.AdvancedConfig): Alternative to the
# database.data_source flag that allows finer control over database
# settings as well as allowing use of AWS IAM credentials. For most users,
# database.data_source is a simpler configuration method.
advanced_data_source:
driver: "" # The driver to use: one of sqlite3, mysql, or postgresql. (type: string)
endpoint: "" # Typically the host:port combination of the database server. (type: string)
username: "" # Username to use when connecting. (type: string)
password: "" # Password to use when connecting. Not used if AWS IAM is enabled. (type: string)
db_name: "" # The name of the database to use for BuildBuddy data. (type: string)
region: "" # Region of the database instance. Required if AWS IAM is enabled. (type: string)
use_aws_iam: false # If enabled, AWS IAM authentication is used instead of fixed credentials. Make sure the endpoint includes the port, otherwise IAM-based auth will fail. (type: bool)
params: "" # Optional parameters to pass to the database driver (in format key1=val1&key2=val2) (type: string)
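# For example, a hypothetical MySQL configuration using AWS IAM
# authentication (endpoint, names, and region below are placeholders, not
# defaults):
# advanced_data_source:
# driver: "mysql"
# endpoint: "my-db.example.us-west-2.rds.amazonaws.com:3306"
# username: "buildbuddy"
# db_name: "buildbuddy_db"
# region: "us-west-2"
# use_aws_iam: true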
# database.advanced_read_replica (db.AdvancedConfig): Advanced alternative
# to database.read_replica. Refer to database.advanced for more
# information.
advanced_read_replica:
driver: "" # The driver to use: one of sqlite3, mysql, or postgresql. (type: string)
endpoint: "" # Typically the host:port combination of the database server. (type: string)
username: "" # Username to use when connecting. (type: string)
password: "" # Password to use when connecting. Not used if AWS IAM is enabled. (type: string)
db_name: "" # The name of the database to use for BuildBuddy data. (type: string)
region: "" # Region of the database instance. Required if AWS IAM is enabled. (type: string)
use_aws_iam: false # If enabled, AWS IAM authentication is used instead of fixed credentials. Make sure the endpoint includes the port, otherwise IAM-based auth will fail. (type: bool)
params: "" # Optional parameters to pass to the database driver (in format key1=val1&key2=val2) (type: string)
# database.conn_max_lifetime_seconds (int): The maximum lifetime of a
# connection to the db
conn_max_lifetime_seconds: 0
# database.data_source (string): The SQL database to connect to, specified
# as a connection string.
data_source: sqlite3:///tmp/buildbuddy.db
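# For example, a hypothetical MySQL connection string (host, credentials,
# and database name are placeholders) typically takes the form:
# data_source: "mysql://buildbuddy_user:PASSWORD@tcp(12.34.56.78:3306)/buildbuddy_db"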
# database.log_queries (bool): If true, log all queries
log_queries: false
# database.max_idle_conns (int): The maximum number of idle connections to
# maintain to the db
max_idle_conns: 0
# database.max_open_conns (int): The maximum number of open connections to
# maintain to the db
max_open_conns: 0
# database.print_schema_changes_and_exit (bool): If set, print schema
# changes from auto-migration, then exit the program.
print_schema_changes_and_exit: false
# database.read_replica (string): A secondary, read-only SQL database to
# connect to, specified as a connection string.
read_replica: ""
# database.slow_query_threshold (time.Duration): Queries longer than this
# duration will be logged with a 'Slow SQL' warning.
slow_query_threshold: 500ms
# database.stats_poll_interval (time.Duration): How often to poll the DB
# client for connection stats (default: '5s').
stats_poll_interval: 5s
executor:
# executor.affinity_routing_enabled (bool): Enables affinity routing,
# which attempts to route actions to the executor that most recently ran
# that action.
affinity_routing_enabled: true
# executor.container_registries ([]oci.Registry)
container_registries: []
# For example:
# - hostnames: [] # (type: []string)
# username: "" # (type: string)
# password: "" # (type: string)
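# A hypothetical private registry entry (hostname and credentials are
# placeholders, not defaults):
# - hostnames: ["registry.example.internal"]
# username: "ci-pull-user"
# password: "REGISTRY_PULL_PASSWORD"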
# executor.container_registry_region (string): All occurrences of
# '{{region}}' in container image names will be replaced with this string,
# if specified.
container_registry_region: ""
# executor.custom_resources ([]resources.CustomResource): Optional
# allocatable custom resources. This works similarly to bazel's
# local_extra_resources flag. Request these resources in exec_properties
# using the 'resources:<name>': '<value>' syntax.
custom_resources: []
# For example:
# - name: "" # (type: string)
# value: 0 # (type: float64)
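# A hypothetical GPU resource (the name and count are placeholders, not
# defaults); actions would then request it via the exec_property
# 'resources:gpu': '1' as described above:
# - name: "gpu"
# value: 4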
# executor.default_image (string): The default docker image to use to warm
# up executors or if no platform property is set. Ex:
# gcr.io/flame-public/executor-docker-default:enterprise-v1.5.4
default_image: gcr.io/flame-public/executor-docker-default:enterprise-v1.6.0
# executor.default_isolation_type (string): The default workload isolation
# type when no type is specified in an action. If not set, we use the
# first of the following that is set: docker, podman, firecracker, or none
# (bare).
default_isolation_type: ""
# executor.default_xcode_version (string): Sets the default Xcode version
# number to use if an action doesn't specify one. If not set,
# /Applications/Xcode.app/ is used.
default_xcode_version: ""
# executor.docker_socket (string): If set, run execution commands in
# docker using the provided socket.
docker_socket: ""
# executor.enable_bare_runner (bool): Enables running execution commands
# directly on the host without isolation.
enable_bare_runner: false
# executor.enable_firecracker (bool): Enables running execution commands
# inside of firecracker VMs
enable_firecracker: false
# executor.enable_local_snapshot_sharing (bool): Enables local snapshot
# sharing for firecracker VMs. Also requires that
# executor.firecracker_enable_nbd is true.
enable_local_snapshot_sharing: false
# executor.enable_oci (bool): Enables running execution commands using an
# OCI runtime directly.
enable_oci: false
# executor.enable_podman (bool): Enables running execution commands inside
# podman containers.
enable_podman: false
# executor.enable_remote_snapshot_sharing (bool): Enables remote snapshot
# sharing for firecracker VMs. Also requires that
# executor.firecracker_enable_nbd and executor.firecracker_enable_uffd are
# true.
enable_remote_snapshot_sharing: false
# executor.enable_sandbox (bool): Enables running execution commands
# inside of sandbox-exec.
enable_sandbox: false
# executor.enable_vfs (bool): Whether FUSE based filesystem is enabled.
enable_vfs: false
# executor.extra_env_vars ([]string): Additional environment variables to
# pass to remotely executed actions, e.g. MY_ENV_VAR=foo
extra_env_vars: []
# executor.forced_network_isolation_type (string): If set, run all
# commands that require networking with this isolation type
forced_network_isolation_type: ""
# executor.host_id (string): Optional: Allows for manual specification of
# an executor's host id. If not set, a random UUID will be used.
host_id: ""
# executor.image_pull_timeout (time.Duration): How long to wait for the
# container image to be pulled before returning an Unavailable (retryable)
# error for an action execution attempt. Applies to all isolation types
# (docker, firecracker, etc.)
image_pull_timeout: 5m0s
# executor.memory_bytes (int64): Optional maximum memory to allocate to
# execution tasks (approximate). Cannot set both this option and the
# SYS_MEMORY_BYTES env var.
memory_bytes: 0
# executor.millicpu (int64): Optional maximum CPU milliseconds to allocate
# to execution tasks (approximate). Cannot set both this option and the
# SYS_CPU env var.
millicpu: 0
# executor.mmap_memory_bytes (int64): Maximum memory to be allocated
# towards mmapped files for Firecracker copy-on-write functionality. This
# is subtracted from the configured memory_bytes. Has no effect if
# firecracker is disabled or snapshot sharing is disabled.
mmap_memory_bytes: 10000000000
# executor.record_usage_timelines (bool): Capture resource usage
# timeseries data in UsageStats for each task.
record_usage_timelines: false
# executor.remote_snapshot_readonly (bool): Disables remote snapshot
# writes.
remote_snapshot_readonly: false
# executor.snaploader_eager_fetch_concurrency (int): Max number of
# goroutines allowed to run concurrently when eagerly fetching chunks.
snaploader_eager_fetch_concurrency: 32
# executor.snaploader_max_eager_fetches_per_sec (int): Max number of
# chunks snaploader can eagerly fetch in the background per second.
snaploader_max_eager_fetches_per_sec: 1000
# executor.verbose_snapshot_logs (bool): Enables extra-verbose snapshot
# logs (even at debug log level)
verbose_snapshot_logs: false
gcp:
# gcp.client_id (string): The client id to use for GCP linking.
client_id: ""
# gcp.client_secret (string): The client secret to use for GCP linking.
client_secret: ""
github:
# github.access_token (string): The GitHub access token used to post
# GitHub commit statuses. ** Enterprise only **
access_token: ""
app:
actions:
# github.app.actions.runner_enabled (bool): Whether to enable the
# buildbuddy-hosted runner for GitHub actions.
runner_enabled: false
# github.app.actions.runner_label (string): Label to apply to the
# actions runner. This is what 'runs-on' needs to be set to in
# GitHub workflow YAML in order to run on this BuildBuddy
# instance.
runner_label: buildbuddy
# github.app.actions.runner_pool_name (string): Executor pool name
# to use for GitHub actions runner.
runner_pool_name: ""
# github.app.client_id (string): GitHub app OAuth client ID.
client_id: ""
# github.app.client_secret (string): GitHub app OAuth client secret.
client_secret: ""
# github.app.enabled (bool): Whether to enable the BuildBuddy GitHub
# app server.
enabled: false
# github.app.id (string): GitHub app ID.
id: ""
# github.app.private_key (string): GitHub app private key.
private_key: ""
# github.app.public_link (string): GitHub app installation URL.
public_link: ""
# github.app.review_mutates_enabled (bool): Perform mutations of PRs
# via the GitHub API.
review_mutates_enabled: false
# github.app.webhook_secret (string): GitHub app webhook secret used
# to verify that webhook payload contents were sent by GitHub.
webhook_secret: ""
# github.client_id (string): The client ID of your GitHub Oauth App. **
# Enterprise only **
client_id: ""
# github.client_secret (string): The client secret of your GitHub Oauth
# App. ** Enterprise only **
client_secret: ""
# github.enterprise_host (string): The Github enterprise hostname to use
# if using GitHub enterprise server, not including https:// and no
# trailing slash.
enterprise_host: ""
# github.jwt_key (string): The key to use when signing JWT tokens for
# github auth.
jwt_key: ""
# github.status_name_suffix (string): Suffix to be appended to all
# reported GitHub status names. Useful for differentiating BuildBuddy
# deployments. For example: '(dev)' ** Enterprise only **
status_name_suffix: ""
# github.status_per_test_target (bool): If true, report status per test
# target. ** Enterprise only **
status_per_test_target: false
gossip:
# gossip.join ([]string): The nodes to join/gossip with. Ex.
# '1.2.3.4:1991,2.3.4.5:1991...'
join: []
# gossip.listen_addr (string): The address to listen for gossip traffic
# on. Ex. 'localhost:1991'
listen_addr: ""
# gossip.node_name (string): The gossip node's name. If empty, defaults
# to host_id.
node_name: ""
# gossip.secret_key (string): The value should be either 16, 24, or 32
# bytes.
secret_key: ""
grpc_client:
# grpc_client.enable_pool_cache (bool): Whether or not to enable the
# connection pool cache.
enable_pool_cache: false
# grpc_client.pool_size (int): Number of connections to create to each
# target.
pool_size: 15
integrations:
invocation_upload:
# integrations.invocation_upload.aws_credentials (string): Credentials
# CSV file for Amazon s3 invocation upload webhook. ** Enterprise only
# **
aws_credentials: ""
# integrations.invocation_upload.enabled (bool): Whether to upload
# webhook data to the webhook URL configured per-Group. ** Enterprise
# only **
enabled: false
# integrations.invocation_upload.gcs_credentials (string): Credentials
# JSON for the Google service account used to authenticate when GCS is
# used as the invocation upload target. ** Enterprise only **
gcs_credentials: ""
slack:
# integrations.slack.webhook_url (string): A Slack webhook url to post
# build update messages to.
webhook_url: ""
keystore:
aws:
# keystore.aws.credentials (string): AWS CSV credentials that will be
# used to authenticate. If not specified, credentials will be
# retrieved as described by
# https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html
credentials: ""
# keystore.aws.credentials_file (string): A path to a AWS CSV
# credentials file that will be used to authenticate. If not
# specified, credentials will be retrieved as described by
# https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html
credentials_file: ""
# keystore.aws.enabled (bool): Whether AWS KMS support should be
# enabled. Implicitly enabled if the master key URI references an AWS
# KMS URI.
enabled: false
gcp:
# keystore.gcp.credentials (string): GCP JSON credentials that will be
# used to authenticate.
credentials: ""
# keystore.gcp.credentials_file (string): A path to a gcp JSON
# credentials file that will be used to authenticate.
credentials_file: ""
# keystore.gcp.enabled (bool): Whether GCP KMS support should be
# enabled. Implicitly enabled if the master key URI references a GCP
# KMS URI.
enabled: false
# keystore.local_insecure_kms_directory (string): For development only. If
# set, keys in format local-insecure-kms://[id] are read from this
# directory.
local_insecure_kms_directory: ""
# keystore.master_key_uri (string): The master key URI (see tink docs for
# example)
master_key_uri: ""
monitoring:
basic_auth:
# monitoring.basic_auth.password (string): Optional password for basic
# auth on the monitoring port.
password: ""
# monitoring.basic_auth.username (string): Optional username for basic
# auth on the monitoring port.
username: ""
olap_database:
# olap_database.auto_migrate_db (bool): If true, attempt to automigrate
# the db when connecting
auto_migrate_db: true
# olap_database.cluster_name (string): The cluster name of the database
cluster_name: '{cluster}'
# olap_database.conn_max_lifetime (time.Duration): The maximum lifetime of
# a connection to clickhouse
conn_max_lifetime: 0s
# olap_database.data_source (string): The clickhouse database to connect
# to, specified as a connection string
data_source: ""
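# For example, a hypothetical ClickHouse connection string (host,
# credentials, and database name are placeholders; consult the ClickHouse
# driver documentation for the exact DSN syntax) might look like:
# data_source: "clickhouse://buildbuddy_user:PASSWORD@12.34.56.78:9000/buildbuddy_db"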
# olap_database.enable_data_replication (bool): If true, data replication
# is enabled.
enable_data_replication: false
# olap_database.max_idle_conns (int): The maximum number of idle
# connections to maintain to the db
max_idle_conns: 0
# olap_database.max_open_conns (int): The maximum number of open
# connections to maintain to the db
max_open_conns: 0
# olap_database.print_schema_changes_and_exit (bool): If set, print schema
# changes from auto-migration, then exit the program.
print_schema_changes_and_exit: false
# olap_database.replica_name (string): The replica name of the table in
# zookeeper
replica_name: '{replica}'
# olap_database.zoo_path (string): The path to the table name in
# zookeeper, used to set up data replication
zoo_path: /clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}
openai:
# openai.api_key (string): OpenAI API key
api_key: ""
# openai.model (string): OpenAI model name to use. Find them here:
# https://platform.openai.com/docs/models
model: gpt-3.5-turbo
org:
# org.domain (string): Your organization's email domain. If this is set,
# only users with email addresses in this domain will be able to register
# for a BuildBuddy account.
domain: ""
# org.name (string): The name of your organization, which is displayed on
# your organization's build history.
name: Organization
prometheus:
# prometheus.address (string): The address of the Prometheus HTTP API
address: ""
registry:
# registry.backend (string): The registry backend to forward requests to
backend: https://bcr.bazel.build/
# registry.enabled (bool): Whether the registry service should be enabled.
enabled: false
remote_execution:
# remote_execution.action_merging_hedge_count (int): When action merging is
# enabled, this flag controls how many additional 'hedged' attempts of an
# action are run in the background. Note that even hedged actions are run at
# most once per execution request.
action_merging_hedge_count: 0
# remote_execution.action_merging_hedge_delay (time.Duration): When action
# merging hedging is enabled, up to
# --remote_execution.action_merging_hedge_count hedged actions are run
# with this delay, backing off linearly.
action_merging_hedge_delay: 0s
# remote_execution.cgroup_settings_enabled (bool): Apply cgroup2 settings
# to Linux executions.
cgroup_settings_enabled: false
# remote_execution.ci_runner_default_timeout (time.Duration): Default
# timeout applied to all ci runners.
ci_runner_default_timeout: 8h0m0s
# remote_execution.ci_runner_recycling_max_wait (time.Duration): Max
# duration that a ci_runner task should wait for a warm runner before
# running on a potentially cold runner.
ci_runner_recycling_max_wait: 3s
# remote_execution.cpu_quota_limit (time.Duration): Maximum CPU time
# allowed for each quota period.
cpu_quota_limit: 3s
# remote_execution.cpu_quota_period (time.Duration): How often the CPU
# quota is refreshed.
cpu_quota_period: 100ms
# remote_execution.default_pool_name (string): The default executor pool
# to use if one is not specified.
default_pool_name: ""
# remote_execution.enable_action_merging (bool): If enabled, identical
# actions being executed concurrently are merged into a single execution.
enable_action_merging: true
# remote_execution.enable_executor_key_creation (bool): If enabled, UI
# will allow executor keys to be created.
enable_executor_key_creation: false
# remote_execution.enable_kythe_indexing (bool): If set, and codesearch is
# enabled, automatically run a kythe indexing action.
enable_kythe_indexing: false
# remote_execution.enable_redis_availability_monitoring (bool): If
# enabled, the execution server will detect if Redis has lost state and
# will ask Bazel to retry executions.
enable_redis_availability_monitoring: false
# remote_execution.enable_remote_exec (bool): If true, enable remote-exec.
# ** Enterprise only **
enable_remote_exec: true
# remote_execution.enable_user_owned_executors (bool): If enabled, users
# can register their own executors with the scheduler.
enable_user_owned_executors: false
# remote_execution.enable_workflows (bool): Whether to enable BuildBuddy
# workflows.
enable_workflows: false
# remote_execution.force_user_owned_darwin_executors (bool): If enabled,
# darwin actions will always run on user-owned executors.
force_user_owned_darwin_executors: false
# remote_execution.lease_duration (time.Duration): How long before a task
# lease must be renewed by the executor client.
lease_duration: 10s
# remote_execution.lease_grace_period (time.Duration): How long to wait
# for the executor to renew the lease after the TTL duration has elapsed.
lease_grace_period: 10s
# remote_execution.lease_reconnect_grace_period (time.Duration): How long
# to delay re-enqueued tasks in order to allow the previous lease holder
# to renew its lease (following a server shutdown).
lease_reconnect_grace_period: 1s
# remote_execution.max_scheduling_delay (time.Duration): Max duration that
# actions can sit in a non-preferred executor's queue before they are
# executed.
max_scheduling_delay: 5s
# remote_execution.pids_limit (int64): Maximum number of processes allowed
# per task at any time.
pids_limit: 2048
# remote_execution.redis_pubsub_pool_size (int): Maximum number of
# connections used for waiting for execution updates.
redis_pubsub_pool_size: 10000
# remote_execution.redis_target (string): A Redis target for storing
# remote execution state. Falls back to app.default_redis_target if
# unspecified. Required for remote execution. To ease migration, the redis
# target from the cache config will be used if neither this value nor
# app.default_redis_target are specified.
redis_target: ""
# remote_execution.require_executor_authorization (bool): If true,
# executors connecting to this server must provide a valid executor API
# key.
require_executor_authorization: false
sharded_redis:
# remote_execution.sharded_redis.password (string): Redis password
password: ""
# remote_execution.sharded_redis.shards ([]string): Ordered list of
# Redis shard addresses.
shards: []
# remote_execution.sharded_redis.username (string): Redis username
username: ""
# remote_execution.shared_executor_pool_group_id (string): Group ID that
# owns the shared executor pool.
shared_executor_pool_group_id: ""
# remote_execution.stored_task_size_millicpu_limit (int64): Limit placed
# on milliCPU calculated from task execution statistics.
stored_task_size_millicpu_limit: 7500
task_size_model:
# remote_execution.task_size_model.enabled (bool): Whether to enable
# model-based task size prediction.
enabled: false
# remote_execution.task_size_model.features_config_path (string): Path
# pointing to features.json config file.
features_config_path: ""
# remote_execution.task_size_model.serving_address (string): gRPC
# address pointing to TensorFlow Serving prediction service with task
# size models (cpu, mem).
serving_address: ""
# remote_execution.task_size_psi_correction (float64): What percentage of
# full-stall time should be subtracted from the execution duration.
task_size_psi_correction: 1
# remote_execution.use_measured_task_sizes (bool): Whether to use measured
# usage stats to determine task sizes.
use_measured_task_sizes: false
# remote_execution.workflow_default_branch_routing_enabled (bool): Enables
# default branch routing for workflows. When routing a workflow action, if
# there are no executors that ran that action for the same git branch, try
# to route it to an executor that ran the action for the same default
# branch.
workflow_default_branch_routing_enabled: false
# remote_execution.workflows_ci_runner_bazel_command (string): Bazel
# command to be used by the CI runner.
workflows_ci_runner_bazel_command: ""
# remote_execution.workflows_ci_runner_debug (bool): Whether to run the CI
# runner in debug mode.
workflows_ci_runner_debug: false
# remote_execution.workflows_default_image (string): The default
# container-image property to use for workflows. Must include docker://
# prefix if applicable.
workflows_default_image: docker://gcr.io/flame-public/buildbuddy-ci-runner@sha256:8cf614fc4695789bea8321446402e7d6f84f6be09b8d39ec93caa508fa3e3cfc
# remote_execution.workflows_enable_firecracker (bool): Whether to enable
# firecracker for Linux workflow actions.
workflows_enable_firecracker: false
# remote_execution.workflows_linux_compute_units (int): Number of
# BuildBuddy compute units (BCU) to reserve for Linux workflow actions.
workflows_linux_compute_units: 3
# remote_execution.workflows_mac_compute_units (int): Number of BuildBuddy
# compute units (BCU) to reserve for Mac workflow actions.
workflows_mac_compute_units: 3
# remote_execution.workflows_pool_name (string): The executor pool to use
# for workflow actions. Defaults to the default executor pool if not
# specified.
workflows_pool_name: ""
soci_artifact_store:
# soci_artifact_store.cache_seed (string): If set, this seed is hashed
# with container image IDs to generate cache keys storing soci indexes.
cache_seed: socicache-092872023
# soci_artifact_store.layer_storage (string): Directory in which to store
# pulled container image layers for indexing by soci artifact store.
layer_storage: /tmp/
ssl:
# ssl.cert_file (string): Path to a PEM encoded certificate file to use
# for TLS if not using ACME.
cert_file: ""
# ssl.client_ca_cert (string): PEM encoded certificate authority used to
# issue client certificates for mTLS auth.
client_ca_cert: ""
# ssl.client_ca_cert_file (string): Path to a PEM encoded certificate
# authority file used to issue client certificates for mTLS auth.
client_ca_cert_file: ""
# ssl.client_ca_key (string): PEM encoded certificate authority key used
# to issue client certificates for mTLS auth.
client_ca_key: ""
# ssl.client_ca_key_file (string): Path to a PEM encoded certificate
# authority key file used to issue client certificates for mTLS auth.
client_ca_key_file: ""
# ssl.client_cert_lifespan (time.Duration): The duration client
# certificates are valid for. Ex: '730h' for one month. If not set,
# defaults to 100 years.
client_cert_lifespan: 876000h0m0s
# ssl.default_host (string): Host name to use for ACME generated cert if
# TLS request does not contain SNI.
default_host: ""
# ssl.enable_ssl (bool): Whether or not to enable SSL/TLS on gRPC
# connections (gRPCS).
enable_ssl: false
# ssl.host_whitelist ([]string): Cloud-Only
host_whitelist: []
# ssl.key_file (string): Path to a PEM encoded key file to use for TLS if
# not using ACME.
key_file: ""
# ssl.self_signed (bool): If true, a self-signed cert will be generated
# for TLS termination.
self_signed: false
# ssl.upgrade_insecure (bool): True if http requests should be redirected
# to https. Assumes http traffic is served on port 80 and https traffic is
# served on port 443 (typically via an ingress / load balancer).
upgrade_insecure: false
# ssl.use_acme (bool): Whether or not to automatically configure SSL certs
# using ACME. If ACME is enabled, cert_file and key_file should not be
# set.
use_acme: false
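# For example, a hypothetical ACME-based TLS setup (the hostname below is a
# placeholder, not a default) might set, within this section:
# enable_ssl: true
# use_acme: true
# default_host: "buildbuddy.example.com"
# upgrade_insecure: true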
storage:
aws_s3:
# storage.aws_s3.bucket (string): The AWS S3 bucket to store files in.
bucket: ""
# storage.aws_s3.credentials_profile (string): A custom credentials
# profile to use.
credentials_profile: ""
# storage.aws_s3.disable_ssl (bool): Disables the use of SSL, useful
# for configuring the use of MinIO. **DEPRECATED** Specify a non-HTTPS
# endpoint instead.
disable_ssl: false
# storage.aws_s3.endpoint (string): The AWS endpoint to use, useful
# for configuring the use of MinIO.
endpoint: ""
# storage.aws_s3.region (string): The AWS region.
region: ""
# storage.aws_s3.role_arn (string): The role ARN to use for web
# identity auth.
role_arn: ""
# storage.aws_s3.role_session_name (string): The role session name to
# use for web identity auth.
role_session_name: ""
# storage.aws_s3.s3_force_path_style (bool): Force path style urls for
# objects, useful for configuring the use of MinIO.
s3_force_path_style: false
# storage.aws_s3.static_credentials_id (string): Static credentials ID
# to use, useful for configuring the use of MinIO.
static_credentials_id: ""
# storage.aws_s3.static_credentials_secret (string): Static
# credentials secret to use, useful for configuring the use of MinIO.
static_credentials_secret: ""
# storage.aws_s3.static_credentials_token (string): Static credentials
# token to use, useful for configuring the use of MinIO.
static_credentials_token: ""
# storage.aws_s3.web_identity_token_file (string): The file path to
# the web identity token file.
web_identity_token_file: ""
azure:
# storage.azure.account_key (string): The key for the Azure storage
# account
account_key: ""
# storage.azure.account_name (string): The name of the Azure storage
# account
account_name: ""
# storage.azure.container_name (string): The name of the Azure storage
# container
container_name: ""
# storage.chunk_file_size_bytes (int): How many bytes to buffer in memory
# before flushing a chunk of build protocol data to disk.
chunk_file_size_bytes: 3000000
# storage.cleanup_batch_size (int): How many invocations to delete in each
# janitor cleanup task
cleanup_batch_size: 10
# storage.disable_persist_cache_artifacts (bool): If disabled, buildbuddy
# will not persist cache artifacts in the blobstore. This may make older
# invocations not display properly.
disable_persist_cache_artifacts: false
disk:
# storage.disk.root_directory (string): The root directory to store
# all blobs in, if using disk based storage.
root_directory: /tmp/buildbuddy
# storage.disk.use_v2_layout (bool): If enabled, files will be stored
# using the v2 layout. See disk_cache.MigrateToV2Layout for a
# description.
use_v2_layout: false
# storage.enable_chunked_event_logs (bool): If true, Event logs will be
# stored separately from the invocation proto in chunks.
enable_chunked_event_logs: true
execution:
# storage.execution.cleanup_batch_size (int): How many invocations to
# delete in each janitor cleanup task
cleanup_batch_size: 200
# storage.execution.cleanup_interval (time.Duration): How often the
# janitor cleanup tasks will run
cleanup_interval: 5m0s
# storage.execution.cleanup_workers (int): How many cleanup tasks to
# run
cleanup_workers: 1
# storage.execution.ttl (time.Duration): The time, in seconds, to keep
# invocations before deletion. 0 disables invocation deletion.
ttl: 0s
gcs:
# storage.gcs.bucket (string): The name of the GCS bucket to store
# build artifact files in.
bucket: ""
# storage.gcs.credentials (string): Credentials in JSON format that
# will be used to authenticate to GCS.
credentials: ""
# storage.gcs.credentials_file (string): A path to a JSON credentials
# file that will be used to authenticate to GCS.
credentials_file: ""
# storage.gcs.project_id (string): The Google Cloud project ID of the
# project owning the above credentials and GCS bucket.
project_id: ""
# storage.path_prefix (string): The prefix directory to store all blobs in
path_prefix: ""
# storage.tempdir (string): Root directory for temporary files. Defaults
# to the OS-specific temp dir.
tempdir: /tmp
# storage.ttl_seconds (int): The time, in seconds, to keep invocations
# before deletion. 0 disables invocation deletion.
ttl_seconds: 0
vertexai:
# vertexai.credentials (string): The GCP credentials to use
credentials: ""
# vertexai.model (string): The model ID to use
model: codechat-bison
# vertexai.project (string): The GCP project ID to use
project: flame-build
# vertexai.region (string): The GCP region to use
region: us-central1
workspace:
# workspace.enabled (bool): If true, enable workspaces.
enabled: false
# workspace.use_blobstore (bool): If true, use blobstore to store
# workspaces. Otherwise the cache will be used
use_blobstore: true
BuildBuddy Executor
# Unstructured settings
# debug_disable_firecracker_workspace_sync (bool): Do not sync the action
# workspace to the guest, instead using the existing workspace from the VM
# snapshot.
debug_disable_firecracker_workspace_sync: false
# debug_enable_anonymous_runner_recycling (bool): Whether to enable runner
# recycling for unauthenticated requests. For debugging purposes only - do not
# use in production.
debug_enable_anonymous_runner_recycling: false
# debug_force_remote_snapshots (bool): When remote snapshotting is enabled,
# force remote snapshotting even for tasks which otherwise wouldn't support
# it.
debug_force_remote_snapshots: false
# debug_stream_command_outputs (bool): If true, stream command outputs to the
# terminal. Intended for debugging purposes only and should not be used in
# production.
debug_stream_command_outputs: false
# debug_use_local_images_only (bool): Do not pull OCI images and only use
# locally cached images. This can be set to test local image builds during
# development without needing to push to a container registry. Not intended
# for production use.
debug_use_local_images_only: false
# docker_cap_add (string): Sets --cap-add= on the docker command. Comma
# separated.
docker_cap_add: ""
# drop_invocation_pk_cols (bool): If true, attempt to drop invocation PK cols
drop_invocation_pk_cols: false
# grpc_client_origin_header (string): Header value to set for
# x-buildbuddy-origin.
grpc_client_origin_header: ""
# grpc_max_recv_msg_size_bytes (int): Configures the max GRPC receive message
# size [bytes]
grpc_max_recv_msg_size_bytes: 50000000
# grpc_port (int): The port to listen for gRPC traffic on
grpc_port: 1985
# grpcs_port (int): The port to listen for gRPCS traffic on
grpcs_port: 1986
# internal_grpc_port (int): The port to listen for internal gRPC traffic on
internal_grpc_port: 1987
# internal_grpcs_port (int): The port to listen for internal gRPCS traffic on
internal_grpcs_port: 1988
# listen (string): The interface to listen on (default: 0.0.0.0)
listen: 0.0.0.0
# log_goroutine_profile_on_shutdown (bool): Whether to log all goroutine stack
# traces on shutdown.
log_goroutine_profile_on_shutdown: false
# max_shutdown_duration (time.Duration): Time to wait for shutdown
max_shutdown_duration: 25s
# monitoring_port (int): The port to listen for monitoring traffic on
monitoring_port: 9090
# port (int): The port to listen for HTTP traffic on
port: 8080
# redis_command_buffer_flush_period (time.Duration): How long to wait between
# flushing buffered redis commands. Setting this to 0 will disable buffering
# at the cost of higher redis QPS.
redis_command_buffer_flush_period: 250ms
# regions ([]region.Region): A list of regions that executors might be
# connected to.
regions: []
# For example:
# - name: "" # The user-friendly name of this region. Ex: Europe (type: string)
# server: "" # The http endpoint for this server, with the protocol. Ex: https://app.europe.buildbuddy.io (type: string)
#   subdomains: "" # The format for subdomain urls with a single * wildcard. Ex: https://*.europe.buildbuddy.io (type: string)
# report_not_ready (bool): If set to true, the app will always report as being
# unready.
report_not_ready: false
# server_type (string): The server type to match on health checks
server_type: prod-buildbuddy-executor
# shutdown_lameduck_duration (time.Duration): If set, the server will be
# marked unready but not run shutdown functions until this period passes.
shutdown_lameduck_duration: 0s
# zone_override (string): A value that will override the auto-detected zone.
# Ignored if empty
zone_override: ""
# Structured settings
app:
  # app.admin_only_create_group (bool): If true, only admins of an existing
  # group can create new groups.
admin_only_create_group: false
# app.build_buddy_url (URL): The external URL where your BuildBuddy
# instance can be found.
build_buddy_url: http://localhost:8080
# app.cache_api_url (URL): Overrides the default remote cache protocol
# gRPC address shown by BuildBuddy on the configuration screen.
cache_api_url: ""
client_identity:
# app.client_identity.client (string): The client identifier to place
# in the identity header.
client: ""
# app.client_identity.key (string): The key used to sign and verify
# identity JWTs.
key: ""
# app.client_identity.origin (string): The origin identifier to place
# in the identity header.
origin: ""
# app.code_editor_enabled (bool): If set, code editor functionality will
# be enabled.
code_editor_enabled: false
# app.code_editor_v2_enabled (bool): If set, show v2 of code editor that
# stores state on server instead of local storage.
code_editor_v2_enabled: false
# app.default_redis_target (string): A Redis target for storing remote
# shared state. To ease migration, the redis target from the remote
# execution config will be used if this value is not specified.
default_redis_target: ""
default_sharded_redis:
# app.default_sharded_redis.password (string): Redis password
password: ""
# app.default_sharded_redis.shards ([]string): Ordered list of Redis
# shard addresses.
shards: []
# app.default_sharded_redis.username (string): Redis username
username: ""
# app.default_subdomains ([]string): List of subdomains that should not be
# handled as user-owned subdomains.
default_subdomains: []
# app.enable_canaries (bool): If true, enable slow function canaries
enable_canaries: true
# app.enable_grpc_metrics_by_group_id (bool): If enabled, grpc metrics by
# group ID will be recorded
enable_grpc_metrics_by_group_id: false
# app.enable_prometheus_histograms (bool): If true, collect prometheus
# histograms for all RPCs
enable_prometheus_histograms: true
# app.enable_structured_logging (bool): If true, log messages will be
# json-formatted.
enable_structured_logging: false
# app.enable_subdomain_matching (bool): If true, request subdomain will be
# taken into account when determining what request restrictions should be
# applied.
enable_subdomain_matching: false
# app.events_api_url (URL): Overrides the default build event protocol
# gRPC address shown by BuildBuddy on the configuration screen.
events_api_url: ""
# app.grpc_max_recv_msg_size_bytes (int): DEPRECATED: use
# --grpc_max_recv_msg_size_bytes instead
grpc_max_recv_msg_size_bytes: 50000000
# app.grpc_over_http_port_enabled (bool): Enables grpc traffic to be
# served over the http port.
grpc_over_http_port_enabled: true
# app.ignore_forced_tracing_header (bool): If set, we will not honor the
# forced tracing header.
ignore_forced_tracing_header: false
# app.log_enable_gcp_logging_format (bool): If true, the output structured
  # logs will be compatible with the format expected by GCP Logging.
log_enable_gcp_logging_format: false
  # app.log_enable_grpc_request (bool): If true, log gRPC requests when the
  # log level is default.
log_enable_grpc_request: true
# app.log_error_stack_traces (bool): If true, stack traces will be printed
# for errors that have them.
log_error_stack_traces: false
# app.log_gcp_log_id (string): The log ID to log to in GCP (if any).
log_gcp_log_id: ""
# app.log_gcp_project_id (string): The project ID to log to in GCP (if
# any).
log_gcp_project_id: ""
# app.log_include_short_file_name (bool): If true, log messages will
# include shortened originating file name.
log_include_short_file_name: false
# app.log_level (string): The desired log level. Logs with a level >= this
# level will be emitted. One of {'fatal', 'error', 'warn', 'info',
# 'debug'}
log_level: info
# app.proxy_targets ([]grpc_forward.proxyPair)
proxy_targets: []
# For example:
# - prefix: "" # The gRPC method prefix to match. (type: string)
# target: "" # The gRPC target to forward requests to. (type: string)
# app.strict_csp_enabled (bool): If set, set a strict CSP header.
# Violations are logged at warning level.
strict_csp_enabled: false
# app.trace_fraction (float64): Fraction of requests to sample for
# tracing.
trace_fraction: 0
# app.trace_fraction_overrides ([]string): Tracing fraction override based
# on name in format name=fraction.
trace_fraction_overrides: []
  # app.trace_jaeger_collector (string): Address of the Jaeger collector
# endpoint where traces will be sent.
trace_jaeger_collector: ""
# app.trace_project_id (string): Optional GCP project ID to export traces
# to. If not specified, determined from default credentials or metadata
# server if running on GCP.
trace_project_id: ""
# app.trace_service_name (string): Name of the service to associate with
# traces.
trace_service_name: ""
auth:
# auth.admin_group_id (string): ID of a group whose members can perform
# actions only accessible to server admins.
admin_group_id: ""
# auth.disable_refresh_token (bool): If true, the offline_access scope
# which requests refresh tokens will not be requested.
disable_refresh_token: false
# auth.domain_wide_cookies (bool): If true, cookies will have domain set
# so that they are accessible on domain and all subdomains.
domain_wide_cookies: false
# auth.enable_anonymous_usage (bool): If true, unauthenticated build
# uploads will still be allowed but won't be associated with your
# organization.
enable_anonymous_usage: false
# auth.enable_self_auth (bool): If true, enables a single user login via
  # an oauth provider on the buildbuddy server. Recommended only when the
  # server is behind a firewall, as this option may grant anyone with access
  # to the webpage admin rights to your buildbuddy installation. ** Enterprise
# only **
enable_self_auth: false
# auth.force_approval (bool): If true, when a user doesn't have a session
# (first time logging in, or manually logged out) force the auth provider
# to show the consent screen allowing the user to select an account if
# they have multiple. This isn't supported by all auth providers.
force_approval: false
# auth.https_only_cookies (bool): If true, cookies will only be set over
# https connections.
https_only_cookies: false
# auth.jwt_claims_cache_ttl (time.Duration): TTL for JWT string to parsed
# claims caching. Set to '0' to disable cache.
jwt_claims_cache_ttl: 15s
# auth.jwt_duration (time.Duration): Maximum lifetime of the generated
# JWT.
jwt_duration: 6h0m0s
# auth.jwt_key (string): The key to use when signing JWT tokens.
jwt_key: set_the_jwt_in_config
# auth.new_jwt_key (string): If set, JWT verifications will try both this
# and the old JWT key.
new_jwt_key: ""
# auth.oauth_providers ([]oidc.OauthProvider): The list of oauth providers
# to use to authenticate.
oauth_providers: []
# For example:
# - issuer_url: "" # The issuer URL of this OIDC Provider. (type: string)
# client_id: "" # The oauth client ID. (type: string)
# client_secret: "" # The oauth client secret. (type: string)
# slug: "" # The slug of this OIDC Provider. (type: string)
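  # A hypothetical entry for a Google OIDC provider might look like the
  # following (the client ID and secret are placeholders):
  #   oauth_providers:
  #     - issuer_url: "https://accounts.google.com"
  #       client_id: "YOUR_CLIENT_ID.apps.googleusercontent.com"
  #       client_secret: "YOUR_CLIENT_SECRET"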
saml:
# auth.saml.cert (string): PEM encoded certificate used for SAML auth.
cert: ""
# auth.saml.cert_file (string): Path to a PEM encoded certificate file
# used for SAML auth.
cert_file: ""
# auth.saml.key (string): PEM encoded certificate key used for SAML
# auth.
key: ""
# auth.saml.key_file (string): Path to a PEM encoded certificate key
# file used for SAML auth.
key_file: ""
# auth.saml.trusted_idp_cert_files ([]string): List of PEM-encoded
# trusted IDP certificates. Intended for testing and development only.
trusted_idp_cert_files: []
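    # For example, a hypothetical SAML setup using on-disk key material (paths
    # are placeholders):
    #   cert_file: "/etc/buildbuddy/saml/cert.pem"
    #   key_file: "/etc/buildbuddy/saml/key.pem"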
# auth.sign_using_new_jwt_key (bool): If true, new JWTs will be signed
# using the new JWT key.
sign_using_new_jwt_key: false
# auth.trust_xforwardedfor_header (bool): If true, client IP information
# will be retrieved from the X-Forwarded-For header. Should only be
# enabled if the BuildBuddy server is only accessible behind a trusted
# proxy.
trust_xforwardedfor_header: false
cache:
client:
# cache.client.ac_rpc_timeout (time.Duration): Maximum time a single
# Action Cache RPC can take.
ac_rpc_timeout: 15s
# cache.client.cas_rpc_timeout (time.Duration): Maximum time a single
# batch RPC or a single ByteStream chunk read can take.
cas_rpc_timeout: 1m0s
# cache.client.enable_download_compression (bool): If true, enable
# compression of downloads from remote caches
enable_download_compression: true
# cache.client.enable_upload_compression (bool): If true, enable
# compression of uploads to remote caches
enable_upload_compression: true
# cache.client.filecache_link_parallelism (int): Number of goroutines
# to use when linking inputs from filecache. If 0 uses the value of
# GOMAXPROCS.
filecache_link_parallelism: 0
# cache.client.input_tree_setup_parallelism (int): Number of
# goroutines to use across all tasks when setting up the input tree
# structure. -1 means no queueing. 0 means GOMAXPROCS.
input_tree_setup_parallelism: -1
gcs:
# cache.gcs.bucket (string): The name of the GCS bucket to store cache
# files in.
bucket: ""
# cache.gcs.credentials_file (string): A path to a JSON credentials
# file that will be used to authenticate to GCS.
credentials_file: ""
# cache.gcs.project_id (string): The Google Cloud project ID of the
# project owning the above credentials and GCS bucket.
project_id: ""
# cache.gcs.ttl_days (int64): The period after which cache files
# should be TTLd. Disabled if 0.
ttl_days: 0
# cache.memcache_targets ([]string): Deprecated. Use Redis Target instead.
memcache_targets: []
redis:
# cache.redis.max_value_size_bytes (int64): The maximum value size to
# cache in redis (in bytes).
max_value_size_bytes: 10000000
# cache.redis.redis_target (string): A redis target for improved
# Caching/RBE performance. Target can be provided as either a redis
# connection URI or a host:port pair. URI schemas supported:
# redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or
# unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise
# only **
redis_target: ""
sharded:
# cache.redis.sharded.password (string): Redis password
password: ""
# cache.redis.sharded.shards ([]string): Ordered list of Redis
# shard addresses.
shards: []
# cache.redis.sharded.username (string): Redis username
username: ""
# cache.redis_target (string): A redis target for improved Caching/RBE
# performance. Target can be provided as either a redis connection URI or
# a host:port pair. URI schemas supported:
# redis[s]://[[USER][:PASSWORD]@][HOST][:PORT][/DATABASE] or
# unix://[[USER][:PASSWORD]@]SOCKET_PATH[?db=DATABASE] ** Enterprise only
# **
redis_target: ""
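  # For example (hypothetical hosts; either form should work):
  #   redis_target: "my-redis.example.internal:6379"
  # or, as a connection URI:
  #   redis_target: "redis://:mypassword@my-redis.example.internal:6379/0"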
s3:
# cache.s3.bucket (string): The AWS S3 bucket to store files in.
bucket: ""
# cache.s3.credentials_profile (string): A custom credentials profile
# to use.
credentials_profile: ""
# cache.s3.disable_ssl (bool): Disables the use of SSL, useful for
# configuring the use of MinIO. **DEPRECATED** Specify a non-HTTPS
# endpoint instead.
disable_ssl: false
# cache.s3.endpoint (string): The AWS endpoint to use, useful for
# configuring the use of MinIO.
endpoint: ""
# cache.s3.path_prefix (string): Prefix inside the AWS S3 bucket to
# store files
path_prefix: ""
# cache.s3.region (string): The AWS region.
region: ""
# cache.s3.role_arn (string): The role ARN to use for web identity
# auth.
role_arn: ""
# cache.s3.role_session_name (string): The role session name to use
# for web identity auth.
role_session_name: ""
# cache.s3.s3_force_path_style (bool): Force path style urls for
# objects, useful for configuring the use of MinIO.
s3_force_path_style: false
# cache.s3.static_credentials_id (string): Static credentials ID to
# use, useful for configuring the use of MinIO.
static_credentials_id: ""
# cache.s3.static_credentials_secret (string): Static credentials
# secret to use, useful for configuring the use of MinIO.
static_credentials_secret: ""
# cache.s3.static_credentials_token (string): Static credentials token
# to use, useful for configuring the use of MinIO.
static_credentials_token: ""
# cache.s3.ttl_days (int): The period after which cache files should
# be TTLd. Disabled if 0.
ttl_days: 0
# cache.s3.web_identity_token_file (string): The file path to the web
# identity token file.
web_identity_token_file: ""
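    # For example, a hypothetical MinIO-backed cache (the endpoint, bucket, and
    # credentials below are placeholders):
    #   endpoint: "http://minio.example.internal:9000"
    #   bucket: "buildbuddy-cache"
    #   s3_force_path_style: true
    #   static_credentials_id: "minioadmin"
    #   static_credentials_secret: "minioadmin"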
executor:
# executor.api_key (string): API Key used to authorize the executor with
# the BuildBuddy app server.
api_key: ""
# executor.app_target (string): The GRPC url of a buildbuddy app server.
app_target: grpcs://remote.buildbuddy.io
bare:
# executor.bare.enable_log_files (bool): Whether to send bare runner
# output to log files for debugging. These files are stored adjacent
# to the task directory and are deleted when the task is complete.
enable_log_files: false
# executor.bare.enable_stats (bool): Whether to enable stats for bare
# command execution.
enable_stats: false
# executor.cache_target (string): The GRPC url of the remote cache to use.
# If empty, the value from --executor.app_target is used.
cache_target: ""
# executor.child_cgroups_enabled (bool): On startup, sets up separate
# child cgroups for the executor process and any action processes that it
# starts. When using this flag, the executor's starting cgroup must not
# have any other processes besides the executor. In particular, the
# executor cannot be run under tini when using this flag.
child_cgroups_enabled: false
# executor.container_registries ([]oci.Registry)
container_registries: []
# For example:
# - hostnames: [] # (type: []string)
# username: "" # (type: string)
# password: "" # (type: string)
# executor.container_registry_region (string): All occurrences of
# '{{region}}' in container image names will be replaced with this string,
# if specified.
container_registry_region: ""
# executor.custom_resources ([]resources.CustomResource): Optional
# allocatable custom resources. This works similarly to bazel's
# local_extra_resources flag. Request these resources in exec_properties
# using the 'resources:<name>': '<value>' syntax.
custom_resources: []
# For example:
# - name: "" # (type: string)
# value: 0 # (type: float64)
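  # For instance, a hypothetical "gpu" resource could be advertised as:
  #   custom_resources:
  #     - name: "gpu"
  #       value: 1
  # and then requested by an action with the exec property 'resources:gpu':
  # '1', following the syntax described above.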
# executor.default_image (string): The default docker image to use to warm
# up executors or if no platform property is set. Ex:
# gcr.io/flame-public/executor-docker-default:enterprise-v1.5.4
default_image: gcr.io/flame-public/executor-docker-default:enterprise-v1.6.0
# executor.default_isolation_type (string): The default workload isolation
# type when no type is specified in an action. If not set, we use the
# first of the following that is set: docker, podman, firecracker, or none
# (bare).
default_isolation_type: ""
# executor.default_task_timeout (time.Duration): Timeout to use for tasks
# that do not have a timeout set explicitly.
default_task_timeout: 8h0m0s
# executor.default_termination_grace_period (time.Duration): Default
# termination grace period for all actions. (Termination grace period is
# the time to wait between an action timing out and forcefully shutting it
# down.)
default_termination_grace_period: 0s
# executor.default_xcode_version (string): Sets the default Xcode version
# number to use if an action doesn't specify one. If not set,
# /Applications/Xcode.app/ is used.
default_xcode_version: ""
# executor.delete_build_root_on_startup (bool): If true, delete the build
# root on startup
delete_build_root_on_startup: false
# executor.delete_filecache_on_startup (bool): If true, delete the file
# cache on startup
delete_filecache_on_startup: false
# executor.delete_parallelism (int): Number of goroutines to use when
# deleting files.
delete_parallelism: 0
# executor.die_on_firecracker_failure (bool): Makes the host executor
# process die if any command orchestrating or running Firecracker fails.
# Useful for capturing failures preemptively. WARNING: using this option
# MAY leave the host machine in an unhealthy state on Firecracker failure;
# some post-hoc cleanup may be necessary.
die_on_firecracker_failure: false
# executor.disable_local_cache (bool): If true, a local file cache will
# not be used.
disable_local_cache: false
# executor.docker_devices ([]container.DockerDeviceMapping): Configure
# (docker) devices that will be available inside the sandbox container.
# Format is
# --executor.docker_devices='[{"PathOnHost":"/dev/foo","PathInContainer":"/some/dest","CgroupPermissions":"see,docker,docs"}]'
docker_devices: []
# For example:
# - path_on_host: "" # path to device that should be mapped from the host. (type: string)
# path_in_container: "" # path under which the device will be present in container. (type: string)
# cgroup_permissions: "" # cgroup permissions that should be assigned to device. (type: string)
# executor.docker_gpus (string): If set to 'all', run docker containers
# with a device request for all GPUs.
docker_gpus: ""
# executor.docker_inherit_user_ids (bool): If set, run docker containers
# using the same uid and gid as the user running the executor process.
docker_inherit_user_ids: false
# executor.docker_mount_mode (string): Sets the mount mode of volumes
# mounted to docker images. Useful if running on SELinux
# https://www.projectatomic.io/blog/2015/06/using-volumes-with-docker-can-cause-problems-with-selinux/
docker_mount_mode: ""
# executor.docker_net_host (bool): Sets --net=host on the docker command.
# Intended for local development only. **DEPRECATED** Use
# --executor.docker_network=host instead.
docker_net_host: false
# executor.docker_network (string): If set, set docker/podman --network to
# this value by default. Can be overridden per-action with the
# `dockerNetwork` exec property, which accepts values 'off'
# (--network=none) or 'bridge' (--network=<default>).
docker_network: ""
# executor.docker_sibling_containers (bool): If set, mount the configured
# Docker socket to containers spawned for each action, to enable
# Docker-out-of-Docker (DooD). Takes effect only if docker_socket is also
# set. Should not be set by executors that can run untrusted code.
docker_sibling_containers: false
# executor.docker_socket (string): If set, run execution commands in
# docker using the provided socket.
docker_socket: ""
# executor.docker_volumes ([]string): Additional --volume arguments to be
# passed to docker or podman.
docker_volumes: []
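  # For example (a hypothetical read-only host mount):
  #   docker_volumes:
  #     - "/mnt/tools:/tools:ro"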
# executor.enable_bare_runner (bool): Enables running execution commands
# directly on the host without isolation.
enable_bare_runner: false
# executor.enable_fastcopy_reflinking (bool): If true, attempt to use `cp
# --reflink=auto` to link files
enable_fastcopy_reflinking: false
# executor.enable_firecracker (bool): Enables running execution commands
# inside of firecracker VMs
enable_firecracker: false
# executor.enable_lease_reconnect (bool): Enable task lease reconnection
# on scheduler server shutdown.
enable_lease_reconnect: true
# executor.enable_local_snapshot_sharing (bool): Enables local snapshot
# sharing for firecracker VMs. Also requires that
# executor.firecracker_enable_nbd is true.
enable_local_snapshot_sharing: false
# executor.enable_oci (bool): Enables running execution commands using an
# OCI runtime directly.
enable_oci: false
# executor.enable_podman (bool): Enables running execution commands inside
# podman containers.
enable_podman: false
# executor.enable_remote_snapshot_sharing (bool): Enables remote snapshot
# sharing for firecracker VMs. Also requires that
# executor.firecracker_enable_nbd and executor.firecracker_enable_uffd are
# true.
enable_remote_snapshot_sharing: false
# executor.enable_sandbox (bool): Enables running execution commands
# inside of sandbox-exec.
enable_sandbox: false
# executor.enable_vfs (bool): Whether FUSE based filesystem is enabled.
enable_vfs: false
  # executor.excess_capacity_threshold (float64): The RAM and CPU utilization
  # fraction below which this executor may request additional work
excess_capacity_threshold: 0.4
# executor.exclusive_task_scheduling (bool): If true, only one task will
# be scheduled at a time. Default is false
exclusive_task_scheduling: false
# executor.extra_env_vars ([]string): Additional environment variables to
# pass to remotely executed actions. i.e. MY_ENV_VAR=foo
extra_env_vars: []
# executor.firecracker_cgroup_version (string): Specifies the cgroup
# version for firecracker to use.
firecracker_cgroup_version: ""
# executor.firecracker_debug_stream_vm_logs (bool): Stream firecracker VM
# logs to the terminal.
firecracker_debug_stream_vm_logs: false
# executor.firecracker_debug_terminal (bool): Run an interactive terminal
# in the Firecracker VM connected to the executor's controlling terminal.
# For debugging only.
firecracker_debug_terminal: false
# executor.firecracker_enable_cpu_weight (bool): Set cgroup CPU weight to
# match VM size
firecracker_enable_cpu_weight: false
# executor.firecracker_enable_merged_rootfs (bool): Merges the containerfs
# and scratchfs into a single rootfs, removing the need to use overlayfs
# for the guest's root filesystem. Requires NBD to also be enabled.
firecracker_enable_merged_rootfs: false
# executor.firecracker_enable_uffd (bool): Enables userfaultfd for
# firecracker VMs.
firecracker_enable_uffd: false
# executor.firecracker_enable_vbd (bool): Enables the FUSE-based virtual
# block device interface for block devices.
firecracker_enable_vbd: false
# executor.firecracker_health_check_interval (time.Duration): How often to
# run VM health checks while tasks are executing.
firecracker_health_check_interval: 10s
# executor.firecracker_health_check_timeout (time.Duration): Timeout for
# VM health check requests.
firecracker_health_check_timeout: 30s
# executor.firecracker_init_on_alloc_and_free (bool): Set init_on_alloc=1
# and init_on_free=1 in firecracker vms
firecracker_init_on_alloc_and_free: false
# executor.firecracker_overprovision_cpus (int): Number of CPUs to
# overprovision for VMs. This allows VMs to more effectively utilize CPU
# resources on the host machine. Set to -1 to allow all VMs to use max
# CPU.
firecracker_overprovision_cpus: 3
# executor.firecracker_workspace_disk_slack_space_mb (int64): Extra space
# to allocate to firecracker workspace disks, in megabytes. **
# Experimental **
firecracker_workspace_disk_slack_space_mb: 2000
# executor.forced_network_isolation_type (string): If set, run all
# commands that require networking with this isolation
forced_network_isolation_type: ""
# executor.host_id (string): Optional: Allows for manual specification of
# an executor's host id. If not set, a random UUID will be used.
host_id: ""
# executor.host_root_directory (string): Path on the host where the
# executor container root directory is mounted.
host_root_directory: ""
# executor.image_pull_timeout (time.Duration): How long to wait for the
# container image to be pulled before returning an Unavailable (retryable)
# error for an action execution attempt. Applies to all isolation types
# (docker, firecracker, etc.)
image_pull_timeout: 5m0s
# executor.include_subdir_prefix (bool): If true, store files under
# subdirs named by the first 4 chars of file digest
include_subdir_prefix: false
# executor.local_cache_always_clone (bool): If true, files from the
# filecache will always be cloned instead of hardlinked
local_cache_always_clone: false
# executor.local_cache_directory (string): A local on-disk cache
# directory. Must be on the same device (disk partition, Docker volume,
# etc.) as the configured root_directory, since files are hard-linked to
# this cache for performance reasons. Otherwise, 'Invalid cross-device
# link' errors may result.
local_cache_directory: /tmp/buildbuddy/filecache
# executor.local_cache_size_bytes (int64): The maximum size, in bytes, to
# use for the local on-disk cache
local_cache_size_bytes: 1000000000
# executor.max_task_timeout (time.Duration): Max timeout that can be
# requested by a task. A value <= 0 means unlimited. An error will be
# returned if a task requests a timeout greater than this value.
max_task_timeout: 24h0m0s
# executor.max_termination_grace_period (time.Duration): Max termination
# grace period that actions can request. An error will be returned if a
# task requests a grace period greater than this value. (Termination grace
# period is the time to wait between an action timing out and forcefully
# shutting it down.)
max_termination_grace_period: 1m0s
# executor.memory_bytes (int64): Optional maximum memory to allocate to
# execution tasks (approximate). Cannot set both this option and the
# SYS_MEMORY_BYTES env var.
memory_bytes: 0
# executor.metadata_directory (string): Location where executor host_id
# and other metadata is stored. Defaults to
# executor.local_cache_directory/../
metadata_directory: ""
  # executor.millicpu (int64): Optional maximum CPU, in milliCPUs, to allocate
# to execution tasks (approximate). Cannot set both this option and the
# SYS_CPU env var.
millicpu: 0
# executor.mmap_memory_bytes (int64): Maximum memory to be allocated
# towards mmapped files for Firecracker copy-on-write functionality. This
  # is subtracted from the configured memory_bytes. Has no effect if
# firecracker is disabled or snapshot sharing is disabled.
mmap_memory_bytes: 10000000000
# executor.nat_source_port_range (string): If set, restrict the source
# ports for NATed traffic to this range.
nat_source_port_range: ""
# executor.network_lock_directory (string): If set, use this directory to
# store lockfiles for allocated IP ranges. This is required if running
# multiple executors within the same networking environment.
network_lock_directory: ""
oci:
# executor.oci.cpu_limit (int): Hard limit for CPU resources,
# expressed as CPU count. Default (0) is no limit.
cpu_limit: 0
# executor.oci.cpu_shares_enabled (bool): Enable CPU weighting based
# on task size.
cpu_shares_enabled: false
# executor.oci.dns (string): Specifies a custom DNS server for use
# inside OCI containers. If set to the empty string, mount
# /etc/resolv.conf from the host.
dns: 8.8.8.8
# executor.oci.network_pool_size (int): Limit on the number of
# networks to be reused between containers. Setting to 0 disables
# pooling. Setting to -1 uses the recommended default.
network_pool_size: -1
# executor.oci.pids_limit (int64): PID limit for OCI runtime. Set to
# -1 for unlimited PIDs.
pids_limit: 2048
# executor.oci.runtime (string): OCI runtime
runtime: ""
# executor.oci.runtime_root (string): Root directory for storage of
# container state (see <runtime> --help for default)
runtime_root: ""
podman:
# executor.podman.dns (string): Specifies a custom DNS server for
# podman to use. Defaults to 8.8.8.8. If set to empty, no --dns= flag
# will be passed to podman.
dns: 8.8.8.8
# executor.podman.enable_image_streaming (bool): If set, all public
# (non-authenticated) podman images are streamed using soci artifacts
# generated and stored in the apps.
enable_image_streaming: false
# executor.podman.enable_private_image_streaming (bool): If set and
# --executor.podman.enable_image_streaming is set, all private
# (authenticated) podman images are streamed using soci artifacts
# generated and stored in the apps.
enable_private_image_streaming: false
# executor.podman.enable_stats (bool): Whether to enable cgroup-based
# podman stats.
enable_stats: true
# executor.podman.gpus (string): Specifies the value of the --gpus=
# flag to pass to podman. Set to 'all' to pass all GPUs.
gpus: ""
# executor.podman.parallel_pulls (int): The system-wide maximum number
# of image layers to be pulled from remote container registries
# simultaneously. If set to 0, no value is set and podman will use its
# default value.
parallel_pulls: 0
# executor.podman.pids_limit (string): Specifies the value of the
# --pids-limit= flag to pass to podman. Set to '-1' for unlimited
# PIDs. The default is 2048 on systems that support pids cgroup
# controller.
pids_limit: ""
# executor.podman.pull_log_level (string): Level at which to log
# `podman pull` command output. Should be one of the standard log
# levels, all lowercase.
pull_log_level: ""
# executor.podman.pull_timeout (time.Duration): Timeout for image
# pulls.
pull_timeout: 10m0s
# executor.podman.runtime (string): Enables running podman with other
# runtimes, like gVisor (runsc).
runtime: ""
# executor.podman.soci_artifact_store_target (string): The GRPC url to
# use to access the SociArtifactStore GRPC service.
soci_artifact_store_target: ""
# executor.podman.soci_store_binary (string): The name of the
# soci-store binary to run. If empty, soci-store is not started even
# if it's needed (for local development).
soci_store_binary: soci-store
# executor.podman.soci_store_keychain_port (int): The port on which
# the soci-store local keychain service is exposed, for sharing
# credentials for streaming private container images.
soci_store_keychain_port: 1989
# executor.podman.soci_store_log_level (string): The level at which
# the soci-store should log. Should be one of the standard log levels,
# all lowercase.
soci_store_log_level: ""
# executor.podman.storage_driver (string): The podman storage driver
# to use.
storage_driver: overlay
# executor.podman.transient_store (bool): Enables --transient-store
# for podman commands. **DEPRECATED** --transient-store is now always
# applied if the podman version supports it
transient_store: false
# executor.podman.warmup_default_images (bool): Whether to warmup the
# default podman images or not.
warmup_default_images: true
# executor.pool (string): Executor pool name. Only one of this config
# option or the MY_POOL environment variable should be specified.
pool: ""
# executor.preserve_existing_netns (bool): Preserve existing bb-executor
# net namespaces. By default all "bb-executor" net namespaces are removed
# on executor startup, but if multiple executors are running on the same
# machine this behavior should be disabled to prevent them interfering
# with each other.
preserve_existing_netns: false
# executor.record_usage_timelines (bool): Capture resource usage
# timeseries data in UsageStats for each task.
record_usage_timelines: false
# executor.remote_snapshot_readonly (bool): Disables remote snapshot
# writes.
remote_snapshot_readonly: false
# executor.root_directory (string): The root directory to use for build
# files.
root_directory: /tmp/buildbuddy/remote_build
# executor.route_prefix (string): The prefix in the ip route to locate a
# device: either 'default' or the ip range of the subnet e.g.
# 172.24.0.0/18
route_prefix: default
runner_pool:
# executor.runner_pool.max_runner_count (int): Maximum number of
# recycled RBE runners that can be pooled at once. Defaults to a value
# derived from estimated CPU usage, max RAM, allocated CPU, and
# allocated memory.
max_runner_count: 0
# executor.runner_pool.max_runner_disk_size_bytes (int64): Maximum
# disk size for a recycled runner; runners exceeding this threshold
# are not recycled. Defaults to 16GB.
max_runner_disk_size_bytes: 16000000000
# executor.runner_pool.max_runner_memory_usage_bytes (int64): Maximum
# memory usage for a recycled runner; runners exceeding this threshold
# are not recycled.
max_runner_memory_usage_bytes: 0
# executor.shutdown_cleanup_duration (time.Duration): The minimum duration
# during the shutdown window to allocate for cleaning up containers. This
# is capped to the value of `max_shutdown_duration`.
shutdown_cleanup_duration: 15s
# executor.slow_task_threshold (time.Duration): Warn about tasks that take
# longer than this threshold.
slow_task_threshold: 1h0m0s
# executor.snaploader_eager_fetch_concurrency (int): Max number of
# goroutines allowed to run concurrently when eagerly fetching chunks.
snaploader_eager_fetch_concurrency: 32
# executor.snaploader_max_eager_fetches_per_sec (int): Max number of
# chunks snaploader can eagerly fetch in the background per second.
snaploader_max_eager_fetches_per_sec: 1000
# executor.startup_warmup_max_wait_secs (int64): Maximum time to block
# startup while waiting for default image to be pulled. Default is no
# wait.
startup_warmup_max_wait_secs: 0
# executor.task_ip_range (string): Subnet to allocate IP addresses from
# for actions that require network access. Must be a /16 range.
task_ip_range: 192.168.0.0/16
# executor.task_progress_publish_interval (time.Duration): How often tasks
# should publish progress updates to the app.
task_progress_publish_interval: 1m0s
# executor.verbose_snapshot_logs (bool): Enables extra-verbose snapshot
# logs (even at debug log level)
verbose_snapshot_logs: false
vfs:
# executor.vfs.log_fuse_latency_stats (bool): Enables logging of
# per-operation latency stats when VFS is unmounted. Implicitly
# enabled by --executor.vfs.verbose.
log_fuse_latency_stats: false
# executor.vfs.log_fuse_per_file_stats (bool): Enables tracking and
# logging of per-file per-operation stats. Logged when VFS is
# unmounted.
log_fuse_per_file_stats: false
# executor.vfs.verbose (bool): Enables verbose logs for VFS
# operations.
verbose: false
# executor.vfs.verbose_fuse (bool): Enables low-level verbose logs in
# the go-fuse library.
verbose_fuse: false
# executor.warmup_additional_images ([]string): List of container images
# to warm up alongside the executor default images on executor start up.
warmup_additional_images: []
# executor.warmup_timeout_secs (int64): The default time (in seconds) to
# wait for an executor to warm up i.e. download the default docker image.
# Default is 120s
warmup_timeout_secs: 120
# executor.warmup_workflow_images (bool): Whether to warm up the Linux
# workflow images (firecracker only).
warmup_workflow_images: false
workspace:
# executor.workspace.overlayfs_enabled (bool): Enable overlayfs
# support for anonymous action workspaces. ** UNSTABLE **
overlayfs_enabled: false
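  # Putting it together, a minimal sketch of a self-hosted executor pointed at
  # BuildBuddy Cloud might set only a handful of the options above (the API key
  # and pool name below are placeholders):
  #   api_key: "XXXXXXXXXXXXXXXXXXXX"
  #   app_target: "grpcs://remote.buildbuddy.io"
  #   pool: "self-hosted-linux"
  #   enable_oci: true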
github:
# github.access_token (string): The GitHub access token used to post
# GitHub commit statuses. ** Enterprise only **
access_token: ""
# github.client_id (string): The client ID of your GitHub Oauth App. **
# Enterprise only **
client_id: ""
# github.client_secret (string): The client secret of your GitHub Oauth
# App. ** Enterprise only **
client_secret: ""
# github.enterprise_host (string): The Github enterprise hostname to use
# if using GitHub enterprise server, not including https:// and no
# trailing slash.
enterprise_host: ""
# github.jwt_key (string): The key to use when signing JWT tokens for
# github auth.
jwt_key: ""
# github.status_name_suffix (string): Suffix to be appended to all
# reported GitHub status names. Useful for differentiating BuildBuddy
# deployments. For example: '(dev)' ** Enterprise only **
status_name_suffix: ""
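  # For example, a hypothetical GitHub OAuth App configuration (ID and secret
  # are placeholders):
  #   client_id: "YOUR_GITHUB_CLIENT_ID"
  #   client_secret: "YOUR_GITHUB_CLIENT_SECRET"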
grpc_client:
# grpc_client.pool_size (int): Number of connections to create to each
# target.
pool_size: 15
metrics:
# metrics.observed_group_ids ([]string): Group IDs allowed in group_id
# metrics labels. Groups not in this list will be labeled as 'ANON' if
# unauthenticated or 'other' if authenticated.
observed_group_ids: []
monitoring:
basic_auth:
# monitoring.basic_auth.password (string): Optional password for basic
# auth on the monitoring port.
password: ""
# monitoring.basic_auth.username (string): Optional username for basic
# auth on the monitoring port.
username: ""
# monitoring.ssl_port (int): If non-negative, the SSL port to listen for
# monitoring traffic on. `ssl` config must have `ssl_enabled: true` and be
# properly configured.
ssl_port: -1
olap_database:
# olap_database.cluster_name (string): The cluster name of the database
cluster_name: '{cluster}'
# olap_database.enable_data_replication (bool): If true, data replication
# is enabled.
enable_data_replication: false
# olap_database.replica_name (string): The replica name of the table in
# zookeeper
replica_name: '{replica}'
# olap_database.zoo_path (string): The path to the table name in
# zookeeper, used to set up data replication
zoo_path: /clickhouse/{installation}/{cluster}/tables/{shard}/{database}/{table}
remote_execution:
# remote_execution.ci_runner_default_timeout (time.Duration): Default
# timeout applied to all ci runners.
ci_runner_default_timeout: 8h0m0s
# remote_execution.ci_runner_recycling_max_wait (time.Duration): Max
# duration that a ci_runner task should wait for a warm runner before
# running on a potentially cold runner.
ci_runner_recycling_max_wait: 3s
# remote_execution.cpu_quota_limit (time.Duration): Maximum CPU time
# allowed for each quota period.
cpu_quota_limit: 3s
# remote_execution.cpu_quota_period (time.Duration): How often the CPU
# quota is refreshed.
cpu_quota_period: 100ms
# remote_execution.enable_remote_exec (bool): If true, enable remote-exec.
# ** Enterprise only **
enable_remote_exec: true
# remote_execution.pids_limit (int64): Maximum number of processes allowed
# per task at any time.
pids_limit: 2048
# remote_execution.redis_target (string): A Redis target for storing
# remote execution state. Falls back to app.default_redis_target if
# unspecified. Required for remote execution. To ease migration, the redis
# target from the cache config will be used if neither this value nor
# app.default_redis_target are specified.
redis_target: ""
sharded_redis:
# remote_execution.sharded_redis.password (string): Redis password
password: ""
# remote_execution.sharded_redis.shards ([]string): Ordered list of
# Redis shard addresses.
shards: []
# remote_execution.sharded_redis.username (string): Redis username
username: ""
# remote_execution.stored_task_size_millicpu_limit (int64): Limit placed
# on milliCPU calculated from task execution statistics.
stored_task_size_millicpu_limit: 7500
task_size_model:
# remote_execution.task_size_model.enabled (bool): Whether to enable
# model-based task size prediction.
enabled: false
# remote_execution.task_size_model.features_config_path (string): Path
# pointing to features.json config file.
features_config_path: ""
# remote_execution.task_size_model.serving_address (string): gRPC
# address pointing to TensorFlow Serving prediction service with task
# size models (cpu, mem).
serving_address: ""
# remote_execution.task_size_psi_correction (float64): What percentage of
# full-stall time should be subtracted from the execution duration.
task_size_psi_correction: 1
# remote_execution.use_measured_task_sizes (bool): Whether to use measured
# usage stats to determine task sizes.
use_measured_task_sizes: false
ssl:
# ssl.cert_file (string): Path to a PEM encoded certificate file to use
# for TLS if not using ACME.
cert_file: ""
# ssl.client_ca_cert (string): PEM encoded certificate authority used to
# issue client certificates for mTLS auth.
client_ca_cert: ""
# ssl.client_ca_cert_file (string): Path to a PEM encoded certificate
# authority file used to issue client certificates for mTLS auth.
client_ca_cert_file: ""
# ssl.client_ca_key (string): PEM encoded certificate authority key used
# to issue client certificates for mTLS auth.
client_ca_key: ""
# ssl.client_ca_key_file (string): Path to a PEM encoded certificate
# authority key file used to issue client certificates for mTLS auth.
client_ca_key_file: ""
# ssl.client_cert_lifespan (time.Duration): The duration client
# certificates are valid for. Ex: '730h' for one month. If not set,
# defaults to 100 years.
client_cert_lifespan: 876000h0m0s
# ssl.default_host (string): Host name to use for ACME generated cert if
# TLS request does not contain SNI.
default_host: ""
# ssl.enable_ssl (bool): Whether or not to enable SSL/TLS on gRPC
# connections (gRPCS).
enable_ssl: false
# ssl.host_whitelist ([]string): Cloud-Only
host_whitelist: []
# ssl.key_file (string): Path to a PEM encoded key file to use for TLS if
# not using ACME.
key_file: ""
# ssl.self_signed (bool): If true, a self-signed cert will be generated
# for TLS termination.
self_signed: false
# ssl.upgrade_insecure (bool): True if http requests should be redirected
# to https. Assumes http traffic is served on port 80 and https traffic is
# served on port 443 (typically via an ingress / load balancer).
upgrade_insecure: false
# ssl.use_acme (bool): Whether or not to automatically configure SSL certs
# using ACME. If ACME is enabled, cert_file and key_file should not be
# set.
use_acme: false
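  # For example, a hypothetical setup terminating TLS with your own certificate
  # (paths are placeholders; leave use_acme false when providing cert files):
  #   enable_ssl: true
  #   cert_file: "/etc/buildbuddy/tls/server.crt"
  #   key_file: "/etc/buildbuddy/tls/server.key"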