Version: 22.2

Redpanda Console Configuration

Configuration sources

Console loads configuration properties from three sources, in this order:

  • Command line arguments (flags)
  • YAML config
  • Environment variables

Because the sources are applied in this order, environment variables and YAML configs can overwrite input that is set using flags.

YAML configuration

The recommended configuration source is a YAML file. Specify the path to the file either by setting the -config.filepath flag or by providing the CONFIG_FILEPATH environment variable. A reference YAML configuration file is provided under Sample configuration.
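
For example, assuming the Console binary is named console and the file is mounted at /etc/console/config.yaml (both the binary name and the path are illustrative), either of the following starts Console with that file:

  ./console -config.filepath=/etc/console/config.yaml
  CONFIG_FILEPATH=/etc/console/config.yaml ./console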

Environment variables

Configuration properties can also be set using environment variables. The environment variable key is derived automatically from the YAML path: uppercase the YAML key and separate each nesting level with an underscore. For example:

  • kafka.rackId => KAFKA_RACKID
  • kafka.tls.caFilepath => KAFKA_TLS_CAFILEPATH
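
Following that rule, a nested YAML block and its environment variable equivalents look like this (the values are only illustrative):

  kafka:
    sasl:
      enabled: true
      username: console-user

  KAFKA_SASL_ENABLED=true
  KAFKA_SASL_USERNAME=console-user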

You can provide configuration properties that expect a list of values using a comma-separated string:

  • KAFKA_BROKERS=redpanda-0:9092,redpanda-1:9092,redpanda-2:9092
danger

You cannot use environment variables to configure object arrays, such as the configuration for Kafka connect clusters. The recommended configuration source is a YAML file, whose secrets can be provided using environment variables or command line arguments.
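
For example, one common pattern (sketched here with illustrative values) is to keep the non-sensitive SASL settings in the YAML file and inject only the password at runtime, either through the KAFKA_SASL_PASSWORD environment variable or the --kafka.sasl.password flag shown in the sample configuration below:

  kafka:
    brokers: ["redpanda-0:9092"]
    sasl:
      enabled: true
      username: console
      # password deliberately omitted here; provided at startup via
      # KAFKA_SASL_PASSWORD or --kafka.sasl.password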

Sample configuration

The following config.yaml configuration file includes a complete list of all Console config properties and their descriptions. All shown values reflect the default values. Provide the filepath to your configuration file by setting either the flag -config.filepath or the environment variable CONFIG_FILEPATH.

note

This config file contains both enterprise and community configurations. If you don't provide an enterprise license, Console ignores configurations for enterprise features.

kafka:
  # Brokers is a list of bootstrap servers with
  # port (for example "localhost:9092").
  brokers: []
  # Client ID that the Kafka client can use to identify itself
  # against the target Kafka cluster.
  clientId: console
  # In multi zone Kafka clusters you can reduce traffic costs
  # by consuming messages from replica brokers in the same zone.
  rackId:
  # SASL configuration for Console to connect to the Kafka API.
  sasl:
    enabled: false
    username:
    # Password can be set via the --kafka.sasl.password flag as well.
    password:
    # Valid mechanisms are: PLAIN, SCRAM-SHA-256,
    # SCRAM-SHA-512, GSSAPI, OAUTHBEARER and AWS_MSK_IAM.
    mechanism: PLAIN
    gssapi:
      authType:
      keyTabPath:
      kerberosConfigPath:
      serviceName:
      username:
      # Password can be set via the --kafka.sasl.gssapi.password flag as well.
      password:
      realm:
      enableFast: true
    oauth:
      # Can be set via the --kafka.sasl.oauth.token flag as well.
      token:
    awsMskIam:
      accessKey:
      # Can be set via the --kafka.sasl.aws-msk-iam.secret-key flag
      # as well.
      secretKey:
      # Can be set via the --kafka.sasl.aws-msk-iam.session-token flag
      # as well.
      sessionToken:
      userAgent:
  tls:
    # If you connect to a cluster that uses commonly trusted
    # certificates, enable TLS and do not provide a certificate
    # authority by not configuring the caFilepath. In this case,
    # the system's cert pool is used.
    enabled: false
    caFilepath:
    certFilepath:
    keyFilepath:
    # This can be set via the --kafka.tls.passphrase flag as well.
    passphrase:
    insecureSkipTlsVerify: false
  schemaRegistry:
    enabled: false
    # URL with scheme is required, e.g. ["http://localhost:8081"]
    urls: []
    # Basic auth username
    username:
    # Basic auth password. This can be set via the --schema.registry.password
    # flag as well.
    password:
    # This can be set via the --schema.registry.token flag as well.
    bearerToken:
    tls:
      # If you connect to a schema registry that uses commonly trusted
      # certificates, enable TLS and do not provide a certificate
      # authority by not configuring the caFilepath. In this case,
      # the system's cert pool is used.
      enabled: false
      caFilepath:
      certFilepath:
      keyFilepath:
      insecureSkipTlsVerify: false
  protobuf:
    enabled: false
    # Map the proto type names for each of your topics.
    # These proto types will be used for deserialization.
    # You can specify the proto type for the record key
    # and/or value (just one will work too), for example:
    # - topicName: xy
    #   valueProtoType: fake_model.Order
    #   keyProtoType: package.Type
    mappings: []
    # SchemaRegistry does not require any mappings to be specified.
    # The schema registry client that is configured in the
    # kafka config block will be reused.
    schemaRegistry:
      enabled: false
      refreshInterval: 5m
    # FileSystem can be configured if you want Console to
    # search the local file system for the .proto files.
    fileSystem:
      enabled: false
      paths: []
      refreshInterval: 5m
    # Git is where the .proto files come from.
    git:
      enabled: false
      repository:
        url:
        # Defaults to the primary/default branch.
        branch:
      # How often Console shall pull the repository to look for new files.
      # Set 0 to disable periodic pulls.
      refreshInterval: 1m
      # Basic Auth
      # If you want to use GitHub's personal access tokens, use `token`
      # as username and pass the token as password.
      basicAuth:
        enabled: true
        username: token
        password:
      # SSH Auth
      # You can either pass the private key file directly via a flag or
      # YAML config. Another option is to refer to a mounted key file
      # by providing the filepath in this config block.
      ssh:
        enabled: false
        username:
        privateKey:
        privateKeyFilepath:
        passphrase:
  messagePack:
    enabled: false
    topicNames: ["/.*/"] # List of topic name regexes, defaults to /.*/

connect:
  enabled: false
  # An empty array for clusters is the default, but you have to
  # specify at least one cluster as soon as you enable Kafka connect.
  # Otherwise you won't be able to start Console.
  clusters: []
    # - name: xy
    #   url: http://my-cluster:8083
    #   tls:
    #     # Trusted certificates are still allowed if TLS is not enabled.
    #     enabled: false
    #     caFilepath:
    #     certFilepath:
    #     keyFilepath:
    #     insecureSkipTlsVerify: false
    #   username:
    #   password:
    #   token:
  connectTimeout: 15s # used to test cluster connectivity
  readTimeout: 60s # overall REST timeout
  requestTimeout: 6s # timeout for REST requests

console:
  # Config to use for embedded topic documentation.
  topicDocumentation:
    enabled: false
    # Configure the git repository, which contains the topic documentation.
    # Console clones the git repository and periodically pulls for new
    # changes so that it can render the markdown files within the topic view.
    git:
      enabled: false
      repository:
        url:
        # Defaults to the primary/default branch.
        branch:
        baseDirectory: .
      # How often Console shall pull the repository to look for new files.
      # Set 0 to disable periodic pulls.
      refreshInterval: 1m
      # If you want to use GitHub's personal access tokens, use `token`
      # as username and pass the token as password.
      basicAuth:
        enabled: true
        username: token
        password:
      ssh:
        enabled: false
        username:
        privateKey:
        privateKeyFilepath:
        passphrase:

redpanda:
  # Redpanda Admin API configuration that enables additional features
  # that are Redpanda specific.
  adminApi:
    enabled: false
    # HTTP URLs (e.g. http://localhost:9644) that Console should use
    # to send Admin API requests to.
    urls: []
    # Username for basic auth
    username:
    # Password for basic auth
    password:
    tls:
      enabled: false
      caFilepath:
      certFilepath:
      keyFilepath:
      insecureSkipTlsVerify:

# Filepath to your redpanda.license file.
# This is only required if you want to use an Enterprise feature
# such as SSO or RBAC.
licenseFilepath:

# Login contains all configurations in order to protect Console
# with a login screen. Configure one or more of the below identity
# providers in order to support SSO.
# This feature requires an Enterprise license.
login:
  enabled: false
  # jwtSecret is a secret string that is used to sign and encrypt
  # the JSON Web Tokens that are used by the backend for session management.
  jwtSecret: redacted
  google:
    enabled: false
    clientId: redacted.apps.googleusercontent.com
    clientSecret: redacted
    # The directory config is optional. You have to configure it if you want
    # to use Google groups in your RBAC role bindings.
    directory:
      # Filepath to the mounted service account key file in JSON format.
      serviceAccountFilepath: /etc/secrets/google-sa.json
      # targetPrincipal is the user that shall be impersonated
      # for the Google Admin API calls.
      targetPrincipal: admin@mycompany.com
  oidc:
    enabled: false
    clientId: redacted
    clientSecret: redacted
    domain: example-endpoint.us.auth0.com # OIDC endpoint
    userIdentifyingClaimKey: sub
  github:
    enabled: false
    clientId: redacted
    clientSecret: redacted
    # The directory config is optional. You have to configure it if you want
    # to use GitHub teams in your RBAC role bindings.
    directory:
      personalAccessToken: redacted
  okta:
    enabled: false
    clientId: redacted
    clientSecret: redacted
    # The directory config is optional. You have to configure it if you want
    # to use Okta groups in your RBAC role bindings.
    directory:
      apiToken: redacted

# All configurations in the enterprise block contain features that
# can only be used with a valid Enterprise license.
enterprise:
  rbac:
    # Whether or not RBAC shall be used. This must be enabled
    # if login is enabled. By default no authenticated user
    # has any permissions.
    enabled: false
    # Path to the YAML file that contains all role bindings.
    roleBindingsFilepath:

# Server configures Console's HTTP server that serves all resources, including the Frontend application.
server:
  listenPort: 8080
  listenAddress:
  gracefulShutdownTimeout: 30s
  readTimeout: 30s
  writeTimeout: 30s
  idleTimeout: 30s
  compressionLevel: 4
  # Sub-path under which Console is hosted. See Features / HTTP path rewrites.
  basePath: ""
  # Whether or not to check the 'X-Forwarded-Prefix' header to (potentially)
  # override 'basePath'. See Features / HTTP path rewrites.
  setBasePathFromXForwardedPrefix: true
  # Whether or not Console should strip the prefix internally.
  stripPrefix: true

logger:
  level: info # Valid values are: debug, info, warn, error, fatal

# Prefix for all exported Prometheus metrics.
metricsNamespace: console
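
In practice you rarely need all of these properties. As a starting point, a minimal configuration usually only sets the connection details and leaves everything else at its defaults; the sketch below (broker addresses, credentials, and URLs are purely illustrative) connects Console to a three-broker cluster with SASL/SCRAM and a schema registry:

kafka:
  brokers: ["redpanda-0:9092", "redpanda-1:9092", "redpanda-2:9092"]
  sasl:
    enabled: true
    username: console
    password: secret
    mechanism: SCRAM-SHA-256
  schemaRegistry:
    enabled: true
    urls: ["http://redpanda-0:8081"]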