# Whether to allow SSH access to application instances
allow_app_ssh_access: true
# Change to anything other than 'wipe' to bypass the wipe-env job
arg_wipe: wipe
authentication_mode: internal
# If azure_storage_account_name is empty (""), ERT will use internal storage. If you want ERT to use Azure Blob storage, provide a valid storage account name.
azure_storage_account_name: CHANGEME # <-- This has to be created manually. Please see the instructions in the blog.
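# A minimal sketch of creating the storage account with the Azure CLI, as an
# alternative to the portal steps in the blog. The resource group name and
# storage account name below are placeholders, not values used elsewhere in
# this pipeline:
#   az group create --name pcf-storage-rg --location westus
#   az storage account create --name mypcfstorageacct --resource-group pcf-storage-rg --location westus --sku Standard_LRS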
# Required if Azure blob storage is used as external storage by ERT, which is determined by azure_storage_account_name being not null. This setting is used to specify the container where the buildpacks will be stored. It is similar to the S3 bucket in AWS.
azure_buildpacks_container: CHANGEME # <-- my sample value is buildpacks-az-c2s-com
azure_client_id: CHANGEME
azure_client_secret: CHANGEME
# Required if Azure blob storage is used as external storage by ERT, which is determined by azure_storage_account_name being not null. This setting is used to specify the container where the droplets will be stored. It is similar to the S3 bucket in AWS.
azure_droplets_container: CHANGEME # <-- my sample value is droplets-az-c2s-com
# Required if using c0-azure-multi-res-group - An existing network for your Azure environment
azure_multi_resgroup_network:
# Required if using c0-azure-multi-res-group - The resource group name for your Azure environment
azure_multi_resgroup_pcf:
# Private IP for the Operations Manager VM
azure_opsman_priv_ip: 192.168.0.4
# Required if Azure blob storage is used as external storage by ERT, which is determined by azure_storage_account_name being not null. This setting is used to specify the container where the packages will be stored. It is similar to the S3 bucket in AWS.
azure_packages_container: CHANGEME # <-- my sample value is packages-az-c2s-com
# The set of terraform templates and opsman configuration to use in the pipeline (c0-azure-base|c0-azure-multi-res-group)
azure_pcf_terraform_template: c0-azure-base
# A valid Azure region.
azure_region: westus
# Required if Azure blob storage is used as external storage by ERT, which is determined by azure_storage_account_name being not null. This setting is used to specify the container where the resources will be stored. It is similar to the S3 bucket in AWS.
azure_resources_container: CHANGEME # <-- my sample value is resources-az-c2s-com
azure_subscription_id: CHANGEME
azure_tenant_id: CHANGEME
# Prefix to use for Terraform-managed infrastructure, e.g. 'pcf-terraform'
# Must be globally unique.
azure_terraform_prefix: CHANGEME # <-- my sample value is c2s
# ERT-specific network configurations. Leave these as default unless you need to make changes to align with existing infrastructure.
azure_terraform_subnet_dynamic_services_cidr: "192.168.12.0/22"
azure_terraform_subnet_dynamic_services_dns: "168.63.129.16,8.8.8.8"
azure_terraform_subnet_dynamic_services_gateway: "192.168.12.1"
azure_terraform_subnet_dynamic_services_reserved: "192.168.12.1-192.168.12.9"
azure_terraform_subnet_ert_cidr: "192.168.4.0/22"
azure_terraform_subnet_ert_dns: "168.63.129.16,8.8.8.8"
azure_terraform_subnet_ert_gateway: "192.168.4.1"
azure_terraform_subnet_ert_reserved: "192.168.4.1-192.168.4.9"
azure_terraform_subnet_infra_cidr: "192.168.0.0/26"
azure_terraform_subnet_infra_dns: "168.63.129.16,8.8.8.8"
azure_terraform_subnet_infra_gateway: "192.168.0.1"
azure_terraform_subnet_infra_reserved: "192.168.0.1-192.168.0.9"
azure_terraform_subnet_services1_cidr: "192.168.8.0/22"
azure_terraform_subnet_services1_dns: "168.63.129.16,8.8.8.8"
azure_terraform_subnet_services1_gateway: "192.168.8.1"
azure_terraform_subnet_services1_reserved: "192.168.8.1-192.168.8.9"
azure_terraform_vnet_cidr: "192.168.0.0/20"
# The azure_vm_admin value should match the user ID used to create the pcf_ssh_key_pub key.
# The user ID appears at the end of the public key.
azure_vm_admin: ubuntu
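# For illustration (hypothetical key), the user ID is the trailing comment of the public key line:
#   ssh-rsa AAAAB3NzaC1yc2E... ubuntu
# so azure_vm_admin would be "ubuntu" for that key.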
# Ciphers
# An ordered, colon-delimited list of Golang-supported TLS cipher suites in OpenSSL format.
# Operators should verify that these are supported by any clients or downstream components that will initiate TLS handshakes with the Router/HAProxy.
# The recommended settings are filled in below, change as necessary.
router_tls_ciphers: "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384"
haproxy_tls_ciphers: "DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384"
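# A quick sketch for verifying that a client can negotiate one of the ciphers
# above, using an OpenSSL handshake test (the host below is a placeholder for
# your router or HAProxy endpoint):
#   openssl s_client -connect api.<system_domain>:443 -cipher ECDHE-RSA-AES128-GCM-SHA256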
# Company Name for Apps Manager
company_name: CHANGEME # <-- my sample value is Clue2solve Inc
# Container-to-container (c2c) networking CIDR
container_networking_nw_cidr: 10.255.0.0/16
# For credhub integration, set the number of credhub instances in resource config to 2
# Primary Encryption Name MUST match one of the encryption key names provided
# Encryption keys 2 and 3 are optional
credhub_primary_encryption_name: key1
credhub_encryption_key_name1: key1
credhub_encryption_key_secret1: abcdefghijklmnopqrstuvwxyz123456789123456789
credhub_encryption_key_name2: # Optional Name 2
credhub_encryption_key_secret2: # Optional Secret 2
credhub_encryption_key_name3: # Optional Name 3
credhub_encryption_key_secret3: # Optional Secret 3
default_quota_max_number_services: 1000
default_quota_memory_limit_mb: 10240
# Domain Names for ERT
pcf_ert_domain: CHANGEME # <-- my sample value is pas.cf.az.clue2solve.com # This is the domain you will use to access ERT
system_domain: CHANGEME # <-- my sample value is sys.pas.cf.az.clue2solve.com # This is usually sys.${pcf_ert_domain}
apps_domain: CHANGEME # <-- my sample value is cfapps.pas.cf.az.clue2solve.com # This is usually cfapps.${pcf_ert_domain}
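# To illustrate how the three domains relate (example.com is a placeholder, not
# a value used by this pipeline):
#   pcf_ert_domain: pas.cf.az.example.com
#   system_domain:  sys.pas.cf.az.example.com
#   apps_domain:    cfapps.pas.cf.az.example.com
# Wildcard DNS records for the system and apps domains typically resolve to the
# web load balancer created by the terraform templates.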
# This defines the deployment network name within PCF.
deployment_network_name: "ert"
# Disable HTTP on gorouters (true|false)
disable_http_proxy: false
# If true, disable insecure cookies on the router.
disable_insecure_cookies: false
# PCF Elastic Runtime minor version to track
ert_major_minor_version: ^2\.1\.[0-9]+$ # ERT minor version to track (e.g. ^2\.0\.[0-9]+$ will track 2.0.x versions)
# Optional. Duration in seconds to maintain an open connection when client supports keep-alive.
frontend_idle_timeout: 239
# Optional - if your git repo requires an SSH key.
git_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  CHANGEME
  -----END RSA PRIVATE KEY-----
# Required if haproxy_forward_tls is enabled - HAProxy will use this CA to verify the certificates presented by the router.
haproxy_backend_ca:
# If enabled, HAProxy will forward all requests to the router over TLS (enable|disable)
haproxy_forward_tls: disable
# The load balancer name is your azure_terraform_prefix followed by "-web-lb". For example, if your azure_terraform_prefix is "test", then ha_proxy_lb_name is "test-web-lb".
ha_proxy_lb_name: CHANGEME # <-- my sample value is c2s-web-lb
# Instances
## These resources can take any parameter made available in
## the Ops Manager API (https://your-ops-man/docs#configuring-resources-for-a-job)
backup_prepare_instances: 0
clock_global_instances: 1
cloud_controller_instances: 2
cloud_controller_worker_instances: 2
consul_server_instances: 3
credhub_instances: 0
diego_brain_instances: 3
diego_cell_instances: 3
diego_database_instances: 3
doppler_instances: 3
ha_proxy_instances: 1
loggregator_trafficcontroller_instances: 1
mysql_instances: 1
mysql_monitor_instances: 0
mysql_proxy_instances: 1
nats_instances: 2
nfs_server_instances: 1
router_instances: 3
syslog_adapter_instances: 3
syslog_scheduler_instances: 2
tcp_router_instances: 0
uaa_instances: 2
# Whether or not the ERT VMs are internet connected.
internet_connected: false
# IPs
# Leave blank if you don't need to make changes to existing network infrastructure configs.
ha_proxy_ips: # Comma-separated list of static IPs
router_static_ips: # Comma-separated list of static IPs
tcp_router_static_ips: # Comma-separated list of static IPs
ssh_static_ips: # Comma-separated list of static IPs
mysql_static_ips: # Comma-separated list of static IPs
# Loggregator port. Default is 443
loggregator_endpoint_port:
# Email address for sending mysql monitor notifications
mysql_monitor_recipient_email: CHANGEME # <-- my sample value is anand@clue2solve.com
# opsman_admin_username/opsman_admin_password need to be specified.
# These define the login credentials for the Ops Manager UI.
opsman_admin_username: admin
opsman_admin_password: CHANGEME
# Opsman Settings
opsman_domain_or_ip_address: opsman.pas.cf.az.clue2solve.com # This should be your pcf_ert_domain with "opsman." as a prefix
# PCF Ops Manager minor version to track
opsman_major_minor_version: ^2\.1\.[0-9]+$ # Ops Manager minor version to track (e.g. ^2\.0\.[0-9]+$ will track 2.0.x versions)
# SAML service provider credential cert and key. If left blank, the pipeline will auto-generate the certs.
pcf_ert_saml_cert:
pcf_ert_saml_key:
pcf_opsman_disk_size_in_gb: 120
# SSH keys for Operations Manager director
# More information can be found in these locations.
# 1. https://docs.pivotal.io/pivotalcf/2-0/customizing/pcf_azure.html#getting-started
# 2. https://docs.pivotal.io/pivotalcf/2-0/customizing/azure-om-config.html#azure-config
pcf_ssh_key_pub: CHANGEME
pcf_ssh_key_priv: |
  -----BEGIN RSA PRIVATE KEY-----
  CHANGEME
  -----END RSA PRIVATE KEY-----
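# One way to generate this keypair is with ssh-keygen (a sketch; the output file
# name is arbitrary, and the comment should match azure_vm_admin above):
#   ssh-keygen -t rsa -b 4096 -f pcf_ssh_key -C ubuntu -N ''
# The contents of pcf_ssh_key.pub go into pcf_ssh_key_pub, and pcf_ssh_key into pcf_ssh_key_priv.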
# Pivnet token for downloading resources from Pivnet. Find this token at https://network.pivotal.io/users/dashboard/edit-profile
pivnet_token: CHANGEME
# Optional. If blank, the cert(s) will be generated
poe_ssl_name1:
poe_ssl_cert1:
poe_ssl_key1:
poe_ssl_name2:
poe_ssl_cert2:
poe_ssl_key2:
poe_ssl_name3:
poe_ssl_cert3:
poe_ssl_key3:
# Request timeout for gorouter
# It is recommended by both Pivotal and Microsoft that this value remain under 240 due to timeout configurations in Azure.
router_request_timeout_in_seconds: 239
# Optional - these certificates can be used to validate the certificates from incoming client requests.
# All CA certificates should be appended together into a single collection of PEM-encoded entries.
routing_custom_ca_certificates:
# Support for the X-Forwarded-Client-Cert header. Possible values: (load_balancer|ha_proxy|router)
routing_tls_termination: load_balancer
# Setting appropriate Application Security Groups is critical for a secure
# deployment. Change the value of the param below to "X" to acknowledge that
# once the Elastic Runtime deployment completes, you will review and set the
# appropriate application security groups.
# See https://docs.pivotal.io/pivotalcf/opsguide/app-sec-groups.html
security_acknowledgement: X
# Whether to disable SSL cert verification for this environment.
skip_cert_verify: true
# If smtp_address is configured, smtp_from, smtp_port, smtp_user, smtp_pwd,
# smtp_enable_starttls_auto, and smtp_auth_mechanism must also be set.
smtp_address:
smtp_auth_mechanism: # (none|plain|cram-md5)
smtp_enable_starttls_auto: true
smtp_from:
smtp_port:
smtp_pwd:
smtp_user:
## Syslog endpoint configuration goes here
# Optional. If syslog_host is specified, syslog_port, syslog_protocol,
# syslog_drain_buffer_size, and enable_security_event_logging must be set.
enable_security_event_logging: false
syslog_drain_buffer_size: 10000
syslog_host:
syslog_port:
syslog_protocol:
## TCP routing and routing services
ignore_ssl_cert_verification: false # Whether to disable SSL cert verification for route services
route_services: enable # Enable/disable route services (enable|disable)
tcp_routing: disable # Enable/disable TCP routing (enable|disable)
tcp_routing_ports: # A comma-separated list of ports and hyphen-separated port ranges, e.g. 52135,34000-35000,23478
# Optional PEM-encoded certificates to add to BOSH VMs
trusted_certificates:
# The storage account and container that will be used for your terraform state
terraform_azure_storage_account_name: CHANGEME # <-- my sample value is araoc2sterraform
terraform_azure_storage_access_key: CHANGEME # <-- See the blog for how to extract this value from the Azure portal
azure_storage_container_name: CHANGEME # <-- my sample value is terraformstate
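# A sketch of retrieving the access key and creating the state container with
# the Azure CLI, using the sample values above (the resource group name is a
# placeholder):
#   az storage account keys list --resource-group pcf-terraform-rg --account-name araoc2sterraform
#   az storage container create --name terraformstate --account-name araoc2sterraform --account-key <key-from-previous-command>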