helm-install.log
$ helm get all chart-1573001538
NAME: chart-1573001538
LAST DEPLOYED: Tue Nov 5 21:52:21 2019
NAMESPACE: dev-poc-namespace
STATUS: deployed
REVISION: 1
TEST SUITE: None
USER-SUPPLIED VALUES:
null
COMPUTED VALUES:
global:
  enabled: true
  image: harbor.private.registry/vault:1.2.2
  tlsDisable: true
server:
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: {{ template "vault.name" . }}
              release: "{{ .Release.Name }}"
              component: server
          topologyKey: kubernetes.io/hostname
  annotations: {}
  auditStorage:
    accessMode: ReadWriteOnce
    enabled: false
    size: 10Gi
    storageClass: null
  authDelegator:
    enabled: false
  dataStorage:
    accessMode: ReadWriteOnce
    enabled: true
    size: 10Gi
    storageClass: null
  dev:
    enabled: false
  extraEnvironmentVars: {}
  extraSecretEnvironmentVars: []
  extraVolumes: []
  ha:
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "consul" {
        path = "vault"
        address = "HOST_IP:8500"
      }
      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #  project = "vault-helm-dev-246514"
      #  region = "global"
      #  key_ring = "vault-helm-unseal-kr"
      #  crypto_key = "vault-helm-unseal-key"
      #}
    disruptionBudget:
      enabled: true
      maxUnavailable: null
    enabled: false
    replicas: 3
  nodeSelector: {}
  resources: null
  service:
    enabled: true
  standalone:
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
        cluster_address = "[::]:8201"
      }
      storage "file" {
        path = "/vault/data"
      }
      # Example configuration for using auto-unseal, using Google Cloud KMS. The
      # GKMS keys must already exist, and the cluster must have a service account
      # that is authorized to access GCP KMS.
      #seal "gcpckms" {
      #  project = "vault-helm-dev"
      #  region = "global"
      #  key_ring = "vault-helm-unseal-kr"
      #  crypto_key = "vault-helm-unseal-key"
      #}
    enabled: '-'
  tolerations: {}
ui:
  annotations: {}
  enabled: false
  serviceNodePort: null
  serviceType: ClusterIP
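
Note: USER-SUPPLIED VALUES is null, so everything above is a chart default. Overrides
could be passed at install time with --set or a values file; a hypothetical example
(Helm 3 syntax, which the "helm get all" invocation above suggests; the local chart
path is assumed):

  $ helm install ./vault-helm --generate-name \
      --namespace dev-poc-namespace \
      --set global.image=harbor.private.registry/vault:1.2.2
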
HOOKS:
MANIFEST:
---
# Source: vault/templates/server-config-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: chart-1573001538-vault-config
  labels:
    helm.sh/chart: vault-0.1.2
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: chart-1573001538
    app.kubernetes.io/managed-by: Helm
data:
  extraconfig-from-values.hcl: |-
    ui = true
    listener "tcp" {
      tls_disable = 1
      address = "[::]:8200"
      cluster_address = "[::]:8201"
    }
    storage "file" {
      path = "/vault/data"
    }
    # Example configuration for using auto-unseal, using Google Cloud KMS. The
    # GKMS keys must already exist, and the cluster must have a service account
    # that is authorized to access GCP KMS.
    #seal "gcpckms" {
    #  project = "vault-helm-dev"
    #  region = "global"
    #  key_ring = "vault-helm-unseal-kr"
    #  crypto_key = "vault-helm-unseal-key"
    #}
---
# Source: vault/templates/server-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: chart-1573001538-vault
  namespace: dev-poc-namespace
  labels:
    helm.sh/chart: vault-0.1.2
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: chart-1573001538
    app.kubernetes.io/managed-by: Helm
---
# Source: vault/templates/server-service.yaml
# Service for Vault cluster
apiVersion: v1
kind: Service
metadata:
  name: chart-1573001538-vault
  labels:
    helm.sh/chart: vault-0.1.2
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: chart-1573001538
    app.kubernetes.io/managed-by: Helm
  annotations:
    # This must be set in addition to publishNotReadyAddresses due
    # to an open issue where it may not work:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  # We want the servers to become available even if they're not ready
  # since this DNS is also used for join operations.
  publishNotReadyAddresses: true
  ports:
    - name: http
      port: 8200
      targetPort: 8200
    - name: internal
      port: 8201
      targetPort: 8201
  selector:
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: chart-1573001538
    component: server
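
Since this Service is ClusterIP, reaching the Vault API from outside the cluster
needs a port-forward; a minimal sketch using the service name from this release:

  $ kubectl -n dev-poc-namespace port-forward svc/chart-1573001538-vault 8200:8200
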
---
# Source: vault/templates/server-statefulset.yaml
# StatefulSet to run the actual vault server cluster.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: chart-1573001538-vault
  labels:
    helm.sh/chart: vault-0.1.2
    app.kubernetes.io/name: vault
    app.kubernetes.io/instance: chart-1573001538
    app.kubernetes.io/managed-by: Helm
spec:
  serviceName: chart-1573001538-vault
  podManagementPolicy: Parallel
  replicas: 1
  updateStrategy:
    type: OnDelete
  selector:
    matchLabels:
      helm.sh/chart: vault-0.1.2
      app.kubernetes.io/name: vault
      app.kubernetes.io/instance: chart-1573001538
      component: server
  template:
    metadata:
      labels:
        helm.sh/chart: vault-0.1.2
        app.kubernetes.io/name: vault
        app.kubernetes.io/instance: chart-1573001538
        component: server
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: vault
                  release: "chart-1573001538"
                  component: server
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      serviceAccountName: chart-1573001538-vault
      securityContext:
        fsGroup: 1000
      volumes:
        - name: config
          configMap:
            name: chart-1573001538-vault-config
      containers:
        - name: vault
          securityContext:
            privileged: true
          image: "harbor.private.registry/vault:1.2.2"
          command:
            - "/bin/sh"
            - "-ec"
          args:
            - |
              sed -E "s/HOST_IP/${HOST_IP?}/g" /vault/config/extraconfig-from-values.hcl > /tmp/storageconfig.hcl;
              sed -Ei "s/POD_IP/${POD_IP?}/g" /tmp/storageconfig.hcl;
              chown vault:vault /tmp/storageconfig.hcl;
              /usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: VAULT_ADDR
              value: "http://127.0.0.1:8200"
            - name: VAULT_API_ADDR
              value: "http://$(POD_IP):8200"
            - name: SKIP_CHOWN
              value: "true"
          volumeMounts:
            - name: data
              mountPath: /vault/data
            - name: config
              mountPath: /vault/config
          ports:
            - containerPort: 8200
              name: http
            - containerPort: 8201
              name: internal
            - containerPort: 8202
              name: replication
          readinessProbe:
            # Check status; unsealed vault servers return 0
            # The exit code reflects the seal status:
            #   0 - unsealed
            #   1 - error
            #   2 - sealed
            exec:
              command: ["/bin/sh", "-ec", "vault status -tls-skip-verify"]
            failureThreshold: 2
            initialDelaySeconds: 5
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 5
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
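
The readiness probe above runs vault status, whose exit code encodes the seal state
(0 unsealed, 1 error, 2 sealed), and the container startup script rewrites the
HOST_IP/POD_IP placeholders from the rendered config before starting the server.
The same seal check can be run by hand; a sketch assuming the StatefulSet's
ordinal-0 pod name:

  $ kubectl -n dev-poc-namespace exec chart-1573001538-vault-0 -- vault status -tls-skip-verify
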
---
# Source: vault/templates/server-disruptionbudget.yaml
# PodDisruptionBudget to prevent degrading the server cluster through
# voluntary cluster changes.
---
# Source: vault/templates/ui-service.yaml
# Headless service for Vault server DNS entries. This service should only
# point to Vault servers. For access to an agent, one should assume that
# the agent is installed locally on the node and the NODE_IP should be used.
# If the node can't run a Vault agent, then this service can be used to
# communicate directly to a server agent.
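
To watch the release come up, the pods can be listed with the selector labels used
in the manifests above:

  $ kubectl -n dev-poc-namespace get pods \
      -l app.kubernetes.io/name=vault,app.kubernetes.io/instance=chart-1573001538
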
NOTES:
Thank you for installing HashiCorp Vault!

Now that you have deployed Vault, you should look over the docs on using
Vault with Kubernetes available here:

https://www.vaultproject.io/docs/

Your release is named chart-1573001538. To learn more about the release, try:

  $ helm status chart-1573001538
  $ helm get chart-1573001538
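
A freshly deployed server starts sealed, so the pod will report NotReady until it is
initialized and unsealed; a minimal sketch, assuming the default ordinal-0 pod name
(init prints the unseal keys and initial root token; store them safely):

  $ kubectl -n dev-poc-namespace exec -ti chart-1573001538-vault-0 -- vault operator init
  $ kubectl -n dev-poc-namespace exec -ti chart-1573001538-vault-0 -- vault operator unseal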