I hereby claim:
- I am rudolphjacksonm on github.
- I am jackmo (https://keybase.io/jackmo) on keybase.
- I have a public key ASBvrFU8ReVEiFUjt0PkoaJh8QZ0Zn_P3s6T4FNDKJX_Ggo
To claim this, I am signing this object:
# Dump selected OS properties (wildcards match e.g. SerialNumber/CSName via *rn*,
# *j*, v*) plus the BIOS serial number as a computed property, for the local
# machine (-co . = -ComputerName .).
# NOTE(review): the original line ended in a dangling '|' with no downstream
# command (a parse error); the trailing pipe was removed so output goes to the
# console - confirm whether a formatter/exporter was lost in extraction.
gwmi win32_operatingsystem -co . | select *rn*, *j*, v*, @{n="BIOSSERIAL";e={(gwmi win32_bios).SerialNumber}}
I hereby claim:
To claim this, I am signing this object:
# Backup all cert-manager data before upgrade
kubectl get -o yaml \
  --all-namespaces \
  issuer,clusterissuer,certificates > cert-manager-backup.yaml

# Backing up certificaterequests separately as our cluster
# didn't have this type of resource and hence would error out
# (appended with >> so it lands in the same backup file).
kubectl get -o yaml \
  --all-namespaces \
  certificaterequests >> cert-manager-backup.yaml
# Remove cert-manager from kube-system
helm uninstall cert-manager -n kube-system

# Ensure the old certmanager.k8s.io CRDs are removed.
# 'NotFound' errors from this command are harmless - they just mean the
# CRDs were already gone.
kubectl delete crd \
  certificates.certmanager.k8s.io \
  issuers.certmanager.k8s.io \
  clusterissuers.certmanager.k8s.io
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "certificates.certmanager.k8s.io" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "issuers.certmanager.k8s.io" not found
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "clusterissuers.certmanager.k8s.io" not found
# Apply the cert-manager v0.7 CRDs
kubectl apply \
  -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.7/deploy/manifests/00-crds.yaml

# Create the cert-manager namespace if it doesn't already exist
kubectl create namespace cert-manager
# Label the namespace to disable resource validation on it
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation="true"

# Install the new version of cert-manager
helm repo add jetstack https://charts.jetstack.io
#!/bin/bash
# Backup all cert-manager data before upgrade
kubectl get -o yaml \
  --all-namespaces \
  issuer,clusterissuer,certificates > cert-manager-backup.yaml

# Backing up certificaterequests separately as our cluster
# didn't have this type of resource and hence would error out
# (final resource argument restored from the identical backup block
# earlier in this file; the source was truncated mid-continuation).
kubectl get -o yaml \
  --all-namespaces \
  certificaterequests >> cert-manager-backup.yaml
# Remove ClusterRoleBinding(s)
# NOTE: redirections reordered to '> /dev/null 2>&1' - the original
# '2>&1 > /dev/null' sent stderr to the terminal instead of silencing it.
if kubectl get clusterrolebinding -l app=cert-manager > /dev/null 2>&1; then
  echo 'Removing ClusterRoleBindings for cert-manager'
  kubectl delete clusterrolebinding -l app=cert-manager
fi

# Remove ClusterRole(s)
if kubectl get clusterrole -l app=cert-manager > /dev/null 2>&1; then
  echo 'Removing ClusterRoles for cert-manager'
  kubectl delete clusterrole -l app=cert-manager
fi
# Azure DevOps pipeline: no CI trigger; Terraform paths built from components.
trigger:
  - none

variables:
  - name: PROJECT_PATH
    value: terraform
  - name: tfComponent
    value: cluster
  # Directory the Terraform tasks run in, derived from the two values above.
  - name: tfExecutionDir
    value: '$(PROJECT_PATH)/src/$(tfComponent)'
---
# NOTE(review): the leading '- task: ...' line of this first step is missing
# from the source (lost in extraction). The terraformVersion input suggests a
# Terraform installer task - confirm against the original pipeline.
  displayName: 'Install Terraform'
  inputs:
    terraformVersion: '$(terraformVersion)'

# Initialize Terraform with the azurerm backend provider.
- task: TerraformTaskV1@0
  displayName: 'Terraform Init'
  inputs:
    provider: 'azurerm'
    command: 'init'
    workingDirectory: '$(tfExecutionDir)'
---
# Produce a Terraform plan for the sandbox zone, saved as a build-numbered
# .tfplan artifact.
- task: TerraformTaskV1@0
  displayName: 'Terraform Plan'
  inputs:
    provider: 'azurerm'
    command: 'plan'
    # NOTE(review): ${BUILD_BUILDNUMBER} is shell-style env-var syntax; the
    # Azure Pipelines macro form would be $(Build.BuildNumber) - confirm
    # which one this task actually resolves at runtime.
    commandOptions: '-var-file=../zone/sandboxuks1.tfvars -out=${BUILD_BUILDNUMBER}.tfplan'
    workingDirectory: '$(tfExecutionDir)'
    environmentServiceNameAzureRM: '$(serviceConnectionName)'