Pictures and stuff.
graph TD
subgraph SqlServerInstancePrimary1
createEndpoint1
grantConnectOnEndpoint1
createAvailabilityGroup1
end
subgraph SqlServerInstanceSecondary2
createEndpoint2
{
  "id": "/subscriptions/d4aa0040-6651-443d-a078-728ce72a87ab/resourcegroups/rakirahman.me/providers/Microsoft.Cdn/profiles/rakirahman/endpoints/rakirahman",
  "type": "Microsoft.Cdn/profiles/endpoints",
  "name": "rakirahman",
  "location": "Global",
  "tags": {},
  "properties": {
    "hostName": "rakirahman.azureedge.net",
    "originHostHeader": "rakirahman.z9.web.core.windows.net",
    "originPath": null,
#!/bin/bash
# Continue on error: cleanup is best-effort, so one failed deletion
# must not abort the rest of the run.
set +e

# Params
resource_group="arcdata-ci-direct"  # resource group whose stale resources get pruned
days_to_keep=7                      # retention window in days

# Get current date and calculate 7 days ago in seconds
# Calculate Eviction Signals - Current State vs Threshold:
#
# https://kubernetes.io/docs/concepts/scheduling-eviction/node-pressure-eviction/#eviction-signals
# https://medium.com/kubernetes-tutorials/efficient-node-out-of-resource-management-in-kubernetes-67f158da6e59
#
NODES=($(kubectl get nodes -o jsonpath='{.items[*].metadata.name}')) | |
NODE_COUNT=${#NODES[@]} | |
while true; | |
do |
##########################################################
# APPLY
##########################################################
# Apply workflow
#
kubectl apply -f sqlinstance-ha.yaml | |
# Get primary
#
kubectl get pods -l=role.ag.mssql.microsoft.com/chaos-bc-chaos-bc=primary -n ns1669830131581917320 |
# ==========================================================================
# SQLINSTANCE-HA
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# WORKFLOW DURATION: 18m
# --------------------------------------------------------------------------
# CHAOS DESCRIPTION
# --------------------------------------------------------------------------
# In this chaos simulation we target a 2 or 3 replica MIAA:
#
# 1. Setup a User DB
The key capability a GitOps tool can provide to declaratively deploy each of our CRDs is health checks. Most production-grade GitOps tools have the logic necessary to monitor K8s native resources (Pod, Service, etc.), but also the flexibility to extend to any Custom Resource.
Both Flux V1 and V2 have good support for K8s native resources, which makes them a great fit for the average Arc-enabled Kubernetes customer deploying K8s components that aren't Custom Resources.
For custom resources, Flux V2 has limited literature, as it offloads that responsibility to kstatus, which makes rigid assumptions about the fields a CR should emit to be considered healthy — a boolean called `Ready`. Because there is no standardized pattern for this, many open source
# Namespace manifest; sync-wave "-1" makes Argo CD create it before
# resources in later (higher-numbered) waves.
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    argocd.argoproj.io/sync-wave: "-1"
  name: single-job-hook-no
---
apiVersion: v1 | |
data: | |
password: aGVsbG8K |
#!/bin/bash -e
################################################################################
## File: install.sh
## Desc: Helper functions for installing tools
################################################################################
download_with_retries() { | |
# Due to restrictions of bash functions, positional arguments are used here. | |
# In case if you using latest argument NAME, you should also set value to all previous parameters. | |
# Example: download_with_retries $ANDROID_SDK_URL "." "android_sdk.zip" |