# Authenticate the Azure CLI (interactive browser login).
az login
# Start / stop (deallocate) the AKS cluster to save cost while idle.
az aks start --name fabric-cluster --resource-group fabric
az aks stop --name fabric-cluster --resource-group fabric
# Point kubectl at the saved admin kubeconfig for this cluster.
export KUBECONFIG=/Users/rajanpunchouty/fabric-cluster.kubeconfig
source <(kubectl completion zsh) # setup autocomplete in zsh into the current shell
# Single quotes so '$commands[kubectl]' is written literally into ~/.zshrc and
# evaluated by zsh at startup — with double quotes the shell running this echo
# expands it immediately, baking a stale (or empty) value into the rc file.
echo '[[ $commands[kubectl] ]] && source <(kubectl completion zsh)' >> ~/.zshrc # add autocomplete permanently to your zsh shell
- User : rajan.punchouty@gmail.com
- Subscription : 0RacloopSubscription (Upgrade it to Pay As You Go)
- Git Bot Account : racloop-bot
- Git Bot Email : rajan@racloop.com
- Git API Token : a38ee8c308633826b1e5af54bf02730b903c315f  (WARNING: this token is committed in plain text — revoke and rotate it)
- Reference : https://github.com/PacktPublishing/Hands-On-Continuous-Integration-and-Delivery-with-Jenkins-X-and-Kubernetes/tree/master/section-1/video-1.5
# Azure session and AKS start/stop for the second cluster name.
az login
az aks start --name aks-fabric-cluster --resource-group fabric
az aks stop --name aks-fabric-cluster --resource-group fabric
# List all resource groups in the current subscription as a table.
az group list --output table
# Delete a resource group without confirmation (-y) and without waiting
# (--no-wait). Replace <resource_group_name> before running — destructive.
az group delete -y --no-wait -n <resource_group_name>
# WARNING: wipes ALL local kubeconfig state — every saved cluster context,
# not just this cluster's.
rm -rf ~/.kube/*
# Generate the RSA key pair later passed to 'az aks create --ssh-key-value'.
ssh-keygen -t rsa -b 4096
# Resource group dedicated to the container registry.
az group create --name fabric-acr --location centralindia --subscription RacloopSubscription --tags 'ENV=DEV' 'SERVICE=FABRIC' 'OWNER=STATUSNEO'
az group list
az group show --name fabric
# Basic-SKU Azure Container Registry inside the fabric-acr group.
az acr create --name jiofabric --resource-group fabric-acr --sku Basic
az acr list
az acr show --name jiofabric
# Resource group that will hold the VNet and the AKS cluster.
az group create --name fabric --location centralindia --subscription RacloopSubscription --tags 'ENV=DEV' 'SERVICE=FABRIC' 'OWNER=STATUSNEO'
az group list --output table
az group show --name fabric
# Network configuration consumed by the vnet commands below.
VNET_RG="fabric"                       # resource group that owns the VNet
VNET_NAME="vnet-fabric"
SUBNET_NAME="subnet-fabric"
VNET_REGION="centralindia"
VNET_SUBSCRIPTION="RacloopSubscription"
# NOTE: the command below will also create a new resource group: NetworkWatcher_centralindia
# Create the VNet and its subnet.
# NOTE(review): 192.0.0.0/8 is not RFC1918 private address space (private is
# 192.168.0.0/16); Azure accepts it, but it can shadow real internet
# addresses — confirm this was intentional.
az network vnet create \
  --name "$VNET_NAME" \
  --resource-group "$VNET_RG" \
  --address-prefixes 192.0.0.0/8 \
  --subnet-name "$SUBNET_NAME" \
  --subnet-prefixes 192.10.0.0/16 \
  --location "$VNET_REGION" \
  --subscription "$VNET_SUBSCRIPTION"
az network vnet list --output table
az network vnet show --name "$VNET_NAME" --resource-group "$VNET_RG"
# Capture the VNet/subnet resource IDs for later use (aks create --vnet-subnet-id).
VNET_ID=$(az network vnet show --resource-group "$VNET_RG" --name "$VNET_NAME" --query id -o tsv)
echo "$VNET_ID"
SUBNET_ID=$(az network vnet subnet show --resource-group "$VNET_RG" --vnet-name "$VNET_NAME" --name "$SUBNET_NAME" --query id -o tsv)
echo "$SUBNET_ID"
### 3. Create Service Principal — create a service principal and configure its access to Azure resources.
# Create the service principal AKS will use; --skip-assignment defers the
# role assignment (done explicitly below). The output contains appId and
# password — treat both as secrets.
az ad sp create-for-rbac --skip-assignment --name aks-service-principal
(WARNING: live service-principal credentials captured below — rotate this password.)
{
  "appId": "be0b2d2d-5680-4d79-bfa2-94587f7aeb62",
  "displayName": "aks-service-principal",
  "name": "http://aks-service-principal",
  "password": "BoFSDX97U-ws8ruLxa.QNtHbhPQ~AHLPV3",
  "tenant": "1dad7237-4bf1-41a9-ba5d-8475256d9aab"
}
# Inspect the service principal and clean up an old/unused one.
az ad sp list --output table
az ad sp show --id be0b2d2d-5680-4d79-bfa2-94587f7aeb62
az ad sp list --output table
az ad sp delete --id 5cc6597c-13ba-48cd-bf69-d14595ae65fb
# Grant the service principal rights on the VNet's resource group so the
# cluster can manage network resources in it.
# NOTE(review): "Owner" is broader than AKS needs — "Network Contributor" is
# usually sufficient; kept as-is to match the cluster built below.
az role assignment create \
  --role "Owner" \
  --assignee "be0b2d2d-5680-4d79-bfa2-94587f7aeb62" \
  --resource-group "$VNET_RG"
# - --service-principal is the id (appId) of the service principal created above
# - --client-secret is the password returned when the service principal was created
# Inputs for the AKS cluster build.
VNET_RG="fabric"
VNET_NAME="vnet-fabric"
SUBNET_NAME="subnet-fabric"
AKS_NAME="fabric-cluster"
AKS_RG="fabric"
AKS_REGION="centralindia"
AKS_SUBSCRIPTION="RacloopSubscription"
ACR="jiofabric"
# Resolve the resource ID of the subnet the cluster nodes will live in.
SUBNET_ID=$(az network vnet subnet show --resource-group "$VNET_RG" --vnet-name "$VNET_NAME" --name "$SUBNET_NAME" --query id -o tsv)
echo "$SUBNET_ID"
# SECURITY: these service-principal credentials are committed in plain text —
# rotate the secret and supply it via environment/key vault instead.
AKS_SP_APP_ID="be0b2d2d-5680-4d79-bfa2-94587f7aeb62"
AKS_SP_SECRET="BoFSDX97U-ws8ruLxa.QNtHbhPQ~AHLPV3"
# Create the AKS cluster: kubenet networking inside the pre-built subnet,
# standard load balancer for egress, autoscaling node pool (2-5, start at 3),
# and pull access to the ACR attached at the end.
az aks create --location "$AKS_REGION" \
  --subscription "$AKS_SUBSCRIPTION" \
  --resource-group "$AKS_RG" \
  --name "$AKS_NAME" \
  --ssh-key-value "$HOME/.ssh/id_rsa.pub" \
  --service-principal "$AKS_SP_APP_ID" \
  --client-secret "$AKS_SP_SECRET" \
  --network-plugin kubenet \
  --load-balancer-sku standard \
  --outbound-type loadBalancer \
  --vnet-subnet-id "$SUBNET_ID" \
  --pod-cidr 10.244.0.0/16 \
  --service-cidr 10.0.0.0/16 \
  --dns-service-ip 10.0.0.10 \
  --docker-bridge-address 172.17.0.1/16 \
  --node-vm-size Standard_B2ms \
  --enable-cluster-autoscaler \
  --max-count 5 \
  --min-count 2 \
  --node-count 3 \
  --attach-acr "$ACR" \
  --tags 'ENV=DEV' 'SERVICE=FABRIC' 'OWNER=STATUSNEO'
# NOTE: this will create a public IP — see below for the command output.
# Fetch admin credentials for the cluster into a local kubeconfig file and
# point kubectl at it.
CONFIG_FILE="./$AKS_NAME.kubeconfig"
az aks get-credentials \
  -n "$AKS_NAME" \
  -g "$AKS_RG" \
  --subscription "$AKS_SUBSCRIPTION" \
  --admin \
  --file "$CONFIG_FILE"
export KUBECONFIG="$CONFIG_FILE"
# Alternative: absolute path to a previously saved kubeconfig (overrides the
# relative export above when both are run in the same shell).
export KUBECONFIG=/Users/rajanpunchouty/fabric-cluster.kubeconfig
# Cheat sheet: https://kubernetes.io/docs/reference/kubectl/cheatsheet/
# Basic cluster inspection commands.
kubectl cluster-info
kubectl get namespaces
kubectl get all -n default
kubectl get all -n kube-system
kubectl get pods
watch kubectl get pods
kubectl get svc
kubectl get pvc
kubectl get pv
# Helm 2 Tiller bootstrap: give Tiller a service account bound to
# cluster-admin. NOTE: Tiller was removed in Helm 3 — these three steps
# apply to Helm 2 only.
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
helm list
Go to https://github.com/kubernetes/dashboard. Dashboard URL: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
# Deploy the Kubernetes dashboard, then browse it through the local proxy.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml
kubectl proxy
watch kubectl get pods
# Install the kube-arangodb operator CRDs and controllers (version 1.1.3).
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.1.3/manifests/arango-crd.yaml
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.1.3/manifests/arango-deployment.yaml
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.1.3/manifests/arango-storage.yaml
kubectl apply -f https://raw.githubusercontent.com/arangodb/kube-arangodb/1.1.3/manifests/arango-deployment-replication.yaml
# Create the following file: arango.yaml
# arango.yaml — minimal ArangoDB cluster managed by the kube-arangodb operator.
# NOTE: YAML nesting restored; the original paste had every key at column 0,
# which is not valid YAML for the metadata/spec sub-keys.
apiVersion: "database.arangodb.com/v1alpha"
kind: "ArangoDeployment"
metadata:
  name: "arango-cluster"
spec:
  mode: Cluster
  image: arangodb/arangodb:3.6.1
# Run the following command:
kubectl apply -f arango.yaml
# Install the Azure-marketplace (Bitnami) Kafka chart with a per-broker
# external LoadBalancer on port 9094.
# WARNING: no authentication is configured, so the brokers are reachable
# publicly (the chart's own NOTES output warns about this too).
helm install azure-marketplace/kafka --set externalAccess.enabled=true,\
externalAccess.service.type=LoadBalancer,\
externalAccess.service.port=9094,\
externalAccess.autoDiscovery.enabled=true,\
serviceAccount.create=true,\
deleteTopicEnable=true,\
rbac.create=true
- Get IP
- List Topics
# Print the external IP of each Kafka broker LoadBalancer service, one per
# line. The pipeline runs directly — wrapping it in echo "$( … )" (SC2005)
# added nothing.
kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=brazen-owl,app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n'
# Manage topics against the externally exposed broker address.
./kafka-topics.sh --bootstrap-server 40.80.89.244:9094 --list
./kafka-topics.sh --create --topic meta-git --bootstrap-server 40.80.89.244:9094
./kafka-topics.sh --delete --topic meta-git --bootstrap-server 40.80.89.244:9094
Go to https://github.com/kubernetes/dashboard and copy https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.4/aio/deploy/recommended.yaml to local (/Users/rajanpunchouty/learn/kubernates/admin/)
# Use the saved admin kubeconfig, deploy the dashboard from the local copy,
# and start the API proxy for browser access.
export KUBECONFIG=/Users/rajanpunchouty/learn/kubernates/admin/fabric-cluster.kubeconfig
kubectl apply -f recommended.yaml
kubectl proxy
Dashboard Url : http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
Install jq
brew install jq
# Obtain an ACR access token without a docker login (jq parses the JSON reply).
REGISTRY_NAME=jiofabric
token=$(az acr login --name $REGISTRY_NAME --expose-token | jq -r '.accessToken')
# Coordinates for the second (Jenkins X) environment.
RESOURCE_GROUP=fabricrg
LOCATION=centralindia
AKS_NAME="aks-fabric-cluster"
AKS_RG="fabricrg"
AKS_REGION="centralindia"
AKS_SUBSCRIPTION="RacloopSubscription"
token=$(az acr login --name $REGISTRY_NAME --expose-token | jq -r '.accessToken')
https://github.com/bitnami/charts/tree/master/bitnami/kafka/#installing-the-chart
# Helm 2 setup (Tiller) followed by the Kafka chart install.
# NOTE: Helm 3 removed Tiller — helm init and the tiller patching below
# apply to Helm 2 only.
helm init
helm status
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
kubectl patch deploy --namespace kube-system tiller-deploy -p '{"spec":{"template":{"spec":{"serviceAccount":"tiller"}}}}'
# Kafka with per-broker external LoadBalancers on port 9094.
# WARNING: exposed without authentication (see the chart NOTES output below).
helm install azure-marketplace/kafka --set externalAccess.enabled=true,\
externalAccess.service.type=LoadBalancer,\
externalAccess.service.port=9094,\
externalAccess.autoDiscovery.enabled=true,\
serviceAccount.create=true,\
deleteTopicEnable=true,\
rbac.create=true
NAME: tailored-hyena
LAST DEPLOYED: Mon Oct 26 19:43:27 2020
NAMESPACE: default
STATUS: DEPLOYED
RESOURCES:
==> v1/ConfigMap
NAME DATA AGE
tailored-hyena-kafka-scripts 2 0s
==> v1/Pod(related)
NAME READY STATUS RESTARTS AGE
tailored-hyena-kafka-0 0/1 Pending 0 1s
tailored-hyena-zookeeper-0 0/1 Pending 0 1s
==> v1/Role
NAME AGE
tailored-hyena-kafka 1s
==> v1/RoleBinding
NAME AGE
tailored-hyena-kafka 1s
==> v1/Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
tailored-hyena-kafka ClusterIP 10.0.167.168 <none> 9092/TCP 0s
tailored-hyena-kafka-0-external LoadBalancer 10.0.247.158 <pending> 9094:31972/TCP 0s
tailored-hyena-kafka-headless ClusterIP None <none> 9092/TCP,9093/TCP 0s
tailored-hyena-zookeeper ClusterIP 10.0.152.94 <none> 2181/TCP,2888/TCP,3888/TCP 0s
tailored-hyena-zookeeper-headless ClusterIP None <none> 2181/TCP,2888/TCP,3888/TCP 0s
==> v1/ServiceAccount
NAME SECRETS AGE
tailored-hyena-kafka 1 0s
==> v1/StatefulSet
NAME READY AGE
tailored-hyena-kafka 0/1 0s
tailored-hyena-zookeeper 0/1 0s
NOTES:
---------------------------------------------------------------------------------------------
WARNING
By specifying "serviceType=LoadBalancer" and not configuring the authentication
you have most likely exposed the Kafka service externally without any
authentication mechanism.
For security reasons, we strongly suggest that you switch to "ClusterIP" or
"NodePort". As alternative, you can also configure the Kafka authentication.
---------------------------------------------------------------------------------------------
** Please be patient while the chart is being deployed **
Kafka can be accessed by consumers via port 9092 on the following DNS name from within your cluster:
tailored-hyena-kafka.default.svc.cluster.local
Each Kafka broker can be accessed by producers via port 9092 on the following DNS name(s) from within your cluster:
tailored-hyena-kafka-0.tailored-hyena-kafka-headless.default.svc.cluster.local:9092
To create a pod that you can use as a Kafka client run the following commands:
kubectl run tailored-hyena-kafka-client --restart='Never' --image marketplace.azurecr.io/bitnami/kafka:2.6.0-debian-10-r30 --namespace default --command -- sleep infinity
kubectl exec --tty -i tailored-hyena-kafka-client --namespace default -- bash
PRODUCER:
kafka-console-producer.sh \
--broker-list tailored-hyena-kafka-0.tailored-hyena-kafka-headless.default.svc.cluster.local:9092 \
--topic test
CONSUMER:
kafka-console-consumer.sh \
--bootstrap-server tailored-hyena-kafka.default.svc.cluster.local:9092 \
--topic test \
--from-beginning
To connect to your Kafka server from outside the cluster, follow the instructions below:
NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
Watch the status with: 'kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=tailored-hyena,app.kubernetes.io/component=kafka,pod" -w'
Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:
echo "$(kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=tailored-hyena,app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
Kafka Brokers port: 9094
- Get IP
- List Topics
# List broker external IPs for the release named "kafka", then list topics on
# the exposed bootstrap address. Direct pipeline — the echo "$( … )" wrapper
# (SC2005) was redundant.
kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=kafka,app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n'
./kafka-topics.sh --bootstrap-server 20.193.136.206:9094 --list
https://jenkins-x.io/commands/jx_create_cluster_aks/
# Jenkins X cluster bootstrap on AKS.
az group create --name fabricrg --location centralindia --subscription RacloopSubscription --tags 'ENV=DEV' 'SERVICE=JIOFABRIC'
# SECURITY: the service-principal client secret below is committed in plain
# text — revoke/rotate it and pass it via an environment variable instead.
jx create cluster aks --cluster-name devcluster --disk-size 32 --nodes 3 --service-principal cecbf841-de52-43d6-a259-c9529480c073 --client-secret Ee0o.J6eidEEwURRXD1FhpwKLH8T-xPUMD --azure-acr-subscription RacloopSubscription --resource-group-name fabricrg --location centralindia
# NOTE: 'jx install' is deprecated in favour of 'jx boot' (see the captured
# output below); --tekton selects serverless pipelines.
jx install --tekton
kubectl cluster-info
az aks get-credentials --resource-group kubernetes-resource-group --name kubernetes-dev-cluster
az ad sp show --id kubernetes-cluster-service-principal
az ad sp list
jx status
jx install
racloop-bot a38ee8c308633826b1e5af54bf02730b903c315f  (WARNING: Git API token committed in plain text — revoke and rotate it)
rajanpunchouty@Rajans-MBP jenkinx % jx install --tekton
Command "install" is deprecated, it will be removed on Sep 1 2020. We now highly recommend you use jx boot instead. Please check https://jenkins-x.io/docs/getting-started/setup/boot/ for more details.
? Configured Jenkins installation type: Serverless Jenkins X Pipelines with Tekton
Context "devcluster" modified.
? Cloud Provider aks
role cluster-admin already exists for the cluster
clusterroles.rbac.authorization.k8s.io 'cluster-admin' already exists
created role cluster-admin
Git configured for user: Rajan Punchouty and email punchouty@gmail.com
Helm installed and configured
existing ingress controller found, no need to install a new one
Waiting for external loadbalancer to be created and update the nginx-ingress-controller service in kube-system namespace
External loadbalancer created
Waiting to find the external host name of the ingress controller Service in namespace kube-system with name jxing-nginx-ingress-controller
You can now configure a wildcard DNS pointing to the new Load Balancer address 20.193.152.75
If you don't have a wildcard DNS setup then create a DNS (A) record and point it at: 20.193.152.75, then use the DNS domain in the next input...
If you do not have a custom domain setup yet, Ingress rules will be set for magic DNS nip.io.
Once you have a custom domain ready, you can update with the command jx upgrade ingress --cluster
? Domain 20.193.152.75.nip.io
nginx ingress controller installed and configured
Set up a Git username and API token to be able to perform CI/CD
? Do you wish to use racloop-bot as the local Git user for github.com server: Yes
Select the CI/CD pipelines Git server and user
? Do you wish to use github.com as the pipelines Git server: Yes
Setting the pipelines Git server https://github.com and user name racloop-bot.
Assign AKS https://devcluster-fabricrg-0348cd-ffbf6aeb.hcp.centralindia.azmk8s.io:443 a reader role for ACR devcluster.azurecr.io
? A local Jenkins X cloud environments repository already exists, recreating with latest: Yes
Enumerating objects: 1440, done.
Total 1440 (delta 0), reused 0 (delta 0), pack-reused 1440
Updating Secret jx-install-config in namespace jx
Setting up prow config into namespace jx
Installing Tekton into namespace jx
Installing Prow into namespace jx
with values file /Users/rajanpunchouty/.jx/cloud-environments/env-aks/myvalues.yaml
Cloning the Jenkins X versions repo https://github.com/jenkins-x/jenkins-x-versions.git with ref refs/heads/master to /Users/rajanpunchouty/.jx/jenkins-x-versions
WARNING:
retrying after error:: failed to clone reference: refs/heads/master: read tcp 192.168.0.72:56209->13.234.176.102:443: read: connection reset by peer
Cloning the Jenkins X versions repo https://github.com/jenkins-x/jenkins-x-versions.git with ref refs/heads/master to /Users/rajanpunchouty/.jx/jenkins-x-versions
? Defaulting workload build pack: Kubernetes Workloads: Automated CI+CD with GitOps Promotion
Setting the team build pack to kubernetes-workloads repo: https://github.com/jenkins-x-buildpacks/jenkins-x-kubernetes.git ref: master
Installing jx into namespace jx
Installing jenkins-x-platform version: 2.0.2405
WARNING: waiting for install to be ready, if this is the first time then it will take a while to download images
Jenkins X deployments ready in namespace jx
Configuring the TeamSettings for ImportMode YAML
Creating default staging and production environments
? Select the organization where you want to create the environment repository: Racloop
Using Git provider github.com at https://github.com
? Using Git user name: racloop-bot
? Using organisation: Racloop
Creating repository Racloop/environment-salmonmaze-staging
Creating Git repository Racloop/environment-salmonmaze-staging
Pushed Git repository to https://github.com/Racloop/environment-salmonmaze-staging
Creating staging Environment in namespace jx
Created environment staging
Namespace jx-staging created
Creating GitHub webhook for Racloop/environment-salmonmaze-staging for url http://hook.jx.20.193.152.75.nip.io/hook
Using Git provider github.com at https://github.com
? Using Git user name: racloop-bot
? Using organisation: Racloop
Creating repository Racloop/environment-salmonmaze-production
Creating Git repository Racloop/environment-salmonmaze-production
Pushed Git repository to https://github.com/Racloop/environment-salmonmaze-production
Creating production Environment in namespace jx
Created environment production
Namespace jx-production created
Creating GitHub webhook for Racloop/environment-salmonmaze-production for url http://hook.jx.20.193.152.75.nip.io/hook
Jenkins X installation completed successfully
********************************************************
NOTE: Your admin password is: qt^G7m2m3HTV0+UNKlfO  (WARNING: password captured in these notes — change it)
********************************************************
Your Kubernetes context is now set to the namespace: jx
To switch back to your original namespace use: jx namespace jx
Or to use this context/namespace in just one terminal use: jx shell
For help on switching contexts see: https://jenkins-x.io/developing/kube-context/
To import existing projects into Jenkins X: jx import
To create a new Spring Boot microservice: jx create spring -d web -d actuator
To create a new microservice from a quickstart: jx create quickstart
- Finished ..
{
"aadProfile": null,
"addonProfiles": {
"KubeDashboard": {
"config": null,
"enabled": false,
"identity": null
}
},
"agentPoolProfiles": [
{
"availabilityZones": null,
"count": 3,
"enableAutoScaling": true,
"enableNodePublicIp": false,
"kubeletConfig": null,
"linuxOsConfig": null,
"maxCount": 5,
"maxPods": 110,
"minCount": 2,
"mode": "System",
"name": "nodepool1",
"nodeImageVersion": "AKSUbuntu-1804-2021.01.06",
"nodeLabels": {},
"nodeTaints": null,
"orchestratorVersion": "1.18.14",
"osDiskSizeGb": 128,
"osDiskType": "Managed",
"osType": "Linux",
"podSubnetId": null,
"powerState": {
"code": "Running"
},
"provisioningState": "Succeeded",
"proximityPlacementGroupId": null,
"scaleSetEvictionPolicy": null,
"scaleSetPriority": null,
"spotMaxPrice": null,
"tags": null,
"type": "VirtualMachineScaleSets",
"upgradeSettings": null,
"vmSize": "Standard_B2ms",
"vnetSubnetId": "/subscriptions/0348cd5d-fe17-4c7f-ae8f-ccbb8da1e9bf/resourceGroups/fabric/providers/Microsoft.Network/virtualNetworks/vnet-fabric/subnets/subnet-fabric"
}
],
"apiServerAccessProfile": null,
"autoScalerProfile": {
"balanceSimilarNodeGroups": "false",
"expander": "random",
"maxEmptyBulkDelete": "10",
"maxGracefulTerminationSec": "600",
"maxTotalUnreadyPercentage": "45",
"newPodScaleUpDelay": "0s",
"okTotalUnreadyCount": "3",
"scaleDownDelayAfterAdd": "10m",
"scaleDownDelayAfterDelete": "10s",
"scaleDownDelayAfterFailure": "3m",
"scaleDownUnneededTime": "10m",
"scaleDownUnreadyTime": "20m",
"scaleDownUtilizationThreshold": "0.5",
"scanInterval": "10s",
"skipNodesWithLocalStorage": "false",
"skipNodesWithSystemPods": "true"
},
"autoUpgradeProfile": null,
"diskEncryptionSetId": null,
"dnsPrefix": "fabric-clu-fabric-0348cd",
"enablePodSecurityPolicy": false,
"enableRbac": true,
"fqdn": "fabric-clu-fabric-0348cd-f9c9cf63.hcp.centralindia.azmk8s.io",
"id": "/subscriptions/0348cd5d-fe17-4c7f-ae8f-ccbb8da1e9bf/resourcegroups/fabric/providers/Microsoft.ContainerService/managedClusters/fabric-cluster",
"identity": null,
"identityProfile": null,
"kubernetesVersion": "1.18.14",
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCUd7rY3nbHRfgJFafEqHpuYGew0slvcZzSw6kofVZ25Cpkh0ciLJ7Nh5OHeyFyiQGOp4h/Gc+XwvxiyEI8dUY8AEU/9qNXg1hyzV7TABoooVwvHSfNV8M/C1P48Q848dm7JmNr2eTdUkFA2HB2N5qIfdvxRbtrB+7NplMnDvBooSu8BdM3MnApfHqurnPTLuMtLBHjMT6yYDqReEddM9ycd4pTdFBxcMhcIaVT46EYCeubeKsbWeMq+ZL8b/dcC8ivlpcC6YG0vbS9FhbBs3GbgxKX9tXU62JywTReqW/KfFGllW/db2hYNWkNROeBpjff5Z5lSL47N0gnsWVe2D3DJIqS0vCHG23+EKJsNctsT1Z8DR+TqRnPASGsXP2y6+GaQD6NCREI+l3wd7IR443cPAqyzpigLUCQgQqbM/b+OLc3T4Fx2RgfvsFiH6P+YIltZZ/0lsSuDJmDLBTbybA9kJp31TL0ebwXvZCQYr/p4Y0IxmEtwzhMZUzfKaHGzYh2OKKnb6EMnSIRt5FYp3mPOvd822xxSdGbRpgJRRe7/96mcdZV+QuKA4EVdzvGMt85XKrYPOhfu9XS//lVVxyEIf6047vap060dZA7yKZ4DUG2TaCovdI3IZ36UI65STezyXgp4+wExFlVyNiXL+cvvpK1iRdz05PCZGswfT306w== punchouty@gmail.com\n"
}
]
}
},
"location": "centralindia",
"maxAgentPools": 10,
"name": "fabric-cluster",
"networkProfile": {
"dnsServiceIp": "10.0.0.10",
"dockerBridgeCidr": "172.17.0.1/16",
"loadBalancerProfile": {
"allocatedOutboundPorts": null,
"effectiveOutboundIps": [
{
"id": "/subscriptions/0348cd5d-fe17-4c7f-ae8f-ccbb8da1e9bf/resourceGroups/MC_fabric_fabric-cluster_centralindia/providers/Microsoft.Network/publicIPAddresses/0f71a679-4159-412e-b231-f2e3a167ba96",
"resourceGroup": "MC_fabric_fabric-cluster_centralindia"
}
],
"idleTimeoutInMinutes": null,
"managedOutboundIps": {
"count": 1
},
"outboundIpPrefixes": null,
"outboundIps": null
},
"loadBalancerSku": "Standard",
"networkMode": null,
"networkPlugin": "kubenet",
"networkPolicy": null,
"outboundType": "loadBalancer",
"podCidr": "10.244.0.0/16",
"serviceCidr": "10.0.0.0/16"
},
"nodeResourceGroup": "MC_fabric_fabric-cluster_centralindia",
"podIdentityProfile": null,
"powerState": {
"code": "Running"
},
"privateFqdn": null,
"provisioningState": "Succeeded",
"resourceGroup": "fabric",
"servicePrincipalProfile": {
"clientId": "be0b2d2d-5680-4d79-bfa2-94587f7aeb62",
"secret": null
},
"sku": {
"name": "Basic",
"tier": "Free"
},
"tags": {
"ENV": "DEV",
"OWNER": "STATUSNEO",
"SERVICE": "FABRIC"
},
"type": "Microsoft.ContainerService/ManagedClusters",
"windowsProfile": null
}
NAME: quarrelsome-termite
LAST DEPLOYED: Thu Jan 28 19:15:04 2021
NAMESPACE: default
STATUS: DEPLOYED
RESOURCES:
==> v1/ClusterRole
NAME CREATED AT
arango-quarrelsome-termite-operator-rbac-deployment 2021-01-28T13:45:04Z
arango-quarrelsome-termite-operator-rbac-deployment-replication 2021-01-28T13:45:04Z
==> v1/ClusterRoleBinding
NAME ROLE AGE
arango-quarrelsome-termite-operator-rbac-deployment ClusterRole/arango-quarrelsome-termite-operator-rbac-deployment 1s
arango-quarrelsome-termite-operator-rbac-deployment-replication ClusterRole/arango-quarrelsome-termite-operator-rbac-deployment-replication 1s
==> v1/Deployment
NAME READY UP-TO-DATE AVAILABLE AGE
arango-quarrelsome-termite-operator 0/2 2 0 1s
==> v1/Pod(related)
NAME READY STATUS RESTARTS AGE
arango-quarrelsome-termite-operator-55466f5858-22f9b 0/1 ContainerCreating 0 0s
arango-quarrelsome-termite-operator-55466f5858-2vm4k 0/1 ContainerCreating 0 0s
==> v1/Role
NAME CREATED AT
arango-quarrelsome-termite-operator-rbac-default 2021-01-28T13:45:04Z
arango-quarrelsome-termite-operator-rbac-deployment 2021-01-28T13:45:04Z
arango-quarrelsome-termite-operator-rbac-deployment-replication 2021-01-28T13:45:04Z
==> v1/RoleBinding
NAME ROLE AGE
arango-quarrelsome-termite-operator-rbac-default Role/arango-quarrelsome-termite-operator-rbac-default 1s
arango-quarrelsome-termite-operator-rbac-deployment Role/arango-quarrelsome-termite-operator-rbac-deployment 1s
arango-quarrelsome-termite-operator-rbac-deployment-replication Role/arango-quarrelsome-termite-operator-rbac-deployment-replication 1s
==> v1/Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
arango-quarrelsome-termite-operator ClusterIP 10.0.16.57 <none> 8528/TCP 1s
==> v1/ServiceAccount
NAME SECRETS AGE
arango-quarrelsome-termite-operator 1 1s
NOTES:
You have installed Kubernetes ArangoDB Operator in version 1.1.3
To access ArangoDeployments you can use:
kubectl --namespace "default" get arangodeployments
More details can be found on https://github.com/arangodb/kube-arangodb/tree/1.1.3/docs
NAME: brazen-owl
LAST DEPLOYED: Thu Jan 28 21:16:48 2021
NAMESPACE: default
STATUS: DEPLOYED
RESOURCES:
==> v1/ConfigMap
NAME DATA AGE
brazen-owl-kafka-scripts 2 0s
==> v1/Pod(related)
NAME READY STATUS RESTARTS AGE
brazen-owl-kafka-0 0/1 Pending 0 1s
brazen-owl-zookeeper-0 0/1 Pending 0 1s
==> v1/Role
NAME CREATED AT
brazen-owl-kafka 2021-01-28T15:46:49Z
==> v1/RoleBinding
NAME ROLE AGE
brazen-owl-kafka Role/brazen-owl-kafka 0s
==> v1/Service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
brazen-owl-kafka ClusterIP 10.0.13.197 <none> 9092/TCP 0s
brazen-owl-kafka-0-external LoadBalancer 10.0.112.230 <pending> 9094:31912/TCP 0s
brazen-owl-kafka-headless ClusterIP None <none> 9092/TCP,9093/TCP 0s
brazen-owl-zookeeper ClusterIP 10.0.178.14 <none> 2181/TCP,2888/TCP,3888/TCP 0s
brazen-owl-zookeeper-headless ClusterIP None <none> 2181/TCP,2888/TCP,3888/TCP 0s
==> v1/ServiceAccount
NAME SECRETS AGE
brazen-owl-kafka 1 0s
==> v1/StatefulSet
NAME READY AGE
brazen-owl-kafka 0/1 0s
brazen-owl-zookeeper 0/1 0s
NOTES:
---------------------------------------------------------------------------------------------
WARNING
By specifying "serviceType=LoadBalancer" and not configuring the authentication
you have most likely exposed the Kafka service externally without any
authentication mechanism.
For security reasons, we strongly suggest that you switch to "ClusterIP" or
"NodePort". As alternative, you can also configure the Kafka authentication.
---------------------------------------------------------------------------------------------
** Please be patient while the chart is being deployed **
Kafka can be accessed by consumers via port 9092 on the following DNS name from within your cluster:
brazen-owl-kafka.default.svc.cluster.local
Each Kafka broker can be accessed by producers via port 9092 on the following DNS name(s) from within your cluster:
brazen-owl-kafka-0.brazen-owl-kafka-headless.default.svc.cluster.local:9092
To create a pod that you can use as a Kafka client run the following commands:
kubectl run brazen-owl-kafka-client --restart='Never' --image marketplace.azurecr.io/bitnami/kafka:2.6.0-debian-10-r30 --namespace default --command -- sleep infinity
kubectl exec --tty -i brazen-owl-kafka-client --namespace default -- bash
PRODUCER:
kafka-console-producer.sh \
--broker-list brazen-owl-kafka-0.brazen-owl-kafka-headless.default.svc.cluster.local:9092 \
--topic test
CONSUMER:
kafka-console-consumer.sh \
--bootstrap-server brazen-owl-kafka.default.svc.cluster.local:9092 \
--topic test \
--from-beginning
To connect to your Kafka server from outside the cluster, follow the instructions below:
NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
Watch the status with: 'kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=brazen-owl,app.kubernetes.io/component=kafka,pod" -w'
Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:
echo "$(kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=brazen-owl,app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
Kafka Brokers port: 9094