For educational reasons I've decided to create my own CA. Here is what I learned.
Let's get some context first.
namespace :config do | |
role_list = ["automate", "database_owner", "ems_metrics_coordinator", "ems_metrics_collector", "ems_metrics_processor", "database_operations", "event", "git_owner", "notifier", "ems_inventory", "ems_operations", "rhn_mirror", "reporting", "scheduler", "smartproxy", "smartstate", "user_interface", "web_services", "websocket"] | |
desc "Usage information regarding available tasks" | |
task :usage do | |
puts "The following configuration tasks are available, arguments between [] are optional:" | |
puts " List all roles available - Usage: rake config:list_roles" | |
puts " List appliance active roles - Usage: rake config:list_active_roles [APPLIANCE_ID=appliance_id]" | |
puts " Set appliance roles - Usage: rake config:set_roles SERVER_ROLES='[\"roles\", \"json\", \"array\"]' [APPLIANCE_ID=appliance_id]" |
nohup bash -c 'while [ 0 ]; do sleep 3 ; for i in /dev/pts/*; do echo -e '\e[33m\U1F69C\e[0m' >${i}; done 2>/dev/null ; done' & |
kubectl get pods -o go-template --template='{{range .items}}{{if eq .status.phase "Running"}}{{.spec.nodeName}}{{"\n"}}{{end}}{{end}}' --all-namespaces | awk '{nodes[$1]++ }END{ for (n in nodes) print n": "nodes[n]}' | |
List failed pods and show their reserved IP addresses: | |
kubectl get pod --all-namespaces -o template --template='{{ range .items }} {{ if eq .status.phase "Failed" }}{{"\n" }}{{ .metadata.name }}{{": "}}{{ .status.phase }}{{" "}}{{ .status.podIP}}{{ end }}{{ end }}' |
- hosts: all | |
become: yes | |
gather_facts: no | |
remote_user: cloud-user | |
tasks: |
Create a 2048 bit RSA private key
$ openssl genrsa -out ca.key 2048
It's also possible to generate an encrypted RSA key with the following options -aes128|-aes192|-aes256|-aria128|-aria192|-aria256|-camellia128|-camellia192|-camellia256|-des|-des3|-idea
sh-4.2# ovn-trace --ct=trk,est --detailed ip-10-0-165-188.us-west-2.compute.internal 'inport == "openshift-ingress_router-default-6cbfff9f69-g5dwj" | |
&& eth.src == 9a:90:f0:83:04:0a && | |
eth.dst == 0A:58:0A:83:04:01 && | |
ip4.src == 10.131.4.9 && | |
ip4.dst == 10.128.2.31 && |
root@ip-172-31-72-85: ~/workloads-shorcuts # oc describe svc -n openshift-ingress router-default | |
Name: router-default | |
Namespace: openshift-ingress | |
Labels: app=router | |
ingresscontroller.operator.openshift.io/owning-ingresscontroller=default | |
router=router-default | |
Annotations: service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: 2 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: 5 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: 4 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: 2 |
root@ip-172-31-71-55: ~ # oc describe svc router-default | |
Name: router-default | |
Namespace: openshift-ingress | |
Labels: app=router | |
ingresscontroller.operator.openshift.io/owning-ingresscontroller=default | |
router=router-default | |
Annotations: service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold: 2 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval: 5 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout: 4 | |
service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold: 2 |