# Set up an HTPasswd identity provider: create a credentials file, load it as a
# secret in openshift-config, then point the cluster OAuth resource at it.
sudo dnf install httpd-tools # Package name uses a hyphen (httpd-tools); it provides the htpasswd utility
htpasswd -c -b -B passwdfile usama pass123 # -c create new file, -B use bcrypt algo, -b take the password from the command line rather than stdin
htpasswd -b passwdfile ahmad pass321 # Append credentials to the same file; note the absence of the -c flag
oc create secret generic htpass-secret --from-file=htpasswd=passwdfile -n openshift-config # Secret key must be "htpasswd"
oc edit oauth cluster
# Edit the file to reflect the following
...
spec:
  identityProviders:
  - name: HTPASSWD
    type: HTPasswd
    mappingMethod: claim
    htpasswd:
      fileData:
        name: htpass-secret
After the edit, new OAuth pods will be rolled out in the openshift-authentication namespace
# Log in as the new htpasswd user, create a project, and manage roles, groups
# and quotas. add-*/remove-* pairs are shown together for reference.
oc login -u usama -p pass123 https://api.domain.com:6443
oc whoami
oc new-project test-project --display-name="Test Project" --description="Project for Testing purposes"
oc adm policy add-role-to-user view usama -n test-project # Grant a namespaced role binding
oc adm policy remove-role-from-user view usama -n test-project
oc adm policy add-cluster-role-to-user view usama # Grant a cluster-wide role binding
oc adm policy remove-cluster-role-from-user view usama
oc adm policy add-role-to-group view temp-admins -n test-project
oc adm policy remove-role-from-group view temp-admins -n test-project
oc adm policy add-cluster-role-to-group view temp-admins
oc adm policy remove-cluster-role-from-group view temp-admins
oc adm groups new temp-admins # Create an empty group
oc adm groups add-users temp-admins usama ahmad
oc adm groups remove-users temp-admins usama ahmad
oc delete group temp-admins
oc new-project rocky
oc create quota my-quota --hard="cpu=6,memory=32Gi,configmaps=10,persistentvolumeclaims=1,secrets=10" # memory needs a unit suffix; a bare "32" would mean 32 bytes
oc create clusterquota limit-usama --project-annotation-selector=openshift.io/requester=usama --hard=pods=10 # Quota across all projects requested by usama
oc create doesn't allow creating LimitRanges directly. Use `oc explain limitrange`
to navigate the API and build the manifest below.
# LimitRange for the "rocky" project: per-Pod min/max, plus per-Container
# defaults and min/max. Apply with: oc create -f <file> -n rocky
# (Indentation restored — the flattened form is not valid YAML.)
apiVersion: "v1"
kind: "LimitRange"
metadata:
  name: "rocky-limits"
spec:
  limits:
  - type: "Pod"
    max:
      cpu: "300m"
      memory: "300Mi"
    min:
      cpu: "5m"
      memory: "5Mi"
  - type: "Container"
    default:            # Applied when a container specifies no limits
      cpu: "100m"
      memory: "100Mi"
    max:
      cpu: "300m"
      memory: "300Mi"
    min:
      cpu: "5m"
      memory: "5Mi"
# Taint syntax: oc adm taint node <node> key=value:EFFECT — the node name comes
# first, then the taint (L63 already had the right order).
oc adm taint node <node> key1=value1:NoSchedule # Don't schedule new pods unless they have a toleration
oc adm taint node <node> key2=value2:PreferNoSchedule # Prefer not to schedule new pods unless they have a toleration
oc adm taint node <node> key3=value3:NoExecute # Valid effect is NoExecute (not "Execute"): don't schedule new pods and evict running pods that lack a toleration
oc adm taint node <node> key1- # Removes the taint with key key1 from <node>
oc scale deploy my-deployment --replicas=5 # Scale Deployment my-deployment to 5 replicas
oc scale dc my-dc --replicas=2 # Scale DeploymentConfig my-dc to 2 replicas
oc set resources dc my-dc --limits=cpu=200m,memory=512Mi # Set the given limits on all containers in DeploymentConfig my-dc
oc set resources deploy my-deploy -c=nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=128Mi # Set the given requests/limits only on container nginx in Deployment my-deploy
oc set resources -f path/to/file.yaml --limits=cpu=200m,memory=512Mi --local -o yaml # Print the result (in YAML format) of updating the container limits from a local file, without hitting the server
oc autoscale dc/my-dc --min 2 --max 9 --cpu-percent 60 # Autoscale when avg CPU utilization hits 60% of the limit, between 2 and 9 replicas
oc create secret generic my-generic-secret --from-literal=key1=value1 --from-literal=key2=value2 # Create a generic key/value secret from string literals
oc create secret generic my-generic-secret --from-file=key1=file.txt # Create a generic key/value secret, taking the value from a file
oc create secret docker-registry my-dockerconfigjson-secret --docker-username=username --docker-password=password # Create a docker-registry (image pull) secret
oc create secret tls my-tls-secret --cert=domain.pem --key=domain.key # Create a TLS secret
oc set env dc/my-dc --from=secret/my-generic-secret # Import environment variables from a secret
oc set env dc/my-dc --from=configmap/my-configmap --prefix=MYSQL_ # Import environment variables from a config map, each prefixed with MYSQL_
oc set env dc/my-dc --containers="c1" ENV- # Remove the environment variable ENV from container 'c1' in DeploymentConfig my-dc (trailing '-' means unset)
oc create route edge my-route --service my-service --hostname my-hostname.domain.com --key domain.key --cert domain.crt # Create an edge-terminated route with a custom TLS certificate
You can either create a self-signed certificate, or generate a key/CSR pair and then sign the CSR with your CA:
# Generate a CSR and private key (2048-bit RSA — "2046" was a typo and is not a standard key size)
openssl req -new -nodes -newkey rsa:2048 -keyout domain.key -out domain.csr -subj "/C=US/ST=North Carolina/L=Raleigh/O=Red Hat/CN=domain.com.pk"
# Sign the CSR with the CA cert and key. -req is required so x509 reads a CSR
# instead of a certificate; the requester's key is not needed here, and
# -passin takes a passphrase source (e.g. pass:, file:), not a key file.
openssl x509 -req -in domain.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out domain.crt -days 1825 -sha256 -extfile domain.ext
# Generate a self-signed certificate in one step
openssl req -x509 -nodes -newkey rsa:2048 -keyout domain.key -out domain.crt -days 365 -sha256 -subj "/C=US/ST=North Carolina/L=Raleigh/O=Red Hat/CN=domain.com.pk"
oc create serviceaccount my-sa # Create a ServiceAccount
oc set serviceaccount deploy/my-deploy my-sa # Attach ServiceAccount my-sa to Deployment my-deploy
oc get scc # List all SCCs available in the cluster
oc adm policy add-scc-to-user anyuid -z my-sa # Add SCC anyuid to ServiceAccount my-sa ("polcy"/"ayuid" were misspelled; -z targets a ServiceAccount)
oc adm policy add-scc-to-user privileged my-user # Add SCC privileged to user my-user
oc adm policy add-scc-to-group restricted my-group my-group1 # Add SCC restricted to groups my-group and my-group1
oc get pods
* notice pod in pending/error state
oc describe pod/my-dc-XXXX
* notice there is an error that node didn't match the node selector
oc describe pod/my-dc-XXXX |grep -i ^node-selector
* now we will get the label info from the pod, e.g.: client=acme
oc get nodes -L client
* notice the client key has a different value, or maybe there is a case-sensitivity issue, like: client=ACME
oc edit dc/my-dc
* modify the nodeSelector value under spec -> template -> spec, like this:
nodeSelector:
client: ACME
oc get route
* Try to access your application using the route name; if the application is not accessible, check the endpoints of the service:
oc get ep my-svc
* Notice there are no endpoints in the service
* check the label info in deployment:
oc get dc/my-dc -o yaml |less
* Look at label under spec -> template -> metadata -> labels
oc edit svc/my-svc
* add/modify the label under spec -> selector, like this:
spec:
selector:
key: value
oc get ep my-svc
Note: notice the endpoint is now available in the service