@luckylittle
Last active August 5, 2019 10:18
Red Hat OpenShift 3 Disconnected Installation (Lab)
# Verify Proxied RPM Repository
# /etc/httpd/conf.d/default.conf
<VirtualHost *:*>
  ProxyPreserveHost On
  ProxyPass / http://admin.na.shared.opentlc.com/
  ProxyPassReverse / http://admin.na.shared.opentlc.com/
  ServerName isolated1.b88e.internal

  # parameterize me for more storage
  CacheRoot "/var/cache/httpd/proxy/"
  CacheEnable disk /
  CacheDirLevels 2
  CacheDirLength 1

  CustomLog "/var/log/httpd/cached-requests.log" common env=cache-hit
  CustomLog "/var/log/httpd/uncached-requests.log" common env=cache-miss
  CustomLog "/var/log/httpd/revalidated-requests.log" common env=cache-revalidate
  CustomLog "/var/log/httpd/invalidated-requests.log" common env=cache-invalidate
</VirtualHost>
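To verify the caching proxy, request the same URL twice and confirm the second request lands in the cache-hit log. A minimal smoke test; the repo path below is a placeholder for whatever admin.na.shared.opentlc.com actually serves:

# /repos/rhel/ is a hypothetical path -- substitute a real repository path
curl -s -o /dev/null -w '%{http_code}\n' http://isolated1.b88e.internal/repos/rhel/
# repeat the request; it should now be served from the cache
curl -s -o /dev/null -w '%{http_code}\n' http://isolated1.b88e.internal/repos/rhel/
tail -n 5 /var/log/httpd/cached-requests.log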
#!/bin/bash
yum install -y docker-distribution skopeo podman
wget -O /usr/local/sbin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64
chmod +x /usr/local/sbin/jq
# rootdirectory must stay nested under storage -> filesystem (8 spaces in the stock RHEL config)
sed -i 's/^.*rootdirectory.*$/        rootdirectory: \/srv\/repohost\/registry/' /etc/docker-distribution/registry/config.yml
cat << EOF >> /etc/docker-distribution/registry/config.yml
log:
  accesslog:
    disabled: false
  level: info
  formatter: text
  fields:
    service: registry
    environment: staging
EOF
mkdir -p /srv/repohost/registry
systemctl enable docker-distribution
systemctl start docker-distribution
systemctl status docker-distribution
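With docker-distribution running, the registry's v2 API should answer on port 5000. A quick sanity check using the standard Docker Registry v2 endpoints (the catalog stays empty until images are mirrored):

curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5000/v2/   # expect 200
curl -s http://localhost:5000/v2/_catalog | jq .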
#!/bin/bash
LOCAL_TAG=v3.11.59
IMAGES_CORE="apb-base apb-tools automation-broker-apb csi-attacher csi-driver-registrar csi-livenessprobe csi-provisioner grafana image-inspector mariadb-apb mediawiki mediawiki-apb mysql-apb ose-ansible ose-ansible-service-broker ose-cli ose-cluster-autoscaler ose-cluster-capacity ose-cluster-monitoring-operator ose-console ose-configmap-reloader ose-control-plane ose-deployer ose-descheduler ose-docker-builder"
time for image in ${IMAGES_CORE}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/openshift3/${image}:${LOCAL_TAG} docker://localhost:5000/openshift3/${image}:${LOCAL_TAG}
echo "Copied image: $image"
done
IMAGES_CORE="ose-docker-registry ose-efs-provisioner ose-egress-dns-proxy ose-egress-http-proxy ose-egress-router ose-haproxy-router ose-hyperkube ose-hypershift ose-keepalived-ipfailover ose-kube-rbac-proxy ose-kube-state-metrics ose-metrics-server ose-node ose-node-problem-detector ose-operator-lifecycle-manager ose-pod ose-prometheus-config-reloader ose-prometheus-operator ose-recycler ose-service-catalog ose-template-service-broker ose-web-console postgresql-apb registry-console snapshot-controller snapshot-provisioner"
time for image in ${IMAGES_CORE}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/openshift3/${image}:${LOCAL_TAG} docker://localhost:5000/openshift3/${image}:${LOCAL_TAG}
echo "Copied image: $image"
done
IMAGES_CRITICAL="rhel7/etcd:3.2.22"
time for image in ${IMAGES_CRITICAL}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/${image} docker://localhost:5000/${image}
echo "Copied image: $image"
done
IMAGES_ADDITIONAL="metrics-cassandra metrics-hawkular-metrics metrics-hawkular-openshift-agent metrics-heapster oauth-proxy ose-logging-curator5 ose-logging-elasticsearch5 ose-logging-eventrouter ose-logging-fluentd ose-logging-kibana5 ose-metrics-schema-installer prometheus prometheus-alert-buffer prometheus-alertmanager prometheus-node-exporter metrics-schema-installer"
time for image in ${IMAGES_ADDITIONAL}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/openshift3/${image}:${LOCAL_TAG} docker://localhost:5000/openshift3/${image}:${LOCAL_TAG}
echo "Copied image: $image"
done
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/openshift3/prometheus-alert-buffer:latest docker://localhost:5000/openshift3/prometheus-alert-buffer:${LOCAL_TAG}
S2I_BUILDER_IMAGES="jboss-amq-6/amq63-openshift jboss-datagrid-7/datagrid71-openshift jboss-datagrid-7/datagrid71-client-openshift jboss-datavirt-6/datavirt63-openshift jboss-datavirt-6/datavirt63-driver-openshift jboss-decisionserver-6/decisionserver64-openshift jboss-processserver-6/processserver64-openshift jboss-eap-6/eap64-openshift jboss-eap-7/eap71-openshift jboss-eap-7/eap71-openshift jboss-webserver-3/webserver31-tomcat7-openshift jboss-webserver-3/webserver31-tomcat8-openshift openshift3/jenkins-2-rhel7 openshift3/jenkins-agent-maven-35-rhel7 openshift3/jenkins-agent-nodejs-8-rhel7"
time for image in ${S2I_BUILDER_IMAGES}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/${image} docker://localhost:5000/${image}
echo "Copied image: $image"
done
S2I_BUILDER_IMAGES="openshift3/jenkins-slave-base-rhel7 openshift3/jenkins-slave-maven-rhel7 openshift3/jenkins-slave-nodejs-rhel7 rhscl/mongodb-32-rhel7 rhscl/mysql-57-rhel7 rhscl/perl-524-rhel7 rhscl/php-56-rhel7 rhscl/postgresql-95-rhel7 rhscl/python-35-rhel7 redhat-sso-7/sso70-openshift rhscl/ruby-24-rhel7 redhat-openjdk-18/openjdk18-openshift redhat-sso-7/sso71-openshift rhscl/nodejs-6-rhel7 rhscl/mariadb-101-rhel7"
time for image in ${S2I_BUILDER_IMAGES}
do
echo "Copying image: $image"
skopeo copy --dest-tls-verify=false docker://registry.redhat.io/${image} docker://localhost:5000/${image}
echo "Copied image: $image"
done
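Once the mirror loops finish, spot-check the local registry before moving on. A sketch using the registry v2 API and skopeo:

curl -s 'http://localhost:5000/v2/_catalog?n=500' | jq -r '.repositories[]' | wc -l
curl -s http://localhost:5000/v2/openshift3/ose-cli/tags/list | jq .
skopeo inspect --tls-verify=false docker://localhost:5000/rhel7/etcd:3.2.22 | jq .Digest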
# Ansible inventory file for OpenShift Container Platform v3.11.59
# AgnosticD ansible-config: ocp-ha-disconnected-lab
[OSEv3:vars]
###########################################################################
### Ansible Vars
###########################################################################
timeout=60
ansible_user=ec2-user
ansible_become=yes
###########################################################################
### OpenShift Basic Vars
###########################################################################
openshift_deployment_type=openshift-enterprise
openshift_disable_check="disk_availability,memory_availability,docker_image_availability"
# OpenShift Version:
# If you modify the openshift_image_tag or the openshift_pkg_version variables after the cluster is set up, then an upgrade can be triggered, resulting in downtime.
# If openshift_image_tag is set, its value is used for all hosts in system container environments, even those that have another version installed.
# If openshift_pkg_version is set, its value is used for all hosts in RPM-based environments, even those that have another version installed.
# Use this variable to specify a container image tag to install or configure.
openshift_image_tag=v3.11.59
# Use this variable to specify an RPM version to install or configure.
openshift_pkg_version=-3.11.59
openshift_release="3.11.59"
# Node Groups
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true','runtime=docker']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true','runtime=docker']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true','runtime=docker'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. These settings now belong in the openshift_node_groups 'edits' above.
# openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['75']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7","size 500M", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# Deploy Operator Lifecycle Manager Tech Preview
#openshift_enable_olm=false
###########################################################################
### OpenShift Registries Locations
###########################################################################
#oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version}
oreg_url=isolated1.b88e.internal:5000/openshift3/ose-${component}:${version}
#oreg_auth_user=REPLACE_ME
#oreg_auth_password=REPLACE_ME
openshift_docker_insecure_registries=isolated1.b88e.internal:5000
openshift_docker_blocked_registries=registry.redhat.io,registry.access.redhat.com,docker.io
# openshift_docker_additional_registries=
# For Operator Framework Images
# openshift_additional_registry_credentials=[{'host':'registry.connect.redhat.com','user':'REPLACE_ME','password':'REPLACE_ME','test_image':'mongodb/enterprise-operator:0.3.2'}]
openshift_examples_modify_imagestreams=true
# Set this line to enable NFS
openshift_enable_unsupported_configurations=True
###########################################################################
### OpenShift Master Vars
###########################################################################
openshift_master_api_port=443
openshift_master_console_port=443
#Default: openshift_master_cluster_method=native
openshift_master_cluster_hostname=loadbalancer.b88e.internal
openshift_master_cluster_public_hostname=loadbalancer.b88e.example.opentlc.com
openshift_master_default_subdomain=apps.b88e.example.opentlc.com
#openshift_master_ca_certificate={'certfile': '/root/intermediate_ca.crt', 'keyfile': '/root/intermediate_ca.key'}
openshift_master_overwrite_named_certificates=True
# Audit log
# openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# ocp-ha-lab
# AWS Autoscaler
#openshift_master_bootstrap_auto_approve=false
# This variable is a cluster identifier unique to the AWS Availability Zone. Using this avoids potential issues in Amazon Web Services (AWS) with multiple zones or multiple clusters.
#openshift_clusterid
###########################################################################
### OpenShift Network Vars
###########################################################################
osm_cluster_network_cidr=10.1.0.0/16
openshift_portal_net=172.30.0.0/16
os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy'
###########################################################################
### OpenShift Authentication Vars
###########################################################################
# LDAP AND HTPASSWD Authentication (download ipa-ca.crt first)
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'},{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# Just LDAP
#openshift_master_identity_providers=[{'name': 'ldap', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider','attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': 'uid=admin,cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com', 'bindPassword': 'r3dh4t1!', 'ca': '/etc/origin/master/ipa-ca.crt','insecure': 'false', 'url': 'ldaps://ipa.shared.example.opentlc.com:636/cn=users,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com?uid?sub?(memberOf=cn=ocp-users,cn=groups,cn=accounts,dc=shared,dc=example,dc=opentlc,dc=com)'}]
# Just HTPASSWD
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
# LDAP and HTPASSWD dependencies
openshift_master_htpasswd_file=/root/htpasswd.openshift
#openshift_master_ldap_ca_file=/root/ipa-ca.crt
###########################################################################
### OpenShift Metrics and Logging Vars
###########################################################################
#########################
# Prometheus Metrics
#########################
openshift_hosted_prometheus_deploy=true
openshift_prometheus_namespace=openshift-metrics
openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"}
openshift_cluster_monitoring_operator_install=true
########################
# Cluster Metrics
########################
openshift_metrics_install_metrics=True
openshift_metrics_storage_kind=nfs
openshift_metrics_storage_access_modes=['ReadWriteOnce']
openshift_metrics_storage_nfs_directory=/srv/nfs
openshift_metrics_storage_nfs_options='*(rw,root_squash)'
openshift_metrics_storage_volume_name=metrics
openshift_metrics_storage_volume_size=10Gi
openshift_metrics_storage_labels={'storage': 'metrics'}
openshift_metrics_cassandra_pvc_storage_class_name=''
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}
# Store Metrics for 2 days
openshift_metrics_duration=2
# Suggested Quotas and limits for Prometheus components:
openshift_prometheus_memory_requests=2Gi
openshift_prometheus_cpu_requests=750m
openshift_prometheus_memory_limit=2Gi
openshift_prometheus_cpu_limit=750m
openshift_prometheus_alertmanager_memory_requests=300Mi
openshift_prometheus_alertmanager_cpu_requests=200m
openshift_prometheus_alertmanager_memory_limit=300Mi
openshift_prometheus_alertmanager_cpu_limit=200m
openshift_prometheus_alertbuffer_memory_requests=300Mi
openshift_prometheus_alertbuffer_cpu_requests=200m
openshift_prometheus_alertbuffer_memory_limit=300Mi
openshift_prometheus_alertbuffer_cpu_limit=200m
# Grafana
openshift_grafana_storage_type=pvc
openshift_grafana_pvc_size=2Gi
openshift_grafana_node_exporter=true
########################
# Cluster Logging
########################
openshift_logging_install_logging=True
openshift_logging_install_eventrouter=True
openshift_logging_storage_kind=nfs
openshift_logging_storage_access_modes=['ReadWriteOnce']
openshift_logging_storage_nfs_directory=/srv/nfs
openshift_logging_storage_nfs_options='*(rw,root_squash)'
openshift_logging_storage_volume_name=logging
openshift_logging_storage_volume_size=10Gi
openshift_logging_storage_labels={'storage': 'logging'}
openshift_logging_es_pvc_storage_class_name=''
openshift_logging_es_memory_limit=8Gi
openshift_logging_es_cluster_size=1
openshift_logging_curator_default_days=2
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_eventrouter_nodeselector={"node-role.kubernetes.io/infra": "true"}
###########################################################################
### OpenShift Router and Registry Vars
###########################################################################
# default selectors for router and registry services
# openshift_router_selector='node-role.kubernetes.io/infra=true'
# openshift_registry_selector='node-role.kubernetes.io/infra=true'
openshift_hosted_router_replicas=2
# openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
openshift_hosted_registry_replicas=1
openshift_hosted_registry_pullthrough=true
openshift_hosted_registry_acceptschema2=true
openshift_hosted_registry_enforcequota=true
openshift_hosted_registry_storage_kind=nfs
openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
openshift_hosted_registry_storage_nfs_directory=/srv/nfs
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
openshift_hosted_registry_storage_volume_name=registry
openshift_hosted_registry_storage_volume_size=20Gi
###########################################################################
### OpenShift Service Catalog Vars
###########################################################################
# default=true
openshift_enable_service_catalog=true
# default=true
template_service_broker_install=true
openshift_template_service_broker_namespaces=['openshift']
# default=true
ansible_service_broker_install=true
ansible_service_broker_local_registry_whitelist=['.*-apb$']
###########################################################################
### OpenShift Hosts
###########################################################################
# openshift_node_labels DEPRECATED
# openshift_node_problem_detector_install
[OSEv3:children]
lb
masters
etcd
nodes
nfs
[lb]
loadbalancer.b88e.internal
[masters]
master1.b88e.internal
master2.b88e.internal
master3.b88e.internal
[etcd]
master1.b88e.internal
master2.b88e.internal
master3.b88e.internal
[nodes]
## These are the masters
master1.b88e.internal openshift_node_group_name='node-config-master'
master2.b88e.internal openshift_node_group_name='node-config-master'
master3.b88e.internal openshift_node_group_name='node-config-master'
## These are infranodes
infranode1.b88e.internal openshift_node_group_name='node-config-infra'
infranode2.b88e.internal openshift_node_group_name='node-config-infra'
## These are regular nodes
node1.b88e.internal openshift_node_group_name='node-config-compute'
node2.b88e.internal openshift_node_group_name='node-config-compute'
[nfs]
support1.b88e.internal
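Before running the playbooks, confirm the inventory parses and every host is reachable. A sketch, assuming the file above is installed as the default inventory (/etc/ansible/hosts):

ansible-inventory --graph
ansible all -m ping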
#!/bin/bash
ansible nodes -m shell -a 'systemctl status docker | grep Active'
ansible all -m shell -a 'yum repolist -v | grep baseurl'
cd /usr/share/ansible/openshift-ansible/
ansible-playbook playbooks/prerequisites.yml
ansible-playbook playbooks/deploy_cluster.yml
# In case of issues, uninstall, fix inventory and try again...
# ansible-playbook playbooks/adhoc/uninstall.yml
# ansible nodes -m shell -a "rm -rf /etc/origin"
# ansible nfs -m shell -a 'rm -rf /srv/nfs/*'
# Post-installation procedure
ansible masters[0] -b -m fetch -a "src=/root/.kube/config dest=/root/.kube/config flat=yes"
oc adm policy add-cluster-role-to-user cluster-admin marina
export GUID=$(hostname | cut -d'.' -f2)
mkdir /root/pvs
# create RWO PVs
for volume in pv{1..100} ; do
cat << EOF > /root/pvs/${volume}
{
  "apiVersion": "v1",
  "kind": "PersistentVolume",
  "metadata": {
    "name": "${volume}"
  },
  "spec": {
    "capacity": {
      "storage": "10Gi"
    },
    "accessModes": [ "ReadWriteOnce" ],
    "nfs": {
      "path": "/srv/nfs/user-vols/${volume}",
      "server": "support1.b88e.internal"
    },
    "persistentVolumeReclaimPolicy": "Recycle"
  }
}
EOF
echo "Created def file for ${volume}";
done;
# create RWX PVs
for volume in pv{101..200} ; do
cat << EOF > /root/pvs/${volume}
{
  "apiVersion": "v1",
  "kind": "PersistentVolume",
  "metadata": {
    "name": "${volume}"
  },
  "spec": {
    "capacity": {
      "storage": "10Gi"
    },
    "accessModes": [ "ReadWriteMany" ],
    "nfs": {
      "path": "/srv/nfs/user-vols/${volume}",
      "server": "support1.b88e.internal"
    },
    "persistentVolumeReclaimPolicy": "Recycle"
  }
}
EOF
echo "Created def file for ${volume}";
done;
cat /root/pvs/* | oc create -f -
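Verify the volumes registered; with pv1 through pv200 defined above there should be 200 of them:

oc get pv | head -n 5
oc get pv --no-headers | wc -l   # expect 200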
#!/bin/bash
firewall-cmd --permanent --add-service=http
firewall-cmd --reload
# Configure /etc/haproxy/haproxy.cfg
export GUID=`hostname | awk -F. '{print $2}'`
MASTER1=`host master1.$GUID.internal | cut -f4 -d" "`
MASTER2=`host master2.$GUID.internal | cut -f4 -d" "`
MASTER3=`host master3.$GUID.internal | cut -f4 -d" "`
INFRANODE1=`host infranode1.$GUID.internal | cut -f4 -d" "`
INFRANODE2=`host infranode2.$GUID.internal | cut -f4 -d" "`
cat <<EOF > /etc/haproxy/haproxy.cfg
# Global settings
#---------------------------------------------------------------------
global
    maxconn 20000
    log /dev/log local0 info
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    user haproxy
    group haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option httplog
    option dontlognull
    # option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 300s
    timeout server 300s
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 20000

listen stats :9000
    mode http
    stats enable
    stats uri /

frontend atomic-openshift-all-the-things-http
    bind *:80
    mode tcp
    option tcplog
    default_backend atomic-openshift-apps-http

frontend atomic-openshift-all-the-things-https
    bind *:443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    tcp-request content accept if { req_ssl_hello_type 1 }
    acl host_masters req_ssl_sni -i loadbalancer.${GUID}.example.opentlc.com loadbalancer.${GUID}.internal
    use_backend atomic-openshift-api if host_masters
    default_backend atomic-openshift-apps-https
backend atomic-openshift-api
    balance source
    mode tcp
    server master0 $MASTER1:443 check
    server master1 $MASTER2:443 check
    server master2 $MASTER3:443 check

backend atomic-openshift-apps-https
    balance source
    mode tcp
    server infranode1 $INFRANODE1:443 check
    server infranode2 $INFRANODE2:443 check

backend atomic-openshift-apps-http
    balance source
    mode tcp
    server infranode1 $INFRANODE1:80 check
    server infranode2 $INFRANODE2:80 check
EOF
systemctl restart haproxy ; systemctl status haproxy
ss -lntp
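Two optional checks: validate the generated configuration and hit the stats listener defined above. A sketch:

haproxy -c -f /etc/haproxy/haproxy.cfg
curl -s http://localhost:9000/ | head -n 5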
#!/bin/bash
# Import Build Images into Integrated Registry
export GUID=`hostname | awk -F. '{print $2}'`
# gogs container image was pulled into the isolated registry with skopeo. Now import that image into your integrated registry with oc tools.
oc import-image docker-registry.default.svc:5000/gogs:latest --from=isolated1.$GUID.internal:5000/wkulhanek/gogs:latest --confirm --insecure=true -n openshift
# nexus was pulled into the isolated registry with skopeo. Now import that image into your integrated registry with oc tools so you can deploy it soon.
oc import-image docker-registry.default.svc:5000/sonatype/nexus3:latest --from=isolated1.$GUID.internal:5000/sonatype/nexus3:latest --confirm --insecure=true -n openshift
# gogs will require a postgresql database. Import it from the isolated registry into the integrated registry, so we can deploy it soon.
# delete the postgresql image stream first
oc delete is postgresql -n openshift
oc import-image docker-registry.default.svc:5000/rhscl/postgresql:9.6 --from=isolated1.$GUID.internal:5000/rhscl/postgresql-96-rhel7:latest --confirm --insecure=true -n openshift
# tag the postgresql image as the latest available
oc tag postgresql:9.6 postgresql:latest -n openshift
oc delete is jboss-eap71-openshift -n openshift
oc import-image docker-registry.default.svc:5000/openshift/jboss-eap71-openshift:1.3 --from=isolated1.$GUID.internal:5000/jboss-eap-7/eap71-openshift --confirm --insecure=true -n openshift
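To confirm the imports landed, list the image streams in the openshift namespace:

oc get is -n openshift | egrep 'gogs|nexus3|postgresql|jboss-eap71-openshift'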
#!/bin/bash
oc new-project cicd
echo "apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nexus-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi" | oc create -f -
oc new-app openshift/nexus3:latest
oc rollout pause dc nexus3
oc patch dc nexus3 --patch='{ "spec": { "strategy": { "type": "Recreate" }}}'
oc set resources dc nexus3 --limits=memory=2Gi --requests=memory=1Gi
oc set volume dc/nexus3 --add --overwrite --name=nexus3-volume-1 --mount-path=/nexus-data/ --type persistentVolumeClaim --claim-name=nexus-pvc
oc set probe dc/nexus3 --liveness --failure-threshold 3 --initial-delay-seconds 60 -- echo ok
oc set probe dc/nexus3 --readiness --failure-threshold 3 --initial-delay-seconds 60 --get-url=http://:8081/repository/maven-public/
oc rollout resume dc nexus3
oc expose svc nexus3
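Nexus can take a few minutes to pass its readiness probe after the rollout resumes. A sketch to watch the rollout and probe the exposed route (assumes the cicd project created above):

oc rollout status dc/nexus3 -n cicd
curl -s -o /dev/null -w '%{http_code}\n' http://$(oc get route nexus3 -n cicd -o jsonpath='{.spec.host}')/repository/maven-public/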
#!/bin/bash
echo 'kind: Template
apiVersion: v1
metadata:
  annotations:
    description: The Gogs git server. Requires a PostgreSQL database.
    tags: instant-app,gogs,datastore
    iconClass: icon-github
  name: gogs
objects:
- kind: Service
  apiVersion: v1
  metadata:
    annotations:
      description: The Gogs servers http port
    labels:
      app: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}
  spec:
    ports:
    - name: 3000-tcp
      port: 3000
      protocol: TCP
      targetPort: 3000
    selector:
      app: ${APPLICATION_NAME}
      deploymentconfig: ${APPLICATION_NAME}
    sessionAffinity: None
    type: ClusterIP
- kind: Route
  apiVersion: v1
  id: ${APPLICATION_NAME}-http
  metadata:
    annotations:
      description: Route for applications http service.
    labels:
      app: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}
  spec:
    host: ${GOGS_ROUTE}
    to:
      name: ${APPLICATION_NAME}
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    labels:
      app: ${APPLICATION_NAME}
    name: ${APPLICATION_NAME}
  spec:
    replicas: 1
    selector:
      app: ${APPLICATION_NAME}
      deploymentconfig: ${APPLICATION_NAME}
    strategy:
      rollingParams:
        intervalSeconds: 1
        maxSurge: 25%
        maxUnavailable: 25%
        timeoutSeconds: 600
        updatePeriodSeconds: 1
      type: Rolling
    template:
      metadata:
        labels:
          app: ${APPLICATION_NAME}
          deploymentconfig: ${APPLICATION_NAME}
      spec:
        containers:
        - image: " " # placeholder; filled in by the image change trigger below
          imagePullPolicy: Always
          name: ${APPLICATION_NAME}
          ports:
          - containerPort: 3000
            protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          volumeMounts:
          - name: gogs-data
            mountPath: /data
          - name: gogs-config
            mountPath: /opt/gogs/custom/conf
          readinessProbe:
            httpGet:
              path: /
              port: 3000
              scheme: HTTP
            initialDelaySeconds: 3
            timeoutSeconds: 1
            periodSeconds: 20
            successThreshold: 1
            failureThreshold: 3
          livenessProbe:
            httpGet:
              path: /
              port: 3000
              scheme: HTTP
            initialDelaySeconds: 3
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        terminationGracePeriodSeconds: 30
        volumes:
        - name: gogs-data
          persistentVolumeClaim:
            claimName: gogs-data
        - name: gogs-config
          configMap:
            name: gogs-config
            items:
            - key: app.ini
              path: app.ini
    test: false
    triggers:
    - type: ConfigChange
    - imageChangeParams:
        automatic: true
        containerNames:
        - ${APPLICATION_NAME}
        from:
          kind: ImageStreamTag
          name: ${GOGS_IMAGE}
          namespace: openshift
      type: ImageChange
- kind: PersistentVolumeClaim
  apiVersion: v1
  metadata:
    name: gogs-data
  spec:
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: ${GOGS_VOLUME_CAPACITY}
- kind: ConfigMap
  apiVersion: v1
  metadata:
    name: gogs-config
  data:
    app.ini: |
      APP_NAME = Gogs
      RUN_MODE = prod
      RUN_USER = gogs
      [database]
      DB_TYPE = postgres
      HOST = postgresql:5432
      NAME = ${DATABASE_NAME}
      USER = ${DATABASE_USER}
      PASSWD = ${DATABASE_PASSWORD}
      SSL_MODE = disable
      [repository]
      ROOT = /data/repositories
      [server]
      ROOT_URL=http://${GOGS_ROUTE}
      [security]
      INSTALL_LOCK = true
      [mailer]
      ENABLED = false
      [service]
      ENABLE_CAPTCHA = false
      REGISTER_EMAIL_CONFIRM = false
      ENABLE_NOTIFY_MAIL = false
      DISABLE_REGISTRATION = false
      REQUIRE_SIGNIN_VIEW = false
      [picture]
      DISABLE_GRAVATAR = false
      ENABLE_FEDERATED_AVATAR = true
      [webhook]
      SKIP_TLS_VERIFY = true
parameters:
- name: APPLICATION_NAME
  description: The name for the application.
  required: true
  value: gogs
- name: GOGS_ROUTE
  description: The route for the Gogs Application
  required: true
- name: GOGS_VOLUME_CAPACITY
  description: Volume space available for data, e.g. 512Mi, 2Gi
  required: true
  value: 4Gi
- name: DATABASE_USER
  displayName: Database Username
  required: true
  value: gogs
- name: DATABASE_PASSWORD
  displayName: Database Password
  required: true
  value: gogs
- name: DATABASE_NAME
  displayName: Database Name
  required: true
  value: gogs
- name: GOGS_IMAGE
  displayName: Gogs Image and tag
  required: true
  value: gogs:latest' > $HOME/gogs.yaml
oc process -f $HOME/gogs.yaml --param GOGS_ROUTE=gogs-cicd.apps.b88e.example.opentlc.com | oc create -f -
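The template's app.ini points Gogs at a PostgreSQL service named postgresql, which nothing above creates. A sketch of deploying it from the postgresql:9.6 image stream imported earlier, reusing the template's default gogs/gogs credentials, then watching the Gogs rollout:

oc new-app postgresql:9.6 --name=postgresql -n cicd -e POSTGRESQL_USER=gogs -e POSTGRESQL_PASSWORD=gogs -e POSTGRESQL_DATABASE=gogs
oc rollout status dc/gogs -n cicd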