backup of Linux commands
38 2018-08-14 09:27:23 yum info
39 2018-08-14 16:12:02 rpm -qa
40 2018-08-14 16:12:18 rpm -qa | grep spider
41 2018-08-14 16:12:27 rpm -qa | grep oak
42 2018-08-14 16:14:02 rpm -qa | grep semaphor
43 2018-08-14 16:14:10 rpm -qa | grep sem
44 2018-08-14 16:14:44 rpm -qa | grep Spi
45 2018-08-14 16:15:04 yum remove SpiderOakGroups
46 2018-08-14 16:15:58 systemctl --list-units
47 2018-08-14 16:16:03 systemctl list-units
48 2018-08-14 16:16:11 systemctl list-units | grep Spi
49 2018-08-14 16:16:14 systemctl list-units | grep spi
50 2018-08-14 16:16:16 systemctl list-units | grep oak
51 2018-08-14 16:16:22 yum remove SpiderOakGroups
52 2018-08-14 16:16:27 rpm -qa | grep Spi
53 2018-08-14 16:16:53 rpm --help
54 2018-08-14 16:17:34 rpm -e SpiderOakGroups
55 2018-08-14 16:18:02 yum remove redhat-internal-software-management
56 2018-08-14 16:18:12 yum --help | grep force
57 2018-08-14 16:18:19 yum --help
58 2018-08-14 16:19:00 rpm -e --nodeps SpiderOakGroups
59 2018-08-14 16:19:07 rpm -qa | grep Spi
60 2018-08-14 16:20:25 rpm -qa | grep redhat-internal-software-management
64 2018-08-15 00:24:21 sfdisk
65 2018-08-15 00:24:44 sfdisk -l
66 2018-08-15 00:42:58 passwd administrator
67 2018-08-15 00:43:17 passwd |more
68 2018-08-15 00:43:35 passwd user administrator
69 2018-08-15 00:43:48 passwd admin
70 2018-08-15 00:43:53 passwd administrator
71 2018-08-15 00:44:04 passwd root
72 2018-08-15 00:44:58 sudo root
73 2018-08-15 00:45:05 sudo su
74 2018-08-15 15:39:39 yum search lvm | grep gui
75 2018-08-15 15:39:51 yum search lvm
76 2018-08-15 15:40:40 vgs
77 2018-08-15 15:42:49 pvs
78 2018-08-15 15:43:15 vgs
79 2018-08-15 15:43:36 lvs
80 2018-08-15 15:43:55 vgdisplay
81 2018-08-15 15:44:45 lvcreate -h
82 2018-08-15 15:44:56 man lvcreate
83 2018-08-15 15:46:54 lvcreate -L 200GiB -n VMData RHEL7CSB
84 2018-08-15 15:46:57 vgs
85 2018-08-15 15:47:09 lvs
86 2018-08-15 15:47:55 mkfs.xfs /dev/mapper/RHEL7CSB-VMData
87 2018-08-15 15:48:57 mount
88 2018-08-15 15:49:00 df -h
89 2018-08-15 15:50:53 mount /dev/mapper/RHEL7CSB-VMData /home/mdalacor/VirtualMachines
90 2018-08-15 15:51:13 df -h
lsblk
sudo virt-manager
scp config-nfs.sh services:/home/student/config-nfs.sh
91 2018-08-15 15:51:30 ll /home/mdalacor/
92 2018-08-15 15:51:56 chown mdalacor:mdalacor /home/mdalacor/VirtualMachines/
93 2018-08-15 15:51:58 ll /home/mdalacor/
94 2018-08-15 15:52:19 umount /dev/mapper/RHEL7CSB-VMData /home/mdalacor/VirtualMachines
95 2018-08-15 15:52:29 df -h
96 2018-08-15 15:53:14 ll -Z /home/mdalacor/
97 2018-08-15 15:53:33 getsebool
98 2018-08-15 15:53:40 getenforce
99 2018-08-15 15:54:04 history
100 2018-08-15 15:56:12 mount /dev/mapper/RHEL7CSB-VMData /home/mdalacor/VirtualMachines
101 2018-08-15 15:56:15 ll -Z
102 2018-08-15 15:56:49 restorecon /home/mdalacor/VirtualMachines/
setfacl -m u:qemu:rw /run/media/mdalacor/CEPH/
103 2018-08-15 15:56:56 ll -Z
104 2018-08-15 15:58:50 ip addr
105* 2018-08-15 15:58:54 ip link
106 2018-08-15 15:58:58 ip addr
107 2018-08-15 16:44:13 history
subscription-manager register --username <username> --password <password> --auto-attach
subscription-manager list --available
ip a |grep inet
[root@infra1 etc]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:sFNDZk00AxVovzWsqVY8oozfuzAXiU1SdoKtS7H+5o4 root@mdalacor.remote.csb
The key's randomart image is:
+---[RSA 2048]----+
| oOBO. |
| o*+o.o |
| o=+. . |
| +O o. + |
| o+.S. = . |
| o...B |
| ooo.+ . |
| . o*= |
| .E=*o |
+----[SHA256]-----+
dnsmasq
300 vi dnsmasq.conf
301 systemctl enable dnsmasq.service
302 systemctl enable dnsmasq
303 systemctl enable dnsmasq.service
304 ls dns*
305 rd dnsmasq.conf
306 rm dnsmasq.conf
307 ip a |grep inet
308 ping infra1.contoso.com
309 ping app1.contoso.com
310 ping infra1.contoso.com
311 ping app1.contoso.com
312 ping app2.contoso.com
313 ping infra1.contoso.com
314 ping app2.contoso.com
315 cat /etc/resolv.conf
316 route -n
317 nslookup infra1.
318 nslookup infra1.contoso.com
319 ping app1.contoso.com
320 ping app2.contoso.com
321 ssh-keygen
322 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com do ssh-copy-id -i ~/ .ssh/id_rsa.pub $host; done
323 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com do ssh-copy-id -i ~/ .ssh/id_rsa.pub $host done; done
324 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com do ssh-copy-id -i ~/.ssh/id_rsa.pub $host; done
325 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com; do ssh-copy-id -i ~/ .ssh/id_rsa.pub $host; done
326 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com; do ssh-copy-id -i ~/root/ .ssh/id_rsa.pub $host; done
327 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com; do ssh-copy-id -i ~/ .ssh/id_rsa.pub $host; done -f
328 for host in infra1.contoso.com infra1.contoso.com app1.contoso.com app2.contoso.com; do ssh-copy-id -i ~/ .ssh/id_rsa.pub $host -f; done
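The loops above fail because of the space in "~/ .ssh" and the missing ";" before "do"; a corrected version (dropping the duplicated infra1 entry):
for host in infra1.contoso.com app1.contoso.com app2.contoso.com; do ssh-copy-id -i ~/.ssh/id_rsa.pub $host; done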
334 ll
335 cd .ssh
338 cd .ssh
while [ true ]; do curl http://teste1-meu-projeto1.apps.contoso.com/; sleep 1; echo; done
340 cd id_rsa
341 ssh-copy-id -i id_rsa.pub master1.contoso.int
343 ssh-copy-id -i id_rsa.pub app1.contoso.int
344 ssh-copy-id -i id_rsa.pub app2.contoso.int
345 ssh
346 ssh root@app1.contoso.com
347 cd ..
348 ll
349 ssh
350 ssh root@app2.contoso.com
351 ssh root@app1.contoso.com
352 exit
353 history
354 shutdown
355 cat /etc/sysconfig/docker
356 shutdown
357 cat /etc/sysconfig/docker
358 vi /etc/sysconfig/docker
361 vi /etc/sysconfig/docker
364 cat /etc/sysconfig/docker-storage
365 vi /etc/sysconfig/docker-storage
366 cat /etc/sysconfig/docker-storage
367 ansible
368 cd /etc/ansible/
369 ll
370 cat hosts
371 lsblk
372 pwd
373 ls -la
374 vi hosts
375 ansible -m ping nodes
376 ansible -m ping infra1.contoso.com
377 ansible -m shell -a "free -m" infra1.contoso.com
378 ansible -m shell -a "free -m" nodes
379 ansible -m shell -a "fdisk -l" nodes
380 nslookup
381 subscription-manager list --available
382 ansible -m ping nodes
383 ansible -m shell -a "fdisk -l |grep vdc" nodes
384 ansible -m ping nodes
385 nslookup
386 nslookup app1.contoso.com
387 ansible -m shell -a "fdisk -l |grep vdc" nodes
388 ansible -m shell -a "fdisk -l |grep vdd" nodes
389 vi hosts
390 ansible -m shell -a "date" all
391 ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
392 shutdown
393 cat /etc/sysconfig/docker
394 ping app1
395 ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
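A minimal /etc/ansible/hosts fragment consistent with the ad-hoc "nodes" commands above (hostnames taken from this log; a real openshift-ansible inventory also needs [OSEv3:children], [masters], [etcd] and the cluster variables):
[nodes]
infra1.contoso.com
app1.contoso.com
app2.contoso.com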
password Pistoladear
(/root/.ssh/id_rsa)
126 htpasswd /etc/origin/master/htpasswd admin
127 oc get po --all-namespaces
128 htpasswd /etc/origin/master/htpasswd superadmin
password 12345
129 oc adm policy add-cluster-role-to-user cluster-admin superadmin
130 scp hosts-ansible 10.0.0.1:/home/mdalacor/.
cfdisk(8), sfdisk(8), mkfs(8), parted(8), partprobe(8), kpartx(8)
yum-config-manager --enable <repo>
id
sudo docker stats
522 2019-02-20 18:06:55 sudo systemctl status docker
523 2019-02-20 18:07:17 sudo systemctl enable docker
524 2019-02-20 18:07:31 sudo systemctl start docker
525 2019-02-20 18:07:38 sudo systemctl status docker
526 2019-02-20 18:07:50 sudo docker info
527 2019-02-20 18:08:40 sudo systemctl status firewalld
528 2019-02-20 18:14:31 vi /etc/docker/daemon.json
529 2019-02-20 18:15:35 sudo vi /etc/docker/daemon.json
530 2019-02-20 18:16:56 sudo groupadd docker
531 2019-02-20 18:17:13 sudo usermod -a -G docker mdalacor
532 2019-02-20 18:17:21 id
533 2019-02-20 18:17:49 sudo systemctl restart docker
534 2019-02-20 18:17:53 id
535 2019-02-20 18:18:06 docker version
536 2019-02-22 09:01:29 sudo vi /etc/docker/daemon.json
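What /etc/docker/daemon.json was being edited to contain is not in the log; a guess based on the --insecure-registry range used later in these notes:
{
  "insecure-registries": ["172.30.0.0/16"]
}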
2019-02-23 15:47:12 pvs
1383 2019-02-23 15:47:35 pvscan
1384 2019-02-23 15:47:45 pvdelete
1385 2019-02-23 15:48:02 pvremove
1386 2019-02-23 15:48:08 pvremove --help
1387 2019-02-23 15:49:29 pvremove [unknown]
1388 2019-02-23 15:50:59 pvremove daxHos-uHJD-g9WF-DOae-0ume-rVgd-ePW0Yp
1389 2019-02-23 15:51:37 pvremove daxHos-uHJD-g9WF-DOae-0ume-rVgd-ePW0Yp -f
1390 2019-02-23 15:51:43 pvs
1391 2019-02-23 15:56:42 pvdisplay
1392 2019-02-23 15:57:59 lsblk
1393 2019-02-23 15:59:27 vgreduce --removemissing
1394 2019-02-23 15:59:36 vgreduce --help
1395 2019-02-23 16:00:28 vgreduce --removemissing RHEL7CSB
1396 2019-02-23 16:00:51 pvs
1397 2019-02-23 16:01:01 pvscan
1398 2019-02-23 16:11:59 vgck --help
1399 2019-02-23 16:13:43 vgck RHEL7CSB
1400 2019-02-23 16:13:51 vgck RHEL7CSB -y
1401 2019-02-23 16:13:57 reboot
1402 2019-02-24 09:03:20 pvs
1403 2019-02-24 09:11:01 yum update
1404 2019-02-24 12:24:35 yum update --skip-broken
while [ true ]; do curl http://teste2-teste2.apps.contoso.int/; sleep 1; echo; done
oc cluster up \
--public-hostname=192.168.15.54 \
--host-data-dir=/var/lib/origin/openshift.local.etcd \
--host-config-dir=/var/lib/origin/openshift.local.config \
--host-pv-dir=/var/lib/origin/openshift.local.pv \
--metrics \
--version=v3.7.0 \
--use-existing-config
oc cluster up --public-hostname=192.168.15.54 --base-dir=/var/lib/origin/
oc cluster up --public-hostname=192.168.15.54 --loglevel=9
ip addr |grep inet
for m in $(mount | grep openshift | awk '{print $3}'); do umount $m; done
rm -rf *
ctrl shift T
iptables -L
iptables -t nat -F
iptables -F
iptables -L
1638 2019-03-07 11:35:20 ll
1639 2019-03-07 11:35:32 iptables -L
1640 2019-03-07 11:35:43 iptables -t nat -F
1641 2019-03-07 11:35:51 iptables -F
1642 2019-03-07 11:35:56 iptables -L
1643 2019-03-07 11:36:02 ll
1644 2019-03-07 11:36:06 pwd
1645 2019-03-07 11:36:16 systemctl docker stop
1646 2019-03-07 11:36:59 systemctl stop docker
1647 2019-03-07 11:37:10 pwd
1648 2019-03-07 11:37:12 cd ..
1649 2019-03-07 11:37:14 ll
1650 2019-03-07 11:37:16 cd docker
1651 2019-03-07 11:37:17 ll
1652 2019-03-07 11:37:52 systemctl start docker
1653 2019-03-07 11:38:22 ip a
1654* 2019-03-07 11:39:16
1655 2019-03-07 11:45:55 cd ..
1656 2019-03-07 11:46:24 oc login -u system:admin
1657 2019-03-07 11:46:59 oc get po
1658 2019-03-07 11:48:58 oc project openshift-web-console
1659 2019-03-07 11:49:05 oc get po
1660 2019-03-07 11:49:33 oc delete po webconsole-6899bd77b8-lgtf4
1661 2019-03-07 11:49:44 oc get po
1662 2019-03-07 12:00:25 oc login -u system:admin
1663 2019-03-07 12:00:35 oc create namespace openshift-web-console
1664 2019-03-07 12:00:55 oc process -f install/origin-web-console/console-template.yaml -p "API_SERVER_CONFIG=$(cat install/origin-web-console/console-config.yaml)" | oc apply -n openshift-web-console -f -
1665 2019-03-07 12:01:06 oc process -f install/origin-web-console/console-template.yaml -p "API_SERVER_CONFIG=$(cat install/origin-web-console/console-config.yaml)" | oc apply -n openshift-web-console -f
1666 2019-03-07 13:30:03 oc cluster down
1602 2019-03-07 11:23:11 oc cluster up --public-hostname=10.125.137.190 --loglevel=9
1603 2019-03-07 11:23:24 oc cluster down
1604 2019-03-07 11:23:45 ll /var/lib/origin/
1609 2019-03-07 11:25:14 for m in $(mount | grep openshift | awk '{print $3}'); do umount $m; done
1610 2019-03-07 11:25:23 rm -rf *
1611 2019-03-07 11:25:44 oc cluster up --public-hostname=172.17.0.1/ --loglevel=9
1615 2019-03-07 11:27:50 cd /var/lib/origin/
1616 2019-03-07 11:27:53 ll
1617 2019-03-07 11:28:04 rm -rf *
1618 2019-03-07 11:28:09 for m in $(mount | grep openshift | awk '{print $3}'); do umount $m; done
1623 2019-03-07 11:29:45 iptables -F
1624 2019-03-07 11:29:50 iptables -L
1625 2019-03-07 11:30:45 iptables -t nat -F
service iptables stop
systemctl status iptables
OR
# service iptables status
https://opensource.com/article/18/11/local-okd-cluster-linux
Password g4qzJrX2nV
regular user: mdalacor
minishift start --skip-registration
1721 2019-03-07 22:06:08 docker network inspect bridge | grep Subnet
normal user running SUDO !!!
1726 2019-03-07 22:08:51 systemctl enable firewalld
1727 2019-03-07 22:09:05 systemctl start firewalld
1728 2019-03-07 22:09:24 firewall-cmd --permanent --new-zone okdlocal
1729 2019-03-07 22:09:36 firewall-cmd --permanent --zone okdlocal --add-source 172.17.0.0/16
1730 2019-03-07 22:09:51 irewall-cmd --permanent --zone okdlocal --add-port 8443/tcp
1731 2019-03-07 22:09:56 firewall-cmd --permanent --zone okdlocal --add-port 8443/tcp
1732 2019-03-07 22:10:19 firewall-cmd --permanent --zone okdlocal --add-port 53/udp
1733 2019-03-07 22:10:40 firewall-cmd --permanent --zone okdlocal --add-port 8053/udp
1734 2019-03-07 22:11:39 firewall-cmd --reload
1735 2019-03-07 22:11:53 firewall-cmd --zone okdlocal --list-sources
1736 2019-03-07 22:12:03 firewall-cmd --zone okdlocal --list-ports
1737 2019-03-07 22:12:31 oc version
1738 2019-03-07 22:13:03 ip a |grep inet
1149 2019-05-08 17:37:35 dig +short cakephp-mysql-example-meu-projeto1.apps.contoso.com
1150 2019-05-08 17:37:50 dig +short google.com
1151 2019-05-08 17:37:57 dig +short cakephp-mysql-example-meu-projeto1.apps.contoso.com
1152 2019-05-08 17:38:04 dig +short *.apps.contoso.com
[root@mdalacor mdalacor]# ansible --version
config file = /etc/ansible/ansible.cfg
[root@mdalacor mdalacor]# touch ansible.cfg
[root@mdalacor mdalacor]# ansible --version
config file = /home/mdalacor/ansible.cfg
dig +short cakephp-mysql-example-meu-projeto1.apps.contoso.com
virtlib
1919 2019-06-04 15:08:52 man getsebool
1920 2019-06-04 15:09:30 man getenforce
1921 2019-06-04 15:09:48 man restorecon
1922 2019-06-04 15:10:06 ll
1923 2019-06-04 15:11:00 pwd
1924 2019-06-04 15:11:19 restorecon /run/media/mdalacor/CEPH/
1925 2019-06-04 15:19:35 chown mdalacor:mdalacor /run/media/mdalacor/CEPH/
1926 2019-06-04 15:20:39 ll
1927 2019-06-04 15:20:43 cd CEPH/
PATH=$PATH:/usr/src/git-2.9.5
export PATH=$PATH:/usr/src/git-2.9.5
To identify the repo file that contains the repo, run the following command:
grep $reponame /etc/yum.repos.d/*
Example: grep rhel-7-workstation-htb-rpms /etc/yum.repos.d/*
Edit the file shown in the output with the editor of your choice
Search for the repo name
Change the line in the repo section from "enabled = 1" to "enabled = 0"
Save the file
Run updates again -> yum update
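The same result non-interactively, using yum-config-manager as noted earlier in these notes:
yum-config-manager --disable rhel-7-workstation-htb-rpms
yum update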
for the relay
relay login -k ba46d47e-4740-44ec-9eb8-4607c2cb0e7e -s 7yyh6ZqIKnuA
export RELAY_KEY=ba46d47e-4740-44ec-9eb8-4607c2cb0e7e
export RELAY_SECRET=7yyh6ZqIKnuA
the first time, create the bucket with a name, for example "workshop"
relay forward -b workshop https://master1.contoso.int:8443/apis/build.openshift.io/v1/namespaces/teste-01/buildconfigs/workshop-1/webhooks/b406b6f523b73d62/github
https://master1.contoso.int:8443/apis/build.openshift.io/v1/namespaces/portoa/buildconfigs/web1/webhooks/e927a4701da5ee78/github
(copied from openshift, under builds/builds)
afterwards, just the command: relay forward --bucket workshop
grab the created bucket from the page, for example: https://my.webhookrelay.com/v1/webhooks/e592e9b2-cd16-4d9b-a1e9-f5998ec6b242 and put it in the git webhook
to install: sudo wget -O /usr/local/bin/relay https://storage.googleapis.com/webhookrelay/downloads/relay-linux-amd64
then go to the /usr/local/bin/ directory
chmod 777 relay
docker run -it webhookrelay/webhookrelayd -k ba46d47e-4740-44ec-9eb8-4607c2cb0e7e -s 7yyh6ZqIKnuA -b wokshop
while [ true ]; do curl http://wokshop-meu-projeto1.apps.contoso.com/; sleep 1; echo; done
while [ true ]; do curl http://mobile.apps.contoso.com/; sleep 1; echo; done
grep $reponame /etc/yum.repos.d/*
Example: grep rhel-7-workstation-htb-rpms /etc/yum.repos.d
sudo nautilus
sudo virt-manager
The system was registered with ID: 596ebb66-190f-42ed-8494-9bf41f97e257
The registered system name is: rhel8-demo.contoso.com
oc describe node
oc adm top nodes
oc adm top pods
oc adm manage-node --schedulable=false node2.lab.example.com
oc get node node1.lab.example.com -L region -L zone
oc label node node1 region=us-west zone=power1a --overwrite
oc label node node2 region=us-west zone=power1a --overwrite
oc patch dc myapp --patch '{"spec":{"template":{"nodeSelector":{"env":"qa"}}}}' to set "env=qa"
oc annotate --overwrite namespace default openshift.io/node-selector='region=infra' so that default projects are placed on a specific set of nodes
while [ true ]; do curl http://phpapp-phpapp-dev.apps.contoso.com; sleep 1; echo; done
imagem php para o pod https://bitbucket.org/redhatbsb/phpapp/src/master/
https://hub.docker.com/_/wordpress
Vinyl-72@esquila
https://apstudents.collegeboard.org/
https://apscore.collegeboard.org/scores
[root@mdalacor mdalacor]# vi /etc/libvirt/qemu.conf #uncomment user = "root"
[root@mdalacor mdalacor]# systemctl restart libvirtd.service
chown mdalacor:mdalacor /run/
getfacl /run/media/mdalacor/CEPH/cfme-rhevm-5.10.5.1-1.x86_64.qcow2
setfacl -m u:libvirt:rwx /run/media/mdalacor/CEPH/cfme-rhevm-5.10.5.1-1.x86_64.qcow2
https://master1.contoso.com:8443/apis/build.openshift.io/v1/namespaces/meu-projeto1/buildconfigs/wokshop/webhooks/6b1dc8040cf06a92/github
https://192.168.42.28:8443/apis/build.openshift.io/v1/namespaces/workshop-okd/buildconfigs/workshop-okd1/webhooks/b1c794d56e6377d5/github
[root@mdalacor mdalacor]# findmnt /dev/sda1
TARGET SOURCE FSTYPE OPTIONS
/run/media/mdalacor/RHEL-7.5 Server.x86_64 /dev/sda1 iso9660 ro,nosuid,nodev,relatime,uid=114236,gid=114236,iocharset=utf8,mode=0400,dmode=0500
***sei
615 oc project openshift
616 oc get templates
617 oc edit template sei
618 oc login https://master1.contoso.com:8443 --token=Y-u3KDSqKkzu0nTOSae30hH4FvaepSE0ZVmibarnJiY
619 oc edit template sei
620 oc get storageclass
621 oc edit storageclass glusterfs-storage
622 oc patch storageclass glusterfs-storage -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
623 oc get storageclass
624 oc get networkpolicy
625 oc get pods --all-namespaces
626 oc project sei-03
627 oc get job
628 oc rollout latest job sei-job-mysql
629 oc get jobs/sei-job-mysql -o yaml > job.yaml
630 oc delete job sei-job-mysql
631 oc create -f job.yaml
632 oc get job
633 vim job.yaml
634 vi job.yaml
635 oc create -f job.yaml
636 oc get jobs
637 oc get pods
638 oc logs -f sei-job-my
639 oc logs -f sei-job-mysql-zxkfk
640 history
641 vi job.yaml
642 oc get jobs
643 oc get job
644 oc create -f job.yaml
645 vi job.yaml
646 oc create -f job.yaml
647 oc delete job sei-job-mysql
648 oc create -f job.yaml
649 oc get job
650 oc project sei-04
651 oc get job
652 oc delete job sei-job-mysql
653 oc create -f job.yaml
654 oc get jobs
655 oc delete job sei-job-mysql
656 oc project sei-04
657 oc get job
658 oc get jobs/sei-job-mysql -o yaml > job.yaml
659 oc rollout latest job sei-job-mysql
660 oc create -f job.yaml
661 ssh root@app1.contoso.com
662 ssh root@app2.contoso.com
663 shutdown 0
664 relay forward --bucket workshop
665 oc project sei-05
666 oc login https://master1.contoso.com:8443 --token=L4wTHhhoM0K_vFzQvo87eXvZMGDVxB-Hvxl9ZklqgXY
667 oc project sei-05
668 oc get job
669 oc delete job sei-job-mysql
670 oc create -f job.yaml
671 ll
672 cat job.yaml
673 vi job.yaml
674 oc login https://master1.contoso.com:8443 --token=L4wTHhhoM0K_vFzQvo87eXvZMGDVxB-Hvxl9ZklqgXY
675 oc project sei-06
676 oc get job
677 oc login https://master1.contoso.com:8443 --token=L4wTHhhoM0K_vFzQvo87eXvZMGDVxB-Hvxl9ZklqgXY
678 oc edit template sei
679 oc project openshift
680 oc edit template sei
681 clear
682 oc edit template sei
683 oc delete template sei
***sei
study for the exam:
user creation, assignment to projects (see the sketch after this list)
PVCs for the registry and pointing to the internal registry
use of YAML examples to create these resources
playbook for installing monitoring
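A sketch of the first topic with placeholder user and project names (htpasswd file path as used elsewhere in these notes):
htpasswd -b /etc/origin/master/htpasswd newdev 'S3cr3t'
oc adm policy add-role-to-user edit newdev -n meu-projeto1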
*****
119 vi /etc/sysconfig/network-scripts/ifcfg-eth0
120 ip as
121 ip a s
122 nmcli con show
123 nmcli del redewire
124 ifconfig eth0
125 vi /etc/sysconfig/network-scripts/ifcfg-eth0
126 ls /etc/sysconfig/network-scripts/
127 rm /etc/sysconfig/network-scripts/ifcfg-redewire
128 systemctl restart network
129 ip a s
130 ip route
131 curl https://www.uol.com.br
sysctl -n kernel.hostname
***
hostnamectl set-hostname app2.contoso.int
vi /etc/sysconfig/network-scripts/ifcfg-ens3
#ifcfg-xxxx
TYPE=Ethernet
DEVICE=eth0
BOOTPROTO=static
IPADDR=10.0.0.10
NETMASK=255.255.240.0
GATEWAY=10.0.0.1
PEERDNS=yes
DNSDOMAIN=contoso.int
NM_CONTROLLED=yes
DNS1=10.0.0.7
HWADDR=52:54:00:37:f3:52
ONBOOT=yes
USERCTL=no
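After editing the ifcfg file, restart networking so the change takes effect (same command used earlier in these notes):
systemctl restart network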
********
sudo virt-customize -a ./mdalacor/VirtualMachines/app1.qcow2 --root-password password:pistoladear --uninstall cloud-init
guestfish --rw -a ./mdalacor/VirtualMachines/app1.qcow2
><fs> run
><fs> list-filesystems
/dev/sda1: xfs
><fs> mount /dev/sda1 /
><fs> vi /etc/shadow
><fs> quit
************
2472 2019-09-19 11:10:16 ssh-copy-id -i /root/.ssh/id_rsa.pub root@infra1.contoso.int
2473 2019-09-19 11:10:35 ssh-copy-id -i id_rsa.pub root@10.0.0.7
1349 2019-09-19 18:22:24 cat /etc/resolv.conf
1350 2019-09-19 18:23:01 cd /etc/NetworkManager/
1351 2019-09-19 18:23:01 ls
1352 2019-09-19 18:23:06 cat conf.d/crc-nm-dnsmasq.conf
1353 2019-09-19 18:23:13 cat dnsmasq.d/crc.conf
1354 2019-09-19 18:23:33 systemctl restart NetworkManager
1355 2019-09-19 18:23:47 cat /etc/re
1356 2019-09-19 18:23:50 cat /etc/resolv.conf
1357 2019-09-19 18:24:03 ps aux | grep dnsmasq
1358 2019-09-19 18:24:11 crc
1359 2019-09-19 18:24:15 which crc
1360 2019-09-19 18:24:27 crc version
1361 2019-09-19 18:24:29 crc setup
1362 2019-09-19 18:24:57 cat /etc/resolv.conf
[try 1]: Forwarding 'host_mod' to json server 'https://milan1.contoso.int/ipa/session/json'
SSSD enabled
Configured /etc/openldap/ldap.conf
Configured /etc/ssh/ssh_config
Configured /etc/ssh/sshd_config
Configuring contoso.int as NIS domain.
Client configuration complete.
The ipa-client-install command was successful
==============================================================================
Setup complete
Next steps:
1. You must make sure these network ports are open:
TCP Ports:
* 80, 443: HTTP/HTTPS
* 389, 636: LDAP/LDAPS
* 88, 464: kerberos
* 53: bind
UDP Ports:
* 88, 464: kerberos
* 53: bind
* 123: ntp
2. You can now obtain a kerberos ticket using the command: 'kinit admin'
This ticket will allow you to use the IPA tools (e.g., ipa user-add)
and the web user interface.
Be sure to back up the CA certificates stored in /root/cacert.p12
These files are required to create replicas. The password for these
files is the Directory Manager password
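A sketch of opening the listed ports with firewalld on the IPA server (an assumption; the installer does not open them for you):
for p in 80/tcp 443/tcp 389/tcp 636/tcp 88/tcp 464/tcp 53/tcp 88/udp 464/udp 53/udp 123/udp; do firewall-cmd --permanent --add-port=$p; done
firewall-cmd --reload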
SKU: ES0113909
Contract: 11946936
Pool ID: 8a85f99b6b498682016b521dfe463949
Provides Management: Yes
subscription-manager attach --pool=8a85f99b6b498682016b521dfe463949
subscription-manager repos --disable="*"
subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
--enable="rhel-7-server-ose-3.11-rpms" \
--enable="rhel-7-server-ansible-2.6-rpms"
# yum install wget git net-tools bind-utils yum-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct
# yum update
# reboot
on the bastion server:
yum install openshift-ansible
put the inventory file in /etc/ansible/hosts
yum install docker-1.13.1
# cat <<EOF > /etc/sysconfig/docker-storage-setup
DEVS=/dev/sdb
VG=docker-vg
EOF
# docker-storage-setup
in the file /etc/sysconfig/docker
OPTIONS='--selinux-enabled --signature-verification=False --insecure-registry=172.30.0.0/16 --selinux-enabled --log-opt max-size=1M --log-opt max-file=3'
# yum install glusterfs-fuse
# subscription-manager repos --enable=rh-gluster-3-client-for-rhel-7-server-rpms
# yum update glusterfs-fuse
vi /etc/environment
no_proxy=.contoso.int,10.0.0.1,10.0.0.2,10.0.0.10,.cluster.local,.svc,localhost,127.0.0.1,172.30.0.1
inventory file is in the /etc/ansible/hosts
$ cd /usr/share/ansible/openshift-ansible
$ ansible-playbook [-i /path/to/inventory] \
playbooks/prerequisites.yml
$ cd /usr/share/ansible/openshift-ansible
$ ansible-playbook [-i /path/to/inventory] \
playbooks/deploy_cluster.yml
TASK [openshift_node_group : Wait for the sync daemonset to become ready and available] ***************************************************************
FAILED - RETRYING: Wait for the sync daemonset to become ready and available
This phase can be restarted by running: playbooks/openshift-master/config.yml
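To rerun just that phase as the message suggests (inventory path from the note above):
cd /usr/share/ansible/openshift-ansible
ansible-playbook -i /etc/ansible/hosts playbooks/openshift-master/config.yml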
# openshift_node_groups=[{'name': 'node-config-master-infra', 'labels': ['node-role.kubernetes.io/master=true','node-role.kubernetes.io/infra=true'], 'edits': [{'key': 'kubeletArguments.node-ip', 'value': ['10.0.0.10']}]}, {'name': 'node-config-compute1', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{'key': 'kubeletArguments.node-ip', 'value': ['10.0.0.11']}]}, {'name': 'node-config-compute2', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{'key': 'kubeletArguments.node-ip', 'value': ['10.0.0.12']}]}]
openshift_node_groups=[{'name': 'node-config-master-infra', 'labels': ['node-role.kubernetes.io/master=true','node-role.kubernetes.io/infra=true']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true']}]
https://docs.openshift.com/container-platform/3.11/install/prerequisites.html#system-requirements
subscription-manager repos --enable="rhel-7-server-ose-3.11-rpms"
Username is 11009103|turntable-mk2
eyJhbGciOiJSUzUxMiJ9.eyJzdWIiOiIxNTc0NGIxZWFmMjk0YzllYTcxMjdkZGZhZjk5MDMyYSJ9.QcFXkaZxjle8TaJ3HZvsY-9h_ySQJs_jbKkULraqpJ_N9HC0z9uZAatm_nOCgshcoV9LMoSzVZPs4m_vP32jjtt47zf8mWu74TWEI11RLzrMLMIpC3xmsPM0qx6AlsYXUKRRNqxu94Hw1o0W0i-k7RmtuJwuFKw7iPy6FPBksiMMcGPHT_6rA1242G4sZD1rpzVdPvp3lBcfSWHXULlDKh19SNZeKgWt3zsc6Y_V-QjrBoTgZ1OZdsYAR-DSyKps7sD4h8ajXedffRkgr1NK4Job2fL5kxM42FQkFQWmk8xor-Wz2Up5Prie8nykWiHAsGmWa7ebKlV6ue6ExvBWSgD4Bp744vOLLxU3nqjg8a-mf7CsIh4556ivy2nGgZB9H7fEM8PuAmBPaRMjy4oeCYw5u_dQcGPdlGo61Xl9Vcxcj8G74_u7EP7tcJ6pfWZuWFlPrXrFIvyhYPGu1fsAmnga7jGvqJ36RTenPr1u5l5WzrUK7XMgIrFhBaTow2ZZaRkl0WeLcdI3LeeL5YH-4RpL97GlrBmqBQCdf9EGPxvuSETMHYLj5ws2Zb2Ny8gEfElV215hxtJkODiE5AQdf6UkHAbR0xaWZzas_4QfLpiVeyAEU0R1M800k8vOUnjp1abvjhOcYDBtW5Oim-tVp4bLT8IMgAEdBMlpBFM-bMQ
cat /etc/origin/master/htpasswd
htpasswd /etc/origin/master/htpasswd admin
cat /etc/origin/master/htpasswd
htpasswd /etc/origin/master/htpasswd superadmin
register the wildcard in DNS
https://access.redhat.com/solutions/971653
https://fedoramagazine.org/using-the-networkmanagers-dnsmasq-plugin/ -> DNS Masq
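A minimal sketch of that wildcard record with the NetworkManager dnsmasq plugin (file names arbitrary; the IP is the master/infra node-ip used elsewhere in these notes):
# /etc/NetworkManager/conf.d/00-use-dnsmasq.conf
[main]
dns=dnsmasq
# /etc/NetworkManager/dnsmasq.d/openshift.conf
address=/.apps.contoso.com/10.0.0.10
systemctl restart NetworkManager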
oc adm policy add-cluster-role-to-user cluster-admin superadmin
****NFS
63 pvremove /dev/vdc --force --force
64 lvremove vg-nfs
65 vgremove vg-nfs
66 pvs
67 fdisk /dev/vdc
68 pvs
69 pfs
70 pvs
71 fdisk /dev/vdc
72 pvs
73 pvcreate /dev/vdc3
74 pvs
75 vgcreate nfs-vg /dev/vdc3
76 lvcreate -L 78G -n nfs-lv nfs-vg
77 mkfs --help
78 man mkfs
79 mkfs -t ext4 /dev/mapper/nfs--vg-nfs--lv
80 ll
81 mount -t ext4 /dev/mapper/nfs--vg-nfs--lv /nfs
82 cd nfs
92 cat /etc/nfs
93 cat /etc/exports
94 service nfs start
95 systemctl status nfs
96 systemctl nfs status
97 service nfs status
98 touch
99 touch --help
100 cd /nfs
101 touch NFS.test
107 mkdir -p /nfs-test
108 mount 10.0.0.9:/opt:/nfs /nfs-test
115 vi nfs-pv.yaml
116 oc create -f nfs-pv.yaml
117 oc get pv
118 vi nfs-claim.yaml
119 oc create -f nfs-claim.yaml
120 oc get pv
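A sketch of what nfs-pv.yaml and nfs-claim.yaml above might contain (server and path from the NFS steps in this log; the PV name matches the nfs1 volume queried later in these notes; size and claim name are assumptions):
# nfs-pv.yaml (sketch)
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 10.0.0.9
    path: /nfs
# nfs-claim.yaml (sketch)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-claim1
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi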
[root@master1 ~]# cat /etc/origin/master/htpasswd
[root@master1 ~]# htpasswd /etc/origin/master/htpasswd dev1
[root@master1 ~]# oc adm policy add-cluster-role-to-user cluster-user dev1
journalctl -xe
?<command> to recall the last matching command
Tekton pipelines, an alternative to Jenkins
CodeReady Containers (crc)
[root@mdalacor mdalacor]# cd /usr/local/bin/
[root@mdalacor bin]# cp /home/mdalacor/Documents/Installs/crc-linux-1.2.0-amd64/crc .
[root@mdalacor bin]# chmod 777 crc
[mdalacor@mdalacor ~]$ oc login --token=3is
[mdalacor@mdalacor ~]$ oc scale --replicas=1 statefulset --all -n openshift-monitoring; oc scale --replicas=1 deployment --all -n openshift-monitoring
add your app to your path : export PATH=$PWD/bin:$PATH
vi .bashrc (add the PATH permanently; sketch below)
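One way to make it permanent (a sketch; writes the current bin directory into .bashrc):
echo "export PATH=\$PATH:$PWD/bin" >> ~/.bashrc
source ~/.bashrc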
Skipping the automatic Kiali installation.
To install Kiali, create a Kiali custom resource in the namespace [<any namespace you choose>].
An example Kiali CR with all settings documented can be found here:
https://raw.githubusercontent.com/kiali/kiali/v1.0.0/operator/deploy/kiali/kiali_cr.yaml
To install Kiali with all default settings, you can run:
/usr/bin/oc apply -n <any namespace you choose> -f https://raw.githubusercontent.com/kiali/kiali/v1.0.0/operator/deploy/kiali/kiali_cr.yaml
Do not forget to create a secret if you wish to use an auth strategy of 'login' (This is
the default setting when installing in Kubernetes but not OpenShift).
An example would be:
/usr/bin/oc create secret generic kiali -n istio-system --from-literal 'username=admin' --from-literal 'passphrase=admin'
Password for the tmf payslip portal: Ferrorama-xp200
oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth
Warning: Your changes may get lost whenever a master is restarted, unless you prevent reconciliation of this rolebinding using the following command: oc annotate clusterrolebinding.rbac self-provisioners 'rbac.authorization.kubernetes.io/autoupdate=false' --overwrite
cluster role "self-provisioner" removed: "system:authenticated:oauth"
cat /var/lib/origin/.docker/config.json
master-logs api api
oc get crd
oc get pv
oc get pvc --all-namespaces
oc get pv registry-volume -o yaml
ping glusterfs-registry-endpoints
oc get svc --all-namespaces | grep gluster
oc project default
oc get pods
oc get svc
oc get svc glusterfs-registry-endpoints -o yaml
oc get pv
oc get pv nfs1 -o yaml
htpasswd -n <user_name>
openshift_master_htpasswd_users={'myuser': '$apr1$wIwXkFLI$bAygtISk2eKGmqaJftB'}
htpasswd -c -b </path/to/htpasswd> <user_name> <password>
wget -r --no-parent https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/
ls
cd access.redhat.com
fgrep -Ri "oc new-app"
firefox ./documentation/en-us/openshift_container_platform/3.11/html-single/architecture/index.html
docker images
docker pull docker.io/php:7.4-apache
docker images
cd docker-test/
vi Dockerfile
FROM docker.io/php:7.4-apache
RUN echo "pagina web" > /var/www/html/pagina.html
docker build -t docker.io/marciodf/web1 .
docker images
docker run -it --rm docker.io/marciodf/web1 ( /bin/bash )
docker images
docker tag c7b44f38a6fc docker.io/marcio-user/marcio
docker images
docker push docker.io/marcio-user/marcio
docker run -it docker.io/centos /bin/bash
docker ps -a
docker commit 7c9b519d8981 marcionew
docker images
/var/lib/docker
docker run -it -p 8080:8080 -p 9990:9990 mywildfly (example)
sudo yum install -y nmap
sudo nmap -sn <Your-IP>
sudo nmap -O <Your-IP>
https://www.redhat.com/sysadmin/nmap-troubleshooting?sc_cid=701f2000000txokAAA&utm_source=bambu&utm_medium=social&utm_campaign=abm
while [ true ]; do curl http://fabrica-sp-sp-demo.apps.contoso.int/ ; sleep 1; echo; done
create another php app with xxx-prod-orig
oc get -o yaml --export all > project.yaml
for object in rolebindings serviceaccounts secrets imagestreamtags cm egressnetworkpolicies rolebindingrestrictions limitranges resourcequotas pvc templates cronjobs statefulsets hpa deployments replicasets poddisruptionbudget endpoints
do
oc get -o yaml --export $object > $object.yaml
done
oc export all --as-template=cluster-geral > cluster-geral.yaml
oc get -o yaml --export LimitRange > limite-quota
oc new-project <projectname>
$ oc create -f project.yaml
$ oc create -f secret.yaml
$ oc create -f serviceaccount.yaml
$ oc create -f pvc.yaml
$ oc create -f rolebindings.yaml
oc create user bob
htpasswd -b /etc/origin/master/htpasswd bob 12345
oc adm policy add-role-to-user edit bob -n fonte01
oc new-app --name=sitedf php:7.1~https://github.com/marciodf/workshop-ocp
oc start-build rodinha1 -n bicicleta
oc set triggers bc <name> --from-github
oc set triggers bc <name> --from-image='<image>'
oc set triggers bc <name> --from-bitbucket --remove
cat /etc/redhat-release
http://lagosuldf-lagosul.apps.contoso.int/
http://lagosuldf-lagosul.apps.contoso.int/wp-admin/
oc new-app -e OPENSHIFT_ENABLE_OAUTH=true jenkins-persistent
oc get pods
oc get pod jenkins-1-deploy -o yaml
oc get pvc jenkins
$ for object in rolebindings serviceaccounts secrets imagestreamtags cm egressnetworkpolicies rolebindingrestrictions limitranges resourcequotas pvc templates cronjobs statefulsets hpa deployments replicasets poddisruptionbudget endpoints
do
oc get -o yaml --export $object > $object.yaml
done
oc new-app https://github.com/wordpress/wordpress
$ oc get -o yaml --export all > project.yaml
$ oc api-resources --namespaced=true -o name
******************************************************
gcloud auth configure-docker
gcloud source repos list
git clone https://source.developers.google.com/p/qwiklabs-gcp-00-d21ed1987b30/r/valkyrie-app
docker build -t valkyrie-app .
docker images
docker tag 6739dd35668f gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.1
docker run -p 8080:8080 gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.1 &
docker push gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.1
kubectl config view (important!!! for authentication)
gcloud container clusters list
gcloud container clusters get-credentials valkyrie-dev --zone us-east1-d
kubectl apply -f deployment.yaml
kubectl scale deployment valkyrie-dev --replicas=3
gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.1
git init
git config credential.helper gcloud.sh
git remote add origin https://source.developers.google.com/p/qwiklabs-gcp-00-d21ed1987b30/r/valkyrie-app
git config --global user.name "Marcio"
git config --global user.email "student-00-05cbb20bfb0b@qwiklabs.net"
git add .
git commit -m "Initial commit"
git push origin master
git merge origin/kurt-dev
git commit -m "v0.0.2" (nao precisa)
docker build -t valkyrie-app .
docker images
docker tag 3a8d0a0be356 gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.2
docker push gcr.io/qwiklabs-gcp-00-d21ed1987b30/valkyrie-app:v0.0.2
kubectl edit deployment.v1.apps/valkyrie-dev
kubectl rollout status deployment.v1.apps/valkyrie-dev
printf $(kubectl get secret cd-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
git checkout -b new-feature //check why "new-feature", or leave it out to stay on the master branch
<make the changes to the files>
git add Jenkinsfile source/html.go
git commit -m "Version 2.0.0"
git push origin new-feature //here on the new-feature branch
****reference commands****
kubectl set image deployment/valkyrie-dev valkyrie-app=valkyrie-app:v0.0.2 --record (needs review)
kubectl create deployment hello-web --image=gcr.io/${PROJECT_ID}/hello-app:v1
kubectl set image deployment/hello-web hello-app=gcr.io/${PROJECT_ID}/hello-app:v2
kubectl set image deployment/valkyrie-dev valkyrie-app=gcr.io/qwiklabs-gcp-00-3906e103c23e/valkyrie-app:v0.0.2
kubectl run my-app --image gcr.io/my-bucket/my-app:1.0 --cluster my-new-cluster
***********