Connections are reused if the task has a loop.
lib/ansible/executor/task_executor.py -> self._connection = self._get_connection(variables=variables, templar=templar)
#!/bin/bash
# Open a vSphere REST session once, then reuse its session id across
# several requests to demonstrate session reuse against the vCenter API.

server='https://vcenter.test'
# NOTE: the password deliberately contains shell metacharacters; it must
# stay single-quoted so the shell does not interpret them.
password='z!Zq4Y&`/!E(o<br?:L>'

# Bug fix: the original used `curl -q`, which only tells curl to skip
# ~/.curlrc — it does NOT silence output. `-s` is the silent flag.
# URL expansions are quoted so a server value with special characters
# cannot be word-split or glob-expanded.
session_id=$(curl -s -X POST -k -u "administrator@vsphere.local:${password}" "${server}/rest/com/vmware/cis/session" | jq -r .value)

# Ten requests, all authenticated with the same session id.
for i in $(seq 10); do
    curl -ik -H 'Accept:application/json' -H "vmware-api-session-id:${session_id}" -X GET "${server}/rest/vcenter/cluster"
done
#!/bin/bash
# Collect repository metadata used by a cherry-pick helper script.
# NOTE(review): this is the beginning of a longer script — the variables
# gathered here are consumed by steps not visible in this snippet.
set -eux

# Commit SHAs passed on the command line, joined into one string.
commits=$*
current_dir=$(pwd)
# -d: directory name; -u: dry run — only generate the name, do not create it.
temp_dir=$(mktemp -du)
github_user=$(git config --get github.user)
origin_url=$(git config --get remote.origin.url)
# Project name = last path component of the origin URL, with any trailing
# slash stripped first.
project=$(git config --get remote.origin.url | sed 's,/$,,' | sed 's,.*/,,')
#!/bin/bash
# Delete every OpenStack volume left in the "available" state on the CI
# clouds/regions listed below (leftovers from aborted test runs).
set -eux

function clean_up_volumes() {
    # Print "ID Status" pairs, keep the IDs of volumes whose status is
    # "available", and delete them. `xargs -r` skips the delete entirely
    # when there is nothing to remove, instead of calling it with no args.
    openstack volume list -f value -c ID -c Status | awk '/available/ {print $1}' | xargs -r openstack volume delete
}

# `VAR=value func` sets the variables for the duration of the call and
# exports them to the commands the function runs, so each invocation
# targets a different cloud/region without polluting the outer shell.
OS_CLOUD=vexxhost OS_REGION_NAME=ca-ymq-1 clean_up_volumes
OS_CLOUD=vexxhost OS_REGION_NAME=sjc1 clean_up_volumes
OS_CLOUD=limestone OS_REGION_NAME=us-slc clean_up_volumes
OS_CLOUD=limestone OS_REGION_NAME=us-dfw-1 clean_up_volumes
#!/usr/bin/python
"""Command-line prelude for a GitHub pull-request helper.

Accepts a single positional argument, the numeric PR id.
NOTE(review): snippet is the start of a longer script — `re`,
`urllib.request` and `subprocess` are presumably used later; they are
kept so the full script keeps working.
"""
import re
import urllib.request
import argparse
import subprocess

parser = argparse.ArgumentParser()
# type=int: argparse rejects non-numeric PR ids with a usage error.
parser.add_argument("pr_id", type=int)
# NOTE(review): the lines below carry ` | |` residue from a table
# extraction, and the snippet is truncated — the body of the `else:`
# branch (and the rest of path_to_module_name) is not visible here.
import json | |
import re | |
from pprint import pprint | |
# Presumably converts an API path into a module name — TODO confirm
# against the full source; only the inner predicate is visible.
def path_to_module_name(path, value): | |
    # True for a non-empty path element that is not a templated
    # segment (i.e. contains no '{' placeholder).
    def is_element(i): | |
        if i and not '{' in i: | |
            return True | |
        else:
#!/bin/bash
# Demo setup: clone a collection repository and move a module to a new
# location on a dedicated branch (used to illustrate cherry-picking).
set -eux

git clone https://github.com/goneri/ansible_vmware_collection /tmp/cherry_pick_demo
cd /tmp/cherry_pick_demo

# Start a dev branch from origin/master; -B resets it if it already exists.
git checkout -B dev origin/master
git mv community/vmware/plugins/modules/vmware_dvswitch.py ansible/vmware/plugins/modules/
# NOTE(review): lines carry ` | |` residue from a table extraction, and
# the snippet is truncated — the elif/else/fi closing the status logic
# and the CSV row output are not visible here.
#!/bin/bash | |
# Emit a CSV header; one row per module presumably follows below.
echo "module,status" | |
# Every vmware*/vca*/vcenter* module name under lib/, deduplicated,
# with the .py suffix stripped.
for module in $(find lib -type f -name 'vmware*.py' -or -name 'vca*.py' -or -name 'vcenter*.py'|xargs -n1 basename -s .py|sort -u); do | |
    aliases_file="test/integration/targets/${module}/aliases" | |
    # Default when the module has no integration-test target at all.
    current_status="no_test" | |
    if [ -f "${aliases_file}" ]; then | |
        # "disabled" in the aliases file marks a skipped test target.
        if [ -n "$(egrep '(disabled)' ${aliases_file})" ]; then | |
            current_status="disabled" | |
        elif [ -n "$(grep zuul ${aliases_file})" ]; then |
#!/bin/bash
# Build a minimal Fedora 31 guest image for VMware testing:
# download the cloud image, prepare it for vSphere, convert it to VMDK,
# upload it to the datastore and import it on the ESXi host.

curl -o minimal.qcow2 http://mirror.csclub.uwaterloo.ca/fedora/linux/releases/31/Cloud/x86_64/images/Fedora-Cloud-Base-31-1.9.x86_64.qcow2

# Swap cloud-init for open-vm-tools (vSphere guest agent), set a known
# root password, and relabel for SELinux so the guest boots cleanly.
virt-sysprep --network --install open-vm-tools --uninstall cloud-init --root-password password:root --selinux-relabel -a minimal.qcow2

# compat6/lsilogic produce a VMDK ESXi accepts.
qemu-img convert -p -f qcow2 -O vmdk -o compat6=on -o adapter_type=lsilogic minimal.qcow2 minimal.vmdk
scp minimal.vmdk centos@datastore.test:/srv/share/vms/minimal.vmdk

# Recreate the VM directory, then thin-clone the uploaded disk into it
# (vmkfstools rewrites the VMDK in the ESXi-native layout).
ssh root@esxi1.test 'rm -r /vmfs/volumes/rw_datastore/vm_sample1'
ssh root@esxi1.test mkdir /vmfs/volumes/rw_datastore/vm_sample1
ssh root@esxi1.test vmkfstools -i /vmfs/volumes/rw_datastore/minimal.vmdk -d thin /vmfs/volumes/rw_datastore/vm_sample1/vm_sample1.vmdk
# Format the NVMe drive as XFS, label it, and mount it persistently at
# /srv/nvme (the fstab entry references the label, not the device node,
# so the mount survives device renumbering).
sudo mkfs.xfs -L nvme_cache -f /dev/nvme0n1
sudo mkdir -p /srv/nvme
# bash -c is required so the >> redirection runs as root, not as the
# calling user (plain `sudo echo ... >> /etc/fstab` would fail).
sudo bash -c 'echo /dev/disk/by-label/nvme_cache /srv/nvme xfs defaults 0 0 >> /etc/fstab'
sudo mount /srv/nvme
echo " | |
[[local|localrc]] | |
ADMIN_PASSWORD=secret | |
DATABASE_PASSWORD=$ADMIN_PASSWORD | |
RABBIT_PASSWORD=$ADMIN_PASSWORD |