These are primarily useful for developing the IBM Cloud datasource.
See also cloud-init's qa-scripts/bin/launch-softlayer, which can launch SoftLayer instances in either 'template' or 'os_code' mode.
#!/bin/sh
# Collect cloud-init / config-disk debug output from this machine.
# Requires a free-form description of the environment as arguments; it is
# echoed as a banner so collected logs can be told apart.
[ $# -eq 0 ] && { echo "no. give description of environment"; exit 1; }
echo "== $* =="
runcmd() { echo "=== $@ ==="; "$@" 2>&1; echo; } | |
showdisk() {
    # Find a config disk by filesystem label (OpenStack config-2 or IBM
    # METADATA), mount it read-only on /mnt, and dump every file on it.
    local dev="" label=""
    for label in "CONFIG-2" "config-2" "METADATA"; do
        dev=$(blkid -t LABEL="$label" -l -o device) && break
    done
    if [ -z "$dev" ]; then
        echo "=== showdisk: no config disk found ==="
        # BUGFIX: without this return we would fall through and try to
        # umount/mount an empty device string.
        return
    fi
    echo "=== showdisk: $dev ==="
    # Best-effort unmount in case it is already mounted elsewhere.
    umount "$dev" >/dev/null 2>&1
    mount -o ro "$dev" /mnt
    # NOTE: word-splits find output; config disks hold no whitespace paths.
    ( cd /mnt && for f in $(find . -type f); do
        echo "== ${f#./} =="; cat "$f"; echo; done )
    umount /mnt
}
getfile() {
    # Print a file's contents under a "=== path ===" banner, or "no exist"
    # when the path is not a regular file (or cat fails).
    printf '=== %s ===\n' "$1"
    { [ -f "$1" ] && cat "$1"; } || echo "no exist"
    printf '\n'
}
# Files to dump, one path per line; the command substitutions expand dynamic
# lists at assignment time.  The unquoted $files in the loop below relies on
# word-splitting, so paths must not contain whitespace.
files="
/run/cloud-init/result.json
/run/cloud-init/ds-identify.log
/etc/fstab
$(find /etc/cloud -type f | LANG=C sort)
$(find /var/lib/cloud/seed -type f 2>/dev/null)
/etc/network/interfaces
$(find /etc/network/interfaces.d -type f 2>/dev/null)
/var/log/cloud-init.log
/var/log/cloud-init-output.log
/boot/grub/grub.cfg
/boot/grub/menu.lst
"
# System state: installed cloud-init version, block devices, hypervisor info.
runcmd dpkg-query --show cloud-init
runcmd blkid -o full
runcmd sh -c 'cd /sys/hypervisor && grep -r . *'
showdisk
runcmd ls -l /etc/network/interfaces
for file in $files; do getfile "$file"; done
#!/bin/sh
# Collect cloud-init / config-disk / metadata-service debug output.
# Requires a free-form description of the environment as arguments; it is
# echoed as a banner so collected logs can be told apart.
[ $# -eq 0 ] && { echo "no. give description of environment"; exit 1; }
echo "== $* =="
runcmd() { echo "=== $@ ==="; "$@" 2>&1; echo; } | |
showdisk() {
    # Find a config disk by filesystem label (OpenStack config-2 or IBM
    # METADATA), mount it read-only on /mnt, and dump every file on it.
    local dev="" label=""
    for label in "CONFIG-2" "config-2" "METADATA"; do
        # blkid exits non-zero when no device matches; break on first hit.
        dev=$(blkid -t LABEL="$label" -l -o device) && break
    done
    if [ -z "$dev" ]; then
        echo "=== showdisk: no config disk found ==="
        return
    fi
    echo "=== showdisk: $dev ==="
    # Best-effort unmount in case the disk is already mounted elsewhere.
    umount "$dev" >/dev/null 2>&1
    mount -o ro "$dev" /mnt
    # Uses find_files (defined later in this script; resolved at call time).
    ( cd /mnt && for f in $(find_files . ); do
        echo == ${f#./} ==; cat "$f"; echo; done )
    umount /mnt
}
getfile() {
    # Dump a single file with a banner.  "no exist" is printed when the path
    # is not a regular file, and also if cat itself fails.
    echo "=== $1 ==="
    if [ -f "$1" ] && cat "$1"; then
        :
    else
        echo "no exist"
    fi
    echo
}
get_ec2md() {
    # Dump EC2-style instance metadata as JSON, once per API version given as
    # an argument (defaults to "latest").  Requires cloud-init's python
    # package on the instance.
    # NOTE(review): the metadata endpoint URL is whatever
    # cloudinit.ec2_utils.get_instance_metadata defaults to — confirm there.
    python3 -c '
import json;
import sys
from cloudinit import ec2_utils;
versions = sys.argv[1:]
if len(sys.argv) < 2:
    versions = ["latest"]
for ver in versions:
    print("====== %s ======" % ver)
    print(
        json.dumps(ec2_utils.get_instance_metadata(api_version=ver),
                   indent=1, sort_keys=True))
' "$@"
}
get_osmd() {
    # Dump OpenStack-style metadata from http://169.254.169.254/ as JSON,
    # once per version given as an argument (defaults to "latest").
    # Requires cloud-init's python package on the instance.
    # NOTE(review): pokes the private _versions attribute of MetadataReader
    # to force a single version — fragile against cloud-init refactors.
    python3 -c '
import sys
from cloudinit.sources.helpers import openstack as ostack
from cloudinit.util import json_dumps
versions = sys.argv[1:]
if len(sys.argv) < 2:
    versions = ["latest"]
for ver in versions:
    print("====== %s ======" % ver)
    md = ostack.MetadataReader("http://169.254.169.254/")
    md._versions = [ver]
    print(json_dumps(md.read_v2()))
' "$@"
}
find_files() {
    # List regular files and symlinks under the given paths, byte-wise
    # sorted.  Errors (e.g. a path that does not exist) are silenced.
    find "$@" \( -type f -o -type l \) 2>/dev/null | LANG=C sort
}
# Files to dump, one path per line; the command substitutions expand dynamic
# lists at assignment time.  The unquoted $files in the loop below relies on
# word-splitting, so paths must not contain whitespace.
files="
$(find_files /run/cloud-init)
/proc/cmdline
/etc/fstab
$(find_files /etc/cloud | grep -v /templates/)
$(find_files /var/lib/cloud/seed)
/etc/network/interfaces
$(find_files /etc/network/interfaces.d)
$(find_files /etc/netplan)
/var/log/cloud-init.log
/var/log/cloud-init-output.log
/boot/grub/grub.cfg
/boot/grub/menu.lst
"
# General system state.
runcmd hostname
runcmd systemd-detect-virt
runcmd dpkg-query --show cloud-init
runcmd blkid -o full
runcmd sh -c 'cd /sys/hypervisor && grep -r . *'
runcmd sh -c 'cd /sys/class/dmi/id && grep -r . *'
showdisk
# The DMI chassis asset tag identifies some clouds (e.g. Oracle).
fpath="/sys/class/dmi/id/chassis_asset_tag"
atag=""
[ -f "$fpath" ] && read atag < "$fpath"
echo "=== chassis_asset_tag=$atag ==="
case "$atag" in
    *OracleCloud.com*)
        # Scrape available version directories from the openstack/ index.
        # NOTE(review): 'egrep' is deprecated (use grep -E), and the ERE
        # "20??-" looks intended to match date-versioned dirs such as
        # "2013-10-17" — likely meant "20..-"; verify against real output.
        md_vers=$(curl --silent http://169.254.169.254/openstack/ |
            egrep "^(20??-|<a href)" | sed -e 's,<[^>]*>,,g' -e 's,/.*,,')
        runcmd get_osmd ${md_vers}
        ;;
    *)
        # EC2-style services list their API versions at the root URL.
        md_vers=$(curl --silent http://169.254.169.254/)
        if [ -n "$md_vers" ]; then
            runcmd get_ec2md ${md_vers}
        fi
        ;;
esac
for file in $files; do getfile "$file"; done
#!/bin/sh
# Pause SoftLayer provisioning on a remote host so its state can be
# inspected: ssh in repeatedly until /root/swinstall.sh is running, SIGSTOP
# it, leave a "resume-provision" helper behind, then open an interactive
# shell.  Usage: <script> user@host
host=$1
# NOTE(review): $shell is assigned but never used below — dead variable?
shell=${2:-"true"}
# Drop any stale known_hosts entry for the target (strip a user@ prefix).
ssh-keygen -f ~/.ssh/known_hosts -R "${host#*@}"
script=$(mktemp)
# $script is expanded now, at trap definition time — intentional, so the
# temp file is removed even after the variable changes.
trap "rm -f $script" EXIT
# Remote payload, fed to 'sh -s' below.  Quoted EOF: no local expansion.
cat > "$script" <<"EOF"
prog=/root/swinstall.sh
echo "running, looking for $prog"
while :; do
pid=$(ps axw | awk '$6 == prog { print $1 }' "prog=$prog")
pid=$(echo $pid)
[ -n "$pid" ] && kill -STOP $pid && echo "stopped pid=$pid" && break
echo -n .
sleep 1 || exit 1
done
echo "kill -CONT $pid" > resume-provision
chmod 755 resume-provision
echo "run $PWD/resume-provision to resume."
EOF
opts="-o StrictHostKeyChecking=no"
# Retry until the remote script succeeds; the host may still be booting.
while :; do
    ssh $opts -o ConnectTimeout=2 $host sh -s <"$script" && break
    sleep .5 || exit
done
# Finally, log in interactively.
ssh -o StrictHostKeyChecking=no $host
#!/bin/sh
# clean out CPC modifications so upstream cloud-init can run.
# Each original file is preserved once as *.dist before being modified.
# No 'set -e': individual failures (e.g. nothing mounted) are non-fatal,
# and 'set -x' makes them visible.
set -x
umount /var/lib/cloud/seed/config_drive
umount /var/lib/cloud/seed/nocloud-net
rm -Rf /var/lib/cloud
rm -Rf /var/log/cloud-init*
# Keep a pristine fstab, then drop CPC's METADATA/cloudconfig mount entries.
[ -f /etc/fstab.dist ] || cp /etc/fstab /etc/fstab.dist
sed -i -e '/METADATA/d' -e '/cloudconfig/d' /etc/fstab
# Make sure interfaces.d fragments are sourced so cloud-init's rendered
# network config takes effect.
eni="/etc/network/interfaces"
if ! grep -q 'source.*interfaces.d' "$eni"; then
    [ -f "${eni}.dist" ] || cp "$eni" "$eni.dist"
    echo "source /etc/network/interfaces.d/*.cfg" >> "$eni"
fi
# Disable CPC's pinned datasource_list so detection runs normally.
ncfg="/etc/cloud/cloud.cfg.d/99_networklayer_common.cfg"
if [ -f "$ncfg" ]; then
    [ -f "$ncfg.dist" ] || cp "$ncfg" "$ncfg.dist"
    sed -i 's,^datasource_list,#datasource_list,' "$ncfg"
fi
# Add IBMCloud to the dpkg-selected datasource list.
# BUGFIX: guard on the file's existence (as done for $ncfg above) instead of
# unconditionally running cp/grep/sed against a possibly missing file.
dcfg="/etc/cloud/cloud.cfg.d/90_dpkg.cfg"
if [ -f "$dcfg" ]; then
    [ -f "$dcfg.dist" ] || cp "$dcfg" "$dcfg.dist"
    if ! grep -q "IBMCloud" "$dcfg"; then
        sed -i 's/ None/ IBMCloud, None/' "$dcfg"
    fi
fi
#!/bin/bash
## I ran this script to launch different softlayer instances
# and then upgrade them and collect logs to attach to stable release
# update (SRU) bugs.
#
# Path of the user-data file generated below; passed to two of the four
# launches at the bottom.
ud="/tmp/my-user-data.txt"
set -e
go() {
    # Launch one softlayer instance and log everything to <hostname>.log.
    #   $1: image spec (os:<rel> or template:<rel>)
    #   $2: hostname (also names the log and results dir)
    #   $3: optional user-data file
    local image="$1" hn="$2" ud=${3}
    local results="$PWD/$hn-results"
    # BUGFIX: log and cmd were not 'local' and leaked into the caller's
    # scope; every other variable here is already local.
    local log="$hn.log"
    local cmd
    rm -Rf "$results"
    cmd=(
        launch-softlayer
        "--image=$image" "--hostname=$hn"
        "--script-dir=$PWD/scripts"
        "--script-artifacts-dir=$results"
        --proposed --clean
        # Only pass --user-data-file when a file was given.
        ${ud:+"--user-data-file=$ud"} )
    echo "$hn -> $log"
    # First log line is the command itself, prefixed with a literal "$".
    echo "$" "${cmd[@]}" > "$log"
    "${cmd[@]}" >> "$log" 2>&1
}
# User-data script run on the instance at boot: append the uptime and the
# ds-identify log to files in /run and /root.  Quoted EOF: written verbatim.
cat > "$ud" <<"EOF"
#!/bin/sh
read up idle < /proc/uptime
(
echo "=== $(date -R): up ${up}s ==="
cat /run/cloud-init/ds-identify.log
echo
echo
) | tee -a /run/user-data-out.txt /root/user-data-out.txt
EOF
mkdir -p scripts || { echo "failed make scripts"; exit 1; }
# "collect" script run on each instance by launch-softlayer; drops
# cloud-init's log tarball into the artifact dir.
cat > scripts/collect <<"EOF"
#!/bin/sh
[ -z "$SCRIPT_ARTIFACT_DIR" ] || cd "$SCRIPT_ARTIFACT_DIR"
cloud-init collect-logs
EOF
chmod 755 scripts/collect
[ -f "$ud" ] || { echo "no $ud"; exit 1; }
rel=xenial
# Hostname prefix: first letter of the release + "t-".
letter=${rel:0:1}
bh=${letter}t-
#bh=bt1-
# Launch four instances in parallel — os_code and template images, each
# with and without user-data — then wait for all to finish.
go os:$rel ${bh}oscode-with $ud &
go os:$rel ${bh}oscode-without &
go template:$rel ${bh}tmpl-with $ud &
go template:$rel ${bh}tmpl-without &
wait
#!/bin/bash | |
Usage() {
    # Print usage help for this config-drive image builder to stdout.
    # TYPO FIX: "hypevisor" -> "hypervisor".
    cat <<EOF
${0##*/}: output [type]
type is one of 'config' or 'metadata'.
Disk can be attached to a cloud image and booted.
In order to use ibmcloud, must be hypervisor xen, or patch out that
requirement.
EOF
}
error() { echo "$@" 1>&2; } | |
fail() { [ $# -eq 0 ] || error "$@"; exit 1; } | |
cleanup() {
    # Remove the working tempdir, but only when TEMP_D is set and really
    # points at a directory (safe to call from the EXIT trap at any stage).
    if [ -n "${TEMP_D}" ] && [ -d "${TEMP_D}" ]; then
        rm -Rf "${TEMP_D}"
    fi
}
userdata() {
    # Emit the instance user-data: a script that sets a known root password
    # so the booted image can be logged into for debugging.
    printf '%s\n' '#!/bin/sh' 'echo "root:passw0rd" | chpasswd'
}
pop_metadata() {
    # Populate directory $1 with an IBM 'metadata'-style tree:
    #   openstack/latest/meta_data.json
    #   openstack/content/interfaces
    #   openstack/latest/user_data   (from userdata())
    local d="$1" outd=""
    outd="$d/openstack/latest"
    mkdir -p "$outd"
    # BUGFIX: the content/ directory was never created, so the redirection
    # into $d/openstack/content/interfaces below failed.
    mkdir -p "$d/openstack/content"
    cat > "$outd/meta_data.json" <<EOF
{
"files": [],
"network_config": {
"content_path": "/content/interfaces"
},
"hostname": "ci-fond-ram",
"name": "ci-fond-ram",
"domain": "testing.ci.cloud-init.org",
"meta": {
"dsmode": "net"
},
"uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f",
"public_keys": {
"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"
}
}
EOF
    # Static ENI config referenced by content_path (quoted EOF: verbatim).
    cat > "$d/openstack/content/interfaces" <<"EOF"
# ignored by cloud-init
auto lo
iface lo inet loopback
auto eth0
allow-hotplug eth0
iface eth0 inet static
address 10.82.43.5
netmask 255.255.255.192
EOF
    userdata > "$outd/user_data"
}
pop_config() {
    # Populate directory $1 with an OpenStack 'config-2'-style tree:
    #   openstack/latest/meta_data.json
    #   openstack/latest/network_data.json
    #   openstack/latest/user_data   (from userdata())
    local d="$1" outd=""
    outd="$d/openstack/latest"
    mkdir -p "$outd"
    # NOTE: crypt_key / configuration_token values are truncated dummies,
    # present only so the IBM datasource sees the expected keys.
    cat > "$outd/meta_data.json" <<EOF
{
"hostname": "ci-grand-gannet.testing.ci.cloud-init.org",
"name": "ci-grand-gannet",
"uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785",
"random_seed": "bm90LXJhbmRvbQo=",
"crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/",
"configuration_token": "eyJhbGciOi..M3ZA",
"public_keys": {
"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"
}
}
EOF
    # "ethernet_mac_address": "52:54:00:12:34:01",
    # One physical NIC configured via DHCP.
    cat > "$outd/network_data.json" <<EOF
{
"links": [
{
"id": "iface_1",
"name": "ens4",
"mtu": null,
"type": "phy"
}
],
"networks": [
{
"id": "network_1",
"link": "iface_1",
"type": "ipv4_dhcp"
}
]
}
EOF
    userdata > "$outd/user_data"
}
copy_dir() {
    # Replicate the directory tree under $1 into the FAT image $2 using
    # mtools (mmd/mcopy).  Word-splits find output, so paths containing
    # whitespace are not supported.
    local bdir="$1" img="$2"
    # Create every directory first (skipping "." itself) ...
    for entry in $(cd "$bdir" && find . -type d); do
        if [ "$entry" = "." ]; then
            continue
        fi
        mmd -i "$img" ::"${entry#./}"
    done
    # ... then copy each regular file into place.
    # NOTE(review): '-oi' appears to be '-o' (overwrite) plus '-i <img>' —
    # verify against the mtools documentation.
    for entry in $(cd "$bdir" && find . -type f); do
        mcopy -oi "$img" "$bdir/${entry#./}" ::"${entry#./}"
    done
}
TEMP_D=""
out=${1}
# dtype selects which tree and filesystem label to build.
dtype="${2:-config}"
[ -n "$out" ] || { Usage; exit 1; }
case "$dtype" in
    config|metadata) :;;
    *) fail "bad type '$dtype'";;
esac
TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
    fail "failed to make tempdir"
trap cleanup EXIT
# Filesystem label (and, for config, volume serial) that cloud-init /
# ds-identify look for when detecting the disk.
case "$dtype" in
    config)
        label="config-2"
        uuid="9796932E";;
    metadata) label="METADATA"; uuid="";;
esac
case "$dtype" in
    config) pop_config "$TEMP_D";;
    metadata) pop_metadata "$TEMP_D";;
esac
set -e
# 128K is plenty for the tiny metadata tree; build the FAT image and copy
# the populated tree into it.
truncate --size 128K "$out"
mkfs.vfat -n "$label" ${uuid:+-i "${uuid}"} "$out"
copy_dir "$TEMP_D" "$out"
#!/bin/sh
#
# slwift: just a wrapper showing you how to set creds for the
# python swift client.
# This is functional as of 2018-03-21 on public softlayer.
#
# Get these values at:
# https://control.softlayer.com/storage/objectstorage/
# storage -> object storage -> <account-name> -> <location> -> View Credentials
#
# The ST_USER / ST_KEY values below are placeholders — substitute real
# credentials.  They are set as command-scoped environment on the exec'd
# swift process rather than exported into a wider scope.
ST_AUTH=https://dal05.objectstorage.softlayer.net/auth/v1.0/ \
ST_USER=SLOSXXXXXXX-X:SLXXXXXXX \
ST_KEY=abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789 \
exec swift "$@"