@philroche
Created September 28, 2018 11:39
Script for publishing, booting and gathering logs from a locally produced Azure image
#!/bin/bash
## Pass local .tar.gz path as first argument
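## Example invocation (the script file name and tarball name below are purely
## illustrative; pass whatever tarball your local image build produced,
## containing a single .vhd):
##   ./publish-boot-azure-image.sh ./livecd.ubuntu-azure.vhd.tar.gz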
# Update the following variables
# This will be created if it does not exist
ACCOUNT_NAME="philrocheubuntu"
# This will be created if it does not exist
CONTAINER="philrocheubuntuimages"
# Customize this; all VM resources in this group will be cleaned up. This will be created if it does not exist
RESOURCE_GROUP="philrocheubuntuimages"
LOCATION="ukwest"
SSH_PUB_KEY="${HOME}/.ssh/philroche.pub"
## There should be no need to change anything below this point
set -ex
LOCAL_TGZ=$1
UPLOAD_DATE=$(date +%Y%m%d)
LOCAL_FILENAME=$(basename ${LOCAL_TGZ} .tar.gz)
LOCAL_VHD_DIR=$(mktemp -d)
# tar's verbose output gives us the name of the extracted file (assumes the tarball contains a single VHD)
LOCAL_VHD=$(tar -xzvf "${LOCAL_TGZ}" --directory "${LOCAL_VHD_DIR}")
LOCAL_FILE="${LOCAL_VHD_DIR}/${LOCAL_VHD}"
BLOB="${UPLOAD_DATE}-${LOCAL_FILENAME}"
echo $LOCAL_FILE
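## As a worked example: for a hypothetical input ./bionic-azure.vhd.tar.gz
## uploaded on 2018-09-28, LOCAL_FILENAME is "bionic-azure.vhd", LOCAL_FILE is
## something like /tmp/tmp.AbCdEf/bionic-azure.vhd and BLOB is
## "20180928-bionic-azure.vhd".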
## Create your storage account (You can run this even if it has already been created)
STORAGE_ACCOUNT_EXISTS=$(az storage account check-name \
--name ${ACCOUNT_NAME} \
--output tsv \
--query "nameAvailable")
if [ "${STORAGE_ACCOUNT_EXISTS}" == "true" ] || [ "${STORAGE_ACCOUNT_EXISTS}" == "True" ]; then
az storage account create \
--name ${ACCOUNT_NAME} \
--resource-group ${RESOURCE_GROUP} \
--location ${LOCATION} \
--sku Standard_LRS
fi
ACCOUNT_KEY=$(az storage account keys list \
--account-name ${ACCOUNT_NAME} \
--resource-group ${RESOURCE_GROUP} \
--query "[?keyName == 'key1'].value" \
--output tsv)
STORAGE_URL="https://${ACCOUNT_NAME}.blob.core.windows.net/"
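## Alternative (sketch, not used below): the az storage commands also honour
## the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variables, so
## the credentials could be exported once instead of repeating
## --account-name/--account-key on every call:
##   export AZURE_STORAGE_ACCOUNT="${ACCOUNT_NAME}"
##   export AZURE_STORAGE_KEY="${ACCOUNT_KEY}"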
## Create your resource group (You can run this even if it has already been created)
RESOURCE_GROUP_EXISTS=$(az group exists \
--name ${RESOURCE_GROUP} \
--output tsv)
if [ "${RESOURCE_GROUP_EXISTS}" == "false" ] || [ "${RESOURCE_GROUP_EXISTS}" == "False" ]; then
az group create \
--location ${LOCATION} \
--name ${RESOURCE_GROUP} \
--output tsv
fi
## Create your container (You can run this even if it has already been created)
STORAGE_CONTAINER_EXISTS=$(az storage container exists \
--name ${CONTAINER} \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}" \
--output tsv)
if [ "${STORAGE_CONTAINER_EXISTS}" == "false" ] || [ "${STORAGE_CONTAINER_EXISTS}" == "False" ]; then
az storage container create \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}" \
--name ${CONTAINER} \
--public-access blob \
--fail-on-exist
fi
## Now upload
az storage blob upload \
--file ${LOCAL_FILE} \
--name ${BLOB} \
--type page \
--container-name ${CONTAINER} \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}"
rm --force ${LOCAL_FILE}
## Show the uploaded blob's properties as a sanity check
az storage blob show \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}" \
--container-name ${CONTAINER} \
--name ${BLOB}
## Get the blob URL
BLOB_URL=$(az storage blob url \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}" \
--container-name ${CONTAINER} \
--name ${BLOB} \
--output tsv)
## Generate the SAS url
EXPIRY_DATE=$(date -d "+15 days" +%Y-%-m-%-d)
BLOB_SAS=$(az storage blob generate-sas \
--account-name ${ACCOUNT_NAME} \
--account-key "${ACCOUNT_KEY}" \
--container-name ${CONTAINER} \
--name ${BLOB} \
--permissions r \
--expiry ${EXPIRY_DATE} \
--output tsv)
## Append the SAS token to the blob URL
BLOB_SAS_URL="${BLOB_URL}?${BLOB_SAS}"
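## The resulting URL has the shape (values purely illustrative):
##   https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>
## Anyone holding it can read the blob until EXPIRY_DATE, e.g. to download a copy:
##   curl --output /tmp/downloaded-image.vhd "${BLOB_SAS_URL}"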
## As a sanity check, create an image from the blob and then launch that image
## Create random image and VM names (requires pwgen to be installed)
RANDOM_IMAGE_NAME=$(pwgen 13 1)
RANDOM_IMAGE_NAME="${RANDOM_IMAGE_NAME}-image-imagedebug"
RANDOM_VM_NAME=$(pwgen 13 1)
RANDOM_VM_NAME="${RANDOM_VM_NAME}-vm-imagedebug"
## Create image from blob (We'll delete it later)
az image create \
--source ${BLOB_URL} \
--location ${LOCATION} \
--resource-group ${RESOURCE_GROUP} \
--os-type linux \
--name ${RANDOM_IMAGE_NAME}
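## Optionally confirm the image registered correctly before booting from it:
##   az image show --resource-group ${RESOURCE_GROUP} --name ${RANDOM_IMAGE_NAME} --output table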
## Create a VM from this image
az vm create \
--name ${RANDOM_VM_NAME} \
--resource-group ${RESOURCE_GROUP} \
--location ${LOCATION} \
--image ${RANDOM_IMAGE_NAME} \
--data-disk-sizes-gb 10 20 \
--size Standard_DS2_v2 \
--admin-username ubuntu \
--ssh-key-value @${SSH_PUB_KEY} \
--boot-diagnostics-storage ${STORAGE_URL}
IP_ADDRESS=$(az vm show \
--name ${RANDOM_VM_NAME} \
--resource-group ${RESOURCE_GROUP} \
--show-details \
--query "publicIps" \
--output tsv)
## Ensure boot diagnostics are enabled (also requested above at VM creation)
az vm boot-diagnostics enable \
--storage ${STORAGE_URL} \
--name ${RANDOM_VM_NAME} \
--resource-group ${RESOURCE_GROUP}
## Gather logs from instance
## Get the boot logs
BOOT_LOG="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-boot.log"
az vm boot-diagnostics get-boot-log \
--name ${RANDOM_VM_NAME} \
--resource-group ${RESOURCE_GROUP} \
> "${BOOT_LOG}"
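## The ssh/scp calls below assume the instance accepts connections as soon as
## `az vm create` returns. If that proves racy, a simple wait loop such as this
## (sketch, commented out) could be added first:
##   until ssh -oStrictHostKeyChecking=no -oConnectTimeout=5 ubuntu@${IP_ADDRESS} true; do sleep 5; done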
## Gather systemd boot information
SYSTEMD_ANALYZE_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-systemd-analyze.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} systemd-analyze > ${SYSTEMD_ANALYZE_OUTPUT}
UNAME_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-uname.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} uname --kernel-release > ${UNAME_OUTPUT}
DPKG_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-dpkg.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} dpkg --get-selections > ${DPKG_OUTPUT}
BUILD_INFO_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-build-info.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} cat /etc/cloud/build.info > ${BUILD_INFO_OUTPUT}
LSB_RELEASE_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-lsb-release.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} lsb_release --all > ${LSB_RELEASE_OUTPUT}
## Gather cloud-init information
CLOUD_INIT_ANALYZE_SHOW_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-cloud-init-analyze-show.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} cloud-init analyze show > ${CLOUD_INIT_ANALYZE_SHOW_OUTPUT}
CLOUD_INIT_ANALYZE_BLAME_OUTPUT="${LOCAL_VHD_DIR}/${RANDOM_VM_NAME}-cloud-init-analyze-blame.log"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} cloud-init analyze blame > ${CLOUD_INIT_ANALYZE_BLAME_OUTPUT}
CLOUD_INIT_COLLECT_LOGS="${RANDOM_VM_NAME}-cloud-init-collect-logs.tar.gz"
CLOUD_INIT_COLLECT_LOGS_REMOTE="/tmp/${CLOUD_INIT_COLLECT_LOGS}"
CLOUD_INIT_COLLECT_LOGS_LOCAL="${LOCAL_VHD_DIR}/${CLOUD_INIT_COLLECT_LOGS}"
ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS} sudo cloud-init collect-logs --tarfile "${CLOUD_INIT_COLLECT_LOGS_REMOTE}"
scp -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS}:${CLOUD_INIT_COLLECT_LOGS_REMOTE} ${CLOUD_INIT_COLLECT_LOGS_LOCAL}
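## The collected tarball can be inspected locally, e.g.:
##   tar -tzf "${CLOUD_INIT_COLLECT_LOGS_LOCAL}"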
## Log in to the VM and do your sanity checks
echo "You can now log in to the VM and do your sanity checks - ssh -oStrictHostKeyChecking=no ubuntu@${IP_ADDRESS}"
read -p "Would you like to delete this image and vm and all associated resources [y|n]? " -n 1 -r DELETE_RESOURCES
echo # (optional) move to a new line
if [[ ${DELETE_RESOURCES} =~ ^[Yy]$ ]]
then
## Delete the VM when finished
az vm delete \
--name ${RANDOM_VM_NAME} \
--resource-group ${RESOURCE_GROUP} \
--yes
## delete the image
az image delete \
--resource-group ${RESOURCE_GROUP} \
--name ${RANDOM_IMAGE_NAME}
## Delete all other resources created during vm creation
az resource delete --ids $(az resource list --resource-group ${RESOURCE_GROUP} --query "[].id" -o tsv | grep ${RANDOM_VM_NAME})
fi
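## Note: the uploaded blob is left in place so the SAS URL printed below
## remains usable. Once it is no longer needed it could be removed with:
##   az storage blob delete --account-name ${ACCOUNT_NAME} --account-key "${ACCOUNT_KEY}" --container-name ${CONTAINER} --name ${BLOB}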
echo "BOOT_LOG=${BOOT_LOG}"
echo "LOG DIRECTORY=${LOCAL_VHD_DIR}"
ls -al "${LOCAL_VHD_DIR}"
echo "BLOB_SAS_URL=${BLOB_SAS_URL}"