# Godaddy specific cloud-init module that does all things
# related to godaddy first boot activities.
#
# It is based on the concept of a single module (this one) that
# itself activates a set of submodules/actions that can be configured
# via vendordata and godaddy additions to metadata.
#
# It can be configured by the following.
#
# Vendordata format (only the 'godaddy' key here is used in this module):
# {
# ...
# "godaddy": {
# "actions": <list of actions> (defaults to `default_actions`),
# "disable_spacewalk": bool (defaults to false to disable spacewalk action),
# "disable_pbis": bool (defaults to false to disable pbis),
# "spacewalk": {
# "proxy": <proxy>,
# "activation_key": <activation_key>,
# },
# "init": {
# "remove_packages": <list of pkg_names> (defaults to `init_pkg_removals_default`,
# "remove_files": <list of paths> (defaults to `init_file_removals_default`),
# },
# "cleanup": {
# "remove_files": <list of files to remove> (defaults to `cleanup_files_default`),
# "disable_services": <list of services to remove> (defaults to `cleanup_services_default`),
# "remove_files_post_disable_services": <list of files to remove after services disabled>
# (defaults to `cleanup_files_post_services_disable`),
# },
# "sysctl_params": <dict of params to write into `cloud_sysctl_pth`>,
# "ntpd_params": <additional dict of template params to write into `ntpd_conf_path`>
# (default set comes from `ntpd_params_default`),
# "ds_agent_max_rhel_os": <version tuple that identifies on which rhel
# versions the ds_agent service should be setup>
# (defaults to (6, 9999, 9999) so that ds_agent
# only gets setup on less 6.x),
# "secrets_key": <string that identifies where the key file is>
# (defaults to `key_path`)
# "secrets": a base64 encoded <string> that is encoded using a shared
# key (found in the image at `key_path` and padded with
# `key_padding`) and AES that is decrypted into a json
# dictionary with the following keys and values (only
# the 'pbis' key is used in this module):
# {
# ...
# "pbis": {
# "domain": <string>,
# "bind_user": <string>,
# "bind_pass": <string>,
# "ou_path": <string>,
# "ou": <string>,
# },
# ...
# }
# },
# ...
# }
#
# The following keys are used also from metadata/meta (godaddy specific
# dict added into metadata):
#
# sudo_users: <string> (defaults to login_users)
# sudo_groups: <string> (defaults to login_groups)
# login_users: <string>
# login_groups: <string>
# disable_spacewalk: <bool> (defaults to false)
# disable_pbis: <bool> (defaults to false)
# service_account: <string> (defaults to created_by)
# created_by: <string>
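#
# For example, a minimal vendordata blob matching the schema above might
# look like the following (all values here are illustrative placeholders,
# not real endpoints or keys):
#
# {
#   "godaddy": {
#     "actions": ["init", "sysctl", "ntpd", "cleanup"],
#     "disable_pbis": true,
#     "spacewalk": {
#       "proxy": "spacewalk-proxy.example.com",
#       "activation_key": "1-example-activation-key"
#     },
#     "sysctl_params": {"net.ipv4.ip_forward": "0"},
#     "ntpd_params": {"servers": ["ntp0.example.com", "ntp1.example.com"]}
#   }
# }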
import base64
import copy
import errno
import os
import platform
import re
import time
from Crypto.Cipher import AES
from cloudinit import helpers
from cloudinit import templater
from cloudinit import util
from cloudinit.settings import (PER_ONCE, PER_ALWAYS, PER_INSTANCE)
import six
# This is used so that cloud-init will warn users if this is ever run
# on a non-rhel system (which this module has not been tested on...)
distros = ['rhel']
# Default actions that always get triggered (in this order).
default_actions = tuple([
'init',
'sysctl',
'service_account',
'sudoers',
'ntpd',
'cron',
'ds_agent',
'cleanup',
])
# NOTE(harlowja): we set this as per always so that we can do our own
# state tracking instead of relying on cloud-init and its macro level
# state tracking; this code correctly handles having already been run.
frequency = PER_ALWAYS
# Key file that this program will look for in an image to be able
# to load a needed AES key to be able to decode a b64-encoded blob that will
# contain secrets needed for godaddy internal systems to be setup
# correctly.
key_path = ""
key_padding = "="
# This is a miniature version of what puppet does, and is good enough
# for what we need to do; see
# https://github.com/puppetlabs/puppetlabs-ntp#module-description
ntpd_template = """## template: jinja
{{ header }}
#
{% if tinker_vm %}
#
# Keep ntpd from panicking in the event of a large clock skew
# when a VM guest is suspended and resumed.
tinker panic 0
{% endif %}
{% if disable_monitor %}
disable monitor
{% endif %}
{% for restrict_server in restrict %}
restrict {{ restrict_server }}
{% endfor %}
{% for server in servers %}
server {{ server }}
{% endfor %}
{% if drift_file %}
# Driftfile.
driftfile {{ drift_file }}
{% endif %}
"""
ntpd_params_default = {
'drift_file': '/var/lib/ntp/drift',
'restrict': ['127.0.0.1'],
'disable_monitor': True,
'tinker_vm': True,
'header': util.make_header(base="Managed"),
'servers': [],
}
ntpd_conf_path = '/etc/ntp.conf'
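# For illustration, rendering `ntpd_template` with `ntpd_params_default`
# plus `{'servers': ['ntp0.example.com']}` yields roughly the following
# (the exact header line comes from util.make_header, and blank lines may
# vary with jinja block trimming):
#
#   # Managed by cloud-init ...
#   #
#   # Keep ntpd from panicking in the event of a large clock skew
#   # when a VM guest is suspended and resumed.
#   tinker panic 0
#   disable monitor
#   restrict 127.0.0.1
#   server ntp0.example.com
#   # Driftfile.
#   driftfile /var/lib/ntp/drift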
# Various executables and paths and packages used by this module...
ds_agent_register_path = '/opt/ds_agent/register_ds_agent.sh'
pbis_get_status_exec = '/opt/pbis/bin/get-status'
pbis_get_dc_name_exec = '/opt/pbis/bin/get-dc-name'
cloud_users_sudoers_path = '/etc/sudoers.d/openstack-users'
# NOTE(harlowja): from the sudoers man page, sudo will read each file
# in /etc/sudoers.d, skipping file names that end in '~' or contain a '.'
# character to avoid causing problems with package manager or
# editor temporary/backup files.
cloud_users_sudoers_tmp_path = '/etc/sudoers.d/openstack-users~'
cloud_users_script_path = '/usr/sbin/update-sudoers'
cloud_users_cron_contents = [
('*/10 * * * * root let "SLEEP = $RANDOM / 55" &&'
" /bin/sleep $SLEEP && %s &>/dev/null" % cloud_users_script_path),
]
cloud_users_cron_contents = "\n".join(cloud_users_cron_contents)
cloud_users_cron_path = "/etc/cron.d/update-sudoers"
pbis_allow_login_path = "/usr/sbin/pbis-allow-login"
pbis_allow_login_cron_path = "/etc/cron.d/pbis-allow-login"
pbis_cron_contents = [
    ('*/10 * * * * root let "SLEEP = $RANDOM / 55" &&'
     " /bin/sleep $SLEEP && %s &>/dev/null" % pbis_allow_login_path),
]
pbis_allow_login_cron_contents = "\n".join(pbis_cron_contents)
cloud_sysctl_pth = '/etc/sysctl.d/100-openstack-sysctl.conf'
pbis_packages = tuple([
'pbis-enterprise',
'pbis-enterprise-upgrade',
'gd-pbis-utils',
])
pbis_upgrade_packages = tuple([
'gd-pbis-utils',
])
pbis_ad_tool_exec = '/opt/pbis/bin/adtool'
pbis_config_exec = '/opt/pbis/bin/config'
pbis_allow_login_exec = '/usr/sbin/gd-pbis-allow-login'
pbis_sysconfig_path = '/etc/sysconfig/gd-pbis-utils'
pbis_domain_join_exec = '/opt/pbis/bin/domainjoin-cli'
pbis_add_to_group_exec = '/usr/sbin/gd-pbis-add-to-group'
# At a point in the future we can just remove these since they are the
# legacy files that did the same thing via a mix of puppet, rpm,
# and git...
init_pkg_removals_default = tuple([
'els-vmprovisioning',
])
init_file_removals_default = tuple([
# We install a better version of all of these.
"/etc/cron.d/update-sudoers",
"/usr/sbin/gd-update-sudoers",
'/etc/cron.d/gd-pbis-allow-login',
'/usr/sbin/gd-pbis-allow-login',
])
cleanup_files_default = tuple([
'/root/anaconda-ks.cfg',
'/var/log/install/anaconda-ks.cfg',
])
cleanup_services_default = tuple([
'iptables',
])
cleanup_files_post_services_disable = tuple([
'/etc/sysconfig/iptables',
])
cron_config_what = tuple([
(("gd-update-sudoers.tmpl", cloud_users_script_path),
(cloud_users_cron_contents, cloud_users_cron_path)),
((None, None), # This is already installed by `gd-pbis-utils`
# and we should likely in the future have this package
# set its own cron job up...
(pbis_allow_login_cron_contents, pbis_allow_login_cron_path)),
])
def safe_makedirs(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def fetch_gd_meta(cloud):
try:
md_cfg = cloud.datasource.metadata['meta']
if not md_cfg:
md_cfg = {}
return md_cfg
except (KeyError, TypeError):
return {}
def split_domain(domain_ug):
try:
domain, other = tuple(domain_ug.split("\\", 1))
except ValueError:
return (domain_ug, None)
else:
return (domain.rstrip("\\"), other.lstrip("\\"))
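# For example:
#
#   split_domain("CORP\\jdoe")  =>  ("CORP", "jdoe")
#   split_domain("jdoe")        =>  ("jdoe", None)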
def fetch_joined_domain(ad_domain):
stdout, _stderr = util.subp([pbis_get_dc_name_exec, ad_domain])
for line in stdout.splitlines():
line = line.strip()
if line.startswith('pszNetBIOSDomainName'):
_prefix, dn = line.split("=", 1)
return dn.strip()
return None
def fetch_domain():
stdout, _stderr = util.subp([pbis_get_status_exec])
for line in stdout.splitlines():
line = line.strip()
if line.startswith("Domain:"):
return line[len("Domain:"):].strip()
return None
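# For reference, the lines these two parsers look for resemble the
# following (illustrative, not verbatim pbis output):
#
#   get-status:    "Domain:  CORP.EXAMPLE.COM"
#   get-dc-name:   "pszNetBIOSDomainName = CORP"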
def is_rhel_like(version_check_func=None):
"""Checks if the running host is rhel-like."""
(dist_name, dist_version, _id) = platform.linux_distribution()
if not dist_name:
return False
dist_name = dist_name.lower()
matched_dist = False
for d in ['centos', 'rhel', 'fedora']:
if d in dist_name:
matched_dist = True
break
if not matched_dist:
return False
if version_check_func is not None:
dist_version = dist_version.split(".")
try:
dist_version = tuple([int(v) for v in dist_version])
except ValueError:
return False
else:
return version_check_func(dist_version)
return True
def needs_secrets(func):
"""Marks function as using/needing secrets.
If no actions need secrets in the first place, then we can avoid
calling the secrets function from the get go; so where applicable we
use this function to denote the function will need & use secrets.
"""
func.needs_secrets = True
return func
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def service_exists(name):
# NOTE(harlowja): we may have to rework this if this ever wants
# to support rhel7 or centos7 or other systemd based systems...
try:
util.subp(['chkconfig', '--list', name])
return True
except util.ProcessExecutionError as e:
# Seems to produce 1 when the service just doesn't exist
# in the first place (at least on centos6).
if e.exit_code != 1:
raise
return False
def start_service(name, log,
ensure_always_on=False, max_wait=5,
wait_between_checks=0.5):
try:
util.subp(['chkconfig', '--list', name])
except util.ProcessExecutionError as e:
# Seems to produce 1 when the service just doesn't exist
# in the first place (at least on centos6).
if e.exit_code != 1:
raise
    if ensure_always_on:
        util.subp(['chkconfig', name, 'on'])
    # Check if it's already running.
stdout, _stderr = util.subp(['service', name, 'status'])
if 'running' in stdout:
return
stdout, _stderr = util.subp(['service', name, 'start'])
start = time.time()
while True:
stdout, _stderr = util.subp(['service', name, 'status'])
if 'running' in stdout:
break
elapsed = time.time() - start
if elapsed >= max_wait:
raise RuntimeError("Unable to start service '%s' waited"
" %s seconds for it to become active"
% (name, elapsed))
log.debug("Waiting %s seconds for '%s' to become active",
wait_between_checks, name)
time.sleep(wait_between_checks)
def read_secrets(key, b64_blob):
cipher = AES.new(key)
binary_blob = base64.b64decode(b64_blob)
yaml_blob = cipher.decrypt(binary_blob)
# Required since the encryption routine aligns the input blob
# to the required block size, and pads if needed to fit to that.
yaml_blob = yaml_blob.rstrip(key_padding)
return util.load_yaml(yaml_blob)
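# A minimal sketch of the inverse routine (how a producer could create the
# 'secrets' blob); this helper is illustrative only and is not used at
# runtime. It assumes the same defaults `read_secrets` relies on: the
# cipher mode `AES.new(key)` defaults to, `key_padding` for block
# alignment, and a json (and therefore yaml-loadable) payload.
def encode_secrets(key, secrets_dict):
    import json
    blob = json.dumps(secrets_dict)
    # AES requires the plaintext length be a multiple of the 16 byte
    # block size; pad with the shared padding character.
    pad_len = (16 - len(blob) % 16) % 16
    blob += key_padding * pad_len
    cipher = AES.new(key)
    return base64.b64encode(cipher.encrypt(blob))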
def do_cleanup(mod_name, name, cfg, cloud, log, secrets):
# This will look for a 'cleanup' key in the provided configuration
# and expects a dict under that key that will have the following
# keys to be acted upon:
#
    # remove_files: paths of files to remove
    # disable_services: service names to disable
    # remove_files_post_disable_services: files to remove after the
    #   services have been disabled
cfg = cfg.get('cleanup', {})
for path in cfg.get('remove_files', cleanup_files_default):
util.del_file(path)
    for svc_name in cfg.get('disable_services', cleanup_services_default):
        try:
            util.subp(['chkconfig', '--list', svc_name])
        except util.ProcessExecutionError as e:
            if e.exit_code == 1:
                # Not an actual service...
                continue
            else:
                raise
        else:
            util.subp(['chkconfig', svc_name, 'off'])
            util.subp(['service', svc_name, 'stop'])
for path in cfg.get('remove_files_post_disable_services',
cleanup_files_post_services_disable):
util.del_file(path)
def do_ds_agent(mod_name, name, cfg, cloud, log, secrets):
# Ensure ds_agent is running and registered
# (This requires that gd-ds_agent and ds_agent are installed in the image.)
# This will ensure on rhel/centos/fedora 6.x or less that the ds_agent
# program is enabled at the current run level and ensure it is
# started and that the register agent program has been called (if
# it exists).
ds_agent_max_rhel_os = tuple(
cfg.get('ds_agent_max_rhel_os', (6, 9999, 9999)))
if not is_rhel_like(
lambda dist_version: dist_version <= ds_agent_max_rhel_os):
log.debug(("Skipping module named %s.%s, not running"
" on a host operating system where ds_agent"
" is required"), mod_name, name)
return
if service_exists('ds_agent'):
start_service('ds_agent', log, ensure_always_on=True)
if is_exe(ds_agent_register_path):
util.subp([ds_agent_register_path], capture=False)
def do_ntpd(mod_name, name, cfg, cloud, log, secrets):
# TODO(harlowja): this seems like it should just be moved to a generic
# cloud-init module that everyone can share, so do that at some point...
ntpd_params = copy.deepcopy(ntpd_params_default)
ntpd_params.update(cfg.get('ntpd_params', {}))
ntpd_buf = templater.render_string(ntpd_template, ntpd_params)
util.write_file(ntpd_conf_path, ntpd_buf)
def do_sudoers(mod_name, name, cfg, cloud, log, secrets,
gd_meta=None, distro=None):
"""
Updates /etc/sudoers.d/openstack-users file to include
users/groups who are allowed to sudo in this VM. Comes from the
sudo_users and sudo_groups metadata fields. If those are not present, we
revert back to whatever is listed in login_users and login_groups.
"""
# TODO(harlowja): change this so that at a future point that it
# actually uses the cloud-init functionality to manage users directly
# vs doing it out of bounds...
def split_and_clean(data):
if not data:
return []
data_pieces = data.split(",")
return [p.strip() for p in data_pieces if p.strip()]
# Now add all the required users that we want to add
if not gd_meta:
# If not provided (which will typically happen if called
# via the cron job that polls the metadata service), then assume
# it comes in via the cloud datasource and its metadata instead.
gd_meta = fetch_gd_meta(cloud)
sudo_users = gd_meta.get('sudo_users')
sudo_groups = gd_meta.get('sudo_groups')
if not sudo_users:
sudo_users = gd_meta.get('login_users')
if not sudo_groups:
sudo_groups = gd_meta.get('login_groups')
sudo_users = split_and_clean(sudo_users)
sudo_groups = split_and_clean(sudo_groups)
ad_domain = fetch_domain()
if not ad_domain:
log.warn("No active directory domain found!")
else:
joined_domain = fetch_joined_domain(ad_domain)
if not joined_domain:
log.warn("No joined active directory domain found!")
else:
# This makes life a little easier for the cron job so
# that it doesn't need to create a dummy cloud object when
# it really only cares about the distro object.
if not distro:
distro = cloud.distro
# Remove the old file (if it exists) since we are going to
# now regenerate it (and setup a blank one to be used for the
# rest of the additions/changes).
util.del_file(cloud_users_sudoers_tmp_path)
util.write_file(
cloud_users_sudoers_tmp_path,
"\n".join([util.make_header(), ""]),
mode=0o0440)
for (sudo_what, sudo_items) in [('user', sudo_users),
('group', sudo_groups)]:
for sudo_item in sudo_items:
domain, real_item = split_domain(sudo_item)
if not real_item:
raise RuntimeError(
"Not adding sudo %s '%s' due to"
" malformed input" % (sudo_what, sudo_item))
else:
if domain and domain != joined_domain:
distro.write_sudo_rules(
"%s\\\\%s" % (domain, real_item),
['ALL = ALL'],
sudo_file=cloud_users_sudoers_tmp_path)
else:
distro.write_sudo_rules(
real_item, ['ALL = ALL'],
sudo_file=cloud_users_sudoers_tmp_path)
# Verify the file is good before doing anything else.
try:
util.subp(
['visudo', '-c', '-f', cloud_users_sudoers_tmp_path],
capture=False)
except util.ProcessExecutionError as e:
raise RuntimeError(
"Could not validate temporary sudoers file at"
" %s, visudo exited with status code %s,"
" not modifying %s" % (cloud_users_sudoers_tmp_path,
e.exit_code,
cloud_users_sudoers_path))
else:
log.debug("Renaming %s to its final home at %s",
cloud_users_sudoers_tmp_path,
cloud_users_sudoers_path)
os.rename(cloud_users_sudoers_tmp_path,
cloud_users_sudoers_path)
def do_service_account(mod_name, name, cfg, cloud, log, secrets):
gd_meta = fetch_gd_meta(cloud)
login_user = gd_meta.get("service_account", gd_meta.get('created_by'))
if not login_user:
log.debug(("Skipping module named %s.%s, no"
" 'service_account' or 'created_by' key(s) found"
" in metadata"), mod_name, name)
else:
cloud.distro.create_user(login_user, lock_passwd=False, sudo=True)
def do_sysctl(mod_name, name, cfg, cloud, log, secrets):
# TODO(harlowja): this seems like it should just be moved to a generic
# cloud-init module that everyone can share, so do that at some point...
#
# It may also need to be made more interface aware?
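    #
    # For example, sysctl_params of {'net.ipv4.ip_forward': '0'} results
    # in `cloud_sysctl_pth` containing the generated header comment
    # followed by the single line:
    #
    #   net.ipv4.ip_forward = 0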
sysctl_params = cfg.get('sysctl_params', {})
if sysctl_params:
safe_makedirs('/etc/sysctl.d')
lines = [
util.make_header(base="Created"),
"",
]
for (k, v) in six.iteritems(sysctl_params):
lines.append("%s = %s" % (k, v))
util.write_file(cloud_sysctl_pth, "\n".join(lines))
def do_spacewalk_register(mod_name, name, cfg, cloud, log, secrets):
# TODO(harlowja): turn this into a real cloud-init native module...
cfg = cfg.get("spacewalk", {})
spacewalk_proxy = cfg.get('proxy')
spacewalk_activation_key = cfg.get('activation_key')
if spacewalk_proxy and spacewalk_activation_key:
# See: https://github.com/tmclaugh/puppet-spacewalk/blob/master/manifests/client.pp
distro = cloud.distro
distro.install_packages(['rhn-setup'])
        # Check to see if already registered and don't bother if so; this
        # is apparently done by trying to sync, and if that fails then we
        # assume we aren't registered; which is sorta hacky...
already_registered = False
try:
util.subp(['rhn-profile-sync', '--verbose'], capture=False)
already_registered = True
except util.ProcessExecutionError as e:
if e.exit_code != 1:
raise
if not already_registered:
util.subp([
'rhnreg_ks',
'--serverUrl=https://%s/XMLRPC' % spacewalk_proxy,
'--sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT',
'--activationkey=%s' % spacewalk_activation_key,
'--profilename=%s' % cloud.datasource.get_hostname(fqdn=True),
], capture=False)
else:
log.debug(("Skipping module named %s.%s,"
" 'spacewalk_proxy' or 'spacewalk_activation_key' key(s)"
" were not found in configured data"), mod_name, name)
def do_init(mod_name, name, cfg, cloud, log, secrets):
cfg = cfg.get("init", {})
for name in cfg.get("remove_packages", init_pkg_removals_default):
stdout, _stderr = util.subp(['rpm', '-qa', name])
full_rpm = stdout.strip()
if not full_rpm:
continue
log.info("Removing rpm package %s", full_rpm)
util.subp(['yum', '-y', 'remove', full_rpm], capture=False)
for path in cfg.get("remove_files", init_file_removals_default):
util.del_file(path)
def get_pbis_status(default='unknown'):
stdout, _stderr = util.subp([pbis_get_status_exec])
status = ''
for line in stdout.splitlines():
m = re.match(r"^\s*Status:\s+(.*)$", line)
if m and m.group(1):
status = m.group(1)
break
if status:
return status.lower()
return default
def is_in_cn(cloud, bind_user, bind_pass, cn):
# See if already joined and don't even bother if so...
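    # The `search-computer` output parsed below typically resembles the
    # following (illustrative, not verbatim adtool output): the matching
    # computer's DN on the first line and a trailing summary line, e.g.:
    #
    #   CN=myhost,OU=Cloud,DC=corp,DC=example,DC=com
    #   Total computers: 1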
stdout, _stderr = util.subp([
pbis_ad_tool_exec, '-a', 'search-computer',
'--name', cloud.datasource.get_hostname(),
'-n', bind_user, '-x', bind_pass,
])
total_computers = 0
lookup_lines = stdout.splitlines()
for line in lookup_lines:
m = re.match(r"^Total computers:\s*(.*)\s*", line)
if m and m.group(1):
try:
total_computers = int(m.group(1))
except ValueError:
pass
try:
dn = lookup_lines[0].strip()
except IndexError:
dn = ''
if dn and total_computers:
stdout, _stderr = util.subp([
pbis_ad_tool_exec, '-a', 'lookup-object',
'-n', bind_user, '-x', bind_pass,
'--dn', dn,
])
search_cn = 'CN=%s' % cn
for line in stdout.splitlines():
if line.startswith('memberOf:') and search_cn in line:
return True
return False
def do_cron(mod_name, name, cfg, cloud, log, secrets):
for ((tpl_name, tpl_target_path),
(cron_data, cron_path)) in cron_config_what:
if tpl_name and tpl_target_path:
util.write_file(
tpl_target_path,
util.load_file(cloud.get_template_filename(tpl_name)),
mode=0o755)
if cron_path and cron_data:
util.write_file(cron_path, cron_data)
@needs_secrets
def do_pbis_auth(mod_name, name, cfg, cloud, log, secrets):
pbis_secrets = secrets.get('pbis', {})
try:
domain = pbis_secrets['domain']
bind_user = pbis_secrets['bind_user']
bind_pass = pbis_secrets['bind_pass']
ou_path = pbis_secrets['ou_path']
ou = pbis_secrets['ou']
except KeyError:
log.warn(("Skipping module named %s.%s, error"
" in reading required pbis keys"), mod_name, name)
return
    # Ensure these are always present (otherwise this isn't going to
    # work out too well)...
cloud.distro.install_packages(pbis_packages)
if pbis_upgrade_packages:
cmd = ['yum', '-y', 'upgrade']
cmd.extend(pbis_upgrade_packages)
util.subp(cmd, capture=False)
# Ensure the service is ready so that we can further do ops.
start_service('lwsmd', log)
util.subp(
[pbis_config_exec, 'AssumeDefaultDomain', 'True'], capture=False)
if get_pbis_status() == 'unknown':
cmd = [
pbis_domain_join_exec, 'join', '--disable',
'--notimesync', '-ou', "%s/%s" % (ou_path, ou),
domain, bind_user, bind_pass,
]
util.subp(cmd, capture=False)
    # Ensure this is turned on for openstack vms so that further usage
    # of gd-pbis-utils correctly works out.
try:
gd_pbis_config = util.load_file(pbis_sysconfig_path)
except IOError as e:
if e.errno != errno.ENOENT:
raise
else:
gd_pbis_config_lines = gd_pbis_config.splitlines()
gd_pbis_config_changed = False
seen = False
for i, line in enumerate(gd_pbis_config_lines):
if re.match(r"^([#])?USE_OPENSTACK_METADATA\s*=\s*", line):
                gd_pbis_config_lines[i] = 'USE_OPENSTACK_METADATA=y'
gd_pbis_config_changed = True
seen = True
if not seen:
gd_pbis_config_lines.append('USE_OPENSTACK_METADATA=y')
gd_pbis_config_changed = True
if gd_pbis_config_changed:
util.write_file(pbis_sysconfig_path,
"\n".join(gd_pbis_config_lines))
# Run `gd-pbis-allow-login` once so everything will be ready to
# go when the VM completes provisioning
util.subp([pbis_allow_login_exec], capture=False)
# Join the PBIS_clients group (if not already in).
if not is_in_cn(cloud, bind_user, bind_pass, 'PBIS_clients'):
util.subp([
pbis_add_to_group_exec,
'-b', '-u', bind_user, '-p', bind_pass,
'-g', 'PBIS_clients'], capture=False)
# Map action name to (function, frequency).
actions_to_func_freq = {
'init': (do_init, PER_ONCE),
'cron': (do_cron, PER_ONCE),
'pbis_auth': (do_pbis_auth, PER_INSTANCE),
'spacewalk_register': (do_spacewalk_register, PER_INSTANCE),
'sysctl': (do_sysctl, PER_INSTANCE),
'service_account': (do_service_account, PER_INSTANCE),
# This one is special since a cron job will also call into it
# periodically to trigger sudoers refreshing... (see `update-sudoers`
# script and cron action).
'sudoers': (do_sudoers, PER_INSTANCE),
'ds_agent': (do_ds_agent, PER_INSTANCE),
'cleanup': (do_cleanup, PER_ALWAYS),
}
def handle(mod_name, cfg, cloud, log, args):
# This uses vendordata *only* and currently does not allow itself
# to be overridden (at least for the time being); at a future point
# we can consider merging the two or doing something different, but
# for now this module can't really be user 'controlled/tweaked'.
vd_cfg = cloud.datasource.get_vendordata()
if not vd_cfg:
vd_cfg = {}
# Use a subkey so that we could at some point have other root keys
# in vendordata that may or may not be godaddy related...
try:
gd_cfg = vd_cfg['godaddy']
if not gd_cfg:
gd_cfg = {}
except KeyError:
gd_cfg = {}
gd_meta = fetch_gd_meta(cloud)
    # The actions that we will run (and the order in which they
    # will run); these must match keys in `actions_to_func_freq`.
actions = list(gd_cfg.get('actions', default_actions))
disable_spacewalk = util.get_cfg_option_bool(
gd_cfg, 'disable_spacewalk',
default=util.get_cfg_option_bool(gd_meta, 'disable_spacewalk',
default=False))
if not disable_spacewalk:
actions.append('spacewalk_register')
# See: https://www.powerbrokeropen.org/ (PowerBroker Identity Services)
disable_pbis = util.get_cfg_option_bool(
gd_cfg, 'disable_pbis',
default=util.get_cfg_option_bool(gd_meta, 'disable_pbis',
default=False))
if not disable_pbis:
# Ensure that pbis auth happens before 'sudoers' or 'service_account'
# activities...
find_mods = ['sudoers', 'service_account']
if not disable_spacewalk:
# Make sure PBIS is *also* done before
# spacewalk registration, if we are doing both.
find_mods.append('spacewalk_register')
idx_choices = []
for mod in find_mods:
try:
idx_choices.append(actions.index(mod))
except (IndexError, ValueError):
pass
        if idx_choices:
actions.insert(min(idx_choices), 'pbis_auth')
else:
actions.append('pbis_auth')
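    # For example, with the default actions and neither spacewalk nor
    # pbis disabled, the final ordering works out to:
    #
    #   ['init', 'sysctl', 'pbis_auth', 'service_account', 'sudoers',
    #    'ntpd', 'cron', 'ds_agent', 'cleanup', 'spacewalk_register']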
    secrets_needed = False
    for action in actions:
        handler_func, _freq = actions_to_func_freq[action]
        if getattr(handler_func, 'needs_secrets', False):
            secrets_needed = True
    if secrets_needed:
secrets = read_secrets(
util.load_file(gd_cfg.get('secrets_key', key_path)),
gd_cfg['secrets'])
else:
secrets = {}
# TODO(harlowja): it'd be nice to have sub-modules in cloud-init at
# some point so that we don't have to do this ourselves here...
log.info("Godaddy module %s will run the"
" following subactions (in order): %s", mod_name, actions)
runner = helpers.Runners(cloud.paths)
for action in actions:
handler_func, freq = actions_to_func_freq[action]
action_name = "%s.%s" % (mod_name, action)
runner.run(
action_name, handler_func,
[mod_name, action, gd_cfg, cloud, log, secrets], freq=freq)