Skip to content

Instantly share code, notes, and snippets.

@gtmanfred
Created October 2, 2018 15:25
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save gtmanfred/a75516499a259ba8f375a559202d5435 to your computer and use it in GitHub Desktop.
Save gtmanfred/a75516499a259ba8f375a559202d5435 to your computer and use it in GitHub Desktop.
diff --git a/salt/beacons/diskusage.py b/salt/beacons/diskusage.py
index 635f13954b..2b1aa0eacb 100644
--- a/salt/beacons/diskusage.py
+++ b/salt/beacons/diskusage.py
@@ -79,7 +79,7 @@ def beacon(config):
The second one will match disks from A:\ to Z:\ on a Windows system
Note that if a regular expression are evaluated after static mount points,
- which means that if a regular expression matches an other defined mount point,
+ which means that if a regular expression matches another defined mount point,
it will override the previously defined threshold.
'''
diff --git a/salt/beacons/load.py b/salt/beacons/load.py
index a3cba14fcb..64a3f43436 100644
--- a/salt/beacons/load.py
+++ b/salt/beacons/load.py
@@ -113,7 +113,7 @@ def beacon(config):
for k in ['1m', '5m', '15m']:
LAST_STATUS[k] = avg_dict[k]
if not config['emitatstartup']:
- log.debug('Dont emit because emitatstartup is False')
+ log.debug("Don't emit because emitatstartup is False")
return ret
send_beacon = False
diff --git a/salt/cache/localfs.py b/salt/cache/localfs.py
index 45dfa6d086..22c030599c 100644
--- a/salt/cache/localfs.py
+++ b/salt/cache/localfs.py
@@ -14,6 +14,7 @@ from __future__ import absolute_import
import logging
import os
import os.path
+import errno
import shutil
import tempfile
@@ -45,13 +46,14 @@ def store(bank, key, data, cachedir):
Store information in a file.
'''
base = os.path.join(cachedir, os.path.normpath(bank))
- if not os.path.isdir(base):
- try:
- os.makedirs(base)
- except OSError as exc:
+ try:
+ os.makedirs(base)
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
raise SaltCacheError(
- 'The cache directory, {0}, does not exist and could not be '
- 'created: {1}'.format(base, exc)
+ 'The cache directory, {0}, could not be created: {1}'.format(
+ base, exc
+ )
)
outfile = os.path.join(base, '{0}.p'.format(key))
diff --git a/salt/cache/redis_cache.py b/salt/cache/redis_cache.py
index 35bce55198..6d006234d4 100644
--- a/salt/cache/redis_cache.py
+++ b/salt/cache/redis_cache.py
@@ -333,7 +333,7 @@ def flush(bank, key=None):
An improvement for this would be loading a custom Lua script in the Redis instance of the user
(using the ``register_script`` feature) and call it whenever we flush.
This script would only need to build this sub-tree causing problems. It can be added later and the behaviour
- should not change as the user needs to explicitely allow Salt inject scripts in their Redis instance.
+ should not change as the user needs to explicitly allow Salt inject scripts in their Redis instance.
'''
redis_server = _get_redis_server()
redis_pipe = redis_server.pipeline()
diff --git a/salt/cli/api.py b/salt/cli/api.py
index 139ba1e299..ff4e410967 100644
--- a/salt/cli/api.py
+++ b/salt/cli/api.py
@@ -9,11 +9,11 @@
# Import Python libs
from __future__ import absolute_import, print_function
-import os
import logging
# Import Salt libs
import salt.client.netapi
+import salt.utils.files
import salt.utils.parsers as parsers
from salt.utils.verify import check_user, verify_files, verify_log
@@ -42,9 +42,8 @@ class SaltAPI(parsers.SaltAPIParser):
'udp://',
'file://')):
# Logfile is not using Syslog, verify
- current_umask = os.umask(0o027)
- verify_files([logfile], self.config['user'])
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o027):
+ verify_files([logfile], self.config['user'])
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
diff --git a/salt/cli/salt.py b/salt/cli/salt.py
index 9abf626a72..152baa07a9 100644
--- a/salt/cli/salt.py
+++ b/salt/cli/salt.py
@@ -406,7 +406,7 @@ class SaltCMD(parsers.SaltCMDOptionParser):
for host in ret:
if isinstance(ret[host], string_types) \
and (ret[host].startswith("Minion did not return")
- or ret[host] == 'VALUE TRIMMED'):
+ or ret[host] == 'VALUE_TRIMMED'):
continue
for fun in ret[host]:
if fun not in docs and ret[host][fun]:
diff --git a/salt/cli/spm.py b/salt/cli/spm.py
index 3cecc76905..b279cecc2a 100644
--- a/salt/cli/spm.py
+++ b/salt/cli/spm.py
@@ -30,7 +30,7 @@ class SPM(parsers.SPMParser):
self.parse_args()
self.setup_logfile_logger()
v_dirs = [
- self.config['cachedir'],
+ self.config['spm_cache_dir'],
]
verify_env(v_dirs,
self.config['user'],
diff --git a/salt/client/ssh/__init__.py b/salt/client/ssh/__init__.py
index f0dc2b3b1b..287ebe7e81 100644
--- a/salt/client/ssh/__init__.py
+++ b/salt/client/ssh/__init__.py
@@ -157,17 +157,17 @@ do
py_cmd_path=`"$py_cmd" -c \
'from __future__ import print_function;
import sys; print(sys.executable);'`
- cmdpath=$(command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null)
+ cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
- export $($py_cmd -c \
+ export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
- .format(x, os.environ[x]) for x in [$ex_vars]])")
+ .format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
@@ -220,7 +220,8 @@ class SSH(object):
if self.opts['regen_thin']:
self.opts['ssh_wipe'] = True
if not salt.utils.which('ssh'):
- raise salt.exceptions.SaltSystemExit('No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.')
+ raise salt.exceptions.SaltSystemExit(code=-1,
+ msg='No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.')
self.opts['_ssh_version'] = ssh_version()
self.tgt_type = self.opts['selected_target_option'] \
if self.opts['selected_target_option'] else 'glob'
@@ -914,6 +915,7 @@ class Single(object):
opts_pkg['pillar_roots'] = self.opts['pillar_roots']
opts_pkg['ext_pillar'] = self.opts['ext_pillar']
opts_pkg['extension_modules'] = self.opts['extension_modules']
+ opts_pkg['module_dirs'] = self.opts['module_dirs']
opts_pkg['_ssh_version'] = self.opts['_ssh_version']
opts_pkg['__master_opts__'] = self.context['master_opts']
if '_caller_cachedir' in self.opts:
diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py
index 64161f8a4a..d3d3c8ea3f 100644
--- a/salt/client/ssh/ssh_py_shim.py
+++ b/salt/client/ssh/ssh_py_shim.py
@@ -101,13 +101,15 @@ def is_windows():
def need_deployment():
"""
Salt thin needs to be deployed - prep the target directory and emit the
- delimeter and exit code that signals a required deployment.
+ delimiter and exit code that signals a required deployment.
"""
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
- old_umask = os.umask(0o077)
- os.makedirs(OPTIONS.saltdir)
- os.umask(old_umask)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
+ try:
+ os.makedirs(OPTIONS.saltdir)
+ finally:
+ os.umask(old_umask) # pylint: disable=blacklisted-function
# Verify perms on saltdir
if not is_windows():
euid = os.geteuid()
@@ -153,10 +155,10 @@ def get_hash(path, form='sha1', chunk_size=4096):
def unpack_thin(thin_path):
"""Unpack the Salt thin archive."""
tfile = tarfile.TarFile.gzopen(thin_path)
- old_umask = os.umask(0o077)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
try:
os.unlink(thin_path)
except OSError:
@@ -180,10 +182,10 @@ def unpack_ext(ext_path):
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
- old_umask = os.umask(0o077)
+ old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
@@ -288,7 +290,7 @@ def main(argv): # pylint: disable=W0613
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.cmd_umask is not None:
- old_umask = os.umask(OPTIONS.cmd_umask)
+ old_umask = os.umask(OPTIONS.cmd_umask) # pylint: disable=blacklisted-function
if OPTIONS.tty:
# Returns bytes instead of string on python 3
stdout, _ = subprocess.Popen(salt_argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
@@ -302,7 +304,7 @@ def main(argv): # pylint: disable=W0613
else:
subprocess.call(salt_argv)
if OPTIONS.cmd_umask is not None:
- os.umask(old_umask)
+ os.umask(old_umask) # pylint: disable=blacklisted-function
if __name__ == '__main__':
sys.exit(main(sys.argv))
diff --git a/salt/client/ssh/wrapper/state.py b/salt/client/ssh/wrapper/state.py
index 6b74105803..00539e89e7 100644
--- a/salt/client/ssh/wrapper/state.py
+++ b/salt/client/ssh/wrapper/state.py
@@ -15,6 +15,7 @@ import logging
import salt.client.ssh.shell
import salt.client.ssh.state
import salt.utils
+import salt.utils.files
import salt.utils.thin
import salt.roster
import salt.state
@@ -496,17 +497,16 @@ def request(mods=None,
'kwargs': kwargs
}
})
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ msg = 'Unable to write state request file {0}. Check permission.'
+ log.error(msg.format(notify_path))
return ret
@@ -560,17 +560,16 @@ def clear_request(name=None):
req.pop(name)
else:
return False
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ msg = 'Unable to write state request file {0}. Check permission.'
+ log.error(msg.format(notify_path))
return True
diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py
index 35b01aa241..566b75a5fa 100644
--- a/salt/cloud/__init__.py
+++ b/salt/cloud/__init__.py
@@ -1493,8 +1493,8 @@ class Cloud(object):
vm_name = vm_details['id']
else:
log.debug(
- 'vm:{0} in provider:{1} is not in name '
- 'list:\'{2}\''.format(vm_name, driver, names)
+ 'vm:%s in provider:%s is not in name '
+ 'list:\'%s\'', vm_name, driver, names
)
continue
diff --git a/salt/cloud/clouds/digital_ocean.py b/salt/cloud/clouds/digital_ocean.py
index daabcbddfe..358faaea4e 100644
--- a/salt/cloud/clouds/digital_ocean.py
+++ b/salt/cloud/clouds/digital_ocean.py
@@ -159,7 +159,7 @@ def avail_sizes(call=None):
'-f or --function, or with the --list-sizes option'
)
- items = query(method='sizes')
+ items = query(method='sizes', command='?per_page=100')
ret = {}
for size in items['sizes']:
ret[size['slug']] = {}
diff --git a/salt/cloud/clouds/dimensiondata.py b/salt/cloud/clouds/dimensiondata.py
index d022c5719f..017832f6be 100644
--- a/salt/cloud/clouds/dimensiondata.py
+++ b/salt/cloud/clouds/dimensiondata.py
@@ -32,7 +32,7 @@ from salt.utils.versions import LooseVersion as _LooseVersion
# Import libcloud
try:
import libcloud
- from libcloud.compute.base import NodeState
+ from libcloud.compute.base import NodeDriver, NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@@ -52,12 +52,6 @@ try:
except ImportError:
HAS_LIBCLOUD = False
-# Import generic libcloud functions
-# from salt.cloud.libcloudfuncs import *
-
-# Import salt libs
-import salt.utils
-
# Import salt.cloud libs
from salt.cloud.libcloudfuncs import * # pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
from salt.utils import namespaced_function
@@ -220,7 +214,6 @@ def create(vm_):
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
- rootPw = NodeAuthPassword(vm_['auth'])
location = conn.ex_get_location_by_id(vm_['location'])
images = conn.list_images(location=location)
@@ -251,15 +244,13 @@ def create(vm_):
kwargs = {
'name': vm_['name'],
'image': image,
- 'auth': rootPw,
'ex_description': vm_['description'],
'ex_network_domain': network_domain,
'ex_vlan': vlan,
'ex_is_started': vm_['is_started']
}
- event_data = kwargs.copy()
- del event_data['auth']
+ event_data = _to_event_data(kwargs)
__utils__['cloud.fire_event'](
'event',
@@ -270,6 +261,10 @@ def create(vm_):
transport=__opts__['transport']
)
+ # Initial password (excluded from event payload)
+ initial_password = NodeAuthPassword(vm_['auth'])
+ kwargs['auth'] = initial_password
+
try:
data = conn.create_node(**kwargs)
except Exception as exc:
@@ -283,7 +278,7 @@ def create(vm_):
return False
try:
- data = salt.utils.cloud.wait_for_ip(
+ data = __utils__['cloud.wait_for_ip'](
_query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
@@ -309,7 +304,7 @@ def create(vm_):
ip_address = preferred_ip(vm_, data.public_ips)
log.debug('Using IP address %s', ip_address)
- if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
+ if __utils__['cloud.get_salt_interface'](vm_, __opts__) == 'private_ips':
salt_ip_address = preferred_ip(vm_, data.private_ips)
log.info('Salt interface set to: %s', salt_ip_address)
else:
@@ -325,7 +320,7 @@ def create(vm_):
vm_['ssh_host'] = ip_address
vm_['password'] = vm_['auth']
- ret = salt.utils.cloud.bootstrap(vm_, __opts__)
+ ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
@@ -418,11 +413,13 @@ def create_lb(kwargs=None, call=None):
log.debug('Network Domain: %s', network_domain.id)
lb_conn.ex_set_current_network_domain(network_domain.id)
+ event_data = _to_event_data(kwargs)
+
__utils__['cloud.fire_event'](
'event',
'create load_balancer',
'salt/cloud/loadbalancer/creating',
- args=kwargs,
+ args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@@ -431,11 +428,13 @@ def create_lb(kwargs=None, call=None):
name, port, protocol, algorithm, members
)
+ event_data = _to_event_data(kwargs)
+
__utils__['cloud.fire_event'](
'event',
'created load_balancer',
'salt/cloud/loadbalancer/created',
- args=kwargs,
+ args=event_data,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
@@ -577,3 +576,46 @@ def get_lb_conn(dd_driver=None):
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
+
+
+def _to_event_data(obj):
+ '''
+ Convert the specified object into a form that can be serialised by msgpack as event data.
+
+ :param obj: The object to convert.
+ '''
+
+ if obj is None:
+ return None
+ if isinstance(obj, bool):
+ return obj
+ if isinstance(obj, int):
+ return obj
+ if isinstance(obj, float):
+ return obj
+ if isinstance(obj, str):
+ return obj
+ if isinstance(obj, bytes):
+ return obj
+ if isinstance(obj, dict):
+ return obj
+
+ if isinstance(obj, NodeDriver): # Special case for NodeDriver (cyclic references)
+ return obj.name
+
+ if isinstance(obj, list):
+ return [_to_event_data(item) for item in obj]
+
+ event_data = {}
+ for attribute_name in dir(obj):
+ if attribute_name.startswith('_'):
+ continue
+
+ attribute_value = getattr(obj, attribute_name)
+
+ if callable(attribute_value): # Strip out methods
+ continue
+
+ event_data[attribute_name] = _to_event_data(attribute_value)
+
+ return event_data
diff --git a/salt/cloud/clouds/ec2.py b/salt/cloud/clouds/ec2.py
index dccfe2590f..640021360a 100644
--- a/salt/cloud/clouds/ec2.py
+++ b/salt/cloud/clouds/ec2.py
@@ -2802,6 +2802,7 @@ def create(vm_=None, call=None):
# Ensure that the latest node data is returned
node = _get_node(instance_id=vm_['instance_id'])
+ __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
ret.update(node)
return ret
diff --git a/salt/cloud/clouds/openstack.py b/salt/cloud/clouds/openstack.py
index 690ad2037b..5a6fe8d8e7 100644
--- a/salt/cloud/clouds/openstack.py
+++ b/salt/cloud/clouds/openstack.py
@@ -700,7 +700,7 @@ def _query_node_data(vm_, data, floating, conn):
if not result and ssh_interface(vm_) == 'private_ips':
for private_ip in private:
ignore_ip = ignore_cidr(vm_, private_ip)
- if private_ip not in data.private_ips and not ignore_ip:
+ if not ignore_ip:
result.append(private_ip)
if result:
diff --git a/salt/cloud/clouds/vmware.py b/salt/cloud/clouds/vmware.py
index 7b646525f1..a88ace688f 100644
--- a/salt/cloud/clouds/vmware.py
+++ b/salt/cloud/clouds/vmware.py
@@ -2594,14 +2594,15 @@ def create(vm_):
non_hostname_chars = compile(r'[^\w-]')
if search(non_hostname_chars, vm_name):
hostName = split(non_hostname_chars, vm_name, maxsplit=1)[0]
+ domainName = split(non_hostname_chars, vm_name, maxsplit=1)[-1]
else:
hostName = vm_name
- domainName = hostName.split('.', 1)[-1]
+ domainName = domain
if 'Windows' not in object_ref.config.guestFullName:
identity = vim.vm.customization.LinuxPrep()
identity.hostName = vim.vm.customization.FixedName(name=hostName)
- identity.domain = domainName if hostName != domainName else domain
+ identity.domain = domainName
else:
identity = vim.vm.customization.Sysprep()
identity.guiUnattended = vim.vm.customization.GuiUnattended()
diff --git a/salt/cloud/deploy/bootstrap-salt.sh b/salt/cloud/deploy/bootstrap-salt.sh
index f1e86bb44d..a9baf0680b 100755
--- a/salt/cloud/deploy/bootstrap-salt.sh
+++ b/salt/cloud/deploy/bootstrap-salt.sh
@@ -95,7 +95,7 @@ echoinfo() {
#--- FUNCTION -------------------------------------------------------------------------------------------------------
# NAME: echowarn
-# DESCRIPTION: Echo warning informations to stdout.
+# DESCRIPTION: Echo warning information to stdout.
#----------------------------------------------------------------------------------------------------------------------
echowarn() {
printf "${YC} * WARN${EC}: %s\n" "$@";
@@ -338,7 +338,7 @@ __usage() {
-U If set, fully upgrade the system prior to bootstrapping Salt
-I If set, allow insecure connections while downloading any files. For
example, pass '--no-check-certificate' to 'wget' or '--insecure' to
- 'curl'. On Debian and Ubuntu, using this option with -U allows to obtain
+ 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining
GnuPG archive keys insecurely if distro has changed release signatures.
-F Allow copied files to overwrite existing (config, init.d, etc)
-K If set, keep the temporary files in the temporary directories specified
diff --git a/salt/config/__init__.py b/salt/config/__init__.py
index 236cd3ca81..b3af2f755e 100644
--- a/salt/config/__init__.py
+++ b/salt/config/__init__.py
@@ -600,10 +600,11 @@ VALID_OPTS = {
# Frequency of the proxy_keep_alive, in minutes
'proxy_keep_alive_interval': int,
- 'git_pillar_base': str,
- 'git_pillar_branch': str,
- 'git_pillar_env': str,
- 'git_pillar_root': str,
+ # NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
+ # git_pillar_root omitted here because their values could conceivably be
+ # loaded as non-string types, which is OK because git_pillar will normalize
+ # them to strings. But rather than include all the possible types they
+ # could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': str,
@@ -615,10 +616,11 @@ VALID_OPTS = {
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
+ # NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
+ # their values could conceivably be loaded as non-string types, which is OK
+ # because gitfs will normalize them to strings. But rather than include all
+ # the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
- 'gitfs_mountpoint': str,
- 'gitfs_root': str,
- 'gitfs_base': str,
'gitfs_user': str,
'gitfs_password': str,
'gitfs_insecure_auth': bool,
@@ -820,6 +822,10 @@ VALID_OPTS = {
# (used by win_pkg.py, minion only)
'winrepo_source_dir': str,
+ # NOTE: winrepo_branch omitted here because its value could conceivably be
+ # loaded as a non-string type, which is OK because winrepo will normalize
+ # it to a string. But rather than include all the possible types it could
+ # be, we'll just skip type-checking.
'winrepo_dir': str,
'winrepo_dir_ng': str,
'winrepo_cachefile': str,
@@ -827,7 +833,6 @@ VALID_OPTS = {
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
- 'winrepo_branch': str,
'winrepo_ssl_verify': bool,
'winrepo_user': str,
'winrepo_password': str,
@@ -1486,6 +1491,7 @@ DEFAULT_MASTER_OPTS = {
'keep_acl_in_token': False,
'eauth_acl_module': '',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
+ 'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
diff --git a/salt/crypt.py b/salt/crypt.py
index fc6aca5cae..7ffec3ff64 100644
--- a/salt/crypt.py
+++ b/salt/crypt.py
@@ -51,6 +51,7 @@ import salt.utils.decorators
import salt.payload
import salt.transport.client
import salt.transport.frame
+import salt.utils.files
import salt.utils.rsax931
import salt.utils.verify
import salt.version
@@ -69,8 +70,7 @@ def dropfile(cachedir, user=None):
'''
dfn = os.path.join(cachedir, '.dfn')
# set a mask (to avoid a race condition on file creation) and store original.
- mask = os.umask(191)
- try:
+ with salt.utils.files.set_umask(0o277):
log.info('Rotating AES key')
if os.path.isfile(dfn):
log.info('AES key rotation already requested')
@@ -88,8 +88,6 @@ def dropfile(cachedir, user=None):
os.chown(dfn, uid, -1)
except (KeyError, ImportError, OSError, IOError):
pass
- finally:
- os.umask(mask) # restore original umask
def gen_keys(keydir, keyname, keysize, user=None):
@@ -119,10 +117,9 @@ def gen_keys(keydir, keyname, keysize, user=None):
if not os.access(keydir, os.W_OK):
raise IOError('Write access denied to "{0}" for user "{1}".'.format(os.path.abspath(keydir), getpass.getuser()))
- cumask = os.umask(191)
- with salt.utils.fopen(priv, 'wb+') as f:
- f.write(gen.exportKey('PEM'))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o277):
+ with salt.utils.fopen(priv, 'wb+') as f:
+ f.write(gen.exportKey('PEM'))
with salt.utils.fopen(pub, 'wb+') as f:
f.write(gen.publickey().exportKey('PEM'))
os.chmod(priv, 256)
diff --git a/salt/daemons/masterapi.py b/salt/daemons/masterapi.py
index 343bc90376..79d9544616 100644
--- a/salt/daemons/masterapi.py
+++ b/salt/daemons/masterapi.py
@@ -32,6 +32,7 @@ import salt.fileserver
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
+import salt.utils.files
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
@@ -94,8 +95,8 @@ def init_git_pillar(opts):
pillar.init_remotes(
opts_dict['git'],
git_pillar.PER_REMOTE_OVERRIDES,
- git_pillar.PER_REMOTE_ONLY
- )
+ git_pillar.PER_REMOTE_ONLY,
+ git_pillar.GLOBAL_ONLY)
ret.append(pillar)
except FileserverConfigError:
if opts.get('git_pillar_verify_config', True):
@@ -231,10 +232,9 @@ def mk_key(opts, user):
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
- cumask = os.umask(191)
- with salt.utils.fopen(keyfile, 'w+') as fp_:
- fp_.write(key)
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o277):
+ with salt.utils.fopen(keyfile, 'w+') as fp_:
+ fp_.write(key)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
diff --git a/salt/engines/hipchat.py b/salt/engines/hipchat.py
index 66cc6d8bdc..705a5b693e 100644
--- a/salt/engines/hipchat.py
+++ b/salt/engines/hipchat.py
@@ -235,13 +235,13 @@ def start(token,
- ``html``: send the output as HTML
- ``code``: send the output as code
- This can be overriden when executing a command, using the ``--out-type`` argument.
+ This can be overridden when executing a command, using the ``--out-type`` argument.
.. versionadded:: 2017.7.0
outputter: ``nested``
The format to display the data, using the outputters available on the CLI.
- This argument can also be overriden when executing a command, using the ``--out`` option.
+ This argument can also be overridden when executing a command, using the ``--out`` option.
.. versionadded:: 2017.7.0
diff --git a/salt/engines/webhook.py b/salt/engines/webhook.py
index 08cbc5a7fa..40ed053989 100644
--- a/salt/engines/webhook.py
+++ b/salt/engines/webhook.py
@@ -70,7 +70,10 @@ def start(address=None, port=5000, ssl_crt=None, ssl_key=None):
def post(self, tag): # pylint: disable=arguments-differ
body = self.request.body
headers = self.request.headers
- payload = {'headers': headers, 'body': body}
+ payload = {
+ 'headers': headers if isinstance(headers, dict) else dict(headers),
+ 'body': body,
+ }
fire('salt/engines/hook/' + tag, payload)
application = tornado.web.Application([(r"/(.*)", WebHook), ])
diff --git a/salt/fileclient.py b/salt/fileclient.py
index 7b4e2235df..b6cc308107 100644
--- a/salt/fileclient.py
+++ b/salt/fileclient.py
@@ -138,22 +138,20 @@ class Client(object):
saltenv,
path)
destdir = os.path.dirname(dest)
- cumask = os.umask(63)
+ with salt.utils.files.set_umask(0o077):
+ # remove destdir if it is a regular file to avoid an OSError when
+ # running os.makedirs below
+ if os.path.isfile(destdir):
+ os.remove(destdir)
- # remove destdir if it is a regular file to avoid an OSError when
- # running os.makedirs below
- if os.path.isfile(destdir):
- os.remove(destdir)
-
- # ensure destdir exists
- try:
- os.makedirs(destdir)
- except OSError as exc:
- if exc.errno != errno.EEXIST: # ignore if it was there already
- raise
+ # ensure destdir exists
+ try:
+ os.makedirs(destdir)
+ except OSError as exc:
+ if exc.errno != errno.EEXIST: # ignore if it was there already
+ raise
- yield dest
- os.umask(cumask)
+ yield dest
def get_cachedir(self, cachedir=None):
if cachedir is None:
@@ -1241,7 +1239,7 @@ class RemoteClient(Client):
load = {'saltenv': saltenv,
'prefix': prefix,
'cmd': '_file_list_emptydirs'}
- self.channel.send(load)
+ return self.channel.send(load)
def dir_list(self, saltenv='base', prefix=''):
'''
diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py
index 1496e2898e..cbdf99d056 100644
--- a/salt/fileserver/__init__.py
+++ b/salt/fileserver/__init__.py
@@ -536,7 +536,7 @@ class Fileserver(object):
if '../' in path:
return fnd
if salt.utils.url.is_escaped(path):
- # don't attempt to find URL query arguements in the path
+ # don't attempt to find URL query arguments in the path
path = salt.utils.url.unescape(path)
else:
if '?' in path:
diff --git a/salt/grains/core.py b/salt/grains/core.py
index a5b1c441a8..3818083039 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -1218,19 +1218,22 @@ def _get_interfaces():
return _INTERFACES
-def _parse_os_release():
+def _parse_os_release(os_release_files):
'''
- Parse /etc/os-release and return a parameter dictionary
+ Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
'''
- filename = '/etc/os-release'
- if not os.path.isfile(filename):
- filename = '/usr/lib/os-release'
-
data = dict()
+ for filename in os_release_files:
+ if os.path.isfile(filename):
+ break
+ else:
+ # None of the specified os-release files exist
+ return data
+
with salt.utils.fopen(filename) as ifile:
regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
for line in ifile:
@@ -1414,13 +1417,15 @@ def os_data():
# to be incorrectly set to "Arch".
grains['osfullname'] = 'Antergos Linux'
elif 'lsb_distrib_id' not in grains:
- if os.path.isfile('/etc/os-release') or os.path.isfile('/usr/lib/os-release'):
- os_release = _parse_os_release()
+ os_release = _parse_os_release(['/etc/os-release', '/usr/lib/os-release'])
+ if os_release:
if 'NAME' in os_release:
grains['lsb_distrib_id'] = os_release['NAME'].strip()
if 'VERSION_ID' in os_release:
grains['lsb_distrib_release'] = os_release['VERSION_ID']
- if 'PRETTY_NAME' in os_release:
+ if 'VERSION_CODENAME' in os_release:
+ grains['lsb_distrib_codename'] = os_release['VERSION_CODENAME']
+ elif 'PRETTY_NAME' in os_release:
codename = os_release['PRETTY_NAME']
# https://github.com/saltstack/salt/issues/44108
if os_release['ID'] == 'debian':
@@ -1689,7 +1694,7 @@ def os_data():
grains['osrelease_info']
)
os_name = grains['os' if grains.get('os') in (
- 'FreeBSD', 'OpenBSD', 'NetBSD', 'Mac', 'Raspbian') else 'osfullname']
+ 'Debian', 'FreeBSD', 'OpenBSD', 'NetBSD', 'Mac', 'Raspbian') else 'osfullname']
grains['osfinger'] = '{0}-{1}'.format(
os_name, grains['osrelease'] if os_name in ('Ubuntu',) else grains['osrelease_info'][0])
diff --git a/salt/grains/disks.py b/salt/grains/disks.py
index ce40682842..6a27f765b6 100644
--- a/salt/grains/disks.py
+++ b/salt/grains/disks.py
@@ -127,16 +127,21 @@ def _linux_disks():
ret = {'disks': [], 'SSDs': []}
for entry in glob.glob('/sys/block/*/queue/rotational'):
- with salt.utils.fopen(entry) as entry_fp:
- device = entry.split('/')[3]
- flag = entry_fp.read(1)
- if flag == '0':
- ret['SSDs'].append(device)
- log.trace('Device {0} reports itself as an SSD'.format(device))
- elif flag == '1':
- ret['disks'].append(device)
- log.trace('Device {0} reports itself as an HDD'.format(device))
- else:
- log.trace('Unable to identify device {0} as an SSD or HDD.'
- ' It does not report 0 or 1'.format(device))
+ try:
+ with salt.utils.fopen(entry) as entry_fp:
+ device = entry.split('/')[3]
+ flag = entry_fp.read(1)
+ if flag == '0':
+ ret['SSDs'].append(device)
+ log.trace('Device %s reports itself as an SSD', device)
+ elif flag == '1':
+ ret['disks'].append(device)
+ log.trace('Device %s reports itself as an HDD', device)
+ else:
+ log.trace(
+ 'Unable to identify device %s as an SSD or HDD. It does '
+ 'not report 0 or 1', device
+ )
+ except IOError:
+ pass
return ret
diff --git a/salt/grains/napalm.py b/salt/grains/napalm.py
index fcfbdcfe9f..f15c970d4a 100644
--- a/salt/grains/napalm.py
+++ b/salt/grains/napalm.py
@@ -326,7 +326,7 @@ def host(proxy=None):
.. note::
- The diference betwen ``host`` and ``hostname`` is that
+ The difference between ``host`` and ``hostname`` is that
``host`` provides the physical location - either domain name or IP address,
while ``hostname`` provides the hostname as configured on the device.
They are not necessarily the same.
diff --git a/salt/key.py b/salt/key.py
index 1439732621..78b359bc3b 100644
--- a/salt/key.py
+++ b/salt/key.py
@@ -24,6 +24,7 @@ import salt.exceptions
import salt.minion
import salt.utils
import salt.utils.event
+import salt.utils.files
import salt.utils.kinds
# pylint: disable=import-error,no-name-in-module,redefined-builtin
@@ -1017,7 +1018,7 @@ class RaetKey(Key):
'''
Use libnacl to generate and safely save a private key
'''
- import libnacl.dual # pylint: disable=3rd-party-module-not-gated
+ import libnacl.dual # pylint: disable=import-error,3rd-party-module-not-gated
d_key = libnacl.dual.DualSecret()
keydir, keyname, _, _ = self._get_key_attrs(keydir, keyname,
keysize, user)
@@ -1411,14 +1412,13 @@ class RaetKey(Key):
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
- c_umask = os.umask(191)
- if os.path.exists(path):
- #mode = os.stat(path).st_mode
- os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
- with salt.utils.fopen(path, 'w+') as fp_:
- fp_.write(self.serial.dumps(keydata))
- os.chmod(path, stat.S_IRUSR)
- os.umask(c_umask)
+ with salt.utils.files.set_umask(0o277):
+ if os.path.exists(path):
+ #mode = os.stat(path).st_mode
+ os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
+ with salt.utils.fopen(path, 'w+') as fp_:
+ fp_.write(self.serial.dumps(keydata))
+ os.chmod(path, stat.S_IRUSR)
def delete_local(self):
'''
diff --git a/salt/loader.py b/salt/loader.py
index 20d9b1339c..f2a3e2fb74 100644
--- a/salt/loader.py
+++ b/salt/loader.py
@@ -22,6 +22,7 @@ from zipimport import zipimporter
import salt.config
import salt.syspaths
import salt.utils.context
+import salt.utils.files
import salt.utils.lazy
import salt.utils.event
import salt.utils.odict
@@ -75,6 +76,9 @@ if USE_IMPORTLIB:
else:
SUFFIXES = imp.get_suffixes()
+BIN_PRE_EXT = '' if six.PY2 \
+ else '.cpython-{0}{1}'.format(sys.version_info.major, sys.version_info.minor)
+
# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`
# which simplifies code readability, it adds some unsupported functions into
# the driver's module scope.
@@ -778,24 +782,23 @@ def grains(opts, force_refresh=False, proxy=None):
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Late import
- import salt.modules.cmdmod
- # Make sure cache file isn't read-only
- salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
- with salt.utils.fopen(cfn, 'w+b') as fp_:
- try:
- serial = salt.payload.Serial(opts)
- serial.dump(grains_data, fp_)
- except TypeError:
- # Can't serialize pydsl
- pass
- except (IOError, OSError):
- msg = 'Unable to write to grains cache file {0}'
- log.error(msg.format(cfn))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Late import
+ import salt.modules.cmdmod
+ # Make sure cache file isn't read-only
+ salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
+ with salt.utils.fopen(cfn, 'w+b') as fp_:
+ try:
+ serial = salt.payload.Serial(opts)
+ serial.dump(grains_data, fp_)
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+ except (IOError, OSError):
+ msg = 'Unable to write to grains cache file {0}'
+ log.error(msg.format(cfn))
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
@@ -1172,6 +1175,7 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.suffix_map = {}
suffix_order = [''] # local list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
+
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
suffix_order.append(suffix)
@@ -1200,19 +1204,32 @@ class LazyLoader(salt.utils.lazy.LazyDict):
self.file_mapping = salt.utils.odict.OrderedDict()
for mod_dir in self.module_dirs:
- files = []
try:
- # Make sure we have a sorted listdir in order to have expectable override results
+ # Make sure we have a sorted listdir in order to have
+ # expectable override results
files = sorted(os.listdir(mod_dir))
except OSError:
continue # Next mod_dir
+ if six.PY3:
+ try:
+ pycache_files = [
+ os.path.join('__pycache__', x) for x in
+ sorted(os.listdir(os.path.join(mod_dir, '__pycache__')))
+ ]
+ except OSError:
+ pass
+ else:
+ pycache_files.extend(files)
+ files = pycache_files
for filename in files:
try:
- if filename.startswith('_'):
+ dirname, basename = os.path.split(filename)
+ if basename.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
- f_noext, ext = os.path.splitext(filename)
+ f_noext, ext = os.path.splitext(basename)
+ f_noext = f_noext.replace(BIN_PRE_EXT, '')
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
@@ -1249,6 +1266,12 @@ class LazyLoader(salt.utils.lazy.LazyDict):
if not curr_ext or suffix_order.index(ext) >= suffix_order.index(curr_ext):
continue # Next filename
+ if six.PY3 and not dirname and ext == '.pyc':
+ # On Python 3, we should only load .pyc files from the
+ # __pycache__ subdirectory (i.e. when dirname is not an
+ # empty string).
+ continue
+
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext)
diff --git a/salt/log/handlers/__init__.py b/salt/log/handlers/__init__.py
index ceb8f50232..cb498b1bae 100644
--- a/salt/log/handlers/__init__.py
+++ b/salt/log/handlers/__init__.py
@@ -17,6 +17,7 @@ import logging.handlers
# Import salt libs
from salt.log.mixins import NewStyleClassMixIn, ExcInfoOnLogLevelFormatMixIn
+from salt.ext.six.moves import queue
log = logging.getLogger(__name__)
@@ -174,7 +175,12 @@ if sys.version_info < (3, 2):
this method if you want to use blocking, timeouts or custom queue
implementations.
'''
- self.queue.put_nowait(record)
+ try:
+ self.queue.put_nowait(record)
+ except queue.Full:
+ sys.stderr.write('[WARNING ] Message queue is full, '
+ 'unable to write "{0}" to log'.format(record)
+ )
def prepare(self, record):
'''
diff --git a/salt/log/setup.py b/salt/log/setup.py
index c15c2bc3d5..bc68e22621 100644
--- a/salt/log/setup.py
+++ b/salt/log/setup.py
@@ -933,6 +933,12 @@ def shutdown_multiprocessing_logging():
logging.root.removeHandler(__MP_LOGGING_QUEUE_HANDLER)
__MP_LOGGING_QUEUE_HANDLER = None
__MP_LOGGING_CONFIGURED = False
+ if not logging.root.handlers:
+ # Ensure we have at least one logging root handler so
+ # something can handle logging messages. This case should
+ # only occur on Windows since on Windows we log to console
+ # and file through the Multiprocessing Logging Listener.
+ setup_console_logger()
finally:
logging._releaseLock()
@@ -946,6 +952,12 @@ def shutdown_multiprocessing_logging_listener(daemonizing=False):
# We're in the MainProcess and we're not daemonizing, return!
# No multiprocessing logging listener shutdown shall happen
return
+
+ if not daemonizing:
+ # Need to remove the queue handler so that it doesn't try to send
+ # data over a queue that was shut down on the listener end.
+ shutdown_multiprocessing_logging()
+
if __MP_LOGGING_QUEUE_PROCESS is None:
return
if __MP_LOGGING_QUEUE_PROCESS.is_alive():
diff --git a/salt/master.py b/salt/master.py
index 513858f2e6..e451d4a83e 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -67,6 +67,7 @@ import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
+import salt.utils.files
import salt.utils.job
import salt.utils.verify
import salt.utils.minions
@@ -481,9 +482,8 @@ class Master(SMaster):
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
- prev_umask = os.umask(0o077)
- os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
- os.umask(prev_umask)
+ with salt.utils.files.set_umask(0o077):
+ os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
@@ -504,7 +504,8 @@ class Master(SMaster):
git_pillar.init_remotes(
repo['git'],
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ salt.pillar.git_pillar.GLOBAL_ONLY)
except FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
diff --git a/salt/minion.py b/salt/minion.py
index 77e22c3aa7..e728659e1d 100644
--- a/salt/minion.py
+++ b/salt/minion.py
@@ -1761,7 +1761,9 @@ class Minion(MinionBase):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
- salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
+ if ret['jid'] == 'req':
+ ret['jid'] = salt.utils.jid.gen_jid()
+ salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
diff --git a/salt/modules/aliases.py b/salt/modules/aliases.py
index 0be801837b..1f55f7e267 100644
--- a/salt/modules/aliases.py
+++ b/salt/modules/aliases.py
@@ -33,7 +33,7 @@ def __get_aliases_filename():
'''
Return the path to the appropriate aliases file
'''
- return __salt__['config.option']('aliases.file')
+ return os.path.realpath(__salt__['config.option']('aliases.file'))
def __parse_aliases():
diff --git a/salt/modules/aptpkg.py b/salt/modules/aptpkg.py
index f781db28c9..7f66dfb236 100644
--- a/salt/modules/aptpkg.py
+++ b/salt/modules/aptpkg.py
@@ -111,7 +111,7 @@ def __init__(opts):
a few env variables to keep apt happy and
non-interactive.
'''
- if __virtual__():
+ if __virtual__() == __virtualname__:
# Export these puppies so they persist
os.environ.update(DPKG_ENV_VARS)
diff --git a/salt/modules/archive.py b/salt/modules/archive.py
index 5a1f727ac3..c3699374fe 100644
--- a/salt/modules/archive.py
+++ b/salt/modules/archive.py
@@ -1061,8 +1061,7 @@ def unzip(zip_file,
if extract_perms:
perm = zfile.getinfo(target).external_attr >> 16
if perm == 0:
- umask_ = os.umask(0)
- os.umask(umask_)
+ umask_ = salt.utils.files.get_umask()
if target.endswith('/'):
perm = 0o777 & ~umask_
else:
diff --git a/salt/modules/bcache.py b/salt/modules/bcache.py
index 8e87256dbd..9e39827d15 100644
--- a/salt/modules/bcache.py
+++ b/salt/modules/bcache.py
@@ -927,7 +927,7 @@ def _wipe(dev):
def _wait(lfunc, log_lvl=None, log_msg=None, tries=10):
'''
Wait for lfunc to be True
- :return: True if lfunc succeeded within tries, False if it didnt
+ :return: True if lfunc succeeded within tries, False if it didn't
'''
i = 0
while i < tries:
diff --git a/salt/modules/boto3_route53.py b/salt/modules/boto3_route53.py
index 4828ba181f..db194c1aae 100644
--- a/salt/modules/boto3_route53.py
+++ b/salt/modules/boto3_route53.py
@@ -554,7 +554,7 @@ def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
return False
-def diassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
+def disassociate_vpc_from_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
VPCName=None, VPCRegion=None, Comment=None,
region=None, key=None, keyid=None, profile=None):
'''
diff --git a/salt/modules/boto_vpc.py b/salt/modules/boto_vpc.py
index 033c0086d7..612afd7537 100644
--- a/salt/modules/boto_vpc.py
+++ b/salt/modules/boto_vpc.py
@@ -2576,6 +2576,7 @@ def describe_route_tables(route_table_id=None, route_table_name=None,
'instance_id': 'Instance',
'interface_id': 'NetworkInterfaceId',
'nat_gateway_id': 'NatGatewayId',
+ 'vpc_peering_connection_id': 'VpcPeeringConnectionId',
}
assoc_keys = {'id': 'RouteTableAssociationId',
'main': 'Main',
@@ -2633,7 +2634,7 @@ def _maybe_set_tags(tags, obj):
def _maybe_set_dns(conn, vpcid, dns_support, dns_hostnames):
if dns_support:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_support=dns_support)
- log.debug('DNS spport was set to: {0} on vpc {1}'.format(dns_support, vpcid))
+ log.debug('DNS support was set to: {0} on vpc {1}'.format(dns_support, vpcid))
if dns_hostnames:
conn.modify_vpc_attribute(vpc_id=vpcid, enable_dns_hostnames=dns_hostnames)
log.debug('DNS hostnames was set to: {0} on vpc {1}'.format(dns_hostnames, vpcid))
diff --git a/salt/modules/capirca_acl.py b/salt/modules/capirca_acl.py
index f925f71df8..d780bb7425 100644
--- a/salt/modules/capirca_acl.py
+++ b/salt/modules/capirca_acl.py
@@ -424,7 +424,7 @@ def _merge_list_of_dict(first, second, prepend=True):
if first and not second:
return first
# Determine overlaps
- # So we dont change the position of the existing terms/filters
+ # So we don't change the position of the existing terms/filters
overlaps = []
merged = []
appended = []
diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py
index cd23383654..c2dbb2e040 100644
--- a/salt/modules/cmdmod.py
+++ b/salt/modules/cmdmod.py
@@ -36,7 +36,6 @@ from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
SaltInvocationError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range, zip
-from salt.ext.six.moves import shlex_quote as _cmd_quote
from salt.utils.locales import sdecode
# Only available on POSIX systems, nonfatal on windows
@@ -47,8 +46,10 @@ except ImportError:
if salt.utils.is_windows():
from salt.utils.win_runas import runas as win_runas
+ from salt.utils.win_functions import escape_argument as _cmd_quote
HAS_WIN_RUNAS = True
else:
+ from salt.ext.six.moves import shlex_quote as _cmd_quote
HAS_WIN_RUNAS = False
__proxyenabled__ = ['*']
@@ -289,6 +290,7 @@ def _run(cmd,
'Check to ensure that the shell <{0}> is valid for this user.'
.format(shell))
+ output_loglevel = _check_loglevel(output_loglevel)
log_callback = _check_cb(log_callback)
if runas is None and '__context__' in globals():
@@ -316,6 +318,10 @@ def _run(cmd,
# yaml-ified into non-string types
cwd = str(cwd)
+ if bg:
+ ignore_retcode = True
+ use_vt = False
+
if not salt.utils.is_windows():
if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
msg = 'The shell {0} is not available'.format(shell)
@@ -371,7 +377,7 @@ def _run(cmd,
else:
return cmd
- if _check_loglevel(output_loglevel) is not None:
+ if output_loglevel is not None:
# Always log the shell commands at INFO unless quiet logging is
# requested. The command output is what will be controlled by the
# 'loglevel' parameter.
@@ -425,7 +431,7 @@ def _run(cmd,
elif __grains__['os_family'] in ['Solaris']:
env_cmd = ('su', '-', runas, '-c', sys.executable)
elif __grains__['os_family'] in ['AIX']:
- env_cmd = ('su', runas, '-c', sys.executable)
+ env_cmd = ('su', '-', runas, '-c', sys.executable)
else:
env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
env_encoded = subprocess.Popen(
@@ -443,6 +449,10 @@ def _run(cmd,
env_runas = dict((sdecode(k), sdecode(v)) for k, v in six.iteritems(env_runas))
env_runas.update(env)
+ # Fix platforms like Solaris that don't set a USER env var in the
+ # user's default environment as obtained above.
+ if env_runas.get('USER') != runas:
+ env_runas['USER'] = runas
env = env_runas
# Encode unicode kwargs to filesystem encoding to avoid a
# UnicodeEncodeError when the subprocess is invoked.
@@ -547,7 +557,7 @@ def _run(cmd,
msg = (
'Unable to run command \'{0}\' with the context \'{1}\', '
'reason: '.format(
- cmd if _check_loglevel(output_loglevel) is not None
+ cmd if output_loglevel is not None
else 'REDACTED',
kwargs
)
@@ -594,7 +604,7 @@ def _run(cmd,
to = ''
if timeout:
to = ' (timeout: {0}s)'.format(timeout)
- if _check_loglevel(output_loglevel) is not None:
+ if output_loglevel is not None:
msg = 'Running {0} in VT{1}'.format(cmd, to)
log.debug(log_callback(msg))
stdout, stderr = '', ''
@@ -668,6 +678,26 @@ def _run(cmd,
except NameError:
# Ignore the context error during grain generation
pass
+
+ # Log the output
+ if output_loglevel is not None:
+ if not ignore_retcode and ret['retcode'] != 0:
+ if output_loglevel < LOG_LEVELS['error']:
+ output_loglevel = LOG_LEVELS['error']
+ msg = (
+ 'Command \'{0}\' failed with return code: {1}'.format(
+ cmd,
+ ret['retcode']
+ )
+ )
+ log.error(log_callback(msg))
+ if ret['stdout']:
+ log.log(output_loglevel, 'stdout: {0}'.format(log_callback(ret['stdout'])))
+ if ret['stderr']:
+ log.log(output_loglevel, 'stderr: {0}'.format(log_callback(ret['stderr'])))
+ if ret['retcode']:
+ log.log(output_loglevel, 'retcode: {0}'.format(ret['retcode']))
+
return ret
@@ -949,21 +979,6 @@ def run(cmd,
encoded_cmd=encoded_cmd,
**kwargs)
- log_callback = _check_cb(log_callback)
-
- lvl = _check_loglevel(output_loglevel)
- if lvl is not None:
- if not ignore_retcode and ret['retcode'] != 0:
- if lvl < LOG_LEVELS['error']:
- lvl = LOG_LEVELS['error']
- msg = (
- 'Command \'{0}\' failed with return code: {1}'.format(
- cmd,
- ret['retcode']
- )
- )
- log.error(log_callback(msg))
- log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
return ret['stdout']
@@ -1315,26 +1330,6 @@ def run_stdout(cmd,
password=password,
**kwargs)
- log_callback = _check_cb(log_callback)
-
- lvl = _check_loglevel(output_loglevel)
- if lvl is not None:
- if not ignore_retcode and ret['retcode'] != 0:
- if lvl < LOG_LEVELS['error']:
- lvl = LOG_LEVELS['error']
- msg = (
- 'Command \'{0}\' failed with return code: {1}'.format(
- cmd,
- ret['retcode']
- )
- )
- log.error(log_callback(msg))
- if ret['stdout']:
- log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
- if ret['stderr']:
- log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
- if ret['retcode']:
- log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stdout']
@@ -1497,26 +1492,6 @@ def run_stderr(cmd,
password=password,
**kwargs)
- log_callback = _check_cb(log_callback)
-
- lvl = _check_loglevel(output_loglevel)
- if lvl is not None:
- if not ignore_retcode and ret['retcode'] != 0:
- if lvl < LOG_LEVELS['error']:
- lvl = LOG_LEVELS['error']
- msg = (
- 'Command \'{0}\' failed with return code: {1}'.format(
- cmd,
- ret['retcode']
- )
- )
- log.error(log_callback(msg))
- if ret['stdout']:
- log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
- if ret['stderr']:
- log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
- if ret['retcode']:
- log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret['stderr']
@@ -1699,26 +1674,6 @@ def run_all(cmd,
password=password,
**kwargs)
- log_callback = _check_cb(log_callback)
-
- lvl = _check_loglevel(output_loglevel)
- if lvl is not None:
- if not ignore_retcode and ret['retcode'] != 0:
- if lvl < LOG_LEVELS['error']:
- lvl = LOG_LEVELS['error']
- msg = (
- 'Command \'{0}\' failed with return code: {1}'.format(
- cmd,
- ret['retcode']
- )
- )
- log.error(log_callback(msg))
- if ret['stdout']:
- log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
- if ret['stderr']:
- log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
- if ret['retcode']:
- log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
return ret
@@ -1882,21 +1837,6 @@ def retcode(cmd,
password=password,
**kwargs)
- log_callback = _check_cb(log_callback)
-
- lvl = _check_loglevel(output_loglevel)
- if lvl is not None:
- if not ignore_retcode and ret['retcode'] != 0:
- if lvl < LOG_LEVELS['error']:
- lvl = LOG_LEVELS['error']
- msg = (
- 'Command \'{0}\' failed with return code: {1}'.format(
- cmd,
- ret['retcode']
- )
- )
- log.error(log_callback(msg))
- log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
return ret['retcode']
@@ -3117,7 +3057,6 @@ def run_bg(cmd,
output_loglevel='debug',
log_callback=None,
reset_system_locale=True,
- ignore_retcode=False,
saltenv='base',
password=None,
**kwargs):
@@ -3277,7 +3216,6 @@ def run_bg(cmd,
log_callback=log_callback,
timeout=timeout,
reset_system_locale=reset_system_locale,
- ignore_retcode=ignore_retcode,
saltenv=saltenv,
password=password,
**kwargs
diff --git a/salt/modules/debbuild.py b/salt/modules/debbuild.py
index 02631e9353..b4c8373345 100644
--- a/salt/modules/debbuild.py
+++ b/salt/modules/debbuild.py
@@ -420,6 +420,9 @@ def build(runas,
# use default /var/cache/pbuilder/result
results_dir = '/var/cache/pbuilder/result'
+ ## ensure clean
+ __salt__['cmd.run']('rm -fR {0}'.format(results_dir))
+
# dscs should only contain salt orig and debian tarballs and dsc file
for dsc in dscs:
afile = os.path.basename(dsc)
@@ -430,10 +433,10 @@ def build(runas,
try:
__salt__['cmd.run']('chown {0} -R {1}'.format(runas, dbase))
- cmd = 'pbuilder --update --override-config'
+ cmd = 'pbuilder update --override-config'
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
- cmd = 'pbuilder --build {0}'.format(dsc)
+ cmd = 'pbuilder build --debbuildopts "-sa" {0}'.format(dsc)
__salt__['cmd.run'](cmd, runas=runas, python_shell=True)
# ignore local deps generated package file
diff --git a/salt/modules/debconfmod.py b/salt/modules/debconfmod.py
index 98fd1aaaa4..2acdeaaa44 100644
--- a/salt/modules/debconfmod.py
+++ b/salt/modules/debconfmod.py
@@ -127,7 +127,7 @@ def set_(package, question, type, value, *extra):
fd_, fname = salt.utils.files.mkstemp(prefix="salt-", close_fd=False)
line = "{0} {1} {2} {3}".format(package, question, type, value)
- os.write(fd_, line)
+ os.write(fd_, salt.utils.to_bytes(line))
os.close(fd_)
_set_file(fname)
diff --git a/salt/modules/dockermod.py b/salt/modules/dockermod.py
index 877a196065..db188f0da1 100644
--- a/salt/modules/dockermod.py
+++ b/salt/modules/dockermod.py
@@ -575,6 +575,15 @@ def _scrub_links(links, name):
return ret
+def _ulimit_sort(ulimit_val):
+ if isinstance(ulimit_val, list):
+ return sorted(ulimit_val,
+ key=lambda x: (x.get('Name'),
+ x.get('Hard', 0),
+ x.get('Soft', 0)))
+ return ulimit_val
+
+
def _size_fmt(num):
'''
Format bytes as human-readable file sizes
@@ -912,6 +921,9 @@ def compare_container(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
+ if item == 'Ulimits':
+ val1 = _ulimit_sort(val1)
+ val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
# Check for optionally-present items that were in the second container
@@ -935,6 +947,9 @@ def compare_container(first, second, ignore=None):
if item == 'Links':
val1 = sorted(_scrub_links(val1, first))
val2 = sorted(_scrub_links(val2, second))
+ if item == 'Ulimits':
+ val1 = _ulimit_sort(val1)
+ val2 = _ulimit_sort(val2)
if val1 != val2:
ret.setdefault(conf_dict, {})[item] = {'old': val1, 'new': val2}
return ret
@@ -5439,7 +5454,7 @@ def sls_build(name, base='opensuse/python', mods=None, saltenv='base',
the salt environment to use
dryrun: False
- when set to True the container will not be commited at the end of
+ when set to True the container will not be committed at the end of
the build. The dryrun succeed also when the state contains errors.
**RETURN DATA**
diff --git a/salt/modules/etcd_mod.py b/salt/modules/etcd_mod.py
index 78088c306a..d6b0d66eec 100644
--- a/salt/modules/etcd_mod.py
+++ b/salt/modules/etcd_mod.py
@@ -215,7 +215,7 @@ def rm_(key, recurse=False, profile=None):
'''
.. versionadded:: 2014.7.0
- Delete a key from etcd. Returns True if the key was deleted, False if it wasn
+ Delete a key from etcd. Returns True if the key was deleted, False if it was
not and None if there was a failure.
CLI Example:
diff --git a/salt/modules/file.py b/salt/modules/file.py
index 33194cabc8..1e91741304 100644
--- a/salt/modules/file.py
+++ b/salt/modules/file.py
@@ -2270,8 +2270,7 @@ def blockreplace(path,
backup='.bak',
dry_run=False,
show_changes=True,
- append_newline=False,
- ):
+ append_newline=False):
'''
.. versionadded:: 2014.1.0
@@ -2318,18 +2317,30 @@ def blockreplace(path,
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
- dry_run
- Don't make any edits to the file.
-
- show_changes
- Output a unified diff of the old file and the new file. If ``False``,
- return a boolean if any changes were made.
+ dry_run : False
+ If ``True``, do not make any edits to the file and simply return the
+ changes that *would* be made.
- append_newline:
- Append a newline to the content block. For more information see:
- https://github.com/saltstack/salt/issues/33686
+ show_changes : True
+ Controls how changes are presented. If ``True``, this function will
+ return a unified diff of the changes made. If False, then it will
+ return a boolean (``True`` if any changes were made, otherwise
+ ``False``).
+
+ append_newline : False
+ Controls whether or not a newline is appended to the content block. If
+ the value of this argument is ``True`` then a newline will be added to
+ the content block. If it is ``False``, then a newline will *not* be
+ added to the content block. If it is ``None`` then a newline will only
+ be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
+ .. versionchanged:: 2017.7.5,2018.3.1
+ New behavior added when value is ``None``.
+ .. versionchanged:: Fluorine
+ The default value of this argument will change to ``None`` to match
+ the behavior of the :py:func:`file.blockreplace state
+ <salt.states.file.blockreplace>`
CLI Example:
@@ -2339,87 +2350,137 @@ def blockreplace(path,
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
- path = os.path.expanduser(path)
-
- if not os.path.exists(path):
- raise SaltInvocationError('File not found: {0}'.format(path))
-
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
'Only one of append and prepend_if_not_found is permitted'
)
+ path = os.path.expanduser(path)
+
+ if not os.path.exists(path):
+ raise SaltInvocationError('File not found: {0}'.format(path))
+
if not salt.utils.istextfile(path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
- # Search the file; track if any changes have been made for the return val
+ if append_newline is None and not content.endswith((os.linesep, '\n')):
+ append_newline = True
+
+ # Split the content into a list of lines, removing newline characters. To
+ # ensure that we handle both Windows and POSIX newlines, first split on
+ # Windows newlines, and then split on POSIX newlines.
+ split_content = []
+ for win_line in content.split('\r\n'):
+ for content_line in win_line.split('\n'):
+ split_content.append(content_line)
+
+ line_count = len(split_content)
+
has_changes = False
orig_file = []
new_file = []
in_block = False
- old_content = ''
- done = False
- # we do not use in_place editing to avoid file attrs modifications when
+ block_found = False
+ linesep = None
+
+ def _add_content(linesep, lines=None, include_marker_start=True,
+ end_line=None):
+ if lines is None:
+ lines = []
+ include_marker_start = True
+
+ if end_line is None:
+ end_line = marker_end
+ end_line = end_line.rstrip('\r\n') + linesep
+
+ if include_marker_start:
+ lines.append(marker_start + linesep)
+
+ if split_content:
+ for index, content_line in enumerate(split_content, 1):
+ if index != line_count:
+ lines.append(content_line + linesep)
+ else:
+ # We're on the last line of the content block
+ if append_newline:
+ lines.append(content_line + linesep)
+ lines.append(end_line)
+ else:
+ lines.append(content_line + end_line)
+ else:
+ lines.append(end_line)
+
+ return lines
+
+ # We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
- # we could also use salt.utils.filebuffer.BufferedReader
+ #
+ # We could also use salt.utils.filebuffer.BufferedReader
try:
- fi_file = fileinput.input(path,
- inplace=False, backup=False,
- bufsize=1, mode='rb')
- for line in fi_file:
+ fi_file = fileinput.input(
+ path,
+ inplace=False,
+ backup=False,
+ bufsize=1,
+ mode='rb')
+ for line in fi_file:
line = salt.utils.to_str(line)
- result = line
+ write_line_to_new_file = True
+
+ if linesep is None:
+ # Auto-detect line separator
+ if line.endswith('\r\n'):
+ linesep = '\r\n'
+ elif line.endswith('\n'):
+ linesep = '\n'
+ else:
+ # No newline(s) in file, fall back to system's linesep
+ linesep = os.linesep
if marker_start in line:
- # managed block start found, start recording
+ # We've entered the content block
in_block = True
-
else:
if in_block:
- if marker_end in line:
- # end of block detected
- in_block = False
-
- # Handle situations where there may be multiple types
- # of line endings in the same file. Separate the content
- # into lines. Account for Windows-style line endings
- # using os.linesep, then by linux-style line endings
- # using '\n'
- split_content = []
- for linesep_line in content.split(os.linesep):
- for content_line in linesep_line.split('\n'):
- split_content.append(content_line)
-
- # Trim any trailing new lines to avoid unwanted
- # additional new lines
- while not split_content[-1]:
- split_content.pop()
-
- # push new block content in file
- for content_line in split_content:
- new_file.append(content_line + os.linesep)
+ # We're not going to write the lines from the old file to
+ # the new file until we have exited the block.
+ write_line_to_new_file = False
- done = True
+ marker_end_pos = line.find(marker_end)
+ if marker_end_pos != -1:
+ # End of block detected
+ in_block = False
+ # We've found and exited the block
+ block_found = True
- else:
- # remove old content, but keep a trace
- old_content += line
- result = None
- # else: we are not in the marked block, keep saving things
+ _add_content(linesep, lines=new_file,
+ include_marker_start=False,
+ end_line=line[marker_end_pos:])
+ # Save the line from the original file
orig_file.append(line)
- if result is not None:
- new_file.append(result)
- # end for. If we are here without block management we maybe have some problems,
- # or we need to initialise the marked block
+ if write_line_to_new_file:
+ new_file.append(line)
+ except (IOError, OSError) as exc:
+ raise CommandExecutionError(
+ 'Failed to read from {0}: {1}'.format(path, exc)
+ )
finally:
- fi_file.close()
+ if linesep is None:
+ # If the file was empty, we will not have set linesep yet. Assume
+ # the system's line separator. This is needed for when we
+ # prepend/append later on.
+ linesep = os.linesep
+ try:
+ fi_file.close()
+ except Exception:
+ pass
if in_block:
# unterminated block => bad, always fail
@@ -2427,35 +2488,27 @@ def blockreplace(path,
'Unterminated marked block. End of file reached before marker_end.'
)
- if not done:
+ if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
- new_file.insert(0, marker_end + os.linesep)
- if append_newline is True:
- new_file.insert(0, content + os.linesep)
- else:
- new_file.insert(0, content)
- new_file.insert(0, marker_start + os.linesep)
- done = True
+ prepended_content = _add_content(linesep)
+ prepended_content.extend(new_file)
+ new_file = prepended_content
+ block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
- if not new_file[-1].endswith(os.linesep):
- new_file[-1] += os.linesep
+ if not new_file[-1].endswith(linesep):
+ new_file[-1] += linesep
# add the markers and content at the end of file
- new_file.append(marker_start + os.linesep)
- if append_newline is True:
- new_file.append(content + os.linesep)
- else:
- new_file.append(content)
- new_file.append(marker_end + os.linesep)
- done = True
+ _add_content(linesep, lines=new_file)
+ block_found = True
else:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
- if done:
+ if block_found:
diff = ''.join(difflib.unified_diff(orig_file, new_file))
has_changes = diff is not ''
if has_changes and not dry_run:
@@ -3500,7 +3553,7 @@ def path_exists_glob(path):
Expansion allows usage of ? * and character ranges []. Tilde expansion
is not supported. Returns True/False.
- .. versionadded:: Hellium
+ .. versionadded:: 2014.7.0
CLI Example:
@@ -5032,30 +5085,25 @@ def manage_file(name,
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
- if mode:
- current_umask = os.umask(0o77)
-
- # Create a new file when test is False and source is None
- if contents is None:
- if not __opts__['test']:
- if touch(name):
- ret['changes']['new'] = 'file {0} created'.format(name)
- ret['comment'] = 'Empty file'
- else:
- return _error(
- ret, 'Empty file {0} not created'.format(name)
- )
- else:
- if not __opts__['test']:
- if touch(name):
- ret['changes']['diff'] = 'New file'
- else:
- return _error(
- ret, 'File {0} not created'.format(name)
- )
-
- if mode:
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o077 if mode else None):
+ # Create a new file when test is False and source is None
+ if contents is None:
+ if not __opts__['test']:
+ if touch(name):
+ ret['changes']['new'] = 'file {0} created'.format(name)
+ ret['comment'] = 'Empty file'
+ else:
+ return _error(
+ ret, 'Empty file {0} not created'.format(name)
+ )
+ else:
+ if not __opts__['test']:
+ if touch(name):
+ ret['changes']['diff'] = 'New file'
+ else:
+ return _error(
+ ret, 'File {0} not created'.format(name)
+ )
if contents is not None:
# Write the static contents to a temporary file
@@ -5089,8 +5137,7 @@ def manage_file(name,
# out what mode to use for the new file.
if mode is None and not salt.utils.is_windows():
# Get current umask
- mask = os.umask(0)
- os.umask(mask)
+ mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
diff --git a/salt/modules/gentoo_service.py b/salt/modules/gentoo_service.py
index 32dfcde69c..748f96ea09 100644
--- a/salt/modules/gentoo_service.py
+++ b/salt/modules/gentoo_service.py
@@ -37,9 +37,9 @@ def __virtual__():
'only available on Gentoo/Open-RC systems.')
-def _ret_code(cmd):
+def _ret_code(cmd, ignore_retcode=False):
log.debug('executing [{0}]'.format(cmd))
- sts = __salt__['cmd.retcode'](cmd, python_shell=False)
+ sts = __salt__['cmd.retcode'](cmd, python_shell=False, ignore_retcode=ignore_retcode)
return sts
@@ -248,8 +248,9 @@ def status(name, sig=None):
'''
if sig:
return bool(__salt__['status.pid'](sig))
+
cmd = _service_cmd(name, 'status')
- return not _ret_code(cmd)
+ return not _ret_code(cmd, ignore_retcode=True)
def enable(name, **kwargs):
diff --git a/salt/modules/glusterfs.py b/salt/modules/glusterfs.py
index fb26e1c610..72fb75d558 100644
--- a/salt/modules/glusterfs.py
+++ b/salt/modules/glusterfs.py
@@ -124,7 +124,7 @@ def peer_status():
The return value is a dictionary with peer UUIDs as keys and dicts of peer
information as values. Hostnames are listed in one list. GlusterFS separates
one of the hostnames but the only reason for this seems to be which hostname
- happens to be used firts in peering.
+ happens to be used first in peering.
CLI Example:
diff --git a/salt/modules/gnomedesktop.py b/salt/modules/gnomedesktop.py
index 01202cc21b..7ce550922d 100644
--- a/salt/modules/gnomedesktop.py
+++ b/salt/modules/gnomedesktop.py
@@ -20,6 +20,7 @@ try:
except ImportError:
HAS_GLIB = False
+import salt.utils
log = logging.getLogger(__name__)
@@ -50,6 +51,17 @@ class _GSettings(object):
self.UID = None
self.HOME = None
+ @property
+ def gsetting_command(self):
+ '''
+ return the command to run the gsettings binary
+ '''
+ if salt.utils.which_bin(['dbus-run-session']):
+ cmd = ['dbus-run-session', '--', 'gsettings']
+ else:
+ cmd = ['dbus-launch', '--exit-with-session', 'gsettings']
+ return cmd
+
def _get(self):
'''
get the value for user in gsettings
@@ -62,7 +74,7 @@ class _GSettings(object):
log.info('User does not exist')
return False
- cmd = 'dbus-launch --exit-with-session gsettings get {0} {1}'.format(self.SCHEMA, self.KEY)
+ cmd = self.gsetting_command + ['get', str(self.SCHEMA), str(self.KEY)]
environ = {}
environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
@@ -90,7 +102,7 @@ class _GSettings(object):
result['stdout'] = 'User {0} does not exist'.format(user)
return result
- cmd = 'dbus-launch --exit-with-session gsettings set {0} {1} "{2}"'.format(self.SCHEMA, self.KEY, str(value))
+ cmd = self.gsetting_command + ['set', str(self.SCHEMA), str(self.KEY), str(value)]
environ = {}
environ['XDG_RUNTIME_DIR'] = '/run/user/{0}'.format(uid)
result = __salt__['cmd.run_all'](cmd, runas=user, env=environ, python_shell=False)
diff --git a/salt/modules/gpg.py b/salt/modules/gpg.py
index 9d8db1c28b..3cbfb947d1 100644
--- a/salt/modules/gpg.py
+++ b/salt/modules/gpg.py
@@ -71,14 +71,13 @@ VERIFY_TRUST_LEVELS = {
'4': 'Ultimate'
}
-HAS_LIBS = False
GPG_1_3_1 = False
-
try:
import gnupg
- HAS_LIBS = True
+ HAS_GPG_BINDINGS = True
+ GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
- pass
+ HAS_GPG_BINDINGS = False
def _gpg():
@@ -96,15 +95,10 @@ def __virtual__():
if not _gpg():
return (False, 'The gpg execution module cannot be loaded: '
'gpg binary is not in the path.')
- if HAS_LIBS:
- gnupg_version = _LooseVersion(gnupg.__version__)
- if gnupg_version >= '1.3.1':
- global GPG_1_3_1
- GPG_1_3_1 = True
- return __virtualname__
- return (False, 'The gpg execution module cannot be loaded; the'
- ' gnupg python module is not installed.')
+ return __virtualname__ if HAS_GPG_BINDINGS \
+ else (False, 'The gpg execution module cannot be loaded; the '
+ 'gnupg python module is not installed.')
def _get_user_info(user=None):
diff --git a/salt/modules/heat.py b/salt/modules/heat.py
index e2b3f97ded..92f2e6a251 100644
--- a/salt/modules/heat.py
+++ b/salt/modules/heat.py
@@ -223,7 +223,7 @@ def _parse_template(tmpl_str):
return tpl
-def _parse_enviroment(env_str):
+def _parse_environment(env_str):
'''
Parsing template
'''
@@ -460,9 +460,9 @@ def delete_stack(name=None, poll=0, timeout=60, profile=None):
return ret
-def create_stack(name=None, template_file=None, enviroment=None,
+def create_stack(name=None, template_file=None, environment=None,
parameters=None, poll=0, rollback=False, timeout=60,
- profile=None):
+ profile=None, enviroment=None):
'''
Create a stack (heat stack-create)
@@ -472,8 +472,8 @@ def create_stack(name=None, template_file=None, enviroment=None,
template_file
File of template
- enviroment
- File of enviroment
+ environment
+ File of environment
parameters
Parameter dict used to create the stack
@@ -496,11 +496,23 @@ def create_stack(name=None, template_file=None, enviroment=None,
salt '*' heat.create_stack name=mystack \\
template_file=salt://template.yaml \\
- enviroment=salt://enviroment.yaml \\
+ environment=salt://environment.yaml \\
parameters="{"image": "Debian 8", "flavor": "m1.small"}" \\
poll=5 rollback=False timeout=60 profile=openstack1
+ .. versionadded:: 2017.7.5,2018.3.1
+
+ The spelling mistake in parameter `enviroment` was corrected to `environment`.
+ The misspelled version is still supported for backward compatibility, but will
+ be removed in Salt Neon.
+
'''
+ if environment is None and enviroment is not None:
+ salt.utils.warn_until('Neon', (
+ "Please use the 'environment' parameter instead of the misspelled 'enviroment' "
+ "parameter which will be removed in Salt Neon."
+ ))
+ environment = enviroment
h_client = _auth(profile)
ret = {
'result': True,
@@ -570,12 +582,12 @@ def create_stack(name=None, template_file=None, enviroment=None,
ret['comment'] = 'Template not valid {0}'.format(ex)
return ret
env = {}
- if enviroment:
- enviroment_tmp_file = salt.utils.files.mkstemp()
+ if environment:
+ environment_tmp_file = salt.utils.files.mkstemp()
esfn, source_sum, comment_ = __salt__['file.get_managed'](
- name=enviroment_tmp_file,
+ name=environment_tmp_file,
template=None,
- source=enviroment,
+ source=environment,
source_hash=None,
user=None,
group=None,
@@ -586,11 +598,11 @@ def create_stack(name=None, template_file=None, enviroment=None,
skip_verify=False,
kwargs=None)
- enviroment_manage_result = __salt__['file.manage_file'](
- name=enviroment_tmp_file,
+ environment_manage_result = __salt__['file.manage_file'](
+ name=environment_tmp_file,
sfn=esfn,
ret=None,
- source=enviroment,
+ source=environment,
source_sum=source_sum,
user=None,
group=None,
@@ -602,18 +614,18 @@ def create_stack(name=None, template_file=None, enviroment=None,
show_changes=False,
contents=None,
dir_mode=None)
- if enviroment_manage_result['result']:
- with salt.utils.fopen(enviroment_tmp_file, 'r') as efp_:
+ if environment_manage_result['result']:
+ with salt.utils.fopen(environment_tmp_file, 'r') as efp_:
env_str = efp_.read()
- salt.utils.safe_rm(enviroment_tmp_file)
+ salt.utils.safe_rm(environment_tmp_file)
try:
- env = _parse_enviroment(env_str)
+ env = _parse_environment(env_str)
except ValueError as ex:
ret['result'] = False
ret['comment'] = 'Error parsing template {0}'.format(ex)
else:
ret['result'] = False
- ret['comment'] = 'Can not open enviroment: {0}, {1}'.format(enviroment, comment_)
+ ret['comment'] = 'Can not open environment: {0}, {1}'.format(environment, comment_)
if ret['result'] is False:
return ret
@@ -645,9 +657,9 @@ def create_stack(name=None, template_file=None, enviroment=None,
return ret
-def update_stack(name=None, template_file=None, enviroment=None,
+def update_stack(name=None, template_file=None, environment=None,
parameters=None, poll=0, rollback=False, timeout=60,
- profile=None):
+ profile=None, enviroment=None):
'''
Update a stack (heat stack-template)
@@ -657,8 +669,8 @@ def update_stack(name=None, template_file=None, enviroment=None,
template_file
File of template
- enviroment
- File of enviroment
+ environment
+ File of environment
parameters
Parameter dict used to update the stack
@@ -681,11 +693,23 @@ def update_stack(name=None, template_file=None, enviroment=None,
salt '*' heat.update_stack name=mystack \\
template_file=salt://template.yaml \\
- enviroment=salt://enviroment.yaml \\
+ environment=salt://environment.yaml \\
parameters="{"image": "Debian 8", "flavor": "m1.small"}" \\
poll=5 rollback=False timeout=60 profile=openstack1
+ .. versionadded:: 2017.7.5,2018.3.1
+
+ The spelling mistake in parameter `enviroment` was corrected to `environment`.
+ The misspelled version is still supported for backward compatibility, but will
+ be removed in Salt Neon.
+
'''
+ if environment is None and enviroment is not None:
+ salt.utils.warn_until('Neon', (
+ "Please use the 'environment' parameter instead of the misspelled 'enviroment' "
+ "parameter which will be removed in Salt Neon."
+ ))
+ environment = enviroment
h_client = _auth(profile)
ret = {
'result': True,
@@ -759,12 +783,12 @@ def update_stack(name=None, template_file=None, enviroment=None,
ret['comment'] = 'Template not valid {0}'.format(ex)
return ret
env = {}
- if enviroment:
- enviroment_tmp_file = salt.utils.files.mkstemp()
+ if environment:
+ environment_tmp_file = salt.utils.files.mkstemp()
esfn, source_sum, comment_ = __salt__['file.get_managed'](
- name=enviroment_tmp_file,
+ name=environment_tmp_file,
template=None,
- source=enviroment,
+ source=environment,
source_hash=None,
user=None,
group=None,
@@ -775,11 +799,11 @@ def update_stack(name=None, template_file=None, enviroment=None,
skip_verify=False,
kwargs=None)
- enviroment_manage_result = __salt__['file.manage_file'](
- name=enviroment_tmp_file,
+ environment_manage_result = __salt__['file.manage_file'](
+ name=environment_tmp_file,
sfn=esfn,
ret=None,
- source=enviroment,
+ source=environment,
source_sum=source_sum,
user=None,
group=None,
@@ -791,18 +815,18 @@ def update_stack(name=None, template_file=None, enviroment=None,
show_changes=False,
contents=None,
dir_mode=None)
- if enviroment_manage_result['result']:
- with salt.utils.fopen(enviroment_tmp_file, 'r') as efp_:
+ if environment_manage_result['result']:
+ with salt.utils.fopen(environment_tmp_file, 'r') as efp_:
env_str = efp_.read()
- salt.utils.safe_rm(enviroment_tmp_file)
+ salt.utils.safe_rm(environment_tmp_file)
try:
- env = _parse_enviroment(env_str)
+ env = _parse_environment(env_str)
except ValueError as ex:
ret['result'] = False
ret['comment'] = 'Error parsing template {0}'.format(ex)
else:
ret['result'] = False
- ret['comment'] = 'Can not open enviroment: {0}, {1}'.format(enviroment, comment_)
+ ret['comment'] = 'Can not open environment: {0}, {1}'.format(environment, comment_)
if ret['result'] is False:
return ret
diff --git a/salt/modules/hipchat.py b/salt/modules/hipchat.py
index 744f2d4655..c3a84649c2 100644
--- a/salt/modules/hipchat.py
+++ b/salt/modules/hipchat.py
@@ -176,8 +176,8 @@ def _query(function,
if result.get('status', None) == salt.ext.six.moves.http_client.OK:
response = hipchat_functions.get(api_version).get(function).get('response')
return result.get('dict', {}).get(response, None)
- elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT:
- return False
+ elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT and api_version == 'v2':
+ return True
else:
log.debug(url)
log.debug(query_params)
diff --git a/salt/modules/inspectlib/collector.py b/salt/modules/inspectlib/collector.py
index b8ebe7e804..17c860880d 100644
--- a/salt/modules/inspectlib/collector.py
+++ b/salt/modules/inspectlib/collector.py
@@ -512,7 +512,7 @@ if __name__ == '__main__':
sys.exit(1)
os.setsid()
- os.umask(0)
+ os.umask(0o000) # pylint: disable=blacklisted-function
try:
pid = os.fork()
diff --git a/salt/modules/jboss7_cli.py b/salt/modules/jboss7_cli.py
index 89c7d203c4..ca7829ab4e 100644
--- a/salt/modules/jboss7_cli.py
+++ b/salt/modules/jboss7_cli.py
@@ -61,7 +61,7 @@ def run_command(jboss_config, command, fail_on_error=True):
command
Command to execute against jboss instance
fail_on_error (default=True)
- Is true, raise CommandExecutionException exception if execution fails.
+ If true, raise CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
CLI Example:
@@ -93,7 +93,7 @@ def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
An operation to execute against jboss instance
fail_on_error (default=True)
- Is true, raise CommandExecutionException exception if execution fails.
+ If true, raise CommandExecutionError exception if execution fails.
If false, 'success' property of the returned dictionary is set to False
retries:
Number of retries in case of "JBAS012144: Could not connect to remote" error.
diff --git a/salt/modules/junos.py b/salt/modules/junos.py
index 9e41482bb5..845e6ac970 100644
--- a/salt/modules/junos.py
+++ b/salt/modules/junos.py
@@ -144,7 +144,7 @@ def rpc(cmd=None, dest=None, format='xml', **kwargs):
The rpc to be executed. (default = None)
Optional
* dest:
- Destination file where the rpc ouput is stored. (default = None)
+ Destination file where the rpc output is stored. (default = None)
Note that the file will be stored on the proxy minion. To push the
files to the master use the salt's following execution module:
:py:func:`cp.push <salt.modules.cp.push>`
@@ -589,7 +589,7 @@ def ping(dest_ip=None, **kwargs):
def cli(command=None, format='text', **kwargs):
'''
Executes the CLI commands and returns the output in specified format. \
- (default is text) The ouput can also be stored in a file.
+ (default is text) The output can also be stored in a file.
Usage:
diff --git a/salt/modules/kubernetes.py b/salt/modules/kubernetes.py
index 12d965bb4d..bbee7adf5a 100644
--- a/salt/modules/kubernetes.py
+++ b/salt/modules/kubernetes.py
@@ -17,7 +17,7 @@ Module for handling kubernetes calls.
kubernetes.client-key-file: '/path/to/client.key'
-These settings can be also overrided by adding `api_url`, `api_user`,
+These settings can be also overridden by adding `api_url`, `api_user`,
`api_password`, `api_certificate_authority_file`, `api_client_certificate_file`
or `api_client_key_file` parameters when calling a function:
@@ -25,12 +25,19 @@ The data format for `kubernetes.*-data` values is the same as provided in `kubec
It's base64 encoded certificates/keys in one line.
For an item only one field should be provided. Either a `data` or a `file` entry.
-In case both are provided the `file` entry is prefered.
+In case both are provided the `file` entry is preferred.
.. code-block:: bash
salt '*' kubernetes.nodes api_url=http://k8s-api-server:port api_user=myuser api_password=pass
.. versionadded: 2017.7.0
+
+.. warning::
+
+ Configuration options will change in Flourine. All options above will be replaced by:
+
+ - kubernetes.kubeconfig or kubernetes.kubeconfig-data
+ - kubernetes.context
'''
# Import Python Futures
@@ -1461,7 +1468,7 @@ def __dict_to_deployment_spec(spec):
'''
Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.
'''
- spec_obj = AppsV1beta1DeploymentSpec()
+ spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', ''))
for key, value in iteritems(spec):
if hasattr(spec_obj, key):
setattr(spec_obj, key, value)
diff --git a/salt/modules/logmod.py b/salt/modules/logmod.py
index aa6fcbf165..2c8ab7b512 100644
--- a/salt/modules/logmod.py
+++ b/salt/modules/logmod.py
@@ -18,7 +18,7 @@ CLI Example:
.. code-block:: bash
- salt '*' log.error 'Please dont do that, this module is not for CLI use!'
+ salt '*' log.error "Please don't do that, this module is not for CLI use!"
'''
from __future__ import absolute_import
diff --git a/salt/modules/lxc.py b/salt/modules/lxc.py
index 0e369e5cc6..4a3a5ad9dc 100644
--- a/salt/modules/lxc.py
+++ b/salt/modules/lxc.py
@@ -868,7 +868,7 @@ def _network_conf(conf_tuples=None, **kwargs):
for row in val:
ret.append(salt.utils.odict.OrderedDict([(row, val[row])]))
# on old versions of lxc, still support the gateway auto mode
- # if we didnt explicitly say no to
+ # if we didn't explicitly say no to
# (lxc.network.ipv4.gateway: auto)
if _LooseVersion(version()) <= '1.0.7' and \
True not in ['lxc.network.ipv4.gateway' in a for a in ret] and \
diff --git a/salt/modules/mac_ports.py b/salt/modules/mac_ports.py
index 8a842cdb8e..d3c75769e5 100644
--- a/salt/modules/mac_ports.py
+++ b/salt/modules/mac_ports.py
@@ -306,13 +306,13 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
if pkgs is None:
version_num = kwargs.get('version')
variant_spec = kwargs.get('variant')
- spec = None
+ spec = {}
if version_num:
- spec = (spec or '') + '@' + version_num
+ spec['version'] = version_num
if variant_spec:
- spec = (spec or '') + variant_spec
+ spec['variant'] = variant_spec
pkg_params = {name: spec}
@@ -321,7 +321,14 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
formulas_array = []
for pname, pparams in six.iteritems(pkg_params):
- formulas_array.append(pname + (pparams or ''))
+ formulas_array.append(pname)
+
+ if pparams:
+ if 'version' in pparams:
+ formulas_array.append('@' + pparams['version'])
+
+ if 'variant' in pparams:
+ formulas_array.append(pparams['variant'])
old = list_pkgs()
cmd = ['port', 'install']
diff --git a/salt/modules/mssql.py b/salt/modules/mssql.py
index 1c9d8d5929..841e85aa53 100644
--- a/salt/modules/mssql.py
+++ b/salt/modules/mssql.py
@@ -102,7 +102,7 @@ def version(**kwargs):
def db_list(**kwargs):
'''
- Return the databse list created on a MS SQL server.
+ Return the database list created on a MS SQL server.
CLI Example:
diff --git a/salt/modules/napalm_acl.py b/salt/modules/napalm_acl.py
index 9b0b60a8ed..0a37dd7c50 100644
--- a/salt/modules/napalm_acl.py
+++ b/salt/modules/napalm_acl.py
@@ -198,7 +198,7 @@ def load_term_config(filter_name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
@@ -543,7 +543,7 @@ def load_filter_config(filter_name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
@@ -743,7 +743,7 @@ def load_policy_config(filters=None,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
diff --git a/salt/modules/napalm_network.py b/salt/modules/napalm_network.py
index 4821edff99..81af4cbf03 100644
--- a/salt/modules/napalm_network.py
+++ b/salt/modules/napalm_network.py
@@ -99,7 +99,7 @@ def _filter_dict(input_dict, search_key, search_value):
def _explicit_close(napalm_device):
'''
- Will explicitely close the config session with the network device,
+ Will explicitly close the config session with the network device,
when running in a now-always-alive proxy minion or regular minion.
This helper must be used in configuration-related functions,
as the session is preserved and not closed before making any changes.
@@ -137,7 +137,7 @@ def _config_logic(napalm_device,
# then the decorator will make sure that
# if not proxy (when the connection is always alive)
# and the `inherit_napalm_device` is set,
- # `napalm_device` will be overriden.
+ # `napalm_device` will be overridden.
# See `salt.utils.napalm.proxy_napalm_wrap` decorator.
loaded_result['already_configured'] = False
@@ -853,10 +853,10 @@ def config(source=None, **kwargs): # pylint: disable=unused-argument
- running (string): Representation of the native running configuration.
- candidate (string): Representation of the native candidate configuration.
- If the device doesnt differentiate between running and startup
+ If the device doesn't differentiate between running and startup
configuration this will an empty string.
- startup (string): Representation of the native startup configuration.
- If the device doesnt differentiate between running and startup
+ If the device doesn't differentiate between running and startup
configuration this will an empty string.
CLI Example:
@@ -1366,7 +1366,7 @@ def load_template(template_name,
# use the custom template path
saltenv = template_path if not salt_render else 'base'
elif salt_render and not saltenv:
- # if saltenv not overrided and path specified as salt:// or http:// etc.
+ # if saltenv not overridden and path specified as salt:// or http:// etc.
# will use the default environment, from the base
saltenv = template_path if template_path else 'base'
if not saltenv:
@@ -1454,7 +1454,7 @@ def load_template(template_name,
# after running the other features:
# compare_config, discard / commit
# which have to be over the same session
- # so we'll set the CLOSE global explicitely as False
+ # so we'll set the CLOSE global explicitly as False
napalm_device['CLOSE'] = False # pylint: disable=undefined-variable
_loaded = salt.utils.napalm.call(
napalm_device, # pylint: disable=undefined-variable
diff --git a/salt/modules/napalm_route.py b/salt/modules/napalm_route.py
index 1a81d53e9d..cffdb5bb73 100644
--- a/salt/modules/napalm_route.py
+++ b/salt/modules/napalm_route.py
@@ -67,7 +67,7 @@ def show(destination, protocol=None, **kwargs): # pylint: disable=unused-argume
In case the destination prefix is too short,
there may be too many routes matched.
Therefore in cases of devices having a very high number of routes
- it may be necessary to adjust the prefix lenght and request
+ it may be necessary to adjust the prefix length and request
using a longer prefix.
destination
diff --git a/salt/modules/napalm_yang_mod.py b/salt/modules/napalm_yang_mod.py
index f4dd9f6358..b655df47b2 100644
--- a/salt/modules/napalm_yang_mod.py
+++ b/salt/modules/napalm_yang_mod.py
@@ -435,7 +435,7 @@ def load_config(data, models, **kwargs):
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
diff --git a/salt/modules/npm.py b/salt/modules/npm.py
index e794f8fdd5..b2c035f493 100644
--- a/salt/modules/npm.py
+++ b/salt/modules/npm.py
@@ -157,7 +157,7 @@ def install(pkg=None,
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ' '.join(cmd)
result = __salt__['cmd.run_all'](cmd, python_shell=True, cwd=dir, runas=runas, env=env)
@@ -236,7 +236,7 @@ def uninstall(pkg, dir=None, runas=None, env=None):
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ['npm', 'uninstall', '"{0}"'.format(pkg)]
if not dir:
@@ -295,7 +295,7 @@ def list_(pkg=None, dir=None, runas=None, env=None, depth=None):
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ['npm', 'list', '--json', '--silent']
@@ -358,7 +358,7 @@ def cache_clean(path=None, runas=None, env=None, force=False):
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ['npm', 'cache', 'clean']
if path:
@@ -405,7 +405,7 @@ def cache_list(path=None, runas=None, env=None):
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ['npm', 'cache', 'ls']
if path:
@@ -445,7 +445,7 @@ def cache_path(runas=None, env=None):
if runas:
uid = salt.utils.get_uid(runas)
if uid:
- env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})
+ env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = 'npm config get cache'
diff --git a/salt/modules/pdbedit.py b/salt/modules/pdbedit.py
index d8cb4a75a6..d1be29b632 100644
--- a/salt/modules/pdbedit.py
+++ b/salt/modules/pdbedit.py
@@ -266,7 +266,7 @@ def modify(
specify user account control properties
.. note::
- Only the follwing can be set:
+ Only the following can be set:
- N: No password required
- D: Account disabled
- H: Home directory required
diff --git a/salt/modules/pillar.py b/salt/modules/pillar.py
index 7293f39244..a70ab7db72 100644
--- a/salt/modules/pillar.py
+++ b/salt/modules/pillar.py
@@ -39,8 +39,8 @@ def get(key,
Attempt to retrieve the named value from pillar, if the named value is not
available return the passed default. The default return is an empty string
- except __opts__['pillar_raise_on_missing'] is set to True, in which case a
- KeyError will be raised.
+ except ``__opts__['pillar_raise_on_missing']`` is set to True, in which
+ case a ``KeyError`` exception will be raised.
If the merge parameter is set to ``True``, the default will be recursively
merged into the returned pillar data.
@@ -50,11 +50,18 @@ def get(key,
{'pkg': {'apache': 'httpd'}}
- To retrieve the value associated with the apache key in the pkg dict this
- key can be passed::
+ To retrieve the value associated with the ``apache`` key in the ``pkg``
+ dict this key can be passed as::
pkg:apache
+ key
+ The pillar key to get value from
+
+ default
+ If specified, return this value in case when named pillar value does
+ not exist.
+
merge : ``False``
If ``True``, the retrieved values will be merged into the passed
default. When the default and the retrieved value are both
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
index d29bb1515e..4175175a05 100644
--- a/salt/modules/pip.py
+++ b/salt/modules/pip.py
@@ -335,6 +335,20 @@ def _process_requirements(requirements, cmd, cwd, saltenv, user):
return cleanup_requirements, None
+def _format_env_vars(env_vars):
+ ret = {}
+ if env_vars:
+ if isinstance(env_vars, dict):
+ for key, val in six.iteritems(env_vars):
+ if not isinstance(val, six.string_types):
+ val = str(val)
+ ret[key] = val
+ else:
+ raise CommandExecutionError(
+ 'env_vars {0} is not a dictionary'.format(env_vars))
+ return ret
+
+
def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
requirements=None,
bin_env=None,
@@ -804,14 +818,7 @@ def install(pkgs=None, # pylint: disable=R0912,R0913,R0914
cmd_kwargs = dict(saltenv=saltenv, use_vt=use_vt, runas=user)
if env_vars:
- if isinstance(env_vars, dict):
- for key, val in six.iteritems(env_vars):
- if not isinstance(val, six.string_types):
- val = str(val)
- cmd_kwargs.setdefault('env', {})[key] = val
- else:
- raise CommandExecutionError(
- 'env_vars {0} is not a dictionary'.format(env_vars))
+ cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
try:
if cwd:
@@ -966,7 +973,8 @@ def uninstall(pkgs=None,
def freeze(bin_env=None,
user=None,
cwd=None,
- use_vt=False):
+ use_vt=False,
+ env_vars=None):
'''
Return a list of installed packages either globally or in the specified
virtualenv
@@ -1019,6 +1027,8 @@ def freeze(bin_env=None,
cmd_kwargs = dict(runas=user, cwd=cwd, use_vt=use_vt, python_shell=False)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
+ if env_vars:
+ cmd_kwargs.setdefault('env', {}).update(_format_env_vars(env_vars))
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
if result['retcode'] > 0:
@@ -1030,7 +1040,8 @@ def freeze(bin_env=None,
def list_(prefix=None,
bin_env=None,
user=None,
- cwd=None):
+ cwd=None,
+ env_vars=None):
'''
Filter list of installed apps from ``freeze`` and check to see if
``prefix`` exists in the list of packages installed.
@@ -1059,7 +1070,7 @@ def list_(prefix=None,
if prefix is None or 'pip'.startswith(prefix):
packages['pip'] = version(bin_env)
- for line in freeze(bin_env=bin_env, user=user, cwd=cwd):
+ for line in freeze(bin_env=bin_env, user=user, cwd=cwd, env_vars=env_vars):
if line.startswith('-f') or line.startswith('#'):
# ignore -f line as it contains --find-links directory
# ignore comment lines
diff --git a/salt/modules/reg.py b/salt/modules/reg.py
index 1b6b70d4f4..7bb22638a6 100644
--- a/salt/modules/reg.py
+++ b/salt/modules/reg.py
@@ -29,8 +29,8 @@ Values/Entries are name/data pairs. There can be many values in a key. The
# When production windows installer is using Python 3, Python 2 code can be removed
# Import _future_ python libs first & before any other code
-from __future__ import absolute_import
-from __future__ import unicode_literals
+from __future__ import absolute_import, unicode_literals
+
# Import python libs
import sys
import logging
@@ -38,7 +38,6 @@ from salt.ext.six.moves import range # pylint: disable=W0622,import-error
# Import third party libs
try:
- import win32gui
import win32api
import win32con
import pywintypes
@@ -48,6 +47,7 @@ except ImportError:
# Import salt libs
import salt.utils
+import salt.utils.win_functions
from salt.exceptions import CommandExecutionError
PY2 = sys.version_info[0] == 2
@@ -68,7 +68,7 @@ def __virtual__():
if not HAS_WINDOWS_MODULES:
return (False, 'reg execution module failed to load: '
'One of the following libraries did not load: '
- + 'win32gui, win32con, win32api')
+ 'win32con, win32api, pywintypes')
return __virtualname__
@@ -95,9 +95,13 @@ class Registry(object): # pylint: disable=R0903
'''
def __init__(self):
self.hkeys = {
+ 'HKEY_CURRENT_CONFIG': win32con.HKEY_CURRENT_CONFIG,
+ 'HKEY_CLASSES_ROOT': win32con.HKEY_CLASSES_ROOT,
'HKEY_CURRENT_USER': win32con.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': win32con.HKEY_LOCAL_MACHINE,
'HKEY_USERS': win32con.HKEY_USERS,
+ 'HKCC': win32con.HKEY_CURRENT_CONFIG,
+ 'HKCR': win32con.HKEY_CLASSES_ROOT,
'HKCU': win32con.HKEY_CURRENT_USER,
'HKLM': win32con.HKEY_LOCAL_MACHINE,
'HKU': win32con.HKEY_USERS,
@@ -130,9 +134,11 @@ class Registry(object): # pylint: disable=R0903
# delete_key_recursive uses this to check the subkey contains enough \
# as we do not want to remove all or most of the registry
self.subkey_slash_check = {
- win32con.HKEY_CURRENT_USER: 0,
- win32con.HKEY_LOCAL_MACHINE: 1,
- win32con.HKEY_USERS: 1
+ win32con.HKEY_CURRENT_USER: 0,
+ win32con.HKEY_LOCAL_MACHINE: 1,
+ win32con.HKEY_USERS: 1,
+ win32con.HKEY_CURRENT_CONFIG: 1,
+ win32con.HKEY_CLASSES_ROOT: 1
}
self.registry_32 = {
@@ -149,9 +155,10 @@ class Registry(object): # pylint: disable=R0903
raise CommandExecutionError(msg.format(k, hkeys))
-def _key_exists(hive, key, use_32bit_registry=False):
+def key_exists(hive, key, use_32bit_registry=False):
'''
- Check that the key is found in the registry
+ Check that the key is found in the registry. This refers to keys and not
+ value/data pairs.
:param str hive: The hive to connect to.
:param str key: The key to check
@@ -173,6 +180,10 @@ def _key_exists(hive, key, use_32bit_registry=False):
return True
except WindowsError: # pylint: disable=E0602
return False
+ except pywintypes.error as exc:
+ if exc.winerror == 2:
+ return False
+ raise
def broadcast_change():
@@ -187,11 +198,7 @@ def broadcast_change():
salt '*' reg.broadcast_change
'''
- # https://msdn.microsoft.com/en-us/library/windows/desktop/ms644952(v=vs.85).aspx
- _, res = win32gui.SendMessageTimeout(
- win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 0,
- win32con.SMTO_ABORTIFHUNG, 5000)
- return not bool(res)
+ return salt.utils.win_functions.broadcast_setting_change('Environment')
def list_keys(hive, key=None, use_32bit_registry=False):
@@ -203,6 +210,8 @@ def list_keys(hive, key=None, use_32bit_registry=False):
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the keys under the hive will be returned.
@@ -257,6 +266,8 @@ def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param str key: The key (looks like a path) to the value name. If a key is
not passed, the values under the hive will be returned.
@@ -325,6 +336,8 @@ def read_value(hive, key, vname=None, use_32bit_registry=False):
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param str key: The key (looks like a path) to the value name.
@@ -422,6 +435,8 @@ def set_value(hive,
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param str key: The key (looks like a path) to the value name.
@@ -562,6 +577,8 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param key: The key to remove (looks like a path)
@@ -591,7 +608,7 @@ def delete_key_recursive(hive, key, use_32bit_registry=False):
key_path = local_key
access_mask = registry.registry_32[use_32bit_registry] | win32con.KEY_ALL_ACCESS
- if not _key_exists(local_hive, local_key, use_32bit_registry):
+ if not key_exists(local_hive, local_key, use_32bit_registry):
return False
if (len(key) > 1) and (key.count('\\', 1) < registry.subkey_slash_check[hkey]):
@@ -656,6 +673,8 @@ def delete_value(hive, key, vname=None, use_32bit_registry=False):
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
+ - HKEY_CLASSES_ROOT or HKCR
+ - HKEY_CURRENT_CONFIG or HKCC
:param str key: The key (looks like a path) to the value name.
diff --git a/salt/modules/rh_service.py b/salt/modules/rh_service.py
index c7ce0d51f7..e4f6e57bc1 100644
--- a/salt/modules/rh_service.py
+++ b/salt/modules/rh_service.py
@@ -97,6 +97,15 @@ def __virtual__():
'RedHat-based distros >= version 7 use systemd, will not '
'load rh_service.py as virtual \'service\''
)
+ if __grains__['os'] == 'Amazon':
+ if int(osrelease_major) in (2016, 2017):
+ return __virtualname__
+ else:
+ return (
+ False,
+ 'Amazon Linux >= version 2 uses systemd. Will not '
+ 'load rh_service.py as virtual \'service\''
+ )
return __virtualname__
return (False, 'Cannot load rh_service module: OS not in {0}'.format(enable))
diff --git a/salt/modules/selinux.py b/salt/modules/selinux.py
index aecadd7a14..e65913a9ea 100644
--- a/salt/modules/selinux.py
+++ b/salt/modules/selinux.py
@@ -453,7 +453,7 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
'''
if filetype:
_validate_filetype(filetype)
- re_spacer = '[ ]{2,}'
+ re_spacer = '[ ]+'
cmd_kwargs = {'spacer': re_spacer,
'filespec': re.escape(name),
'sel_user': sel_user or '[^:]+',
@@ -466,11 +466,14 @@ def fcontext_get_policy(name, filetype=None, sel_type=None, sel_user=None, sel_l
current_entry_text = __salt__['cmd.shell'](cmd, ignore_retcode=True)
if current_entry_text == '':
return None
- ret = {}
- current_entry_list = re.split(re_spacer, current_entry_text)
- ret['filespec'] = current_entry_list[0]
- ret['filetype'] = current_entry_list[1]
- ret.update(_context_string_to_dict(current_entry_list[2]))
+
+ parts = re.match(r'^({filespec}) +([a-z ]+) (.*)$'.format(**{'filespec': re.escape(name)}), current_entry_text)
+ ret = {
+ 'filespec': parts.group(1).strip(),
+ 'filetype': parts.group(2).strip(),
+ }
+ ret.update(_context_string_to_dict(parts.group(3).strip()))
+
return ret
@@ -514,7 +517,9 @@ def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, se
if action not in ['add', 'delete']:
raise SaltInvocationError('Actions supported are "add" and "delete", not "{0}".'.format(action))
cmd = 'semanage fcontext --{0}'.format(action)
- if filetype is not None:
+ # "semanage --ftype a" isn't valid on Centos 6,
+ # don't pass --ftype since "a" is the default filetype.
+ if filetype is not None and filetype != 'a':
_validate_filetype(filetype)
cmd += ' --ftype {0}'.format(filetype)
if sel_type is not None:
@@ -556,7 +561,7 @@ def fcontext_apply_policy(name, recursive=False):
.. versionadded:: 2017.7.0
Applies SElinux policies to filespec using `restorecon [-R]
- filespec`. Returns dict with changes if succesful, the output of
+ filespec`. Returns dict with changes if successful, the output of
the restorecon command otherwise.
name
diff --git a/salt/modules/sensehat.py b/salt/modules/sensehat.py
index 12da3b0742..132663728b 100644
--- a/salt/modules/sensehat.py
+++ b/salt/modules/sensehat.py
@@ -140,7 +140,7 @@ def show_message(message, msg_type=None,
message
The message to display
msg_type
- The type of the message. Changes the appearence of the message.
+ The type of the message. Changes the appearance of the message.
Available types are::
diff --git a/salt/modules/smartos_vmadm.py b/salt/modules/smartos_vmadm.py
index 90f0d83599..1bf857912f 100644
--- a/salt/modules/smartos_vmadm.py
+++ b/salt/modules/smartos_vmadm.py
@@ -165,7 +165,7 @@ def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None):
ret['Error'] = res['stderr']
return ret
else:
- # cleanup json file (only when succesful to help troubleshooting)
+ # cleanup json file (only when successful to help troubleshooting)
salt.utils.safe_rm(vmadm_json_file)
# return uuid
diff --git a/salt/modules/solrcloud.py b/salt/modules/solrcloud.py
index ab95f02af4..5a55353aa1 100644
--- a/salt/modules/solrcloud.py
+++ b/salt/modules/solrcloud.py
@@ -195,7 +195,7 @@ def cluster_status(**kwargs):
def alias_exists(alias_name, **kwargs):
'''
- Check alias existance
+ Check alias existence
Additional parameters (kwargs) may be passed, they will be proxied to http.query
diff --git a/salt/modules/state.py b/salt/modules/state.py
index 5232ded592..cd5a7b84ba 100644
--- a/salt/modules/state.py
+++ b/salt/modules/state.py
@@ -29,6 +29,7 @@ import salt.config
import salt.payload
import salt.state
import salt.utils
+import salt.utils.files
import salt.utils.jid
import salt.utils.url
from salt.exceptions import CommandExecutionError, SaltInvocationError
@@ -632,17 +633,16 @@ def request(mods=None,
'kwargs': kwargs
}
})
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ msg = 'Unable to write state request file {0}. Check permission.'
+ log.error(msg.format(notify_path))
return ret
@@ -696,17 +696,16 @@ def clear_request(name=None):
req.pop(name)
else:
return False
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
- with salt.utils.fopen(notify_path, 'w+b') as fp_:
- serial.dump(req, fp_)
- except (IOError, OSError):
- msg = 'Unable to write state request file {0}. Check permission.'
- log.error(msg.format(notify_path))
- os.umask(cumask)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
+ with salt.utils.fopen(notify_path, 'w+b') as fp_:
+ serial.dump(req, fp_)
+ except (IOError, OSError):
+ msg = 'Unable to write state request file {0}. Check permission.'
+ log.error(msg.format(notify_path))
return True
@@ -777,8 +776,8 @@ def highstate(test=None, queue=False, **kwargs):
.. code-block:: bash
- salt '*' state.higstate exclude=bar,baz
- salt '*' state.higstate exclude=foo*
+ salt '*' state.highstate exclude=bar,baz
+ salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv
@@ -1111,13 +1110,12 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
return ['Pillar failed to render with the following messages:'] + errors
orchestration_jid = kwargs.get('orchestration_jid')
- umask = os.umask(0o77)
- if kwargs.get('cache'):
- if os.path.isfile(cfn):
- with salt.utils.fopen(cfn, 'rb') as fp_:
- high_ = serial.load(fp_)
- return st_.state.call_high(high_, orchestration_jid)
- os.umask(umask)
+ with salt.utils.files.set_umask(0o077):
+ if kwargs.get('cache'):
+ if os.path.isfile(cfn):
+ with salt.utils.fopen(cfn, 'rb') as fp_:
+ high_ = serial.load(fp_)
+ return st_.state.call_high(high_, orchestration_jid)
if isinstance(mods, six.string_types):
mods = mods.split(',')
@@ -1144,32 +1142,32 @@ def sls(mods, test=None, exclude=None, queue=False, **kwargs):
if __salt__['config.option']('state_data', '') == 'terse' or kwargs.get('terse'):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__['cachedir'], 'sls.p')
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
- with salt.utils.fopen(cache_file, 'w+b') as fp_:
- serial.dump(ret, fp_)
- except (IOError, OSError):
- msg = 'Unable to write to SLS cache file {0}. Check permission.'
- log.error(msg.format(cache_file))
- _set_retcode(ret, high_)
- # Work around Windows multiprocessing bug, set __opts__['test'] back to
- # value from before this function was run.
- __opts__['test'] = orig_test
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ __salt__['cmd.run'](['attrib', '-R', cache_file], python_shell=False)
+ with salt.utils.fopen(cache_file, 'w+b') as fp_:
+ serial.dump(ret, fp_)
+ except (IOError, OSError):
+ msg = 'Unable to write to SLS cache file {0}. Check permission.'
+ log.error(msg.format(cache_file))
+ _set_retcode(ret, high_)
+ # Work around Windows multiprocessing bug, set __opts__['test'] back to
+ # value from before this function was run.
+ __opts__['test'] = orig_test
+
+ try:
+ with salt.utils.fopen(cfn, 'w+b') as fp_:
+ try:
+ serial.dump(high_, fp_)
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+ except (IOError, OSError):
+ msg = 'Unable to write to highstate cache file {0}. Do you have permissions?'
+ log.error(msg.format(cfn))
- try:
- with salt.utils.fopen(cfn, 'w+b') as fp_:
- try:
- serial.dump(high_, fp_)
- except TypeError:
- # Can't serialize pydsl
- pass
- except (IOError, OSError):
- msg = 'Unable to write to highstate cache file {0}. Do you have permissions?'
- log.error(msg.format(cfn))
- os.umask(cumask)
_snapper_post(opts, kwargs.get('__pub_jid', 'called localy'), snapper_pre)
return ret
diff --git a/salt/modules/upstart.py b/salt/modules/upstart.py
index 57a798ceee..842faf5642 100644
--- a/salt/modules/upstart.py
+++ b/salt/modules/upstart.py
@@ -97,7 +97,7 @@ def _find_utmp():
result[os.stat(utmp).st_mtime] = utmp
except Exception:
pass
- if result > 0:
+ if len(result):
return result[sorted(result).pop()]
else:
return False
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 15c452289a..d16740c968 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -638,8 +638,7 @@ def init(name,
else:
log.debug('Copying {0} to {1}'.format(sfn, img_dest))
salt.utils.files.copyfile(sfn, img_dest)
- mask = os.umask(0)
- os.umask(mask)
+ mask = salt.utils.files.get_umask()
# Apply umask and remove exec bit
mode = (0o0777 ^ mask) & 0o0666
os.chmod(img_dest, mode)
diff --git a/salt/modules/win_dsc.py b/salt/modules/win_dsc.py
index 90aef68630..eec2eed952 100644
--- a/salt/modules/win_dsc.py
+++ b/salt/modules/win_dsc.py
@@ -39,33 +39,35 @@ def __virtual__():
'''
# Verify Windows
if not salt.utils.is_windows():
- log.debug('Module DSC: Only available on Windows systems')
- return False, 'Module DSC: Only available on Windows systems'
+ log.debug('DSC: Only available on Windows systems')
+ return False, 'DSC: Only available on Windows systems'
# Verify PowerShell
powershell_info = __salt__['cmd.shell_info']('powershell')
if not powershell_info['installed']:
- log.debug('Module DSC: Requires PowerShell')
- return False, 'Module DSC: Requires PowerShell'
+ log.debug('DSC: Requires PowerShell')
+ return False, 'DSC: Requires PowerShell'
# Verify PowerShell 5.0 or greater
if salt.utils.compare_versions(powershell_info['version'], '<', '5.0'):
- log.debug('Module DSC: Requires PowerShell 5 or later')
- return False, 'Module DSC: Requires PowerShell 5 or later'
+ log.debug('DSC: Requires PowerShell 5 or later')
+ return False, 'DSC: Requires PowerShell 5 or later'
return __virtualname__
-def _pshell(cmd, cwd=None, json_depth=2):
+def _pshell(cmd, cwd=None, json_depth=2, ignore_retcode=False):
'''
Execute the desired PowerShell command and ensure that it returns data
- in json format and load that into python
+ in json format and load that into python. Either return a dict or raise a
+ CommandExecutionError.
'''
if 'convertto-json' not in cmd.lower():
cmd = '{0} | ConvertTo-Json -Depth {1}'.format(cmd, json_depth)
log.debug('DSC: {0}'.format(cmd))
results = __salt__['cmd.run_all'](
- cmd, shell='powershell', cwd=cwd, python_shell=True)
+ cmd, shell='powershell', cwd=cwd, python_shell=True,
+ ignore_retcode=ignore_retcode)
if 'pid' in results:
del results['pid']
@@ -75,12 +77,17 @@ def _pshell(cmd, cwd=None, json_depth=2):
raise CommandExecutionError(
'Issue executing PowerShell {0}'.format(cmd), info=results)
+ # Sometimes Powershell returns an empty string, which isn't valid JSON
+ if results['stdout'] == '':
+ results['stdout'] = '{}'
+
try:
ret = json.loads(results['stdout'], strict=False)
except ValueError:
raise CommandExecutionError(
'No JSON results from PowerShell', info=results)
+ log.info('DSC: Returning "{0}"'.format(ret))
return ret
@@ -98,8 +105,8 @@ def run_config(path,
script, the desired configuration can be applied by passing the name in the
``config`` option.
- This command would be the equivalent of running ``dsc.compile_config`` and
- ``dsc.apply_config`` separately.
+ This command would be the equivalent of running ``dsc.compile_config``
+ followed by ``dsc.apply_config``.
Args:
@@ -141,7 +148,7 @@ def run_config(path,
Default is 'base'
Returns:
- bool: True if successfully compiled and applied, False if not
+ bool: True if successfully compiled and applied, otherwise False
CLI Example:
@@ -149,13 +156,13 @@ def run_config(path,
.. code-block:: bash
- salt '*' dsc.compile_apply_config C:\\DSC\\WebsiteConfig.ps1
+ salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1
To cache a config script to the system from the master and compile it:
.. code-block:: bash
- salt '*' dsc.compile_apply_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
+ salt '*' dsc.run_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
'''
ret = compile_config(path=path,
source=source,
@@ -240,31 +247,31 @@ def compile_config(path,
salt '*' dsc.compile_config C:\\DSC\\WebsiteConfig.ps1 salt://dsc/configs/WebsiteConfig.ps1
'''
if source:
- log.info('Caching {0}'.format(source))
+ log.info('DSC: Caching {0}'.format(source))
cached_files = __salt__['cp.get_file'](path=source,
dest=path,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(source)
- log.error(error)
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
if config_data_source:
- log.info('Caching {0}'.format(config_data_source))
+ log.info('DSC: Caching {0}'.format(config_data_source))
cached_files = __salt__['cp.get_file'](path=config_data_source,
dest=config_data,
saltenv=salt_env,
makedirs=True)
if not cached_files:
error = 'Failed to cache {0}'.format(config_data_source)
- log.error(error)
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
# Make sure the path exists
if not os.path.exists(path):
- error = '"{0} not found.'.format(path)
- log.error(error)
+ error = '"{0}" not found'.format(path)
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
if config_name is None:
@@ -290,10 +297,11 @@ def compile_config(path,
if ret:
# Script compiled, return results
if ret.get('Exists'):
- log.info('DSC Compile Config: {0}'.format(ret))
+ log.info('DSC: Compile Config: {0}'.format(ret))
return ret
- # Run the script and run the compile command
+ # If you get to this point, the script did not contain a compile command
+ # dot source the script to compile the state and generate the mof file
cmd = ['.', path]
if script_parameters:
cmd.append(script_parameters)
@@ -311,12 +319,12 @@ def compile_config(path,
if ret:
# Script compiled, return results
if ret.get('Exists'):
- log.info('DSC Compile Config: {0}'.format(ret))
+ log.info('DSC: Compile Config: {0}'.format(ret))
return ret
error = 'Failed to compile config: {0}'.format(path)
error += '\nReturned: {0}'.format(ret)
- log.error('DSC Compile Config: {0}'.format(error))
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
@@ -348,13 +356,13 @@ def apply_config(path, source=None, salt_env='base'):
.. code-block:: bash
- salt '*' dsc.run_config C:\\DSC\\WebSiteConfiguration
+ salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration
To cache a configuration from the master and apply it:
.. code-block:: bash
- salt '*' dsc.run_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration
+ salt '*' dsc.apply_config C:\\DSC\\WebSiteConfiguration salt://dsc/configs/WebSiteConfiguration
'''
# If you're getting an error along the lines of "The client cannot connect
@@ -368,38 +376,35 @@ def apply_config(path, source=None, salt_env='base'):
if path_name.lower() != source_name.lower():
# Append the Source name to the Path
path = '{0}\\{1}'.format(path, source_name)
- log.debug('{0} appended to the path.'.format(source_name))
+ log.debug('DSC: {0} appended to the path'.format(source_name))
# Destination path minus the basename
dest_path = os.path.dirname(os.path.normpath(path))
- log.info('Caching {0}'.format(source))
+ log.info('DSC: Caching {0}'.format(source))
cached_files = __salt__['cp.get_dir'](source, dest_path, salt_env)
if not cached_files:
error = 'Failed to copy {0}'.format(source)
- log.error(error)
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
else:
config = os.path.dirname(cached_files[0])
# Make sure the path exists
if not os.path.exists(config):
- error = '{0} not found.'.format(config)
- log.error(error)
+ error = '{0} not found'.format(config)
+ log.error('DSC: {0}'.format(error))
raise CommandExecutionError(error)
# Run the DSC Configuration
# Putting quotes around the parameter protects against command injection
cmd = 'Start-DscConfiguration -Path "{0}" -Wait -Force'.format(config)
- ret = _pshell(cmd)
-
- if ret is False:
- raise CommandExecutionError('Apply Config Failed: {0}'.format(path))
+ _pshell(cmd)
cmd = '$status = Get-DscConfigurationStatus; $status.Status'
ret = _pshell(cmd)
- log.info('DSC Apply Config: {0}'.format(ret))
+ log.info('DSC: Apply Config: {0}'.format(ret))
- return ret == 'Success'
+ return ret == 'Success' or ret == {}
def get_config():
@@ -409,15 +414,153 @@ def get_config():
Returns:
dict: A dictionary representing the DSC Configuration on the machine
+ Raises:
+ CommandExecutionError: On failure
+
CLI Example:
.. code-block:: bash
salt '*' dsc.get_config
'''
- cmd = 'Get-DscConfiguration | ' \
- 'Select-Object * -ExcludeProperty Cim*'
- return _pshell(cmd)
+ cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*'
+
+ try:
+ raw_config = _pshell(cmd, ignore_retcode=True)
+ except CommandExecutionError as exc:
+ if 'Current configuration does not exist' in exc.info['stderr']:
+ raise CommandExecutionError('Not Configured')
+ raise
+
+ config = dict()
+ if raw_config:
+ # Get DSC Configuration Name
+ if 'ConfigurationName' in raw_config[0]:
+ config[raw_config[0]['ConfigurationName']] = {}
+ # Add all DSC Configurations by ResourceId
+ for item in raw_config:
+ config[item['ConfigurationName']][item['ResourceId']] = {}
+ for key in item:
+ if key not in ['ConfigurationName', 'ResourceId']:
+ config[item['ConfigurationName']][item['ResourceId']][key] = item[key]
+
+ return config
+
+
+def remove_config(reset=False):
+ '''
+ Remove the current DSC Configuration. Removes current, pending, and previous
+ dsc configurations.
+
+ .. versionadded:: 2017.7.5
+
+ Args:
+ reset (bool):
+ Attempts to reset the DSC configuration by removing the following
+ from ``C:\\Windows\\System32\\Configuration``:
+
+ - File: DSCStatusHistory.mof
+ - File: DSCEngineCache.mof
+ - Dir: ConfigurationStatus
+
+ Default is False
+
+ .. warning::
+ ``remove_config`` may fail to reset the DSC environment if any
+ of the files in the ``ConfigurationStatus`` directory are in use.
+ If you wait a few minutes and run again, it may complete successfully.
+
+ Returns:
+ bool: True if successful
+
+ Raises:
+ CommandExecutionError: On failure
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt '*' dsc.remove_config True
+ '''
+ # Stopping a running config (not likely to occur)
+ cmd = 'Stop-DscConfiguration'
+ log.info('DSC: Stopping Running Configuration')
+ try:
+ _pshell(cmd)
+ except CommandExecutionError as exc:
+ if exc.info['retcode'] != 0:
+ raise CommandExecutionError('Failed to Stop DSC Configuration',
+ info=exc.info)
+ log.info('DSC: {0}'.format(exc.info['stdout']))
+
+ # Remove configuration files
+ cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous ' \
+ '-Force'
+ log.info('DSC: Removing Configuration')
+ try:
+ _pshell(cmd)
+ except CommandExecutionError as exc:
+ if exc.info['retcode'] != 0:
+ raise CommandExecutionError('Failed to remove DSC Configuration',
+ info=exc.info)
+ log.info('DSC: {0}'.format(exc.info['stdout']))
+
+ if not reset:
+ return True
+
+ def _remove_fs_obj(path):
+ if os.path.exists(path):
+ log.info('DSC: Removing {0}'.format(path))
+ if not __salt__['file.remove'](path):
+ error = 'Failed to remove {0}'.format(path)
+ log.error('DSC: {0}'.format(error))
+ raise CommandExecutionError(error)
+
+ dsc_config_dir = '{0}\\System32\\Configuration' \
+ ''.format(os.getenv('SystemRoot', 'C:\\Windows'))
+
+ # Remove History
+ _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir))
+
+ # Remove Engine Cache
+ _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir))
+
+ # Remove Status Directory
+ _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir))
+
+ return True
+
+
+def restore_config():
+ '''
+ Reapplies the previous configuration.
+
+ .. versionadded:: 2017.7.5
+
+ .. note::
+ The current configuration will become the previous configuration. If
+ run a second time back-to-back it is like toggling between two configs.
+
+ Returns:
+ bool: True if successfully restored
+
+ Raises:
+ CommandExecutionError: On failure
+
+ CLI Example:
+
+ .. code-block:: bash
+
+ salt '*' dsc.restore_config
+ '''
+ cmd = 'Restore-DscConfiguration'
+ try:
+ _pshell(cmd, ignore_retcode=True)
+ except CommandExecutionError as exc:
+ if 'A previous configuration does not exist' in exc.info['stderr']:
+ raise CommandExecutionError('Previous Configuration Not Found')
+ raise
+ return True
def test_config():
@@ -433,9 +576,13 @@ def test_config():
salt '*' dsc.test_config
'''
- cmd = 'Test-DscConfiguration *>&1'
- ret = _pshell(cmd)
- return ret == 'True'
+ cmd = 'Test-DscConfiguration'
+ try:
+ _pshell(cmd, ignore_retcode=True)
+ except CommandExecutionError as exc:
+ if 'Current configuration does not exist' in exc.info['stderr']:
+ raise CommandExecutionError('Not Configured')
+ raise
def get_config_status():
@@ -456,7 +603,12 @@ def get_config_status():
'Select-Object -Property HostName, Status, MetaData, ' \
'@{Name="StartDate";Expression={Get-Date ($_.StartDate) -Format g}}, ' \
'Type, Mode, RebootRequested, NumberofResources'
- return _pshell(cmd)
+ try:
+ return _pshell(cmd, ignore_retcode=True)
+ except CommandExecutionError as exc:
+ if 'No status information available' in exc.info['stderr']:
+ raise CommandExecutionError('Not Configured')
+ raise
def get_lcm_config():
@@ -638,8 +790,8 @@ def set_lcm_config(config_mode=None,
ret = __salt__['cmd.run_all'](cmd, shell='powershell', python_shell=True)
__salt__['file.remove'](r'{0}\SaltConfig'.format(temp_dir))
if not ret['retcode']:
- log.info('LCM config applied successfully')
+ log.info('DSC: LCM config applied successfully')
return True
else:
- log.error('Failed to apply LCM config. Error {0}'.format(ret))
+ log.error('DSC: Failed to apply LCM config. Error {0}'.format(ret))
return False
diff --git a/salt/modules/win_groupadd.py b/salt/modules/win_groupadd.py
index 9c0307265b..3ee9f81056 100644
--- a/salt/modules/win_groupadd.py
+++ b/salt/modules/win_groupadd.py
@@ -218,7 +218,7 @@ def getent(refresh=False):
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
- the groups in ``__context__`` wil be returned. If True the
+ the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
@@ -469,7 +469,7 @@ def list_groups(refresh=False):
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
- the groups in ``__context__`` wil be returned. If True, the
+ the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py
index ebaff9f040..89b48b8b79 100644
--- a/salt/modules/win_lgpo.py
+++ b/salt/modules/win_lgpo.py
@@ -2442,7 +2442,7 @@ class _policy_info(object):
elif ord(val) == 1:
return 'Enabled'
else:
- return 'Invalid Value'
+ return 'Invalid Value: {0!r}'.format(val) # pylint: disable=repr-flag-used-in-string
else:
return 'Not Defined'
except TypeError:
@@ -5047,7 +5047,10 @@ def get(policy_class=None, return_full_policy_names=True,
class_vals[policy_name] = __salt__['reg.read_value'](_pol['Registry']['Hive'],
_pol['Registry']['Path'],
_pol['Registry']['Value'])['vdata']
- log.debug('Value {0} found for reg policy {1}'.format(class_vals[policy_name], policy_name))
+ log.debug(
+ 'Value %r found for reg policy %s',
+ class_vals[policy_name], policy_name
+ )
elif 'Secedit' in _pol:
# get value from secedit
_ret, _val = _findOptionValueInSeceditFile(_pol['Secedit']['Option'])
diff --git a/salt/modules/win_path.py b/salt/modules/win_path.py
index 3b446f7cea..833d605c01 100644
--- a/salt/modules/win_path.py
+++ b/salt/modules/win_path.py
@@ -16,14 +16,13 @@ from salt.ext.six.moves import map
# Third party libs
try:
- from win32con import HWND_BROADCAST, WM_SETTINGCHANGE
- from win32api import SendMessage
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Import salt libs
import salt.utils
+import salt.utils.win_functions
# Settings
log = logging.getLogger(__name__)
@@ -47,7 +46,15 @@ def _normalize_dir(string):
def rehash():
'''
- Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment variables
+ Send a WM_SETTINGCHANGE Broadcast to Windows to refresh the Environment
+ variables for new processes.
+
+ .. note::
+ This will only affect new processes that aren't launched by services. To
+ apply changes to the path to services, the host must be restarted. The
+ ``salt-minion``, if running as a service, will not see changes to the
+ environment until the system is restarted. See
+ `MSDN Documentation <https://support.microsoft.com/en-us/help/821761/changes-that-you-make-to-environment-variables-do-not-affect-services>`_
CLI Example:
@@ -55,7 +62,7 @@ def rehash():
salt '*' win_path.rehash
'''
- return bool(SendMessage(HWND_BROADCAST, WM_SETTINGCHANGE, 0, 'Environment'))
+ return salt.utils.win_functions.broadcast_setting_change('Environment')
def get_path():
diff --git a/salt/modules/win_pkg.py b/salt/modules/win_pkg.py
index b4c1e199e5..4592dbe20b 100644
--- a/salt/modules/win_pkg.py
+++ b/salt/modules/win_pkg.py
@@ -27,6 +27,14 @@ As the creation of this metadata can take some time, the
:conf_minion:`winrepo_cache_expire_min` minion config option can be used to
suppress refreshes when the metadata is less than a given number of seconds
old.
+
+.. note::
+ Version numbers can be a `version number string`, `latest`, or `Not Found`.
+ `Not Found` means this module was not able to determine the version of
+ the software installed; it can also be used as the version number in sls
+ definition files in these cases. Version numbers are sorted in the order
+ 0, `Not Found`, `ordered version numbers`, ..., `latest`.
+
'''
# Import python future libs
@@ -61,7 +69,6 @@ import salt.syspaths
import salt.payload
from salt.exceptions import MinionError
from salt.utils.versions import LooseVersion
-
log = logging.getLogger(__name__)
# Define the module's virtual name
@@ -386,20 +393,23 @@ def list_pkgs(versions_as_list=False, **kwargs):
ret = {}
name_map = _get_name_map(saltenv)
- for pkg_name, val in six.iteritems(_get_reg_software()):
+ for pkg_name, val_list in six.iteritems(_get_reg_software()):
if pkg_name in name_map:
key = name_map[pkg_name]
- if val in ['(value not set)', 'Not Found', None, False]:
- # Look up version from winrepo
- pkg_info = _get_package_info(key, saltenv=saltenv)
- if not pkg_info:
- continue
- for pkg_ver in pkg_info:
- if pkg_info[pkg_ver]['full_name'] == pkg_name:
- val = pkg_ver
+ for val in val_list:
+ if val == 'Not Found':
+ # Look up version from winrepo
+ pkg_info = _get_package_info(key, saltenv=saltenv)
+ if not pkg_info:
+ continue
+ for pkg_ver in pkg_info.keys():
+ if pkg_info[pkg_ver]['full_name'] == pkg_name:
+ val = pkg_ver
+ __salt__['pkg_resource.add_pkg'](ret, key, val)
else:
key = pkg_name
- __salt__['pkg_resource.add_pkg'](ret, key, val)
+ for val in val_list:
+ __salt__['pkg_resource.add_pkg'](ret, key, val)
__salt__['pkg_resource.sort_pkglist'](ret)
if not versions_as_list:
@@ -407,21 +417,6 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret
-def _search_software(target):
- '''
- This searches the msi product databases for name matches of the list of
- target products, it will return a dict with values added to the list passed
- in
- '''
- search_results = {}
- software = dict(_get_reg_software().items())
- for key, value in six.iteritems(software):
- if key is not None:
- if target.lower() in key.lower():
- search_results[key] = value
- return search_results
-
-
def _get_reg_software():
'''
This searches the uninstall keys in the registry to find a match in the sub
@@ -445,29 +440,62 @@ def _get_reg_software():
None]
reg_software = {}
-
hive = 'HKLM'
key = "Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
def update(hive, key, reg_key, use_32bit):
-
+    # 2018: this code has been updated to reflect some of the utils/pkg/win.py logic
+ # i.e. check_reg, and checking of DisplayName and DisplayVersion.
d_name = ''
d_vers = ''
- d_name = __salt__['reg.read_value'](hive,
- '{0}\\{1}'.format(key, reg_key),
- 'DisplayName',
- use_32bit)['vdata']
+ d_name_regdata = __salt__['reg.read_value'](hive,
+ '{0}\\{1}'.format(key, reg_key),
+ 'DisplayName',
+ use_32bit)
+ if (not d_name_regdata['success'] or
+ d_name_regdata['vtype'] not in ['REG_SZ', 'REG_EXPAND_SZ'] or
+ d_name_regdata['vdata'] in ['(value not set)', None, False]):
+ return
+ d_name = d_name_regdata['vdata']
+
+ d_vers_regdata = __salt__['reg.read_value'](hive,
+ '{0}\\{1}'.format(key, reg_key),
+ 'DisplayVersion',
+ use_32bit)
+ if (not d_vers_regdata['success'] or
+ d_vers_regdata['vtype'] not in ['REG_SZ', 'REG_EXPAND_SZ', 'REG_DWORD'] or
+ d_vers_regdata['vdata'] in [None, False]):
+ return
+
+ if isinstance(d_vers_regdata['vdata'], int):
+ d_vers = six.text_type(d_vers_regdata['vdata'])
+ else:
+ d_vers = d_vers_regdata['vdata']
+
+ if not d_vers or d_vers == '(value not set)':
+ d_vers = 'Not Found'
+
+ check_ok = False
+ for check_reg in ['UninstallString', 'QuietUninstallString', 'ModifyPath']:
+ check_regdata = __salt__['reg.read_value'](hive,
+ '{0}\\{1}'.format(key, reg_key),
+ check_reg,
+ use_32bit)
+ if (not check_regdata['success'] or
+ check_regdata['vtype'] not in ['REG_SZ', 'REG_EXPAND_SZ'] or
+ check_regdata['vdata'] in ['(value not set)', None, False]):
+ continue
+ else:
+ check_ok = True
- d_vers = __salt__['reg.read_value'](hive,
- '{0}\\{1}'.format(key, reg_key),
- 'DisplayVersion',
- use_32bit)['vdata']
+ if not check_ok:
+ return
if d_name not in ignore_list:
# some MS Office updates don't register a product name which means
# their information is useless
- reg_software.update({d_name: six.text_type(d_vers)})
+ reg_software.setdefault(d_name, []).append(d_vers)
for reg_key in __salt__['reg.list_keys'](hive, key):
update(hive, key, reg_key, False)
@@ -548,28 +576,77 @@ def _refresh_db_conditional(saltenv, **kwargs):
def refresh_db(**kwargs):
- '''
- Fetches metadata files and calls :py:func:`pkg.genrepo
- <salt.modules.win_pkg.genrepo>` to compile updated repository metadata.
+ r'''
+ Generates the local software metadata database (`winrepo.p`) on the minion.
+ The database is stored in a serialized format located by default at the
+ following location:
+
+ `C:\salt\var\cache\salt\minion\files\base\win\repo-ng\winrepo.p`
+
+ This module performs the following steps to generate the software metadata
+ database:
+
+ - Fetch the package definition files (.sls) from `winrepo_source_dir`
+ (default `salt://win/repo-ng`) and cache them in
+ `<cachedir>\files\<saltenv>\<winrepo_source_dir>`
+ (default: `C:\salt\var\cache\salt\minion\files\base\win\repo-ng`)
+ - Call :py:func:`pkg.genrepo <salt.modules.win_pkg.genrepo>` to parse the
+ package definition files and generate the repository metadata database
+ file (`winrepo.p`)
+ - Return the report received from
+ :py:func:`pkg.genrepo <salt.modules.win_pkg.genrepo>`
+
+ The default winrepo directory on the master is `/srv/salt/win/repo-ng`. All
+ files that end with `.sls` in this and all subdirectories will be used to
+ generate the repository metadata database (`winrepo.p`).
+
+ .. note::
+ - Hidden directories (directories beginning with '`.`', such as
+ '`.git`') will be ignored.
+
+ .. note::
+ There is no need to call `pkg.refresh_db` every time you work with the
+ pkg module. Automatic refresh will occur based on the following minion
+ configuration settings:
+ - `winrepo_cache_expire_min`
+ - `winrepo_cache_expire_max`
+ However, if the package definition files have changed, as would be the
+ case if you are developing a new package definition, this function
+ should be called to ensure the minion has the latest information about
+ packages available to it.
+
+ .. warning::
+ Directories and files fetched from <winrepo_source_dir>
+ (`/srv/salt/win/repo-ng`) will be processed in alphabetical order. If
+ two or more software definition files contain the same name, the last
+ one processed replaces all data from the files processed before it.
+
+ For more information see
+ :ref:`Windows Software Repository <windows-package-manager>`
Kwargs:
saltenv (str): Salt environment. Default: ``base``
verbose (bool):
- Return verbose data structure which includes 'success_list', a list
- of all sls files and the package names contained within. Default
- 'False'
+ Return a verbose data structure which includes 'success_list', a
+ list of all sls files and the package names contained within.
+ Default is 'False'
failhard (bool):
- If ``True``, an error will be raised if any repo SLS files failed to
+        If ``True``, an error will be raised if any repo SLS file fails to
process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
Returns:
dict: A dictionary containing the results of the database refresh.
- .. Warning::
+ .. note::
+ A result with a `total: 0` generally means that the files are in the
+ wrong location on the master. Try running the following command on the
+        minion: `salt-call -l debug pkg.refresh_db saltenv=base`
+
+ .. warning::
When calling this command from a state using `module.run` be sure to
pass `failhard: False`. Otherwise the state will report failure if it
encounters a bad software definition file.
@@ -615,10 +692,12 @@ def refresh_db(**kwargs):
)
# Cache repo-ng locally
+ log.info('Fetching *.sls files from {0}'.format(repo_details.winrepo_source_dir))
__salt__['cp.cache_dir'](
- repo_details.winrepo_source_dir,
- saltenv,
- include_pat='*.sls'
+ path=repo_details.winrepo_source_dir,
+ saltenv=saltenv,
+ include_pat='*.sls',
+ exclude_pat=r'E@\/\..*?\/' # Exclude all hidden directories (.git)
)
return genrepo(saltenv=saltenv, verbose=verbose, failhard=failhard)
@@ -721,6 +800,10 @@ def genrepo(**kwargs):
to process. If ``False``, no error will be raised, and a dictionary
containing the full results will be returned.
+ .. note::
+ - Hidden directories (directories beginning with '`.`', such as
+ '`.git`') will be ignored.
+
Returns:
dict: A dictionary of the results of the command
@@ -744,9 +827,16 @@ def genrepo(**kwargs):
repo_details = _get_repo_details(saltenv)
for root, _, files in os.walk(repo_details.local_dest, followlinks=False):
+
+ # Skip hidden directories (.git)
+ if re.search(r'[\\/]\..*', root):
+ log.debug('Skipping files in directory: {0}'.format(root))
+ continue
+
short_path = os.path.relpath(root, repo_details.local_dest)
if short_path == '.':
short_path = ''
+
for name in files:
if name.endswith('.sls'):
total_files_processed += 1
@@ -801,9 +891,9 @@ def genrepo(**kwargs):
def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
renderers = salt.loader.render(__opts__, __salt__)
- def _failed_compile(msg):
- log.error(msg)
- ret.setdefault('errors', {})[short_path_name] = [msg]
+ def _failed_compile(prefix_msg, error_msg):
+ log.error('{0} \'{1}\': {2} '.format(prefix_msg, short_path_name, error_msg))
+ ret.setdefault('errors', {})[short_path_name] = ['{0}, {1} '.format(prefix_msg, error_msg)]
return False
try:
@@ -814,16 +904,14 @@ def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
__opts__.get('renderer_blacklist', ''),
__opts__.get('renderer_whitelist', ''))
except SaltRenderError as exc:
- msg = 'Failed to compile \'{0}\': {1}'.format(short_path_name, exc)
- return _failed_compile(msg)
+ return _failed_compile('Failed to compile', exc)
except Exception as exc:
- msg = 'Failed to read \'{0}\': {1}'.format(short_path_name, exc)
- return _failed_compile(msg)
+ return _failed_compile('Failed to read', exc)
- if config:
+ if config and isinstance(config, dict):
revmap = {}
errors = []
- for pkgname, versions in six.iteritems(config):
+ for pkgname, version_list in six.iteritems(config):
if pkgname in ret['repo']:
log.error(
'package \'%s\' within \'%s\' already defined, skipping',
@@ -831,7 +919,7 @@ def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
)
errors.append('package \'{0}\' already defined'.format(pkgname))
break
- for version_str, repodata in six.iteritems(versions):
+ for version_str, repodata in six.iteritems(version_list):
# Ensure version is a string/unicode
if not isinstance(version_str, six.string_types):
msg = (
@@ -862,6 +950,8 @@ def _repo_process_pkg_sls(filename, short_path_name, ret, successful_verbose):
ret.setdefault('repo', {}).update(config)
ret.setdefault('name_map', {}).update(revmap)
successful_verbose[short_path_name] = config.keys()
+ elif config:
+ return _failed_compile('Compiled contents', 'not a dictionary/hash')
else:
log.debug('No data within \'%s\' after processing', short_path_name)
# no pkgname found after render
@@ -1104,7 +1194,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
}
# Get a list of currently installed software for comparison at the end
- old = list_pkgs(saltenv=saltenv, refresh=refresh)
+ old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)
# Loop through each package
changed = []
@@ -1121,16 +1211,20 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
continue
# Get the version number passed or the latest available (must be a string)
- version_num = ''
+ version_num = None
if options:
- version_num = options.get('version', '')
+ version_num = options.get('version', None)
# Using the salt cmdline with version=5.3 might be interpreted
# as a float it must be converted to a string in order for
# string matching to work.
- if not isinstance(version_num, six.string_types) and version_num is not None:
+ if not isinstance(version_num, six.text_type) and version_num is not None:
version_num = six.text_type(version_num)
if not version_num:
+ if pkg_name in old:
+ log.debug('A version ({0}) already installed for package '
+ '{1}'.format(version_num, pkg_name))
+ continue
# following can be version number or latest or Not Found
version_num = _get_latest_pkg_version(pkginfo)
@@ -1138,8 +1232,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
version_num = _get_latest_pkg_version(pkginfo)
# Check if the version is already installed
- if version_num in old.get(pkg_name, '').split(',') \
- or (old.get(pkg_name, '') == 'Not Found'):
+ if version_num in old.get(pkg_name, []):
# Desired version number already installed
ret[pkg_name] = {'current': version_num}
continue
@@ -1174,11 +1267,11 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
# single files
if cache_dir and installer.startswith('salt:'):
path, _ = os.path.split(installer)
- __salt__['cp.cache_dir'](path,
- saltenv,
- False,
- None,
- 'E@init.sls$')
+ __salt__['cp.cache_dir'](path=path,
+ saltenv=saltenv,
+ include_empty=False,
+ include_pat=None,
+ exclude_pat='E@init.sls$')
# Check to see if the cache_file is cached... if passed
if cache_file and cache_file.startswith('salt:'):
@@ -1311,9 +1404,9 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
else:
# Make sure the task is running, try for 5 secs
- from time import time
- t_end = time() + 5
- while time() < t_end:
+ t_end = time.time() + 5
+ while time.time() < t_end:
+ time.sleep(0.25)
task_running = __salt__['task.status'](
'update-salt-software') == 'Running'
if task_running:
@@ -1360,7 +1453,11 @@ def install(name=None, refresh=False, pkgs=None, **kwargs):
ret[pkg_name] = {'install status': 'failed'}
# Get a new list of installed software
- new = list_pkgs(saltenv=saltenv)
+ new = list_pkgs(saltenv=saltenv, refresh=False)
+
+ # Take the "old" package list and convert the values to strings in
+ # preparation for the comparison below.
+ __salt__['pkg_resource.stringify'](old)
# For installers that have no specific version (ie: chrome)
# The software definition file will have a version of 'latest'
@@ -1401,9 +1498,9 @@ def upgrade(**kwargs):
salt '*' pkg.upgrade
'''
- log.warning('pkg.upgrade not implemented on Windows yet')
refresh = salt.utils.is_true(kwargs.get('refresh', True))
saltenv = kwargs.get('saltenv', 'base')
+    log.warning('pkg.upgrade not implemented on Windows yet. refresh:%s saltenv:%s', refresh, saltenv)
# Uncomment the below once pkg.upgrade has been implemented
# if salt.utils.is_true(refresh):
@@ -1411,7 +1508,7 @@ def upgrade(**kwargs):
return {}
-def remove(name=None, pkgs=None, version=None, **kwargs):
+def remove(name=None, pkgs=None, **kwargs):
'''
Remove the passed package(s) from the system using winrepo
@@ -1422,6 +1519,12 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
The name(s) of the package(s) to be uninstalled. Can be a
single package or a comma delimited list of packages, no spaces.
+ pkgs (list):
+ A list of packages to delete. Must be passed as a python list. The
+ ``name`` parameter will be ignored if this option is passed.
+
+ Kwargs:
+
version (str):
The version of the package to be uninstalled. If this option is
used to to uninstall multiple packages, then this version will be
@@ -1429,11 +1532,6 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
uninstalling a single package. If this parameter is omitted, the
latest version will be uninstalled.
- pkgs (list):
- A list of packages to delete. Must be passed as a python list. The
- ``name`` parameter will be ignored if this option is passed.
-
- Kwargs:
saltenv (str): Salt environment. Default ``base``
refresh (bool): Refresh package metadata. Default ``False``
@@ -1473,7 +1571,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)
# Loop through each package
- changed = []
+ changed = [] # list of changed package names
for pkgname, version_num in six.iteritems(pkg_params):
# Load package information for the package
@@ -1486,44 +1584,57 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
ret[pkgname] = msg
continue
- if version_num is not None:
- # Using the salt cmdline with version=5.3 might be interpreted
- # as a float it must be converted to a string in order for
- # string matching to work.
- if not isinstance(version_num, six.string_types) and version_num is not None:
- version_num = six.text_type(version_num)
- if version_num not in pkginfo and 'latest' in pkginfo:
- version_num = 'latest'
- elif 'latest' in pkginfo:
- version_num = 'latest'
-
# Check to see if package is installed on the system
- removal_targets = []
if pkgname not in old:
- log.error('%s %s not installed', pkgname, version)
+ log.debug('%s %s not installed', pkgname, version_num if version_num else '')
ret[pkgname] = {'current': 'not installed'}
continue
+
+ removal_targets = []
+ # Only support a single version number
+ if version_num is not None:
+ # Using the salt cmdline with version=5.3 might be interpreted
+ # as a float it must be converted to a string in order for
+ # string matching to work.
+ version_num = six.text_type(version_num)
+
+ # At least one version of the software is installed.
+ if version_num is None:
+ for ver_install in old[pkgname]:
+ if ver_install not in pkginfo and 'latest' in pkginfo:
+                log.debug('%s %s using package latest entry to remove', pkgname, version_num)
+ removal_targets.append('latest')
+ else:
+ removal_targets.append(ver_install)
else:
- if version_num is None:
- removal_targets.extend(old[pkgname])
- elif version_num not in old[pkgname] \
- and 'Not Found' not in old[pkgname] \
- and version_num != 'latest':
- log.error('%s %s not installed', pkgname, version)
- ret[pkgname] = {
- 'current': '{0} not installed'.format(version_num)
+ if version_num in pkginfo:
+                # we know how to remove this version
+ if version_num in old[pkgname]:
+ removal_targets.append(version_num)
+ else:
+ log.debug('%s %s not installed', pkgname, version_num)
+ ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
+ continue
+ elif 'latest' in pkginfo:
+                # we do not have a version entry; assume the software can self-upgrade and use 'latest'
+                log.debug('%s %s using package latest entry to remove', pkgname, version_num)
+ removal_targets.append('latest')
+
+ if not removal_targets:
+ log.error('%s %s no definition to remove this version', pkgname, version_num)
+ ret[pkgname] = {
+            'current': '{0} no definition, cannot be removed'.format(version_num)
}
- continue
- else:
- removal_targets.append(version_num)
+ continue
for target in removal_targets:
# Get the uninstaller
uninstaller = pkginfo[target].get('uninstaller', '')
cache_dir = pkginfo[target].get('cache_dir', False)
+ uninstall_flags = pkginfo[target].get('uninstall_flags', '')
- # If no uninstaller found, use the installer
- if not uninstaller:
+ # If no uninstaller found, use the installer with uninstall flags
+ if not uninstaller and uninstall_flags:
uninstaller = pkginfo[target].get('installer', '')
# If still no uninstaller found, fail
@@ -1532,7 +1643,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
'No installer or uninstaller configured for package %s',
pkgname,
)
- ret[pkgname] = {'no uninstaller': target}
+ ret[pkgname] = {'no uninstaller defined': target}
continue
# Where is the uninstaller
@@ -1591,9 +1702,6 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
# os.path.expandvars is not required as we run everything through cmd.exe /s /c
- # Get uninstall flags
- uninstall_flags = pkginfo[target].get('uninstall_flags', '')
-
if kwargs.get('extra_uninstall_flags'):
uninstall_flags = '{0} {1}'.format(
uninstall_flags, kwargs.get('extra_uninstall_flags', ''))
@@ -1615,6 +1723,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
arguments = '{0} {1}'.format(arguments, uninstall_flags)
# Uninstall the software
+ changed.append(pkgname)
# Check Use Scheduler Option
if pkginfo[target].get('use_scheduler', False):
# Create Scheduled Task
@@ -1664,29 +1773,32 @@ def remove(name=None, pkgs=None, version=None, **kwargs):
ret[pkgname] = {'uninstall status': 'failed'}
# Get a new list of installed software
- new = list_pkgs(saltenv=saltenv)
+ new = list_pkgs(saltenv=saltenv, refresh=False)
# Take the "old" package list and convert the values to strings in
# preparation for the comparison below.
__salt__['pkg_resource.stringify'](old)
+ # Check for changes in the registry
difference = salt.utils.compare_dicts(old, new)
- tries = 0
- while not all(name in difference for name in changed) and tries <= 1000:
- new = list_pkgs(saltenv=saltenv)
+ found_chgs = all(name in difference for name in changed)
+ end_t = time.time() + 3 # give it 3 seconds to catch up.
+ while not found_chgs and time.time() < end_t:
+ time.sleep(0.5)
+ new = list_pkgs(saltenv=saltenv, refresh=False)
difference = salt.utils.compare_dicts(old, new)
- tries += 1
- if tries == 1000:
- ret['_comment'] = 'Registry not updated.'
+ found_chgs = all(name in difference for name in changed)
+
+ if not found_chgs:
+        log.warning('Expected changes for package removal may not have occurred')
# Compare the software list before and after
# Add the difference to ret
ret.update(difference)
-
return ret
-def purge(name=None, pkgs=None, version=None, **kwargs):
+def purge(name=None, pkgs=None, **kwargs):
'''
Package purges are not supported, this function is identical to
``remove()``.
@@ -1724,7 +1836,6 @@ def purge(name=None, pkgs=None, version=None, **kwargs):
'''
return remove(name=name,
pkgs=pkgs,
- version=version,
**kwargs)
@@ -1808,6 +1919,11 @@ def _reverse_cmp_pkg_versions(pkg1, pkg2):
def _get_latest_pkg_version(pkginfo):
+ '''
+ Returns the latest version of the package.
+ Will return 'latest' or version number string, and
+ 'Not Found' if 'Not Found' is the only entry.
+ '''
if len(pkginfo) == 1:
return next(six.iterkeys(pkginfo))
try:
diff --git a/salt/modules/win_snmp.py b/salt/modules/win_snmp.py
index 6fd3bd515f..baa119e388 100644
--- a/salt/modules/win_snmp.py
+++ b/salt/modules/win_snmp.py
@@ -45,7 +45,7 @@ def __virtual__():
if not salt.utils.is_windows():
return False, 'Module win_snmp: Requires Windows'
- if not __salt__['reg.read_value'](_HKEY, _SNMP_KEY)['success']:
+ if not __salt__['reg.key_exists'](_HKEY, _SNMP_KEY):
return False, 'Module win_snmp: SNMP not installed'
return __virtualname__
diff --git a/salt/modules/win_system.py b/salt/modules/win_system.py
index b8cf4ff20f..f3b6eab46f 100644
--- a/salt/modules/win_system.py
+++ b/salt/modules/win_system.py
@@ -1285,7 +1285,7 @@ def get_pending_servermanager():
key = r'SOFTWARE\Microsoft\ServerManager'
# There are situations where it's possible to have '(value not set)' as
- # the value data, and since an actual reboot wont be pending in that
+ # the value data, and since an actual reboot won't be pending in that
# instance, just catch instances where we try unsuccessfully to cast as int.
reg_ret = __salt__['reg.read_value']('HKLM', key, vname)
diff --git a/salt/modules/win_wua.py b/salt/modules/win_wua.py
index 63409951e2..9ab400a848 100644
--- a/salt/modules/win_wua.py
+++ b/salt/modules/win_wua.py
@@ -305,7 +305,7 @@ def list_update(name, download=False, install=False):
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'get\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'get\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return get(name, download, install)
@@ -532,7 +532,7 @@ def list_updates(software=True,
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'list\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'list\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return list(software, drivers, summary, skip_installed, categories,
severities, download, install)
@@ -720,7 +720,7 @@ def download_update(name):
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'download\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'download\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return download(name)
@@ -752,7 +752,7 @@ def download_updates(names):
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'download\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'download\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return download(names)
@@ -840,7 +840,7 @@ def install_update(name):
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'install\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'install\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return install(name)
@@ -871,7 +871,7 @@ def install_updates(names):
'''
salt.utils.warn_until(
'Fluorine',
- 'This function is replaced by \'install\' as of Salt 2017.7.0. This'
+ 'This function is replaced by \'install\' as of Salt 2017.7.0. This '
'warning will be removed in Salt Fluorine.')
return install(names)
diff --git a/salt/modules/x509.py b/salt/modules/x509.py
index 3626bd42d8..ae1c4f0520 100644
--- a/salt/modules/x509.py
+++ b/salt/modules/x509.py
@@ -24,6 +24,7 @@ import ast
# Import salt libs
import salt.utils
+import salt.utils.files
import salt.exceptions
import salt.ext.six as six
from salt.utils.odict import OrderedDict
@@ -508,7 +509,7 @@ def get_pem_entries(glob_path):
.. code-block:: bash
- salt '*' x509.read_pem_entries "/etc/pki/*.crt"
+ salt '*' x509.get_pem_entries "/etc/pki/*.crt"
'''
ret = {}
@@ -757,28 +758,27 @@ def write_pem(text, path, overwrite=True, pem_type=None):
"-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." \\
path=/etc/pki/mycert.crt
'''
- old_umask = os.umask(0o77)
- text = get_pem_entry(text, pem_type=pem_type)
- _dhparams = ''
- _private_key = ''
- if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and \
- not overwrite:
- _filecontents = _text_or_file(path)
- try:
- _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
- except salt.exceptions.SaltInvocationError:
- pass
- try:
- _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
- except salt.exceptions.SaltInvocationError:
- pass
- with salt.utils.fopen(path, 'w') as _fp:
- if pem_type and pem_type == 'CERTIFICATE' and _private_key:
- _fp.write(_private_key)
- _fp.write(text)
- if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
- _fp.write(_dhparams)
- os.umask(old_umask)
+ with salt.utils.files.set_umask(0o077):
+ text = get_pem_entry(text, pem_type=pem_type)
+ _dhparams = ''
+ _private_key = ''
+ if pem_type and pem_type == 'CERTIFICATE' and os.path.isfile(path) and \
+ not overwrite:
+ _filecontents = _text_or_file(path)
+ try:
+ _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
+ except salt.exceptions.SaltInvocationError:
+ pass
+ try:
+ _private_key = get_pem_entry(_filecontents, '(?:RSA )?PRIVATE KEY')
+ except salt.exceptions.SaltInvocationError:
+ pass
+ with salt.utils.fopen(path, 'w') as _fp:
+ if pem_type and pem_type == 'CERTIFICATE' and _private_key:
+ _fp.write(_private_key)
+ _fp.write(text)
+ if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
+ _fp.write(_dhparams)
return 'PEM written to {0}'.format(path)
diff --git a/salt/modules/yumpkg.py b/salt/modules/yumpkg.py
index 5464d752dc..7b4e4e6d36 100644
--- a/salt/modules/yumpkg.py
+++ b/salt/modules/yumpkg.py
@@ -204,25 +204,29 @@ def _check_versionlock():
)
-def _get_repo_options(**kwargs):
+def _get_options(**kwargs):
'''
- Returns a list of '--enablerepo' and '--disablerepo' options to be used
- in the yum command, based on the kwargs.
+ Returns a list of options to be used in the yum/dnf command, based on the
+ kwargs passed.
'''
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
+ disableexcludes = kwargs.pop('disableexcludes', '')
+ branch = kwargs.pop('branch', '')
+ get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and not fromrepo:
fromrepo = repo
ret = []
+
if fromrepo:
log.info('Restricting to repo \'%s\'', fromrepo)
- ret.extend(['--disablerepo=*', '--enablerepo=' + fromrepo])
+ ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)])
else:
if disablerepo:
targets = [disablerepo] \
@@ -238,46 +242,30 @@ def _get_repo_options(**kwargs):
else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets])
- return ret
-
-
-def _get_excludes_option(**kwargs):
- '''
- Returns a list of '--disableexcludes' option to be used in the yum command,
- based on the kwargs.
- '''
- disable_excludes = kwargs.pop('disableexcludes', '')
- ret = []
- if disable_excludes:
- log.info('Disabling excludes for \'%s\'', disable_excludes)
- ret.append('--disableexcludes={0}'.format(disable_excludes))
- return ret
+ if disableexcludes:
+ log.info('Disabling excludes for \'%s\'', disableexcludes)
+ ret.append('--disableexcludes={0}'.format(disableexcludes))
-def _get_branch_option(**kwargs):
- '''
- Returns a list of '--branch' option to be used in the yum command,
- based on the kwargs. This feature requires 'branch' plugin for YUM.
- '''
- branch = kwargs.pop('branch', '')
- ret = []
if branch:
log.info('Adding branch \'%s\'', branch)
- ret.append('--branch=\'{0}\''.format(branch))
- return ret
+ ret.append('--branch={0}'.format(branch))
+ if get_extra_options:
+ # sorting here to make order uniform, makes unit testing more reliable
+ for key in sorted(kwargs):
+ if key.startswith('__'):
+ continue
+ value = kwargs[key]
+ if isinstance(value, six.string_types):
+ log.info('Found extra option --%s=%s', key, value)
+ ret.append('--{0}={1}'.format(key, value))
+ elif value is True:
+ log.info('Found extra option --%s', key)
+ ret.append('--{0}'.format(key))
+ if ret:
+ log.info('Adding extra options: %s', ret)
-def _get_extra_options(**kwargs):
- '''
- Returns list of extra options for yum
- '''
- ret = []
- kwargs = salt.utils.clean_kwargs(**kwargs)
- for key, value in six.iteritems(kwargs):
- if isinstance(value, six.string_types):
- ret.append('--{0}=\'{1}\''.format(key, value))
- elif value is True:
- ret.append('--{0}'.format(key))
return ret
@@ -441,8 +429,7 @@ def latest_version(*names, **kwargs):
if len(names) == 0:
return ''
- repo_arg = _get_repo_options(**kwargs)
- exclude_arg = _get_excludes_option(**kwargs)
+ options = _get_options(**kwargs)
# Refresh before looking for the latest version available
if refresh:
@@ -452,8 +439,7 @@ def latest_version(*names, **kwargs):
# Get available versions for specified package(s)
cmd = [_yum(), '--quiet']
- cmd.extend(repo_arg)
- cmd.extend(exclude_arg)
+ cmd.extend(options)
cmd.extend(['list', 'available'])
cmd.extend(names)
out = __salt__['cmd.run_all'](cmd,
@@ -761,7 +747,7 @@ def list_repo_pkgs(*args, **kwargs):
disablerepo = kwargs.pop('disablerepo', '') or ''
enablerepo = kwargs.pop('enablerepo', '') or ''
- repo_arg = _get_repo_options(fromrepo=fromrepo, **kwargs)
+ repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
if fromrepo and not isinstance(fromrepo, list):
try:
@@ -913,15 +899,13 @@ def list_upgrades(refresh=True, **kwargs):
salt '*' pkg.list_upgrades
'''
- repo_arg = _get_repo_options(**kwargs)
- exclude_arg = _get_excludes_option(**kwargs)
+ options = _get_options(**kwargs)
if salt.utils.is_true(refresh):
refresh_db(check_update=False, **kwargs)
cmd = [_yum(), '--quiet']
- cmd.extend(repo_arg)
- cmd.extend(exclude_arg)
+ cmd.extend(options)
cmd.extend(['list', 'upgrades' if _yum() == 'dnf' else 'updates'])
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
@@ -1039,21 +1023,19 @@ def refresh_db(**kwargs):
check_update_ = kwargs.pop('check_update', True)
- repo_arg = _get_repo_options(**kwargs)
- exclude_arg = _get_excludes_option(**kwargs)
- branch_arg = _get_branch_option(**kwargs)
+ options = _get_options(**kwargs)
- clean_cmd = [_yum(), '--quiet', 'clean', 'expire-cache']
- update_cmd = [_yum(), '--quiet', 'check-update']
+ clean_cmd = [_yum(), '--quiet', '--assumeyes', 'clean', 'expire-cache']
+ update_cmd = [_yum(), '--quiet', '--assumeyes', 'check-update']
- if __grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == '7':
- # This feature is disable because it is not used by Salt and lasts a lot with using large repo like EPEL
+ if __grains__.get('os_family') == 'RedHat' \
+ and __grains__.get('osmajorrelease') == 7:
+ # This feature is disabled because it is not used by Salt and adds a
+ # lot of extra time to the command with large repos like EPEL
update_cmd.append('--setopt=autocheck_running_kernel=false')
- for args in (repo_arg, exclude_arg, branch_arg):
- if args:
- clean_cmd.extend(args)
- update_cmd.extend(args)
+ clean_cmd.extend(options)
+ update_cmd.extend(options)
__salt__['cmd.run'](clean_cmd, python_shell=False)
if check_update_:
@@ -1090,6 +1072,7 @@ def install(name=None,
reinstall=False,
normalize=True,
update_holds=False,
+ saltenv='base',
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
@@ -1227,9 +1210,7 @@ def install(name=None,
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
- repo_arg = _get_repo_options(**kwargs)
- exclude_arg = _get_excludes_option(**kwargs)
- branch_arg = _get_branch_option(**kwargs)
+ options = _get_options(**kwargs)
if salt.utils.is_true(refresh):
refresh_db(**kwargs)
@@ -1237,7 +1218,7 @@ def install(name=None,
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
- name, pkgs, sources, normalize=normalize, **kwargs
+ name, pkgs, sources, saltenv=saltenv, normalize=normalize
)
except MinionError as exc:
raise CommandExecutionError(exc)
@@ -1415,8 +1396,8 @@ def install(name=None,
break
else:
if pkgname is not None:
- if re.match('kernel(-.+)?', pkgname):
- # kernel and its subpackages support multiple
+ if re.match('^kernel(|-devel)$', pkgname):
+ # kernel and kernel-devel support multiple
# installs as their paths do not conflict.
# Performing a yum/dnf downgrade will be a
# no-op so just do an install instead. It will
@@ -1439,9 +1420,7 @@ def install(name=None,
'''
DRY function to add args common to all yum/dnf commands
'''
- for arg in (repo_arg, exclude_arg, branch_arg):
- if arg:
- cmd.extend(arg)
+ cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
if downloadonly:
@@ -1706,17 +1685,14 @@ def upgrade(name=None,
.. note::
To add extra arguments to the ``yum upgrade`` command, pass them as key
- word arguments. For arguments without assignments, pass ``True``
+ word arguments. For arguments without assignments, pass ``True``
.. code-block:: bash
salt '*' pkg.upgrade security=True exclude='kernel*'
'''
- repo_arg = _get_repo_options(**kwargs)
- exclude_arg = _get_excludes_option(**kwargs)
- branch_arg = _get_branch_option(**kwargs)
- extra_args = _get_extra_options(**kwargs)
+ options = _get_options(get_extra_options=True, **kwargs)
if salt.utils.is_true(refresh):
refresh_db(**kwargs)
@@ -1745,9 +1721,7 @@ def upgrade(name=None,
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '--quiet', '-y'])
- for args in (repo_arg, exclude_arg, branch_arg, extra_args):
- if args:
- cmd.extend(args)
+ cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
cmd.append('upgrade')
diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py
index 4ede437c30..7a308f0a25 100644
--- a/salt/modules/zypper.py
+++ b/salt/modules/zypper.py
@@ -706,6 +706,123 @@ def list_pkgs(versions_as_list=False, **kwargs):
return ret
+def list_repo_pkgs(*args, **kwargs):
+ '''
+ .. versionadded:: 2017.7.5,2018.3.1
+
+ Returns all available packages. Optionally, package names (and name globs)
+ can be passed and the results will be filtered to packages matching those
+ names. This is recommended as it speeds up the function considerably.
+
+ This function can be helpful in discovering the version or repo to specify
+ in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
+
+ The return data will be a dictionary mapping package names to a list of
+ version numbers, ordered from newest to oldest. If ``byrepo`` is set to
+ ``True``, then the return dictionary will contain repository names at the
+ top level, and each repository will map packages to lists of version
+ numbers. For example:
+
+ .. code-block:: python
+
+ # With byrepo=False (default)
+ {
+ 'bash': ['4.3-83.3.1',
+ '4.3-82.6'],
+ 'vim': ['7.4.326-12.1']
+ }
+ {
+ 'OSS': {
+ 'bash': ['4.3-82.6'],
+ 'vim': ['7.4.326-12.1']
+ },
+ 'OSS Update': {
+ 'bash': ['4.3-83.3.1']
+ }
+ }
+
+ fromrepo : None
+ Only include results from the specified repo(s). Multiple repos can be
+ specified, comma-separated.
+
+ byrepo : False
+ When ``True``, the return data for each package will be organized by
+ repository.
+
+ CLI Examples:
+
+ .. code-block:: bash
+
+ salt '*' pkg.list_repo_pkgs
+ salt '*' pkg.list_repo_pkgs foo bar baz
+ salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True
+ salt '*' pkg.list_repo_pkgs 'python2-*' fromrepo='OSS Updates'
+ '''
+ byrepo = kwargs.pop('byrepo', False)
+ fromrepo = kwargs.pop('fromrepo', '') or ''
+ ret = {}
+
+ targets = [
+ arg if isinstance(arg, six.string_types) else six.text_type(arg)
+ for arg in args
+ ]
+
+ def _is_match(pkgname):
+ '''
+ When package names are passed to a zypper search, they will be matched
+ anywhere in the package name. This makes sure that only exact or
+ fnmatch matches are identified.
+ '''
+ if not args:
+ # No package names passed, everyone's a winner!
+ return True
+ for target in targets:
+ if fnmatch.fnmatch(pkgname, target):
+ return True
+ return False
+
+ for node in __zypper__.xml.call('se', '-s', *targets).getElementsByTagName('solvable'):
+ pkginfo = dict(node.attributes.items())
+ try:
+ if pkginfo['kind'] != 'package':
+ continue
+ reponame = pkginfo['repository']
+ if fromrepo and reponame != fromrepo:
+ continue
+ pkgname = pkginfo['name']
+ pkgversion = pkginfo['edition']
+ except KeyError:
+ continue
+ else:
+ if _is_match(pkgname):
+ repo_dict = ret.setdefault(reponame, {})
+ version_list = repo_dict.setdefault(pkgname, set())
+ version_list.add(pkgversion)
+
+ if byrepo:
+ for reponame in ret:
+ # Sort versions newest to oldest
+ for pkgname in ret[reponame]:
+ sorted_versions = sorted(
+ [LooseVersion(x) for x in ret[reponame][pkgname]],
+ reverse=True
+ )
+ ret[reponame][pkgname] = [x.vstring for x in sorted_versions]
+ return ret
+ else:
+ byrepo_ret = {}
+ for reponame in ret:
+ for pkgname in ret[reponame]:
+ byrepo_ret.setdefault(pkgname, []).extend(ret[reponame][pkgname])
+ for pkgname in byrepo_ret:
+ sorted_versions = sorted(
+ [LooseVersion(x) for x in byrepo_ret[pkgname]],
+ reverse=True
+ )
+ byrepo_ret[pkgname] = [x.vstring for x in sorted_versions]
+ return byrepo_ret
+
+
def _get_configured_repos():
'''
Get all the info about repositories from the configurations.
@@ -795,30 +912,31 @@ def mod_repo(repo, **kwargs):
be created, so long as the following values are specified:
repo or alias
- alias by which the zypper refers to the repo
+ alias by which Zypper refers to the repo
url, mirrorlist or baseurl
- the URL for zypper to reference
+ the URL for Zypper to reference
enabled
- enable or disable (True or False) repository,
+ Enable or disable (True or False) repository,
but do not remove if disabled.
refresh
- enable or disable (True or False) auto-refresh of the repository.
+ Enable or disable (True or False) auto-refresh of the repository.
cache
Enable or disable (True or False) RPM files caching.
gpgcheck
- Enable or disable (True or False) GOG check for this repository.
+ Enable or disable (True or False) GPG check for this repository.
- gpgautoimport
- Automatically trust and import new repository.
+ gpgautoimport : False
+ If set to True, automatically trust and import public GPG key for
+ the repository.
Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a
- url can only be deleted if a mirrorlist is specified (or vice versa).
+ URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).
CLI Examples:
@@ -1087,6 +1205,15 @@ def install(name=None,
return {}
version_num = Wildcard(__zypper__)(name, version)
+
+ if version_num:
+ if pkgs is None and sources is None:
+ # Allow "version" to work for single package target
+ pkg_params = {name: version_num}
+ else:
+ log.warning('"version" parameter will be ignored for multiple '
+ 'package targets')
+
if pkg_type == 'repository':
targets = []
problems = []
diff --git a/salt/netapi/rest_cherrypy/app.py b/salt/netapi/rest_cherrypy/app.py
index becb45b0a5..8ac79512a7 100644
--- a/salt/netapi/rest_cherrypy/app.py
+++ b/salt/netapi/rest_cherrypy/app.py
@@ -1993,7 +1993,7 @@ class Run(LowDataAdapter):
The /run enpoint can also be used to issue commands using the salt-ssh
subsystem.
- When using salt-ssh, eauth credentials should not be supplied. Instad,
+ When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
@@ -2178,7 +2178,7 @@ class Events(object):
very busy and can quickly overwhelm the memory allocated to a
browser tab.
- A full, working proof-of-concept JavaScript appliction is available
+ A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py
index 38e665b7df..fd2652692b 100644
--- a/salt/netapi/rest_tornado/saltnado.py
+++ b/salt/netapi/rest_tornado/saltnado.py
@@ -236,28 +236,6 @@ logger = logging.getLogger()
# - "wheel" (need async api...)
-class SaltClientsMixIn(object):
- '''
- MixIn class to container all of the salt clients that the API needs
- '''
- # TODO: load this proactively, instead of waiting for a request
- __saltclients = None
-
- @property
- def saltclients(self):
- if SaltClientsMixIn.__saltclients is None:
- local_client = salt.client.get_local_client(mopts=self.application.opts)
- # TODO: refreshing clients using cachedict
- SaltClientsMixIn.__saltclients = {
- 'local': local_client.run_job_async,
- # not the actual client we'll use.. but its what we'll use to get args
- 'local_async': local_client.run_job_async,
- 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
- 'runner_async': None, # empty, since we use the same client as `runner`
- }
- return SaltClientsMixIn.__saltclients
-
-
AUTH_TOKEN_HEADER = 'X-Auth-Token'
AUTH_COOKIE_NAME = 'session_id'
@@ -317,9 +295,9 @@ class EventListener(object):
'''
if request not in self.request_map:
return
- for tag, future in self.request_map[request]:
+ for tag, matcher, future in self.request_map[request]:
# timeout the future
- self._timeout_future(tag, future)
+ self._timeout_future(tag, matcher, future)
# remove the timeout
if future in self.timeout_map:
tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
@@ -327,9 +305,22 @@ class EventListener(object):
del self.request_map[request]
+ @staticmethod
+ def prefix_matcher(mtag, tag):
+ if mtag is None or tag is None:
+ raise TypeError('mtag or tag can not be None')
+ return mtag.startswith(tag)
+
+ @staticmethod
+ def exact_matcher(mtag, tag):
+ if mtag is None or tag is None:
+ raise TypeError('mtag or tag can not be None')
+ return mtag == tag
+
def get_event(self,
request,
tag='',
+ matcher=prefix_matcher.__func__,
callback=None,
timeout=None
):
@@ -349,46 +340,55 @@ class EventListener(object):
tornado.ioloop.IOLoop.current().add_callback(callback, future)
future.add_done_callback(handle_future)
# add this tag and future to the callbacks
- self.tag_map[tag].append(future)
- self.request_map[request].append((tag, future))
+ self.tag_map[(tag, matcher)].append(future)
+ self.request_map[request].append((tag, matcher, future))
if timeout:
- timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, future)
+ timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future)
self.timeout_map[future] = timeout_future
return future
- def _timeout_future(self, tag, future):
+ def _timeout_future(self, tag, matcher, future):
'''
Timeout a specific future
'''
- if tag not in self.tag_map:
+ if (tag, matcher) not in self.tag_map:
return
if not future.done():
future.set_exception(TimeoutException())
- self.tag_map[tag].remove(future)
- if len(self.tag_map[tag]) == 0:
- del self.tag_map[tag]
+ self.tag_map[(tag, matcher)].remove(future)
+ if len(self.tag_map[(tag, matcher)]) == 0:
+ del self.tag_map[(tag, matcher)]
def _handle_event_socket_recv(self, raw):
'''
Callback for events on the event sub socket
'''
mtag, data = self.event.unpack(raw, self.event.serial)
+
# see if we have any futures that need this info:
- for tag_prefix, futures in six.iteritems(self.tag_map):
- if mtag.startswith(tag_prefix):
- for future in futures:
- if future.done():
- continue
- future.set_result({'data': data, 'tag': mtag})
- self.tag_map[tag_prefix].remove(future)
- if future in self.timeout_map:
- tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
- del self.timeout_map[future]
-
-
-class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylint: disable=W0223
+ for (tag, matcher), futures in six.iteritems(self.tag_map):
+ try:
+ is_matched = matcher(mtag, tag)
+ except Exception as e:
+ logger.error('Failed to run a matcher.', exc_info=True)
+ is_matched = False
+
+ if not is_matched:
+ continue
+
+ for future in futures:
+ if future.done():
+ continue
+ future.set_result({'data': data, 'tag': mtag})
+ self.tag_map[(tag, matcher)].remove(future)
+ if future in self.timeout_map:
+ tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future])
+ del self.timeout_map[future]
+
+
+class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', yaml.safe_dump),
@@ -416,6 +416,16 @@ class BaseSaltAPIHandler(tornado.web.RequestHandler, SaltClientsMixIn): # pylin
self.application.opts,
)
+ if not hasattr(self, 'saltclients'):
+ local_client = salt.client.get_local_client(mopts=self.application.opts)
+ self.saltclients = {
+ 'local': local_client.run_job_async,
+            # not the actual client we'll use.. but it's what we'll use to get args
+ 'local_async': local_client.run_job_async,
+ 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
+ 'runner_async': None, # empty, since we use the same client as `runner`
+ }
+
@property
def token(self):
'''
@@ -745,7 +755,7 @@ class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223
self.write(self.serialize(ret))
-class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W0223
+class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223
'''
Main API handler for base "/"
'''
@@ -925,64 +935,83 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W
if self.application.opts['order_masters']:
syndic_min_wait = tornado.gen.sleep(self.application.opts['syndic_wait'])
- job_not_running = self.job_not_running(pub_data['jid'],
- chunk['tgt'],
- f_call['kwargs']['tgt_type'],
- minions_remaining=minions_remaining
- )
+ # To ensure job_not_running and all_return are terminated by each other, communicate using a future
+ is_finished = Future()
+ job_not_running_future = self.job_not_running(pub_data['jid'],
+ chunk['tgt'],
+ f_call['kwargs']['tgt_type'],
+ is_finished,
+ minions_remaining=list(minions_remaining),
+ )
# if we have a min_wait, do that
if syndic_min_wait is not None:
yield syndic_min_wait
- # we are completed when either all minions return or the job isn't running anywhere
- chunk_ret = yield self.all_returns(pub_data['jid'],
- finish_futures=[job_not_running],
- minions_remaining=minions_remaining,
- )
- raise tornado.gen.Return(chunk_ret)
+ all_return_future = self.all_returns(pub_data['jid'],
+ is_finished,
+ minions_remaining=list(minions_remaining),
+ )
+ yield job_not_running_future
+ raise tornado.gen.Return((yield all_return_future))
@tornado.gen.coroutine
def all_returns(self,
jid,
- finish_futures=None,
+ is_finished,
minions_remaining=None,
):
'''
Return a future which will complete once all returns are completed
- (according to minions_remaining), or one of the passed in "finish_futures" completes
+ (according to minions_remaining), or one of the passed in "is_finished" completes
'''
- if finish_futures is None:
- finish_futures = []
if minions_remaining is None:
minions_remaining = []
- ret_tag = tagify([jid, 'ret'], 'job')
chunk_ret = {}
+
+ minion_events = {}
+ for minion in minions_remaining:
+ tag = tagify([jid, 'ret', minion], 'job')
+ minion_event = self.application.event_listener.get_event(self,
+ tag=tag,
+ matcher=EventListener.exact_matcher,
+ timeout=self.application.opts['timeout'])
+ minion_events[minion_event] = minion
+
while True:
- ret_event = self.application.event_listener.get_event(self,
- tag=ret_tag,
- )
- f = yield Any([ret_event] + finish_futures)
- if f in finish_futures:
- raise tornado.gen.Return(chunk_ret)
- event = f.result()
- chunk_ret[event['data']['id']] = event['data']['return']
- # its possible to get a return that wasn't in the minion_remaining list
+ f = yield Any(minion_events.keys() + [is_finished])
+ try:
+ if f is is_finished:
+ for event in minion_events:
+ if not event.done():
+ event.set_result(None)
+ raise tornado.gen.Return(chunk_ret)
+ f_result = f.result()
+ chunk_ret[f_result['data']['id']] = f_result['data']['return']
+ except TimeoutException:
+ pass
+
+ # clear finished event future
try:
- minions_remaining.remove(event['data']['id'])
+ minions_remaining.remove(minion_events[f])
+ del minion_events[f]
except ValueError:
pass
+
if len(minions_remaining) == 0:
+ if not is_finished.done():
+ is_finished.set_result(True)
raise tornado.gen.Return(chunk_ret)
@tornado.gen.coroutine
def job_not_running(self,
- jid,
- tgt,
- tgt_type,
- minions_remaining=None,
- ):
+ jid,
+ tgt,
+ tgt_type,
+ is_finished,
+ minions_remaining=None,
+ ):
'''
Return a future which will complete once jid (passed in) is no longer
running on tgt
@@ -999,12 +1028,21 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn): # pylint: disable=W
minion_running = False
while True:
try:
- event = yield self.application.event_listener.get_event(self,
- tag=ping_tag,
- timeout=self.application.opts['gather_job_timeout'],
- )
+ event = self.application.event_listener.get_event(self,
+ tag=ping_tag,
+ timeout=self.application.opts['gather_job_timeout'],
+ )
+ f = yield Any([event, is_finished])
+ # When finished entire routine, cleanup other futures and return result
+ if f is is_finished:
+ if not event.done():
+ event.set_result(None)
+ raise tornado.gen.Return(True)
+ event = f.result()
except TimeoutException:
if not minion_running:
+ if not is_finished.done():
+ is_finished.set_result(True)
raise tornado.gen.Return(True)
else:
ping_pub_data = yield self.saltclients['local'](tgt,
diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py
index 13a0bb8621..7741661cae 100644
--- a/salt/pillar/__init__.py
+++ b/salt/pillar/__init__.py
@@ -374,6 +374,15 @@ class Pillar(object):
opts['ext_pillar'].append(self.ext)
else:
opts['ext_pillar'] = [self.ext]
+ if '__env__' in opts['file_roots']:
+ env = opts.get('pillarenv') or opts.get('saltenv') or 'base'
+ if env not in opts['file_roots']:
+ log.debug("pillar environment '%s' maps to __env__ pillar_roots directory", env)
+ opts['file_roots'][env] = opts['file_roots'].pop('__env__')
+ else:
+ log.debug("pillar_roots __env__ ignored (environment '%s' found in pillar_roots)",
+ env)
+ opts['file_roots'].pop('__env__')
return opts
def _get_envs(self):
@@ -792,7 +801,8 @@ class Pillar(object):
git_pillar.init_remotes(
self.ext['git'],
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ salt.pillar.git_pillar.GLOBAL_ONLY)
git_pillar.fetch_remotes()
except TypeError:
# Handle malformed ext_pillar
diff --git a/salt/pillar/ec2_pillar.py b/salt/pillar/ec2_pillar.py
index 2fc014ae6e..dbcef73d93 100644
--- a/salt/pillar/ec2_pillar.py
+++ b/salt/pillar/ec2_pillar.py
@@ -1,19 +1,37 @@
# -*- coding: utf-8 -*-
'''
-Retrieve EC2 instance data for minions.
+Retrieve EC2 instance data for minions for ec2_tags and ec2_tags_list
-The minion id must be the instance-id retrieved from AWS. As an
-option, use_grain can be set to True. This allows the use of an
+The minion id must be the AWS instance-id or value in 'tag_match_key'.
+For example set 'tag_match_key' to 'Name', to have the minion-id matched against the
+tag 'Name'. The tag contents must be unique. The value of tag_match_value can
+be 'uqdn' or 'asis'. If 'uqdn', any domain is stripped before comparison.
+
+The option use_grain can be set to True. This allows the use of an
instance-id grain instead of the minion-id. Since this is a potential
security risk, the configuration can be further expanded to include
a list of minions that are trusted to only allow the alternate id
of the instances to specific hosts. There is no glob matching at
this time.
+The optional 'tag_list_key' indicates which keys should be added to
+'ec2_tags_list' and be split by tag_list_sep (default `;`). If a tag key is
+included in 'tag_list_key' it is removed from ec2_tags. If a tag does not
+exist it is still included as an empty list.
+
+
+ Note: restart the salt-master for changes to take effect.
+
+
.. code-block:: yaml
ext_pillar:
- ec2_pillar:
+ tag_match_key: 'Name'
+ tag_match_value: 'asis'
+ tag_list_key:
+ - Role
+ tag_list_sep: ';'
use_grain: True
minion_ids:
- trusted-minion-1
@@ -31,6 +49,8 @@ the instance.
from __future__ import absolute_import
import re
import logging
+import salt.ext.six as six
+from salt.ext.six.moves import range
# Import salt libs
from salt.utils.versions import StrictVersion as _StrictVersion
@@ -46,6 +66,8 @@ except ImportError:
# Set up logging
log = logging.getLogger(__name__)
+# DEBUG boto is far too verbose
+logging.getLogger('boto').setLevel(logging.WARNING)
def __virtual__():
@@ -59,7 +81,7 @@ def __virtual__():
required_boto_version = _StrictVersion('2.8.0')
if boto_version < required_boto_version:
log.error("%s: installed boto version %s < %s, can't retrieve instance data",
- __name__, boto_version, required_boto_version)
+ __name__, boto_version, required_boto_version)
return False
return True
@@ -76,68 +98,145 @@ def _get_instance_info():
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
use_grain=False,
- minion_ids=None):
+ minion_ids=None,
+ tag_match_key=None,
+ tag_match_value='asis',
+ tag_list_key=None,
+ tag_list_sep=';'):
'''
Execute a command and read the output as YAML
'''
-
- log.debug("Querying EC2 tags for minion id {0}".format(minion_id))
-
- # If minion_id is not in the format of an AWS EC2 instance, check to see
- # if there is a grain named 'instance-id' use that. Because this is a
- # security risk, the master config must contain a use_grain: True option
- # for this external pillar, which defaults to no
- if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is None:
- if 'instance-id' not in __grains__:
- log.debug("Minion-id is not in AWS instance-id formation, and there "
- "is no instance-id grain for minion {0}".format(minion_id))
- return {}
- if not use_grain:
- log.debug("Minion-id is not in AWS instance-id formation, and option "
- "not set to use instance-id grain, for minion {0}, use_grain "
- " is {1}".format(
- minion_id,
- use_grain))
+ valid_tag_match_value = ['uqdn', 'asis']
+
+ # meta-data:instance-id
+ grain_instance_id = __grains__.get('meta-data', {}).get('instance-id', None)
+ if not grain_instance_id:
+ # dynamic:instance-identity:document:instanceId
+ grain_instance_id = \
+ __grains__.get('dynamic', {}).get('instance-identity', {}).get('document', {}).get('instance-id', None)
+ if grain_instance_id and re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', grain_instance_id) is None:
+ log.error('External pillar {0}, instance-id \'{1}\' is not valid for '
+ '\'{2}\''.format(__name__, grain_instance_id, minion_id))
+ grain_instance_id = None # invalid instance id found, remove it from use.
+
+    # Check AWS Tag restrictions, i.e. letters, spaces, and numbers and + - = . _ : / @
+ if tag_match_key and re.match(r'[\w=.:/@-]+$', tag_match_key) is None:
+ log.error('External pillar %s, tag_match_key \'%s\' is not valid ',
+ __name__, tag_match_key if isinstance(tag_match_key, six.text_type) else 'non-string')
+ return {}
+
+ if tag_match_key and tag_match_value not in valid_tag_match_value:
+ log.error('External pillar {0}, tag_match_value \'{1}\' is not valid must be one '
+ 'of {2}'.format(__name__, tag_match_value, ' '.join(valid_tag_match_value)))
+ return {}
+
+ if not tag_match_key:
+ base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' '
+ 'against instance-id'.format(__name__, minion_id))
+ else:
+ base_msg = ('External pillar {0}, querying EC2 tags for minion id \'{1}\' '
+ 'against instance-id or \'{2}\' against \'{3}\''.format(__name__, minion_id, tag_match_key, tag_match_value))
+
+ log.debug(base_msg)
+ find_filter = None
+ find_id = None
+
+ if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', minion_id) is not None:
+ find_filter = None
+ find_id = minion_id
+ elif tag_match_key:
+ if tag_match_value == 'uqdn':
+ find_filter = {'tag:{0}'.format(tag_match_key): minion_id.split('.', 1)[0]}
+ else:
+ find_filter = {'tag:{0}'.format(tag_match_key): minion_id}
+ if grain_instance_id:
+ # we have an untrusted grain_instance_id, use it to narrow the search
+ # even more. Combination will be unique even if uqdn is set.
+ find_filter.update({'instance-id': grain_instance_id})
+            # Add this if running state is not dependent on EC2Config
+            # find_filter.update({'instance-state-name': 'running'})
+
+    # minion-id is not an instance-id and there is no suitable filter; try use_grain if enabled
+ if not find_filter and not find_id and use_grain:
+ if not grain_instance_id:
+ log.debug('Minion-id is not in AWS instance-id formation, and there '
+ 'is no instance-id grain for minion %s', minion_id)
return {}
- log.debug("use_grain set to {0}".format(use_grain))
if minion_ids is not None and minion_id not in minion_ids:
- log.debug("Minion-id is not in AWS instance ID format, and minion_ids "
- "is set in the ec2_pillar configuration, but minion {0} is "
- "not in the list of allowed minions {1}".format(minion_id,
- minion_ids))
- return {}
- if re.search(r'^i-([0-9a-z]{17}|[0-9a-z]{8})$', __grains__['instance-id']) is not None:
- minion_id = __grains__['instance-id']
- log.debug("Minion-id is not in AWS instance ID format, but a grain"
- " is, so using {0} as the minion ID".format(minion_id))
- else:
- log.debug("Nether minion id nor a grain named instance-id is in "
- "AWS format, can't query EC2 tags for minion {0}".format(
- minion_id))
+ log.debug('Minion-id is not in AWS instance ID format, and minion_ids '
+ 'is set in the ec2_pillar configuration, but minion {0} is '
+ 'not in the list of allowed minions {1}'.format(minion_id, minion_ids))
return {}
+ find_id = grain_instance_id
- m = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
- if len(m.keys()) < 1:
- log.info("%s: not an EC2 instance, skipping", __name__)
- return None
+ if not (find_filter or find_id):
+ log.debug('External pillar %s, querying EC2 tags for minion id \'%s\' against '
+                  'instance-id or \'%s\' against \'%s\', nothing to match against',
+ __name__, minion_id, tag_match_key, tag_match_value)
+ return {}
+
+ myself = boto.utils.get_instance_metadata(timeout=0.1, num_retries=1)
+ if len(myself.keys()) < 1:
+ log.info("%s: salt master not an EC2 instance, skipping", __name__)
+ return {}
# Get the Master's instance info, primarily the region
- (instance_id, region) = _get_instance_info()
+ (_, region) = _get_instance_info()
try:
conn = boto.ec2.connect_to_region(region)
- except boto.exception as e: # pylint: disable=E0712
- log.error("%s: invalid AWS credentials.", __name__)
- return None
+ except boto.exception.AWSConnectionError as exc:
+ log.error('%s: invalid AWS credentials, %s', __name__, exc)
+ return {}
+ except:
+ raise
+
+ if conn is None:
+ log.error('%s: Could not connect to region %s', __name__, region)
+ return {}
- tags = {}
try:
- _tags = conn.get_all_tags(filters={'resource-type': 'instance',
- 'resource-id': minion_id})
- for tag in _tags:
- tags[tag.name] = tag.value
- except IndexError as e:
- log.error("Couldn't retrieve instance information: %s", e)
- return None
-
- return {'ec2_tags': tags}
+ if find_id:
+ instance_data = conn.get_only_instances(instance_ids=[find_id], dry_run=False)
+ else:
+            # filters and max_results cannot be used together.
+ instance_data = conn.get_only_instances(filters=find_filter, dry_run=False)
+
+ except boto.exception.EC2ResponseError as exc:
+ log.error('{0} failed with \'{1}\''.format(base_msg, exc))
+ return {}
+
+ if not instance_data:
+ log.debug('%s no match using \'%s\'', base_msg, find_id if find_id else find_filter)
+ return {}
+
+    # Find an active instance, i.e. ignore terminated and stopped instances
+ active_inst = []
+ for inst in range(0, len(instance_data)):
+ if instance_data[inst].state not in ['terminated', 'stopped']:
+ active_inst.append(inst)
+
+ valid_inst = len(active_inst)
+ if not valid_inst:
+ log.debug('%s match found but not active \'%s\'', base_msg, find_id if find_id else find_filter)
+ return {}
+
+ if valid_inst > 1:
+ log.error('%s multiple matches, ignored, using \'%s\'', base_msg, find_id if find_id else find_filter)
+ return {}
+
+ instance = instance_data[active_inst[0]]
+ if instance.tags:
+ ec2_tags = instance.tags
+ ec2_tags_list = {}
+ log.debug('External pillar {0}, for minion id \'{1}\', tags: {2}'.format(__name__, minion_id, instance.tags))
+ if tag_list_key and isinstance(tag_list_key, list):
+ for item in tag_list_key:
+ if item in ec2_tags:
+ ec2_tags_list[item] = ec2_tags[item].split(tag_list_sep)
+                    del ec2_tags[item]  # make sure it's only in ec2_tags_list
+ else:
+ ec2_tags_list[item] = [] # always return a result
+
+ return {'ec2_tags': ec2_tags, 'ec2_tags_list': ec2_tags_list}
+ return {}
diff --git a/salt/pillar/file_tree.py b/salt/pillar/file_tree.py
index 1a825f80a8..97187bedde 100644
--- a/salt/pillar/file_tree.py
+++ b/salt/pillar/file_tree.py
@@ -1,175 +1,97 @@
# -*- coding: utf-8 -*-
'''
+The ``file_tree`` external pillar allows values from all files in a directory
+tree to be imported as Pillar data.
-``File_tree`` is an external pillar that allows
-values from all files in a directory tree to be imported as Pillar data.
+.. note::
-Note this is an external pillar, and is subject to the rules and constraints
-governing external pillars detailed here: :ref:`external-pillars`.
+ This is an external pillar and is subject to the :ref:`rules and
+ constraints <external-pillars>` governing external pillars.
.. versionadded:: 2015.5.0
-Example Configuration
----------------------
+In this pillar, data is organized by either Minion ID or Nodegroup name. To
+setup pillar data for a specific Minion, place it in
+``<root_dir>/hosts/<minion_id>``. To setup pillar data for an entire
+Nodegroup, place it in ``<root_dir>/nodegroups/<node_group>`` where
+``<node_group>`` is the Nodegroup's name.
+
+Example ``file_tree`` Pillar
+============================
+
+Master Configuration
+--------------------
.. code-block:: yaml
ext_pillar:
- file_tree:
- root_dir: /path/to/root/directory
+ root_dir: /srv/ext_pillar
follow_dir_links: False
keep_newline: True
-The ``root_dir`` parameter is required and points to the directory where files
-for each host are stored. The ``follow_dir_links`` parameter is optional and
-defaults to False. If ``follow_dir_links`` is set to True, this external pillar
-will follow symbolic links to other directories.
-
-.. warning::
- Be careful when using ``follow_dir_links``, as a recursive symlink chain
- will result in unexpected results.
-
-If ``keep_newline`` is set to ``True``, then the pillar values for files ending
-in newlines will keep that newline. The default behavior is to remove the
-end-of-file newline. ``keep_newline`` should be turned on if the pillar data is
-intended to be used to deploy a file using ``contents_pillar`` with a
-:py:func:`file.managed <salt.states.file.managed>` state.
-
-.. versionchanged:: 2015.8.4
- The ``raw_data`` parameter has been renamed to ``keep_newline``. In earlier
- releases, ``raw_data`` must be used. Also, this parameter can now be a list
- of globs, allowing for more granular control over which pillar values keep
- their end-of-file newline. The globs match paths relative to the
- directories named for minion IDs and nodegroups underneath the ``root_dir``
- (see the layout examples in the below sections).
-
- .. code-block:: yaml
-
- ext_pillar:
- - file_tree:
- root_dir: /path/to/root/directory
- keep_newline:
- - files/testdir/*
-
-.. note::
- In earlier releases, this documentation incorrectly stated that binary
- files would not affected by the ``keep_newline`` configuration. However,
- this module does not actually distinguish between binary and text files.
-
-.. versionchanged:: 2017.7.0
- Templating/rendering has been added. You can now specify a default render
- pipeline and a black- and whitelist of (dis)allowed renderers.
-
- ``template`` must be set to ``True`` for templating to happen.
-
- .. code-block:: yaml
-
- ext_pillar:
- - file_tree:
- root_dir: /path/to/root/directory
- render_default: jinja|yaml
- renderer_blacklist:
- - gpg
- renderer_whitelist:
- - jinja
- - yaml
- template: True
-
-Assigning Pillar Data to Individual Hosts
------------------------------------------
-
-To configure pillar data for each host, this external pillar will recursively
-iterate over ``root_dir``/hosts/``id`` (where ``id`` is a minion ID), and
-compile pillar data with each subdirectory as a dictionary key and each file
-as a value.
-
-For example, the following ``root_dir`` tree:
-
-.. code-block:: text
-
- ./hosts/
- ./hosts/test-host/
- ./hosts/test-host/files/
- ./hosts/test-host/files/testdir/
- ./hosts/test-host/files/testdir/file1.txt
- ./hosts/test-host/files/testdir/file2.txt
- ./hosts/test-host/files/another-testdir/
- ./hosts/test-host/files/another-testdir/symlink-to-file1.txt
-
-will result in the following pillar tree for minion with ID ``test-host``:
-
-.. code-block:: text
-
- test-host:
+ node_groups:
+ internal_servers: 'L@bob,stuart,kevin'
+
+Pillar Configuration
+--------------------
+
+.. code-block:: bash
+
+ (salt-master) # tree /srv/ext_pillar
+ /srv/ext_pillar/
+ |-- hosts
+ | |-- bob
+ | | |-- apache
+ | | | `-- config.d
+ | | | |-- 00_important.conf
+ | | | `-- 20_bob_extra.conf
+ | | `-- corporate_app
+ | | `-- settings
+ | | `-- bob_settings.cfg
+ | `-- kevin
+ | |-- apache
+ | | `-- config.d
+ | | `-- 00_important.conf
+ | `-- corporate_app
+ | `-- settings
+ | `-- kevin_settings.cfg
+ `-- nodegroups
+ `-- internal_servers
+ `-- corporate_app
+ `-- settings
+ `-- common_settings.cfg
+
+Verify Pillar Data
+------------------
+
+.. code-block:: bash
+
+ (salt-master) # salt bob pillar.items
+ bob:
----------
- files:
+ apache:
----------
- another-testdir:
+ config.d:
----------
- symlink-to-file1.txt:
- Contents of file #1.
-
- testdir:
- ----------
- file1.txt:
- Contents of file #1.
-
- file2.txt:
- Contents of file #2.
-
-.. note::
- Subdirectories underneath ``root_dir``/hosts/``id`` become nested
- dictionaries, as shown above.
-
-
-Assigning Pillar Data to Entire Nodegroups
-------------------------------------------
-
-To assign Pillar data to all minions in a given nodegroup, this external pillar
-recursively iterates over ``root_dir``/nodegroups/``nodegroup`` (where
-``nodegroup`` is the name of a nodegroup), and like for individual hosts,
-compiles pillar data with each subdirectory as a dictionary key and each file
-as a value.
-
-.. important::
- If the same Pillar key is set for a minion both by nodegroup and by
- individual host, then the value set for the individual host will take
- precedence.
-
-For example, the following ``root_dir`` tree:
-
-.. code-block:: text
-
- ./nodegroups/
- ./nodegroups/test-group/
- ./nodegroups/test-group/files/
- ./nodegroups/test-group/files/testdir/
- ./nodegroups/test-group/files/testdir/file1.txt
- ./nodegroups/test-group/files/testdir/file2.txt
- ./nodegroups/test-group/files/another-testdir/
- ./nodegroups/test-group/files/another-testdir/symlink-to-file1.txt
-
-will result in the following pillar data for minions in the node group
-``test-group``:
-
-.. code-block:: text
-
- test-host:
- ----------
- files:
+ 00_important.conf:
+ <important_config important_setting="yes" />
+ 20_bob_extra.conf:
+ <bob_specific_cfg has_freeze_ray="yes" />
+ corporate_app:
----------
- another-testdir:
+ settings:
----------
- symlink-to-file1.txt:
- Contents of file #1.
+ common_settings:
+ // This is the main settings file for the corporate
+ // internal web app
+ main_setting: probably
+ bob_settings:
+ role: bob
- testdir:
- ----------
- file1.txt:
- Contents of file #1.
+.. note::
- file2.txt:
- Contents of file #2.
+ The leaf data in the example shown is the contents of the pillar files.
'''
from __future__ import absolute_import
@@ -302,7 +224,123 @@ def ext_pillar(minion_id,
renderer_whitelist=None,
template=False):
'''
- Compile pillar data for the specified minion ID
+ Compile pillar data from the given ``root_dir`` specific to Nodegroup names
+ and Minion IDs.
+
+ If a Minion's ID is not found at ``<root_dir>/host/<minion_id>`` or if it
+ is not included in any Nodegroups named at
+ ``<root_dir>/nodegroups/<node_group>``, no pillar data provided by this
+ pillar module will be available for that Minion.
+
+ .. versionchanged:: 2017.7.0
+ Templating/rendering has been added. You can now specify a default
+ render pipeline and a black- and whitelist of (dis)allowed renderers.
+
+ :param:`template` must be set to ``True`` for templating to happen.
+
+ .. code-block:: yaml
+
+ ext_pillar:
+ - file_tree:
+ root_dir: /path/to/root/directory
+ render_default: jinja|yaml
+ renderer_blacklist:
+ - gpg
+ renderer_whitelist:
+ - jinja
+ - yaml
+ template: True
+
+ :param minion_id:
+ The ID of the Minion whose pillar data is to be collected
+
+ :param pillar:
+ Unused by the ``file_tree`` pillar module
+
+ :param root_dir:
+ Filesystem directory used as the root for pillar data (e.g.
+ ``/srv/ext_pillar``)
+
+ :param follow_dir_links:
+ Follow symbolic links to directories while collecting pillar files.
+ Defaults to ``False``.
+
+ .. warning::
+
+ Care should be exercised when enabling this option as it will
+ follow links that point outside of :param:`root_dir`.
+
+ .. warning::
+
+ Symbolic links that lead to infinite recursion are not filtered.
+
+ :param debug:
+ Enable debug information at log level ``debug``. Defaults to
+ ``False``. This option may be useful to help debug errors when setting
+ up the ``file_tree`` pillar module.
+
+ :param keep_newline:
+ Preserve the end-of-file newline in files. Defaults to ``False``.
+ This option may either be a boolean or a list of file globs (as defined
+ by the `Python fnmatch package
+ <https://docs.python.org/library/fnmatch.html>`_) for which end-of-file
+ newlines are to be kept.
+
+ ``keep_newline`` should be turned on if the pillar data is intended to
+ be used to deploy a file using ``contents_pillar`` with a
+ :py:func:`file.managed <salt.states.file.managed>` state.
+
+ .. versionchanged:: 2015.8.4
+ The ``raw_data`` parameter has been renamed to ``keep_newline``. In
+ earlier releases, ``raw_data`` must be used. Also, this parameter
+ can now be a list of globs, allowing for more granular control over
+ which pillar values keep their end-of-file newline. The globs match
+ paths relative to the directories named for Minion IDs and
+                Nodegroup names underneath the :param:`root_dir`.
+
+ .. code-block:: yaml
+
+ ext_pillar:
+ - file_tree:
+ root_dir: /srv/ext_pillar
+ keep_newline:
+ - apache/config.d/*
+ - corporate_app/settings/*
+
+ .. note::
+ In earlier releases, this documentation incorrectly stated that
+            binary files would not be affected by the ``keep_newline``. However,
+ this module does not actually distinguish between binary and text
+ files.
+
+
+ :param render_default:
+ Override Salt's :conf_master:`default global renderer <renderer>` for
+ the ``file_tree`` pillar.
+
+ .. code-block:: yaml
+
+ render_default: jinja
+
+ :param renderer_blacklist:
+ Disallow renderers for pillar files.
+
+ .. code-block:: yaml
+
+ renderer_blacklist:
+ - json
+
+ :param renderer_whitelist:
+ Allow renderers for pillar files.
+
+ .. code-block:: yaml
+
+ renderer_whitelist:
+ - yaml
+ - jinja
+
+ :param template:
+ Enable templating of pillar files. Defaults to ``False``.
'''
# Not used
del pillar
@@ -328,7 +366,7 @@ def ext_pillar(minion_id,
ngroup_pillar = {}
nodegroups_dir = os.path.join(root_dir, 'nodegroups')
- if os.path.exists(nodegroups_dir) and len(__opts__['nodegroups']) > 0:
+ if os.path.exists(nodegroups_dir) and len(__opts__.get('nodegroups', ())) > 0:
master_ngroups = __opts__['nodegroups']
ext_pillar_dirs = os.listdir(nodegroups_dir)
if len(ext_pillar_dirs) > 0:
@@ -354,8 +392,8 @@ def ext_pillar(minion_id,
else:
if debug is True:
log.debug(
- 'file_tree: no nodegroups found in file tree directory '
- 'ext_pillar_dirs, skipping...'
+ 'file_tree: no nodegroups found in file tree directory %s, skipping...',
+ ext_pillar_dirs
)
else:
if debug is True:
@@ -363,7 +401,12 @@ def ext_pillar(minion_id,
host_dir = os.path.join(root_dir, 'hosts', minion_id)
if not os.path.exists(host_dir):
- # No data for host with this ID
+ if debug is True:
+ log.debug(
+ 'file_tree: no pillar data for minion %s found in file tree directory %s',
+ minion_id,
+ host_dir
+ )
return ngroup_pillar
if not os.path.isdir(host_dir):
diff --git a/salt/pillar/git_pillar.py b/salt/pillar/git_pillar.py
index d183f63ac3..836311713b 100644
--- a/salt/pillar/git_pillar.py
+++ b/salt/pillar/git_pillar.py
@@ -491,12 +491,7 @@ except ImportError:
PER_REMOTE_OVERRIDES = ('env', 'root', 'ssl_verify', 'refspecs')
PER_REMOTE_ONLY = ('name', 'mountpoint')
-
-# Fall back to default per-remote-only. This isn't technically needed since
-# salt.utils.gitfs.GitBase.init_remotes() will default to
-# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
-# runners and other modules that import salt.pillar.git_pillar.
-PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
+GLOBAL_ONLY = ('base', 'branch')
# Set up logging
log = logging.getLogger(__name__)
@@ -550,7 +545,11 @@ def ext_pillar(minion_id, repo, pillar_dirs):
opts['pillar_roots'] = {}
opts['__git_pillar'] = True
pillar = salt.utils.gitfs.GitPillar(opts)
- pillar.init_remotes(repo, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
+ pillar.init_remotes(
+ repo,
+ PER_REMOTE_OVERRIDES,
+ PER_REMOTE_ONLY,
+ GLOBAL_ONLY)
if __opts__.get('__role') == 'minion':
# If masterless, fetch the remotes. We'll need to remove this once
# we make the minion daemon able to run standalone.
diff --git a/salt/renderers/pyobjects.py b/salt/renderers/pyobjects.py
index ae32a40d18..dda3d77806 100644
--- a/salt/renderers/pyobjects.py
+++ b/salt/renderers/pyobjects.py
@@ -263,7 +263,7 @@ different grain matches.
**Ubuntu** classes, since Ubuntu has an ``os_family`` grain of **Debian**
an an ``os`` grain of **Ubuntu**. As of the 2017.7.0 release, the order is
dictated by the order of declaration, with classes defined later overriding
- earlier ones. Addtionally, 2017.7.0 adds support for explicitly defining
+ earlier ones. Additionally, 2017.7.0 adds support for explicitly defining
the ordering using an optional attribute called ``priority``.
Given the above example, ``os_family`` matches will be processed first,
diff --git a/salt/returners/elasticsearch_return.py b/salt/returners/elasticsearch_return.py
index 2ddb4a26eb..2acf47e361 100644
--- a/salt/returners/elasticsearch_return.py
+++ b/salt/returners/elasticsearch_return.py
@@ -227,7 +227,7 @@ def returner(ret):
if ret.get('return', None) is None:
log.info('Won\'t push new data to Elasticsearch, job with jid={0} was '
- 'not succesful'.format(job_id))
+ 'not successful'.format(job_id))
return
# Build the index name
diff --git a/salt/returners/highstate_return.py b/salt/returners/highstate_return.py
index 04054cfb6e..c299a55f43 100644
--- a/salt/returners/highstate_return.py
+++ b/salt/returners/highstate_return.py
@@ -293,8 +293,7 @@ def _generate_states_report(sorted_data):
'''
states = []
for state, data in sorted_data:
- module, stateid, name, function = \
- [x.rstrip('_').lstrip('-') for x in state.split('|')]
+ module, stateid, name, function = state.split('_|-')
module_function = '.'.join((module, function))
result = data.get('result', '')
single = [
diff --git a/salt/runner.py b/salt/runner.py
index 95c602a0cf..88bd88137d 100644
--- a/salt/runner.py
+++ b/salt/runner.py
@@ -229,7 +229,7 @@ class Runner(RunnerClient):
async_pub = self._gen_async_pub()
self.jid = async_pub['jid']
- if low['fun'] == 'state.orchestrate':
+ if low['fun'] in ('state.orchestrate', 'state.orch'):
low['kwarg']['orchestration_jid'] = async_pub['jid']
# Run the runner!
diff --git a/salt/runners/bgp.py b/salt/runners/bgp.py
index b460bdcef9..c8a70b2ec8 100644
--- a/salt/runners/bgp.py
+++ b/salt/runners/bgp.py
@@ -53,7 +53,7 @@ Configuration
By default, the following extra fields are returned (displayed):
- - ``connection_stats``: connection stats, as descibed below
+ - ``connection_stats``: connection stats, as described below
- ``import_policy``: the name of the import policy
- ``export_policy``: the name of the export policy
diff --git a/salt/runners/cache.py b/salt/runners/cache.py
index 08685914b6..9ff445200f 100644
--- a/salt/runners/cache.py
+++ b/salt/runners/cache.py
@@ -342,7 +342,8 @@ def clear_git_lock(role, remote=None, **kwargs):
obj.init_remotes(
ext_pillar['git'],
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ salt.pillar.git_pillar.GLOBAL_ONLY)
git_objects.append(obj)
elif role == 'winrepo':
winrepo_dir = __opts__['winrepo_dir']
@@ -357,7 +358,8 @@ def clear_git_lock(role, remote=None, **kwargs):
obj.init_remotes(
remotes,
salt.runners.winrepo.PER_REMOTE_OVERRIDES,
- salt.runners.winrepo.PER_REMOTE_ONLY)
+ salt.runners.winrepo.PER_REMOTE_ONLY,
+ salt.runners.winrepo.GLOBAL_ONLY)
git_objects.append(obj)
else:
raise SaltInvocationError('Invalid role \'{0}\''.format(role))
diff --git a/salt/runners/fileserver.py b/salt/runners/fileserver.py
index 6322aa7842..5978f1e299 100644
--- a/salt/runners/fileserver.py
+++ b/salt/runners/fileserver.py
@@ -164,6 +164,20 @@ def file_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+    .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what files are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master
+ <salt.modules.cp.list_master>` to see what files the minion can see,
+ and always remember to restart the salt-master daemon when updating
+ the fileserver configuration.
+
CLI Examples:
.. code-block:: bash
@@ -196,6 +210,20 @@ def symlink_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+    .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what symlinks are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master_symlinks
+ <salt.modules.cp.list_master_symlinks>` to see what symlinks the minion
+ can see, and always remember to restart the salt-master daemon when
+ updating the fileserver configuration.
+
CLI Example:
.. code-block:: bash
@@ -228,6 +256,20 @@ def dir_list(saltenv='base', backend=None):
.. versionadded:: 2015.5.0
+    .. note::
+ Keep in mind that executing this function spawns a new process,
+ separate from the master. This means that if the fileserver
+ configuration has been changed in some way since the master has been
+ restarted (e.g. if :conf_master:`fileserver_backend`,
+ :conf_master:`gitfs_remotes`, :conf_master:`hgfs_remotes`, etc. have
+ been updated), then the results of this runner will not accurately
+ reflect what dirs are available to minions.
+
+ When in doubt, use :py:func:`cp.list_master_dirs
+ <salt.modules.cp.list_master_dirs>` to see what dirs the minion can see,
+ and always remember to restart the salt-master daemon when updating
+ the fileserver configuration.
+
CLI Example:
.. code-block:: bash
diff --git a/salt/runners/git_pillar.py b/salt/runners/git_pillar.py
index 0e8e97beb3..7f6c3ad31c 100644
--- a/salt/runners/git_pillar.py
+++ b/salt/runners/git_pillar.py
@@ -87,7 +87,8 @@ def update(branch=None, repo=None):
pillar = salt.utils.gitfs.GitPillar(__opts__)
pillar.init_remotes(pillar_conf,
salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
- salt.pillar.git_pillar.PER_REMOTE_ONLY)
+ salt.pillar.git_pillar.PER_REMOTE_ONLY,
+ salt.pillar.git_pillar.GLOBAL_ONLY)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
diff --git a/salt/runners/manage.py b/salt/runners/manage.py
index 723052a09f..fcb7353bbe 100644
--- a/salt/runners/manage.py
+++ b/salt/runners/manage.py
@@ -67,6 +67,15 @@ def _ping(tgt, tgt_type, timeout, gather_job_timeout):
return returned, not_returned
+def _warn_expr_form():
+ salt.utils.warn_until(
+ 'Fluorine',
+ 'the target type should be passed using the \'tgt_type\' '
+ 'argument instead of \'expr_form\'. Support for using '
+ '\'expr_form\' will be removed in Salt Fluorine.'
+ )
+
+
def status(output=True, tgt='*', tgt_type='glob', expr_form=None, timeout=None, gather_job_timeout=None):
'''
.. versionchanged:: 2017.7.0
@@ -86,12 +95,7 @@ def status(output=True, tgt='*', tgt_type='glob', expr_form=None, timeout=None,
# remember to remove the expr_form argument from this function when
# performing the cleanup on this deprecation.
if expr_form is not None:
- salt.utils.warn_until(
- 'Fluorine',
- 'the target type should be passed using the \'tgt_type\' '
- 'argument instead of \'expr_form\'. Support for using '
- '\'expr_form\' will be removed in Salt Fluorine.'
- )
+ _warn_expr_form()
tgt_type = expr_form
ret = {}
@@ -175,6 +179,12 @@ def down(removekeys=False, tgt='*', tgt_type='glob', expr_form=None):
salt-run manage.down tgt="webservers" tgt_type="nodegroup"
'''
+ # remember to remove the expr_form argument from this function when
+ # performing the cleanup on this deprecation.
+ if expr_form is not None:
+ _warn_expr_form()
+ tgt_type = expr_form
+
ret = status(output=False, tgt=tgt, tgt_type=tgt_type).get('down', [])
for minion in ret:
if removekeys:
@@ -199,6 +209,12 @@ def up(tgt='*', tgt_type='glob', expr_form=None, timeout=None, gather_job_timeou
salt-run manage.up tgt="webservers" tgt_type="nodegroup"
salt-run manage.up timeout=5 gather_job_timeout=10
'''
+ # remember to remove the expr_form argument from this function when
+ # performing the cleanup on this deprecation.
+ if expr_form is not None:
+ _warn_expr_form()
+ tgt_type = expr_form
+
ret = status(
output=False,
tgt=tgt,
@@ -246,7 +262,7 @@ def list_state(subset=None, show_ipv4=False, state=None):
minions = [m for m in minions if m in subset]
else:
# Always return 'present' for 0MQ for now
- # TODO: implement other states spport for 0MQ
+ # TODO: implement other states support for 0MQ
ckminions = salt.utils.minions.CkMinions(__opts__)
minions = ckminions.connected_ids(show_ipv4=show_ipv4, subset=subset, include_localhost=True)
@@ -593,6 +609,12 @@ def safe_accept(target, tgt_type='glob', expr_form=None):
salt-run manage.safe_accept my_minion
salt-run manage.safe_accept minion1,minion2 tgt_type=list
'''
+ # remember to remove the expr_form argument from this function when
+ # performing the cleanup on this deprecation.
+ if expr_form is not None:
+ _warn_expr_form()
+ tgt_type = expr_form
+
salt_key = salt.key.Key(__opts__)
ssh_client = salt.client.ssh.client.SSHClient()
diff --git a/salt/runners/pillar.py b/salt/runners/pillar.py
index 7d9ae52efa..8084bac0c0 100644
--- a/salt/runners/pillar.py
+++ b/salt/runners/pillar.py
@@ -82,19 +82,17 @@ def show_pillar(minion='*', **kwargs):
pillar = runner.cmd('pillar.show_pillar', [])
print(pillar)
'''
-
+ pillarenv = None
saltenv = 'base'
- pillarenv = __opts__['pillarenv'] if 'pillarenv' in __opts__ else None
id_, grains, _ = salt.utils.minions.get_minion_data(minion, __opts__)
if grains is None:
grains = {'fqdn': minion}
for key in kwargs:
- if key == 'pillarenv':
- __opts__['pillarenv'] = kwargs[key]
if key == 'saltenv':
saltenv = kwargs[key]
elif key == 'pillarenv':
+ # pillarenv overridden on CLI
pillarenv = kwargs[key]
else:
grains[key] = kwargs[key]
diff --git a/salt/runners/salt.py b/salt/runners/salt.py
index 1cbae0ac27..38eb7b2e2e 100644
--- a/salt/runners/salt.py
+++ b/salt/runners/salt.py
@@ -32,11 +32,12 @@ Execution modules are also available to salt runners:
# import python libs
from __future__ import absolute_import
from __future__ import print_function
+import copy
import logging
# import salt libs
import salt.client
-from salt.loader import minion_mods, utils
+import salt.loader
from salt.exceptions import SaltClientError
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@@ -64,9 +65,11 @@ def cmd(fun, *args, **kwargs):
kws = dict((k, v) for k, v in kwargs.items() if not k.startswith('__'))
# pylint: disable=undefined-variable
- return minion_mods(
- __opts__,
- utils=utils(__opts__)).get(fun)(*args, **kws)
+ opts = copy.deepcopy(__opts__)
+ opts['grains'] = salt.loader.grains(opts)
+ utils = salt.loader.utils(opts)
+ mods = salt.loader.minion_mods(opts, utils=utils)
+ return mods.get(fun)(*args, **kws)
def execute(tgt,
diff --git a/salt/runners/winrepo.py b/salt/runners/winrepo.py
index d6fd1bdb58..11741ca2c9 100644
--- a/salt/runners/winrepo.py
+++ b/salt/runners/winrepo.py
@@ -36,6 +36,7 @@ PER_REMOTE_OVERRIDES = ('ssl_verify', 'refspecs')
# salt.utils.gitfs.PER_REMOTE_ONLY for this value, so this is mainly for
# runners and other modules that import salt.runners.winrepo.
PER_REMOTE_ONLY = salt.utils.gitfs.PER_REMOTE_ONLY
+GLOBAL_ONLY = ('branch',)
def genrepo(opts=None, fire_event=True):
@@ -218,7 +219,10 @@ def update_git_repos(opts=None, clean=False, masterless=False):
try:
winrepo = salt.utils.gitfs.WinRepo(opts, base_dir)
winrepo.init_remotes(
- remotes, PER_REMOTE_OVERRIDES, PER_REMOTE_ONLY)
+ remotes,
+ PER_REMOTE_OVERRIDES,
+ PER_REMOTE_ONLY,
+ GLOBAL_ONLY)
winrepo.fetch_remotes()
# Since we're not running update(), we need to manually call
# clear_old_remotes() to remove directories from remotes that
diff --git a/salt/state.py b/salt/state.py
index 17a57a51a7..62b3b60104 100644
--- a/salt/state.py
+++ b/salt/state.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
'''
-The module used to execute states in salt. A state is unlike a module
-execution in that instead of just executing a command it ensure that a
-certain state is present on the system.
+The State Compiler is used to execute states in Salt. A state is unlike
+an execution module in that instead of just executing a command, it
+ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
@@ -162,6 +162,23 @@ def _l_tag(name, id_):
return _gen_tag(low)
+def _calculate_fake_duration():
+ '''
+ Generate a NULL duration for when states do not run
+ but we want the results to be consistent.
+ '''
+ utc_start_time = datetime.datetime.utcnow()
+ local_start_time = utc_start_time - \
+ (datetime.datetime.utcnow() - datetime.datetime.now())
+ utc_finish_time = datetime.datetime.utcnow()
+ start_time = local_start_time.time().isoformat()
+ delta = (utc_finish_time - utc_start_time)
+ # duration in milliseconds.microseconds
+ duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0
+
+ return start_time, duration
+
+
def trim_req(req):
'''
Trim any function off of a requisite
@@ -686,8 +703,12 @@ class State(object):
except AttributeError:
pillar_enc = str(pillar_enc).lower()
self._pillar_enc = pillar_enc
- if initial_pillar:
+ if initial_pillar and not self._pillar_override:
self.opts['pillar'] = initial_pillar
+ else:
+ # Compile pillar data
+ self.opts['pillar'] = self._gather_pillar()
+ # Reapply overrides on top of compiled pillar
if self._pillar_override:
self.opts['pillar'] = salt.utils.dictupdate.merge(
self.opts['pillar'],
@@ -695,8 +716,6 @@ class State(object):
self.opts.get('pillar_source_merging_strategy', 'smart'),
self.opts.get('renderer', 'yaml'),
self.opts.get('pillar_merge_lists', False))
- else:
- self.opts['pillar'] = self._gather_pillar()
self.state_con = context or {}
self.load_modules()
self.active = set()
@@ -1245,7 +1264,7 @@ class State(object):
'''
err = []
for chunk in chunks:
- err += self.verify_data(chunk)
+ err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
@@ -1725,7 +1744,7 @@ class State(object):
ret = {'name': cdata['args'][0],
'result': None,
'changes': {},
- 'comment': 'Started in a seperate process',
+ 'comment': 'Started in a separate process',
'proc': proc}
return ret
@@ -2317,8 +2336,11 @@ class State(object):
run_dict = self.pre
else:
run_dict = running
+ start_time, duration = _calculate_fake_duration()
run_dict[tag] = {'changes': {},
'result': False,
+ 'duration': duration,
+ 'start_time': start_time,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
@@ -2401,9 +2423,12 @@ class State(object):
_cmt = 'One or more requisite failed: {0}'.format(
', '.join(str(i) for i in failed_requisites)
)
+ start_time, duration = _calculate_fake_duration()
running[tag] = {
'changes': {},
'result': False,
+ 'duration': duration,
+ 'start_time': start_time,
'comment': _cmt,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']
@@ -2419,8 +2444,11 @@ class State(object):
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
+ start_time, duration = _calculate_fake_duration()
pre_ret = {'changes': {},
'result': True,
+ 'duration': duration,
+ 'start_time': start_time,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
@@ -2428,15 +2456,21 @@ class State(object):
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == 'onfail':
+ start_time, duration = _calculate_fake_duration()
running[tag] = {'changes': {},
'result': True,
+ 'duration': duration,
+ 'start_time': start_time,
'comment': 'State was not run because onfail req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'onchanges':
+ start_time, duration = _calculate_fake_duration()
running[tag] = {'changes': {},
'result': True,
+ 'duration': duration,
+ 'start_time': start_time,
'comment': 'State was not run because none of the onchanges reqs changed',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
@@ -2457,14 +2491,13 @@ class State(object):
listeners = []
crefs = {}
for chunk in chunks:
- crefs[(chunk['state'], chunk['name'])] = chunk
- crefs[(chunk['state'], chunk['__id__'])] = chunk
+ crefs[(chunk['state'], chunk['__id__'], chunk['name'])] = chunk
if 'listen' in chunk:
- listeners.append({(chunk['state'], chunk['__id__']): chunk['listen']})
+ listeners.append({(chunk['state'], chunk['__id__'], chunk['name']): chunk['listen']})
if 'listen_in' in chunk:
for l_in in chunk['listen_in']:
for key, val in six.iteritems(l_in):
- listeners.append({(key, val): [{chunk['state']: chunk['__id__']}]})
+ listeners.append({(key, val, 'lookup'): [{chunk['state']: chunk['__id__']}]})
mod_watchers = []
errors = {}
for l_dict in listeners:
@@ -2473,7 +2506,7 @@ class State(object):
if not isinstance(listen_to, dict):
continue
for lkey, lval in six.iteritems(listen_to):
- if (lkey, lval) not in crefs:
+ if not any(lkey == cref[0] and lval in cref for cref in crefs):
rerror = {_l_tag(lkey, lval):
{
'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
@@ -2483,27 +2516,32 @@ class State(object):
}}
errors.update(rerror)
continue
- to_tag = _gen_tag(crefs[(lkey, lval)])
- if to_tag not in running:
- continue
- if running[to_tag]['changes']:
- if key not in crefs:
- rerror = {_l_tag(key[0], key[1]):
- {'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
- 'name': 'listen_{0}:{1}'.format(key[0], key[1]),
- 'result': False,
- 'changes': {}}}
- errors.update(rerror)
+ to_tags = [
+ _gen_tag(data) for cref, data in six.iteritems(crefs) if lkey == cref[0] and lval in cref
+ ]
+ for to_tag in to_tags:
+ if to_tag not in running:
continue
- chunk = crefs[key]
- low = chunk.copy()
- low['sfun'] = chunk['fun']
- low['fun'] = 'mod_watch'
- low['__id__'] = 'listener_{0}'.format(low['__id__'])
- for req in STATE_REQUISITE_KEYWORDS:
- if req in low:
- low.pop(req)
- mod_watchers.append(low)
+ if running[to_tag]['changes']:
+ if not any(key[0] == cref[0] and key[1] in cref for cref in crefs):
+ rerror = {_l_tag(key[0], key[1]):
+ {'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
+ 'name': 'listen_{0}:{1}'.format(key[0], key[1]),
+ 'result': False,
+ 'changes': {}}}
+ errors.update(rerror)
+ continue
+
+ new_chunks = [data for cref, data in six.iteritems(crefs) if key[0] == cref[0] and key[1] in cref]
+ for chunk in new_chunks:
+ low = chunk.copy()
+ low['sfun'] = chunk['fun']
+ low['fun'] = 'mod_watch'
+ low['__id__'] = 'listener_{0}'.format(low['__id__'])
+ for req in STATE_REQUISITE_KEYWORDS:
+ if req in low:
+ low.pop(req)
+ mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
@@ -2519,12 +2557,12 @@ class State(object):
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
- errors += ext_errors
- errors += self.verify_high(high)
+ errors.extend(ext_errors)
+ errors.extend(self.verify_high(high))
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
- errors += req_in_errors
+ errors.extend(req_in_errors)
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
@@ -3684,22 +3722,21 @@ class BaseHighState(object):
return err
if not high:
return ret
- cumask = os.umask(0o77)
- try:
- if salt.utils.is_windows():
- # Make sure cache file isn't read-only
- self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
- with salt.utils.fopen(cfn, 'w+b') as fp_:
- try:
- self.serial.dump(high, fp_)
- except TypeError:
- # Can't serialize pydsl
- pass
- except (IOError, OSError):
- msg = 'Unable to write to "state.highstate" cache file {0}'
- log.error(msg.format(cfn))
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if salt.utils.is_windows():
+ # Make sure cache file isn't read-only
+ self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
+ with salt.utils.fopen(cfn, 'w+b') as fp_:
+ try:
+ self.serial.dump(high, fp_)
+ except TypeError:
+ # Can't serialize pydsl
+ pass
+ except (IOError, OSError):
+ msg = 'Unable to write to "state.highstate" cache file {0}'
+ log.error(msg.format(cfn))
- os.umask(cumask)
return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
diff --git a/salt/states/acme.py b/salt/states/acme.py
index 43649a6426..ad1c9d0564 100644
--- a/salt/states/acme.py
+++ b/salt/states/acme.py
@@ -85,6 +85,7 @@ def cert(name,
comment += 'would have been renewed'
else:
comment += 'would not have been touched'
+ ret['result'] = True
ret['comment'] = comment
return ret
diff --git a/salt/states/archive.py b/salt/states/archive.py
index ba8c94031c..8156452a9a 100644
--- a/salt/states/archive.py
+++ b/salt/states/archive.py
@@ -690,7 +690,7 @@ def extracted(name,
# True
# >>> os.path.isfile('/tmp/foo.txt/')
# False
- name = name.rstrip('/')
+ name = name.rstrip(os.sep)
if os.path.isfile(name):
ret['comment'] = '{0} exists and is not a directory'.format(name)
return ret
@@ -723,6 +723,11 @@ def extracted(name,
)
return ret
+ if if_missing is not None and os.path.exists(if_missing):
+ ret['result'] = True
+ ret['comment'] = 'Path {0} exists'.format(if_missing)
+ return ret
+
if user or group:
if salt.utils.is_windows():
ret['comment'] = \
@@ -1519,7 +1524,7 @@ def extracted(name,
if not if_missing:
# If is_missing was used, and both a) the archive had never been
# extracted, and b) the path referred to by if_missing exists, then
- # enforce_missing would contain paths of top_levle dirs/files that
+ # enforce_missing would contain paths of top_level dirs/files that
# _would_ have been extracted. Since if_missing can be used as a
# semaphore to conditionally extract, we don't want to make this a
# case where the state fails, so we only fail the state if
diff --git a/salt/states/boto3_route53.py b/salt/states/boto3_route53.py
index 0a49dcc3fe..dbeb2e2c80 100644
--- a/salt/states/boto3_route53.py
+++ b/salt/states/boto3_route53.py
@@ -137,7 +137,7 @@ def _from_aws_encoding(string): # XXX TODO
def hosted_zone_present(name, Name=None, PrivateZone=False,
- CallerReference=None, Comment='', VPCs=None,
+ CallerReference=None, Comment=None, VPCs=None,
region=None, key=None, keyid=None, profile=None):
'''
Ensure a hosted zone exists with the given attributes.
@@ -595,54 +595,60 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name
# Convert any magic RR values to something AWS will understand, and otherwise clean them up.
fixed_rrs = []
- for rr in ResourceRecords:
- if rr.startswith('magic:'):
- fields = rr.split(':')
- if fields[1] == 'ec2_instance_tag':
- if len(fields) != 5:
- log.warning("Invalid magic RR value seen: '{}'. Passing as-is.".format(rr))
- fixed_rrs += [rr]
- continue
- tag_name = fields[2]
- tag_value = fields[3]
- instance_attr = fields[4]
- good_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
- r = __salt__['boto_ec2.find_instances'](
- tags={tag_name: tag_value}, return_objs=True, in_states=good_states,
- region=region, key=key, keyid=keyid, profile=profile)
- if len(r) < 1:
- ret['comment'] = 'No EC2 instance with tag {} == {} found'.format(tag_name,
- tag_value)
- log.error(ret['comment'])
- ret['result'] = False
- return ret
- if len(r) > 1:
- ret['comment'] = 'Multiple EC2 instances with tag {} == {} found'.format(
- tag_name, tag_value)
- log.error(ret['comment'])
- ret['result'] = False
- return ret
- instance = r[0]
- res = getattr(instance, instance_attr, None)
- if res:
- log.debug('Found {} {} for instance {}'.format(instance_attr, res, instance.id))
- fixed_rrs += [_to_aws_encoding(res)]
+ if ResourceRecords:
+ for rr in ResourceRecords:
+ if rr.startswith('magic:'):
+ fields = rr.split(':')
+ if fields[1] == 'ec2_instance_tag':
+ if len(fields) != 5:
+ log.warning("Invalid magic RR value seen: '%s'. Passing as-is.", rr)
+ fixed_rrs += [rr]
+ continue
+ tag_name = fields[2]
+ tag_value = fields[3]
+ instance_attr = fields[4]
+ good_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
+ r = __salt__['boto_ec2.find_instances'](
+ tags={tag_name: tag_value}, return_objs=True, in_states=good_states,
+ region=region, key=key, keyid=keyid, profile=profile)
+ if len(r) < 1:
+ ret['comment'] = 'No EC2 instance with tag {} == {} found'.format(tag_name,
+ tag_value)
+ log.error(ret['comment'])
+ ret['result'] = False
+ return ret
+ if len(r) > 1:
+ ret['comment'] = 'Multiple EC2 instances with tag {} == {} found'.format(
+ tag_name, tag_value)
+ log.error(ret['comment'])
+ ret['result'] = False
+ return ret
+ instance = r[0]
+ res = getattr(instance, instance_attr, None)
+ if res:
+ log.debug('Found %s %s for instance %s', instance_attr, res, instance.id)
+ fixed_rrs += [_to_aws_encoding(res)]
+ else:
+ ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr,
+ instance.id)
+ log.error(ret['comment'])
+ ret['result'] = False
+ return ret
else:
- ret['comment'] = 'Attribute {} not found on instance {}'.format(instance_attr,
- instance.id)
+ ret['comment'] = ('Unknown RR magic value seen: {}. Please extend the '
+ 'boto3_route53 state module to add support for your preferred '
+ 'incantation.'.format(fields[1]))
log.error(ret['comment'])
ret['result'] = False
return ret
else:
- ret['comment'] = ('Unknown RR magic value seen: {}. Please extend the '
- 'boto3_route53 state module to add support for your preferred '
- 'incantation.'.format(fields[1]))
- log.error(ret['comment'])
- ret['result'] = False
- return ret
- else:
- fixed_rrs += [rr]
- ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)]
+ # for TXT records the entry must be encapsulated in quotes as required by the API
+ # this appears to be incredibly difficult with the jinja templating engine
+ # so inject the quotations here to make a viable ChangeBatch
+ if Type == 'TXT':
+ rr = '"{}"'.format(rr)
+ fixed_rrs += [rr]
+ ResourceRecords = [{'Value': rr} for rr in sorted(fixed_rrs)]
recordsets = __salt__['boto3_route53.get_resource_records'](HostedZoneId=HostedZoneId,
StartRecordName=Name, StartRecordType=Type, region=region, key=key, keyid=keyid,
@@ -691,9 +697,10 @@ def rr_present(name, HostedZoneId=None, DomainName=None, PrivateZone=False, Name
return ret
ResourceRecordSet = {
'Name': Name,
- 'Type': Type,
- 'ResourceRecords': ResourceRecords
+ 'Type': Type
}
+ if ResourceRecords:
+ ResourceRecordSet['ResourceRecords'] = ResourceRecords
for u in updatable:
ResourceRecordSet.update({u: locals().get(u)}) if locals().get(u) else None
diff --git a/salt/states/boto_ec2.py b/salt/states/boto_ec2.py
index ac88c30c11..5f373ecbce 100644
--- a/salt/states/boto_ec2.py
+++ b/salt/states/boto_ec2.py
@@ -886,6 +886,8 @@ def instance_present(name, instance_name=None, instance_id=None, image_id=None,
allocation_id=allocation_id, region=region, key=key,
keyid=keyid, profile=profile)
if r:
+ if 'new' not in ret['changes']:
+ ret['changes']['new'] = {}
ret['changes']['new']['public_ip'] = ip
else:
ret['result'] = False
diff --git a/salt/states/chocolatey.py b/salt/states/chocolatey.py
index 141d5e7d59..f5784da0af 100644
--- a/salt/states/chocolatey.py
+++ b/salt/states/chocolatey.py
@@ -101,7 +101,9 @@ def installed(name, version=None, source=None, force=False, pre_versions=False,
# Package installed
else:
- version_info = __salt__['chocolatey.version'](name, check_remote=True)
+ version_info = __salt__['chocolatey.version'](name=name,
+ check_remote=True,
+ source=source)
full_name = name
for pkg in version_info:
diff --git a/salt/states/cmd.py b/salt/states/cmd.py
index 8f55c85832..d390153a60 100644
--- a/salt/states/cmd.py
+++ b/salt/states/cmd.py
@@ -199,8 +199,7 @@ executed when the state it is watching changes. Example:
``cmd.wait`` itself does not do anything; all functionality is inside its ``mod_watch``
function, which is called by ``watch`` on changes.
-``cmd.wait`` will be deprecated in future due to the confusion it causes. The
-preferred format is using the :ref:`onchanges Requisite <requisites-onchanges>`, which
+The preferred format is using the :ref:`onchanges Requisite <requisites-onchanges>`, which
works on ``cmd.run`` as well as on any other state. The example would then look as follows:
.. code-block:: yaml
diff --git a/salt/states/docker_container.py b/salt/states/docker_container.py
index c5eb7ee597..72a84cf9b8 100644
--- a/salt/states/docker_container.py
+++ b/salt/states/docker_container.py
@@ -256,7 +256,7 @@ def running(name,
.. versionchanged:: 2017.7.0
This option was renamed from ``stop_timeout`` to
- ``shutdown_timeout`` to acommodate the ``stop_timeout`` container
+ ``shutdown_timeout`` to accommodate the ``stop_timeout`` container
configuration setting.
client_timeout : 60
@@ -1445,14 +1445,14 @@ def running(name,
.. code-block:: yaml
foo:
- dockerng.running:
+ docker_container.running:
- image: bar/baz:latest
- ulimits: nofile=1024:1024,nproc=60
.. code-block:: yaml
foo:
- dockerng.running:
+ docker_container.running:
- image: bar/baz:latest
- ulimits:
- nofile=1024:1024
diff --git a/salt/states/elasticsearch.py b/salt/states/elasticsearch.py
index 2c37a304ce..c1913eb5e5 100644
--- a/salt/states/elasticsearch.py
+++ b/salt/states/elasticsearch.py
@@ -175,7 +175,7 @@ def alias_present(name, index, definition=None):
if not old:
ret['comment'] = 'Alias {0} for index {1} does not exist and will be created'.format(name, index)
else:
- ret['comment'] = 'Alias {0} for index {1} exists with wrong configuration and will be overriden'.format(name, index)
+ ret['comment'] = 'Alias {0} for index {1} exists with wrong configuration and will be overridden'.format(name, index)
ret['result'] = None
else:
@@ -348,7 +348,7 @@ def pipeline_present(name, definition):
if not pipeline:
ret['comment'] = 'Pipeline {0} does not exist and will be created'.format(name)
else:
- ret['comment'] = 'Pipeline {0} exists with wrong configuration and will be overriden'.format(name)
+ ret['comment'] = 'Pipeline {0} exists with wrong configuration and will be overridden'.format(name)
ret['result'] = None
else:
@@ -439,7 +439,7 @@ def search_template_present(name, definition):
if not template:
ret['comment'] = 'Search template {0} does not exist and will be created'.format(name)
else:
- ret['comment'] = 'Search template {0} exists with wrong configuration and will be overriden'.format(name)
+ ret['comment'] = 'Search template {0} exists with wrong configuration and will be overridden'.format(name)
ret['result'] = None
else:
diff --git a/salt/states/file.py b/salt/states/file.py
index da982a13d7..3c1e226692 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
@@ -3503,7 +3503,9 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None):
This is only used when datetime is pulled from ``os.path.getmtime()``.
Defaults to ``None`` which uses the timezone from the locale.
- .. code-block: yaml
+ Usage example:
+
+ .. code-block:: yaml
/var/backups/example_directory:
file.retention_schedule:
@@ -3757,7 +3759,7 @@ def line(name, content=None, match=None, mode=None, location=None,
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
- .. code-block: yaml
+ .. code-block:: yaml
update_config:
file.line:
@@ -4021,11 +4023,20 @@ def blockreplace(
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
- show_changes=True):
+ show_changes=True,
+ append_newline=None):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
+ .. versionchanged:: 2017.7.5,2018.3.1
+ ``append_newline`` argument added. Additionally, to improve
+ idempotence, if the string represented by ``marker_end`` is found in
+ the middle of the line, the content preceding the marker will be
+ removed when the block is replaced. This allows one to remove
+ ``append_newline: False`` from the SLS and have the block properly
+ replaced if the end of the content block is immediately followed by the
+ ``marker_end`` (i.e. no newline before the marker).
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal. This can help you
@@ -4110,41 +4121,54 @@ def blockreplace(
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
- template
- The named templating engine will be used to render the downloaded file.
- Defaults to ``jinja``. The following templates are supported:
+ template : jinja
+ Templating engine to be used to render the downloaded file. The
+ following engines are supported:
- - :mod:`cheetah<salt.renderers.cheetah>`
- - :mod:`genshi<salt.renderers.genshi>`
- - :mod:`jinja<salt.renderers.jinja>`
- - :mod:`mako<salt.renderers.mako>`
- - :mod:`py<salt.renderers.py>`
- - :mod:`wempy<salt.renderers.wempy>`
+ - :mod:`cheetah <salt.renderers.cheetah>`
+ - :mod:`genshi <salt.renderers.genshi>`
+ - :mod:`jinja <salt.renderers.jinja>`
+ - :mod:`mako <salt.renderers.mako>`
+ - :mod:`py <salt.renderers.py>`
+ - :mod:`wempy <salt.renderers.wempy>`
context
- Overrides default context variables passed to the template.
+ Overrides default context variables passed to the template
defaults
- Default context passed to the template.
+ Default context passed to the template
- append_if_not_found
- If markers are not found and set to True then the markers and content
- will be appended to the file. Default is ``False``
+ append_if_not_found : False
+ If markers are not found and this option is set to ``True``, the
+ content block will be appended to the file.
- prepend_if_not_found
- If markers are not found and set to True then the markers and content
- will be prepended to the file. Default is ``False``
+ prepend_if_not_found : False
+ If markers are not found and this option is set to ``True``, the
+ content block will be prepended to the file.
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
- dry_run
- Don't make any edits to the file
+ dry_run : False
+ If ``True``, do not make any edits to the file and simply return the
+ changes that *would* be made.
- show_changes
- Output a unified diff of the old file and the new file. If ``False``
- return a boolean if any changes were made
+ show_changes : True
+ Controls how changes are presented. If ``True``, the ``Changes``
+ section of the state return will contain a unified diff of the changes
+ made. If False, then it will contain a boolean (``True`` if any changes
+ were made, otherwise ``False``).
+
+ append_newline
+ Controls whether or not a newline is appended to the content block. If
+ the value of this argument is ``True`` then a newline will be added to
+ the content block. If it is ``False``, then a newline will *not* be
+ added to the content block. If it is unspecified, then a newline will
+ only be added to the content block if it does not already end in a
+ newline.
+
+ .. versionadded:: 2017.7.5,2018.3.1
Example of usage with an accumulator and with a variable:
@@ -4246,17 +4270,25 @@ def blockreplace(
for index, item in enumerate(text):
content += str(item)
- changes = __salt__['file.blockreplace'](
- name,
- marker_start,
- marker_end,
- content=content,
- append_if_not_found=append_if_not_found,
- prepend_if_not_found=prepend_if_not_found,
- backup=backup,
- dry_run=__opts__['test'],
- show_changes=show_changes
- )
+ try:
+ changes = __salt__['file.blockreplace'](
+ name,
+ marker_start,
+ marker_end,
+ content=content,
+ append_if_not_found=append_if_not_found,
+ prepend_if_not_found=prepend_if_not_found,
+ backup=backup,
+ dry_run=__opts__['test'],
+ show_changes=show_changes,
+ append_newline=append_newline)
+ except Exception as exc:
+ log.exception('Encountered error managing block')
+ ret['comment'] = (
+ 'Encountered error managing block: {0}. '
+ 'See the log for details.'.format(exc)
+ )
+ return ret
if changes:
ret['pchanges'] = {'diff': changes}
diff --git a/salt/states/git.py b/salt/states/git.py
index a7f4f13eb5..0a8ec07355 100644
--- a/salt/states/git.py
+++ b/salt/states/git.py
@@ -2315,6 +2315,7 @@ def detached(name,
else:
# Clone repository
if os.path.isdir(target):
+ target_contents = os.listdir(target)
if force_clone:
# Clone is required, and target directory exists, but the
# ``force`` option is enabled, so we need to clear out its
@@ -2331,20 +2332,26 @@ def detached(name,
'place (force_clone=True set in git.detached state)'
.format(target, name)
)
- try:
- if os.path.islink(target):
- os.unlink(target)
- else:
- salt.utils.rm_rf(target)
- except OSError as exc:
+ removal_errors = {}
+ for target_object in target_contents:
+ target_path = os.path.join(target, target_object)
+ try:
+ salt.utils.rm_rf(target_path)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ removal_errors[target_path] = exc
+ if removal_errors:
+ err_strings = [
+ ' {0}\n {1}'.format(k, v)
+ for k, v in six.iteritems(removal_errors)
+ ]
return _fail(
ret,
- 'Unable to remove {0}: {1}'.format(target, exc),
+ 'Unable to remove\n{0}'.format('\n'.join(err_strings)),
comments
)
- else:
- ret['changes']['forced clone'] = True
- elif os.listdir(target):
+ ret['changes']['forced clone'] = True
+ elif target_contents:
# Clone is required, but target dir exists and is non-empty. We
# can't proceed.
return _fail(
diff --git a/salt/states/grafana4_dashboard.py b/salt/states/grafana4_dashboard.py
index 0f3318c3b8..eff2cac3ec 100644
--- a/salt/states/grafana4_dashboard.py
+++ b/salt/states/grafana4_dashboard.py
@@ -4,17 +4,38 @@ Manage Grafana v4.0 Dashboards
.. versionadded:: 2017.7.0
-.. code-block:: yaml
+:configuration: This state requires a configuration profile to be configured
+ in the minion config, minion pillar, or master config. The module will use
+ the 'grafana' key by default, if defined.
+
+ Example configuration using basic authentication:
+
+ .. code-block:: yaml
+
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_user: admin
+ grafana_password: admin
+ grafana_timeout: 3
+
+ Example configuration using token based authentication:
- grafana:
- grafana_timeout: 3
- grafana_token: qwertyuiop
- grafana_url: 'https://url.com'
+ .. code-block:: yaml
+
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_token: token
+ grafana_timeout: 3
+
+The behavior of this module is to create dashboards if they do not exist, to
+add rows if they do not exist in existing dashboards, and to update rows if
+they exist in dashboards. The module will not manage rows that are not defined,
+allowing users to manage their own custom rows.
.. code-block:: yaml
Ensure minimum dashboard is managed:
- grafana_dashboard.present:
+ grafana4_dashboard.present:
- name: insightful-dashboard
- base_dashboards_from_pillar:
- default_dashboard
@@ -30,12 +51,6 @@ Manage Grafana v4.0 Dashboards
- target: alias(constantLine(50), 'max')
title: Imaginary
type: graph
-
-
-The behavior of this module is to create dashboards if they do not exist, to
-add rows if they do not exist in existing dashboards, and to update rows if
-they exist in dashboards. The module will not manage rows that are not defined,
-allowing users to manage their own custom rows.
'''
# Import Python libs
diff --git a/salt/states/grafana4_datasource.py b/salt/states/grafana4_datasource.py
index 7ae3ef3e95..29728815d7 100644
--- a/salt/states/grafana4_datasource.py
+++ b/salt/states/grafana4_datasource.py
@@ -4,31 +4,36 @@ Manage Grafana v4.0 data sources
.. versionadded:: 2017.7.0
-Token auth setup
+:configuration: This state requires a configuration profile to be configured
+ in the minion config, minion pillar, or master config. The module will use
+ the 'grafana' key by default, if defined.
-.. code-block:: yaml
+ Example configuration using basic authentication:
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_token: qwertyuiop
- grafana_url: 'https://url.com'
+ .. code-block:: yaml
-Basic auth setup
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_user: admin
+ grafana_password: admin
+ grafana_timeout: 3
-.. code-block:: yaml
+ Example configuration using token based authentication:
+
+ .. code-block:: yaml
+
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_token: token
+ grafana_timeout: 3
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_user: grafana
- grafana_password: qwertyuiop
- grafana_url: 'https://url.com'
+The behavior of this module is to create data sources if they do not exist, and
+to update data sources if they already exist.
.. code-block:: yaml
Ensure influxdb data source is present:
- grafana_datasource.present:
+ grafana4_datasource.present:
- name: influxdb
- type: influxdb
- url: http://localhost:8086
diff --git a/salt/states/grafana4_org.py b/salt/states/grafana4_org.py
index 08376ba5cd..053fc7a692 100644
--- a/salt/states/grafana4_org.py
+++ b/salt/states/grafana4_org.py
@@ -4,26 +4,28 @@ Manage Grafana v4.0 orgs
.. versionadded:: 2017.7.0
-Token auth setup
+:configuration: This state requires a configuration profile to be configured
+ in the minion config, minion pillar, or master config. The module will use
+ the 'grafana' key by default, if defined.
-.. code-block:: yaml
+ Example configuration using basic authentication:
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_token: qwertyuiop
- grafana_url: 'https://url.com'
+ .. code-block:: yaml
-Basic auth setup
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_user: admin
+ grafana_password: admin
+ grafana_timeout: 3
-.. code-block:: yaml
+ Example configuration using token based authentication:
+
+ .. code-block:: yaml
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_org: grafana
- grafana_password: qwertyuiop
- grafana_url: 'https://url.com'
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_token: token
+ grafana_timeout: 3
.. code-block:: yaml
diff --git a/salt/states/grafana4_user.py b/salt/states/grafana4_user.py
index ce1a46c50b..1a4caa474c 100644
--- a/salt/states/grafana4_user.py
+++ b/salt/states/grafana4_user.py
@@ -4,26 +4,28 @@ Manage Grafana v4.0 users
.. versionadded:: 2017.7.0
-Token auth setup
+:configuration: This state requires a configuration profile to be configured
+ in the minion config, minion pillar, or master config. The module will use
+ the 'grafana' key by default, if defined.
-.. code-block:: yaml
+ Example configuration using basic authentication:
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_token: qwertyuiop
- grafana_url: 'https://url.com'
+ .. code-block:: yaml
-Basic auth setup
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_user: admin
+ grafana_password: admin
+ grafana_timeout: 3
-.. code-block:: yaml
+ Example configuration using token based authentication:
+
+ .. code-block:: yaml
- grafana_version: 4
- grafana:
- grafana_timeout: 5
- grafana_user: grafana
- grafana_password: qwertyuiop
- grafana_url: 'https://url.com'
+ grafana:
+ grafana_url: http://grafana.localhost
+ grafana_token: token
+ grafana_timeout: 3
.. code-block:: yaml
diff --git a/salt/states/heat.py b/salt/states/heat.py
index a042751225..26dd0f37d8 100644
--- a/salt/states/heat.py
+++ b/salt/states/heat.py
@@ -16,7 +16,7 @@ Stack can be set as either absent or deploy.
heat.deployed:
- name:
- template: #Required
- - enviroment:
+ - environment:
- params: {}
- poll: 5
- rollback: False
@@ -33,6 +33,12 @@ mysql:
image: Debian 7
- rollback: True
+.. versionadded:: 2017.7.5,2018.3.1
+
+ The spelling mistake in parameter `enviroment` was corrected to `environment`.
+ The misspelled version is still supported for backward compatibility, but will
+ be removed in Salt Neon.
+
'''
from __future__ import absolute_import
import json
@@ -122,7 +128,7 @@ def _parse_template(tmpl_str):
return tpl
-def deployed(name, template=None, enviroment=None, params=None, poll=5,
+def deployed(name, template=None, environment=None, params=None, poll=5,
rollback=False, timeout=60, update=False, profile=None,
**connection_args):
'''
@@ -134,14 +140,14 @@ def deployed(name, template=None, enviroment=None, params=None, poll=5,
template
File of template
- enviroment
- File of enviroment
+ environment
+ File of environment
params
Parameter dict used to create the stack
poll
- Poll(in sec.) and report events until stack complete
+ Poll (in sec.) and report events until stack complete
rollback
Enable rollback on create failure
@@ -152,10 +158,22 @@ def deployed(name, template=None, enviroment=None, params=None, poll=5,
profile
Profile to use
+ .. versionadded:: 2017.7.5,2018.3.1
+
+ The spelling mistake in parameter `enviroment` was corrected to `environment`.
+ The misspelled version is still supported for backward compatibility, but will
+ be removed in Salt Neon.
+
'''
+ if environment is None and 'enviroment' in connection_args:
+ salt.utils.warn_until('Neon', (
+ "Please use the 'environment' parameter instead of the misspelled 'enviroment' "
+ "parameter which will be removed in Salt Neon."
+ ))
+ environment = connection_args.pop('enviroment')
log.debug('Deployed with(' +
'{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9})'
- .format(name, template, enviroment, params, poll, rollback,
+ .format(name, template, environment, params, poll, rollback,
timeout, update, profile, connection_args))
ret = {'name': None,
'comment': '',
@@ -266,7 +284,7 @@ def deployed(name, template=None, enviroment=None, params=None, poll=5,
else:
stack = __salt__['heat.update_stack'](name=name,
template_file=template,
- enviroment=enviroment,
+ environment=environment,
parameters=params, poll=poll,
rollback=rollback,
timeout=timeout,
@@ -282,7 +300,7 @@ def deployed(name, template=None, enviroment=None, params=None, poll=5,
else:
stack = __salt__['heat.create_stack'](name=name,
template_file=template,
- enviroment=enviroment,
+ environment=environment,
parameters=params, poll=poll,
rollback=rollback,
timeout=timeout,
diff --git a/salt/states/hipchat.py b/salt/states/hipchat.py
index 08ffdb1a28..64a98f1168 100644
--- a/salt/states/hipchat.py
+++ b/salt/states/hipchat.py
@@ -60,7 +60,7 @@ def send_message(name,
- api_url: https://hipchat.myteam.com
- api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
- api_version: v1
- - color: green
+ - message_color: green
- notify: True
The following parameters are required:
@@ -93,7 +93,7 @@ def send_message(name,
The api version for Hipchat to use,
if not specified in the configuration options of master or minion.
- color
+ message_color
The color the Hipchat message should be displayed in. One of the following, default: yellow
"yellow", "red", "green", "purple", "gray", or "random".
diff --git a/salt/states/http.py b/salt/states/http.py
index 58399293e9..55505221c6 100644
--- a/salt/states/http.py
+++ b/salt/states/http.py
@@ -143,7 +143,7 @@ def wait_for_successful_query(name, wait_for=300, **kwargs):
.. note::
- All other arguements are passed to the http.query state.
+ All other arguments are passed to the http.query state.
'''
starttime = time.time()
diff --git a/salt/states/junos.py b/salt/states/junos.py
index e132c15605..d7236e0ce6 100644
--- a/salt/states/junos.py
+++ b/salt/states/junos.py
@@ -40,7 +40,7 @@ def rpc(name, dest=None, format='xml', args=None, **kwargs):
The rpc to be executed. (default = None)
Optional
* dest:
- Destination file where the rpc ouput is stored. (default = None)
+ Destination file where the rpc output is stored. (default = None)
Note that the file will be stored on the proxy minion. To push the
files to the master use the salt's following execution module: \
:py:func:`cp.push <salt.modules.cp.push>`
@@ -319,7 +319,7 @@ def install_config(name, **kwargs):
the given time unless the commit is confirmed.
* diffs_file:
Path to the file where the diff (difference in old configuration
- and the commited configuration) will be stored.(default = None)
+ and the committed configuration) will be stored.(default = None)
Note that the file will be stored on the proxy minion. To push the
files to the master use the salt's following execution module: \
:py:func:`cp.push <salt.modules.cp.push>`
diff --git a/salt/states/kubernetes.py b/salt/states/kubernetes.py
index bc62da1ab7..f14061695a 100644
--- a/salt/states/kubernetes.py
+++ b/salt/states/kubernetes.py
@@ -6,6 +6,10 @@ Manage kubernetes resources as salt states
NOTE: This module requires the proper pillar values set. See
salt.modules.kubernetes for more information.
+.. warning::
+
+    Configuration options will change in Fluorine.
+
The kubernetes module is used to manage different kubernetes resources.
diff --git a/salt/states/module.py b/salt/states/module.py
index 062e44ff6f..f0d5ab4226 100644
--- a/salt/states/module.py
+++ b/salt/states/module.py
@@ -530,7 +530,25 @@ def _get_result(func_ret, changes):
res = changes_ret.get('result', {})
elif changes_ret.get('retcode', 0) != 0:
res = False
+ # Explore dict in depth to determine if there is a
+ # 'result' key set to False which sets the global
+ # state result.
+ else:
+ res = _get_dict_result(changes_ret)
return res
+
+def _get_dict_result(node):
+ ret = True
+ for key, val in six.iteritems(node):
+ if key == 'result' and val is False:
+ ret = False
+ break
+ elif isinstance(val, dict):
+ ret = _get_dict_result(val)
+ if ret is False:
+ break
+ return ret
+
mod_watch = salt.utils.alias_function(run, 'mod_watch')
diff --git a/salt/states/mount.py b/salt/states/mount.py
index 5381bf3f81..f046c794a1 100644
--- a/salt/states/mount.py
+++ b/salt/states/mount.py
@@ -328,10 +328,12 @@ def mounted(name,
mount_invisible_keys = [
'actimeo',
'comment',
+ 'credentials',
'direct-io-mode',
'password',
- 'retry',
'port',
+ 'retry',
+ 'secretfile',
]
if extra_mount_invisible_keys:
diff --git a/salt/states/netacl.py b/salt/states/netacl.py
index df97c76b6e..bb441443f8 100644
--- a/salt/states/netacl.py
+++ b/salt/states/netacl.py
@@ -155,7 +155,7 @@ def term(name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
@@ -406,7 +406,7 @@ def term(name,
.. note::
The first method allows the user to eventually apply complex manipulation
and / or retrieve the data from external services before passing the
- data to the state. The second one is more straighforward, for less
+ data to the state. The second one is more straightforward, for less
complex cases when loading the data directly from the pillar is sufficient.
.. note::
@@ -526,7 +526,7 @@ def filter(name, # pylint: disable=redefined-builtin
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
CLI Example:
@@ -636,7 +636,7 @@ def filter(name, # pylint: disable=redefined-builtin
.. note::
The first method allows the user to eventually apply complex manipulation
and / or retrieve the data from external services before passing the
- data to the state. The second one is more straighforward, for less
+ data to the state. The second one is more straightforward, for less
complex cases when loading the data directly from the pillar is sufficient.
.. note::
@@ -710,7 +710,7 @@ def managed(name,
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``False``
- Merge the ``filters`` wil the corresponding values from the pillar. Default: ``False``.
+      Merge the ``filters`` with the corresponding values from the pillar. Default: ``False``.
.. note::
By default this state does not merge, to avoid any unexpected behaviours.
@@ -746,7 +746,7 @@ def managed(name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
CLI Example:
@@ -933,7 +933,7 @@ def managed(name,
.. note::
The first method allows the user to eventually apply complex manipulation
and / or retrieve the data from external services before passing the
- data to the state. The second one is more straighforward, for less
+ data to the state. The second one is more straightforward, for less
complex cases when loading the data directly from the pillar is sufficient.
.. note::
diff --git a/salt/states/netconfig.py b/salt/states/netconfig.py
index fa9ea90937..8ba7192f4a 100644
--- a/salt/states/netconfig.py
+++ b/salt/states/netconfig.py
@@ -135,8 +135,8 @@ def managed(name,
To replace the config, set ``replace`` to ``True``. This option is recommended to be used with caution!
.. warning::
- The spport for NAPALM native templates will be dropped beginning with Salt Fluorine.
- Implicitly, the ``template_path`` argument will be depreacted and removed.
+ The support for NAPALM native templates will be dropped beginning with Salt Fluorine.
+ Implicitly, the ``template_path`` argument will be deprecated and removed.
template_name
Identifies path to the template source. The template can be either stored on the local machine,
@@ -152,7 +152,7 @@ def managed(name,
Placing the template under ``/etc/salt/states/templates/example.jinja``, it can be used as
``salt://templates/example.jinja``.
- Alternatively, for local files, the user can specify the abolute path.
+ Alternatively, for local files, the user can specify the absolute path.
If remotely, the source can be retrieved via ``http``, ``https`` or ``ftp``.
Examples:
@@ -213,7 +213,7 @@ def managed(name,
Commit? Default: ``True``.
debug: False
- Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` contaning the raw
+ Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw
result after the template was rendered.
replace: False
@@ -223,7 +223,7 @@ def managed(name,
Default variables/context passed to the template.
**template_vars
- Dictionary with the arguments/context to be used when the template is rendered. Do not explicitely specify this
+ Dictionary with the arguments/context to be used when the template is rendered. Do not explicitly specify this
argument. This represents any other variable that will be sent to the template rendering system. Please
see an example below! In both ``ntp_peers_example_using_pillar`` and ``ntp_peers_example``, ``peers`` is sent as
template variable.
diff --git a/salt/states/netyang.py b/salt/states/netyang.py
index 2db6208f57..e500e5e449 100644
--- a/salt/states/netyang.py
+++ b/salt/states/netyang.py
@@ -109,7 +109,7 @@ def managed(name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
@@ -212,7 +212,7 @@ def configured(name,
configuration on the device and the expected
configuration. Depending on the platform and hardware
capabilities, one could be more optimal than the other.
- Additionally, the output of the ``managed`` is diferent,
+ Additionally, the output of the ``managed`` is different,
in such a way that the ``pchange`` field in the output
contains structured data, rather than text.
@@ -236,7 +236,7 @@ def configured(name,
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
- as ``loaded_config`` contaning the raw configuration loaded on the device.
+ as ``loaded_config`` containing the raw configuration loaded on the device.
replace: ``False``
Should replace the config with the new generate one?
diff --git a/salt/states/pdbedit.py b/salt/states/pdbedit.py
index 9b4c7b0b98..f420b7adf8 100644
--- a/salt/states/pdbedit.py
+++ b/salt/states/pdbedit.py
@@ -107,7 +107,7 @@ def managed(name, **kwargs):
specify user account control properties
.. note::
- Only the follwing can be set:
+ Only the following can be set:
- N: No password required
- D: Account disabled
- H: Home directory required
diff --git a/salt/states/pip_state.py b/salt/states/pip_state.py
index 66d2fcb7ec..3238078014 100644
--- a/salt/states/pip_state.py
+++ b/salt/states/pip_state.py
@@ -182,8 +182,7 @@ def _check_pkg_version_format(pkg):
def _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
- upgrade, user, cwd, bin_env):
-
+ upgrade, user, cwd, bin_env, env_vars):
# result: None means the command failed to run
# result: True means the package is installed
# result: False means the package is not installed
@@ -192,7 +191,8 @@ def _check_if_installed(prefix, state_pkg_name, version_spec,
# Check if the requested package is already installed.
try:
pip_list = __salt__['pip.list'](prefix, bin_env=bin_env,
- user=user, cwd=cwd)
+ user=user, cwd=cwd,
+ env_vars=env_vars)
prefix_realname = _find_key(prefix, pip_list)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
@@ -683,7 +683,7 @@ def installed(name,
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
- upgrade, user, cwd, bin_env)
+ upgrade, user, cwd, bin_env, env_vars)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
@@ -824,9 +824,10 @@ def installed(name,
# Case for packages that are not an URL
if prefix:
pipsearch = __salt__['pip.list'](prefix, bin_env,
- user=user, cwd=cwd)
+ user=user, cwd=cwd,
+ env_vars=env_vars)
- # If we didnt find the package in the system after
+ # If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
diff --git a/salt/states/pkg.py b/salt/states/pkg.py
index afe0774a82..0ab5a5f9d1 100644
--- a/salt/states/pkg.py
+++ b/salt/states/pkg.py
@@ -39,7 +39,7 @@ A more involved example involves pulling from a custom repository.
- keyserver: keyserver.ubuntu.com
logstash:
- pkg.installed
+ pkg.installed:
- fromrepo: ppa:wolfnet/logstash
Multiple packages can also be installed with the use of the pkgs
@@ -1953,7 +1953,7 @@ def downloaded(name,
return ret
# It doesn't make sense here to received 'downloadonly' as kwargs
- # as we're explicitely passing 'downloadonly=True' to execution module.
+ # as we're explicitly passing 'downloadonly=True' to execution module.
if 'downloadonly' in kwargs:
del kwargs['downloadonly']
@@ -2126,7 +2126,7 @@ def patch_downloaded(name, advisory_ids=None, **kwargs):
'this platform'}
# It doesn't make sense here to received 'downloadonly' as kwargs
- # as we're explicitely passing 'downloadonly=True' to execution module.
+ # as we're explicitly passing 'downloadonly=True' to execution module.
if 'downloadonly' in kwargs:
del kwargs['downloadonly']
return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)
diff --git a/salt/states/pkgrepo.py b/salt/states/pkgrepo.py
index 93fcc6e068..e38d44dd30 100644
--- a/salt/states/pkgrepo.py
+++ b/salt/states/pkgrepo.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
'''
-Management of APT/YUM package repos
-===================================
+Management of APT/DNF/YUM/Zypper package repos
+==============================================
-Package repositories for APT-based and YUM-based distros can be managed with
-these states. Here is some example SLS:
+States for managing software package repositories on Linux distros. Supported
+package managers are APT, DNF, YUM and Zypper. Here is some example SLS:
.. code-block:: yaml
@@ -131,7 +131,7 @@ def managed(name, ppa=None, **kwargs):
disabled : False
Included to reduce confusion due to APT's use of the ``disabled``
- argument. If this is passed for a yum/dnf/zypper-based distro, then the
+ argument. If this is passed for a YUM/DNF/Zypper-based distro, then the
reverse will be passed as ``enabled``. For example passing
``disabled=True`` will assume ``enabled=False``.
@@ -150,8 +150,14 @@ def managed(name, ppa=None, **kwargs):
enabled configuration. Anything supplied for this list will be saved
in the repo configuration with a comment marker (#) in front.
- Additional configuration values seen in yum repo files, such as ``gpgkey`` or
- ``gpgcheck``, will be used directly as key-value pairs. For example:
+ gpgautoimport
+ Only valid for Zypper package manager. If set to True, automatically
+ trust and import public GPG key for the repository. The key should be
+ specified with ``gpgkey`` parameter. See details below.
+
+ Additional configuration values seen in YUM/DNF/Zypper repo files, such as
+ ``gpgkey`` or ``gpgcheck``, will be used directly as key-value pairs.
+ For example:
.. code-block:: yaml
@@ -217,7 +223,7 @@ def managed(name, ppa=None, **kwargs):
and/or installing packages.
enabled : True
- Included to reduce confusion due to yum/dnf/zypper's use of the
+ Included to reduce confusion due to YUM/DNF/Zypper's use of the
``enabled`` argument. If this is passed for an APT-based distro, then
the reverse will be passed as ``disabled``. For example, passing
``enabled=False`` will assume ``disabled=False``.
@@ -371,6 +377,9 @@ def managed(name, ppa=None, **kwargs):
repo = salt.utils.pkg.deb.strip_uri(repo)
if pre:
+ #22412: Remove file attribute in case same repo is set up multiple times but with different files
+ pre.pop('file', None)
+ sanitizedkwargs.pop('file', None)
for kwarg in sanitizedkwargs:
if kwarg not in pre:
if kwarg == 'enabled':
diff --git a/salt/states/postgres_user.py b/salt/states/postgres_user.py
index f85264da20..d9f686871c 100644
--- a/salt/states/postgres_user.py
+++ b/salt/states/postgres_user.py
@@ -95,7 +95,7 @@ def present(name,
encrypted to the previous
format if it is not already done.
- default_passwoord
+ default_password
The password used only when creating the user, unless password is set.
.. versionadded:: 2016.3.0
diff --git a/salt/states/rabbitmq_user.py b/salt/states/rabbitmq_user.py
index a990b5a2d2..d6f1cf664b 100644
--- a/salt/states/rabbitmq_user.py
+++ b/salt/states/rabbitmq_user.py
@@ -126,7 +126,7 @@ def present(name,
return ret
if user and not any((force, perms, tags, passwd_reqs_update)):
- log.debug(('RabbitMQ user \'%s\' exists, password is upto'
+ log.debug(('RabbitMQ user \'%s\' exists, password is up to'
' date and force is not set.'), name)
ret['comment'] = 'User \'{0}\' is already present.'.format(name)
ret['result'] = True
diff --git a/salt/states/reg.py b/salt/states/reg.py
index d9bc8a60e5..99cca3b364 100644
--- a/salt/states/reg.py
+++ b/salt/states/reg.py
@@ -192,9 +192,14 @@ def present(name,
salt.utils.to_unicode(name, 'utf-8'))
return ret
+ try:
+ vdata_decoded = salt.utils.to_unicode(vdata, 'utf-8')
+ except UnicodeDecodeError:
+ # vdata contains binary data that can't be decoded
+ vdata_decoded = vdata
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': u'{0}'.format(salt.utils.to_unicode(vname, 'utf-8') if vname else u'(Default)'),
- 'Value': salt.utils.to_unicode(vdata, 'utf-8')}
+ 'Value': vdata_decoded}
# Check for test option
if __opts__['test']:
diff --git a/salt/states/smartos.py b/salt/states/smartos.py
index fa01d613f9..42038b0a39 100644
--- a/salt/states/smartos.py
+++ b/salt/states/smartos.py
@@ -368,7 +368,7 @@ def image_vacuum(name):
# list of images to keep
images = []
- # retreive image_present state data for host
+ # retrieve image_present state data for host
for state in __salt__['state.show_lowstate']():
# don't throw exceptions when not highstate run
if 'state' not in state:
diff --git a/salt/states/user.py b/salt/states/user.py
index 20d0ee390c..4a468c3eb1 100644
--- a/salt/states/user.py
+++ b/salt/states/user.py
@@ -240,7 +240,8 @@ def present(name,
gid_from_name
If True, the default group id will be set to the id of the group with
- the same name as the user, Default is ``False``.
+ the same name as the user. If the group does not exist the state will
+ fail. Default is ``False``.
groups
A list of groups to assign the user to, pass a list object. If a group
@@ -455,6 +456,10 @@ def present(name,
if gid_from_name:
gid = __salt__['file.group_to_gid'](name)
+ if gid == '':
+ ret['comment'] = 'Default group with name "{0}" is not present'.format(name)
+ ret['result'] = False
+ return ret
changes = _changes(name,
uid,
diff --git a/salt/states/win_lgpo.py b/salt/states/win_lgpo.py
index 96e6ff729d..922161abf2 100644
--- a/salt/states/win_lgpo.py
+++ b/salt/states/win_lgpo.py
@@ -23,7 +23,7 @@ Example single policy configuration
.. code-block:: yaml
- Acount lockout duration:
+ Account lockout duration:
gpo.set:
- setting: 120
- policy_class: Machine
diff --git a/salt/states/zfs.py b/salt/states/zfs.py
index c06d972102..969ad6c020 100644
--- a/salt/states/zfs.py
+++ b/salt/states/zfs.py
@@ -764,7 +764,7 @@ def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
## manage snapshots
if ret['result']:
- # retreive snapshots
+ # retrieve snapshots
prunable = []
snapshots = {}
for key in schedule:
diff --git a/salt/states/zone.py b/salt/states/zone.py
index 22900f89c5..b3357b3d82 100644
--- a/salt/states/zone.py
+++ b/salt/states/zone.py
@@ -9,7 +9,7 @@ Management of Solaris Zones
.. versionadded:: 2017.7.0
-Bellow are some examples of how to use this state.
+Below are some examples of how to use this state.
Lets start with creating a zone and installing it.
.. code-block:: yaml
@@ -47,7 +47,7 @@ Lets start with creating a zone and installing it.
A zone without network access is not very useful. We could update
the zone.present state in the example above to add a network interface
-or we could use a seperate state for this.
+or we could use a separate state for this.
.. code-block:: yaml
@@ -836,7 +836,7 @@ def import_(name, path, mode='import', nodataset=False, brand_opts=None):
def present(name, brand, zonepath, properties=None, resources=None):
'''
- Ensure a zone with certain properties and resouces
+ Ensure a zone with certain properties and resources
name : string
name of the zone
diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py
index d8d8235920..d93a62154e 100644
--- a/salt/transport/ipc.py
+++ b/salt/transport/ipc.py
@@ -559,6 +559,11 @@ class IPCMessagePublisher(object):
io_loop=self.io_loop
)
self.streams.add(stream)
+
+ def discard_after_closed():
+ self.streams.discard(stream)
+
+ stream.set_close_callback(discard_after_closed)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
diff --git a/salt/transport/mixins/auth.py b/salt/transport/mixins/auth.py
index 866493b854..f6a01a1d30 100644
--- a/salt/transport/mixins/auth.py
+++ b/salt/transport/mixins/auth.py
@@ -417,6 +417,10 @@ class AESReqServerMixin(object):
log.debug('Host key change detected in open mode.')
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
+ elif not load['pub']:
+ log.error('Public key is empty: {0}'.format(load['id']))
+ return {'enc': 'clear',
+ 'load': {'ret': False}}
pub = None
diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py
index f274240a1e..ca37fb61ba 100644
--- a/salt/transport/tcp.py
+++ b/salt/transport/tcp.py
@@ -23,6 +23,7 @@ import salt.utils
import salt.utils.verify
import salt.utils.event
import salt.utils.async
+import salt.utils.files
import salt.payload
import salt.exceptions
import salt.transport.frame
@@ -1359,11 +1360,8 @@ class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
pull_sock.start()
- finally:
- os.umask(old_umask)
# run forever
try:
diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py
index 0e2a4f2be4..d346ef432d 100644
--- a/salt/transport/zeromq.py
+++ b/salt/transport/zeromq.py
@@ -23,6 +23,7 @@ import salt.crypt
import salt.utils
import salt.utils.verify
import salt.utils.event
+import salt.utils.files
import salt.payload
import salt.transport.client
import salt.transport.server
@@ -750,11 +751,8 @@ class ZeroMQPubServerChannel(salt.transport.server.PubServerChannel):
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
pull_sock.bind(pull_uri)
- finally:
- os.umask(old_umask)
try:
while True:
diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py
index 6e2305bd2d..9f11579279 100644
--- a/salt/utils/__init__.py
+++ b/salt/utils/__init__.py
@@ -484,7 +484,7 @@ def daemonize(redirect_out=True):
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
- os.umask(18)
+ os.umask(0o022) # pylint: disable=blacklisted-function
# do second fork
try:
@@ -3130,7 +3130,7 @@ def chugid_and_umask(runas, umask):
if runas is not None and runas != getpass.getuser():
chugid(runas)
if umask is not None:
- os.umask(umask)
+ os.umask(umask) # pylint: disable=blacklisted-function
def rand_string(size=32):
diff --git a/salt/utils/args.py b/salt/utils/args.py
index c7a49b3a29..e5eeb2e81c 100644
--- a/salt/utils/args.py
+++ b/salt/utils/args.py
@@ -100,7 +100,8 @@ def yamlify_arg(arg):
return arg
if arg.strip() == '':
- # Because YAML loads empty strings as None, we return the original string
+ # Because YAML loads empty (or all whitespace) strings as None, we
+ # return the original string
# >>> import yaml
# >>> yaml.load('') is None
# True
@@ -109,6 +110,9 @@ def yamlify_arg(arg):
return arg
elif '_' in arg and all([x in '0123456789_' for x in arg.strip()]):
+ # When the stripped string includes just digits and underscores, the
+ # underscores are ignored and the digits are combined together and
+ # loaded as an int. We don't want that, so return the original value.
return arg
try:
@@ -135,6 +139,14 @@ def yamlify_arg(arg):
else:
return arg
+ elif isinstance(arg, list):
+ # lists must be wrapped in brackets
+ if (isinstance(original_arg, six.string_types) and
+ not original_arg.startswith('[')):
+ return original_arg
+ else:
+ return arg
+
elif arg is None \
or isinstance(arg, (list, float, six.integer_types, six.string_types)):
# yaml.safe_load will load '|' as '', don't let it do that.
diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py
index 4b84bd59c4..4a1d767ea0 100644
--- a/salt/utils/cloud.py
+++ b/salt/utils/cloud.py
@@ -89,6 +89,10 @@ try:
except ImportError:
HAS_GETPASS = False
+# This is required to support international characters in AWS EC2 tags or any
+# other kind of metadata provided by particular Cloud vendor.
+MSGPACK_ENCODING = 'utf-8'
+
NSTATES = {
0: 'running',
1: 'rebooting',
@@ -2557,7 +2561,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None):
if os.path.exists(index_file):
mode = 'rb' if six.PY3 else 'r'
with salt.utils.fopen(index_file, mode) as fh_:
- index = msgpack.load(fh_)
+ index = msgpack.load(fh_, encoding=MSGPACK_ENCODING)
else:
index = {}
@@ -2574,7 +2578,7 @@ def cachedir_index_add(minion_id, profile, driver, provider, base=None):
mode = 'wb' if six.PY3 else 'w'
with salt.utils.fopen(index_file, mode) as fh_:
- msgpack.dump(index, fh_)
+ msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)
unlock_file(index_file)
@@ -2591,7 +2595,7 @@ def cachedir_index_del(minion_id, base=None):
if os.path.exists(index_file):
mode = 'rb' if six.PY3 else 'r'
with salt.utils.fopen(index_file, mode) as fh_:
- index = msgpack.load(fh_)
+ index = msgpack.load(fh_, encoding=MSGPACK_ENCODING)
else:
return
@@ -2600,7 +2604,7 @@ def cachedir_index_del(minion_id, base=None):
mode = 'wb' if six.PY3 else 'w'
with salt.utils.fopen(index_file, mode) as fh_:
- msgpack.dump(index, fh_)
+ msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)
unlock_file(index_file)
@@ -2656,8 +2660,9 @@ def request_minion_cachedir(
fname = '{0}.p'.format(minion_id)
path = os.path.join(base, 'requested', fname)
- with salt.utils.fopen(path, 'w') as fh_:
- msgpack.dump(data, fh_)
+ mode = 'wb' if six.PY3 else 'w'
+ with salt.utils.fopen(path, mode) as fh_:
+ msgpack.dump(data, fh_, encoding=MSGPACK_ENCODING)
def change_minion_cachedir(
@@ -2689,12 +2694,12 @@ def change_minion_cachedir(
path = os.path.join(base, cachedir, fname)
with salt.utils.fopen(path, 'r') as fh_:
- cache_data = msgpack.load(fh_)
+ cache_data = msgpack.load(fh_, encoding=MSGPACK_ENCODING)
cache_data.update(data)
with salt.utils.fopen(path, 'w') as fh_:
- msgpack.dump(cache_data, fh_)
+ msgpack.dump(cache_data, fh_, encoding=MSGPACK_ENCODING)
def activate_minion_cachedir(minion_id, base=None):
@@ -2766,8 +2771,9 @@ def list_cache_nodes_full(opts=None, provider=None, base=None):
# Finally, get a list of full minion data
fpath = os.path.join(min_dir, fname)
minion_id = fname[:-2] # strip '.p' from end of msgpack filename
- with salt.utils.fopen(fpath, 'r') as fh_:
- minions[driver][prov][minion_id] = msgpack.load(fh_)
+ mode = 'rb' if six.PY3 else 'r'
+ with salt.utils.fopen(fpath, mode) as fh_:
+ minions[driver][prov][minion_id] = msgpack.load(fh_, encoding=MSGPACK_ENCODING)
return minions
@@ -2945,8 +2951,9 @@ def cache_node_list(nodes, provider, opts):
for node in nodes:
diff_node_cache(prov_dir, node, nodes[node], opts)
path = os.path.join(prov_dir, '{0}.p'.format(node))
- with salt.utils.fopen(path, 'w') as fh_:
- msgpack.dump(nodes[node], fh_)
+ mode = 'wb' if six.PY3 else 'w'
+ with salt.utils.fopen(path, mode) as fh_:
+ msgpack.dump(nodes[node], fh_, encoding=MSGPACK_ENCODING)
def cache_node(node, provider, opts):
@@ -2970,8 +2977,9 @@ def cache_node(node, provider, opts):
if not os.path.exists(prov_dir):
os.makedirs(prov_dir)
path = os.path.join(prov_dir, '{0}.p'.format(node['name']))
- with salt.utils.fopen(path, 'w') as fh_:
- msgpack.dump(node, fh_)
+ mode = 'wb' if six.PY3 else 'w'
+ with salt.utils.fopen(path, mode) as fh_:
+ msgpack.dump(node, fh_, encoding=MSGPACK_ENCODING)
def missing_node_cache(prov_dir, node_list, provider, opts):
@@ -3046,7 +3054,7 @@ def diff_node_cache(prov_dir, node, new_data, opts):
with salt.utils.fopen(path, 'r') as fh_:
try:
- cache_data = msgpack.load(fh_)
+ cache_data = msgpack.load(fh_, encoding=MSGPACK_ENCODING)
except ValueError:
log.warning('Cache for {0} was corrupt: Deleting'.format(node))
cache_data = {}
diff --git a/salt/utils/dns.py b/salt/utils/dns.py
index 593fc732ce..eb23c8bdf6 100644
--- a/salt/utils/dns.py
+++ b/salt/utils/dns.py
@@ -797,6 +797,8 @@ def parse_resolv(src='/etc/resolv.conf'):
'''
nameservers = []
+ ip4_nameservers = []
+ ip6_nameservers = []
search = []
sortlist = []
domain = ''
@@ -815,10 +817,20 @@ def parse_resolv(src='/etc/resolv.conf'):
lambda x: x[0] not in ('#', ';'), arg))
if directive == 'nameserver':
+ # Split the scope (interface) if it is present
+ addr, scope = arg[0].split('%', 1) if '%' in arg[0] else (arg[0], '')
try:
- ip_addr = ipaddress.ip_address(arg[0])
+ ip_addr = ipaddress.ip_address(addr)
+ version = ip_addr.version
+ # Rejoin scope after address validation
+ if scope:
+ ip_addr = '%'.join((str(ip_addr), scope))
if ip_addr not in nameservers:
nameservers.append(ip_addr)
+ if version == 4 and ip_addr not in ip4_nameservers:
+ ip4_nameservers.append(ip_addr)
+ elif version == 6 and ip_addr not in ip6_nameservers:
+ ip6_nameservers.append(ip_addr)
except ValueError as exc:
log.error('{0}: {1}'.format(src, exc))
elif directive == 'domain':
@@ -870,8 +882,8 @@ def parse_resolv(src='/etc/resolv.conf'):
return {
'nameservers': nameservers,
- 'ip4_nameservers': [ip for ip in nameservers if ip.version == 4],
- 'ip6_nameservers': [ip for ip in nameservers if ip.version == 6],
+ 'ip4_nameservers': ip4_nameservers,
+ 'ip6_nameservers': ip6_nameservers,
'sortlist': [ip.with_netmask for ip in sortlist],
'domain': domain,
'search': search,
diff --git a/salt/utils/docker/__init__.py b/salt/utils/docker/__init__.py
index e186d639d7..c9ad6031c7 100644
--- a/salt/utils/docker/__init__.py
+++ b/salt/utils/docker/__init__.py
@@ -2,12 +2,13 @@
'''
Common logic used by the docker state and execution module
-This module contains logic to accomodate docker/salt CLI usage, as well as
+This module contains logic to accommodate docker/salt CLI usage, as well as
input as formatted by states.
'''
# Import Python libs
from __future__ import absolute_import
+import copy
import logging
import os
@@ -174,7 +175,7 @@ def translate_input(**kwargs):
have their translation skipped. Optionally, skip_translate can be set to
True to skip *all* translation.
'''
- kwargs = salt.utils.clean_kwargs(**kwargs)
+ kwargs = copy.deepcopy(salt.utils.clean_kwargs(**kwargs))
invalid = {}
collisions = []
@@ -287,27 +288,31 @@ def translate_input(**kwargs):
actual_volumes.sort()
if kwargs.get('port_bindings') is not None \
- and (skip_translate is True or
- all(x not in skip_translate
- for x in ('port_bindings', 'expose', 'ports'))):
+ and all(x not in skip_translate
+ for x in ('port_bindings', 'expose', 'ports')):
# Make sure that all ports defined in "port_bindings" are included in
# the "ports" param.
- auto_ports = list(kwargs['port_bindings'])
- if auto_ports:
- actual_ports = []
- # Sort list to make unit tests more reliable
- for port in auto_ports:
- if port in actual_ports:
- continue
- if isinstance(port, six.integer_types):
- actual_ports.append((port, 'tcp'))
+ ports_to_bind = list(kwargs['port_bindings'])
+ if ports_to_bind:
+ ports_to_open = set(kwargs.get('ports', []))
+ for port_def in ports_to_bind:
+ if isinstance(port_def, six.integer_types):
+ ports_to_open.add(port_def)
else:
- port, proto = port.split('/')
- actual_ports.append((int(port), proto))
- actual_ports.sort()
- actual_ports = [
- port if proto == 'tcp' else '{}/{}'.format(port, proto) for (port, proto) in actual_ports
- ]
- kwargs.setdefault('ports', actual_ports)
+ port_num, proto = port_def.split('/')
+ ports_to_open.add((int(port_num), proto))
+ kwargs['ports'] = list(ports_to_open)
+
+ if 'ports' in kwargs \
+ and all(x not in skip_translate for x in ('expose', 'ports')):
+ # TCP ports should only be passed as the port number. Normalize the
+ # input so a port definition of 80/tcp becomes just 80 instead of
+ # (80, 'tcp').
+ for index, _ in enumerate(kwargs['ports']):
+ try:
+ if kwargs['ports'][index][1] == 'tcp':
+                 kwargs['ports'][index] = kwargs['ports'][index][0]
+ except TypeError:
+ continue
return kwargs, invalid, sorted(collisions)
diff --git a/salt/utils/docker/translate.py b/salt/utils/docker/translate.py
index 372596a759..2c3504e9ef 100644
--- a/salt/utils/docker/translate.py
+++ b/salt/utils/docker/translate.py
@@ -705,13 +705,7 @@ def ports(val, **kwargs): # pylint: disable=unused-argument
raise SaltInvocationError(exc.__str__())
new_ports.update([_get_port_def(x, proto)
for x in range(range_start, range_end + 1)])
- ordered_new_ports = [
- port if proto == 'tcp' else (port, proto) for (port, proto) in sorted(
- [(new_port, 'tcp') if isinstance(new_port, six.integer_types) else new_port
- for new_port in new_ports]
- )
- ]
- return ordered_new_ports
+ return list(new_ports)
def privileged(val, **kwargs): # pylint: disable=unused-argument
diff --git a/salt/utils/event.py b/salt/utils/event.py
index c96489dc0d..a6daef0670 100644
--- a/salt/utils/event.py
+++ b/salt/utils/event.py
@@ -76,6 +76,7 @@ import salt.utils
import salt.utils.async
import salt.utils.cache
import salt.utils.dicttrim
+import salt.utils.files
import salt.utils.process
import salt.utils.zeromq
import salt.log.setup
@@ -1014,12 +1015,9 @@ class AsyncEventPublisher(object):
)
log.info('Starting pull socket on {0}'.format(epull_uri))
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
- finally:
- os.umask(old_umask)
def handle_publish(self, package, _):
'''
@@ -1102,8 +1100,7 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
)
# Start the master event publisher
- old_umask = os.umask(0o177)
- try:
+ with salt.utils.files.set_umask(0o177):
self.publisher.start()
self.puller.start()
if (self.opts['ipc_mode'] != 'tcp' and (
@@ -1111,8 +1108,6 @@ class EventPublisher(salt.utils.process.SignalHandlingMultiprocessingProcess):
self.opts['external_auth'])):
os.chmod(os.path.join(
self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
- finally:
- os.umask(old_umask)
# Make sure the IO loop and respective sockets are closed and
# destroyed
diff --git a/salt/utils/extmods.py b/salt/utils/extmods.py
index 5d4263b80f..a42c9b7cbb 100644
--- a/salt/utils/extmods.py
+++ b/salt/utils/extmods.py
@@ -11,6 +11,7 @@ import shutil
# Import salt libs
import salt.fileclient
+import salt.utils.files
import salt.utils.url
# Import 3rd-party libs
@@ -63,85 +64,83 @@ def sync(opts, form, saltenv=None, extmod_whitelist=None, extmod_blacklist=None)
remote = set()
source = salt.utils.url.create('_' + form)
mod_dir = os.path.join(opts['extension_modules'], '{0}'.format(form))
- cumask = os.umask(0o77)
touched = False
- try:
- if not os.path.isdir(mod_dir):
- log.info('Creating module dir \'{0}\''.format(mod_dir))
- try:
- os.makedirs(mod_dir)
- except (IOError, OSError):
- log.error(
- 'Cannot create cache module directory {0}. Check '
- 'permissions.'.format(mod_dir)
+ with salt.utils.files.set_umask(0o077):
+ try:
+ if not os.path.isdir(mod_dir):
+ log.info('Creating module dir \'{0}\''.format(mod_dir))
+ try:
+ os.makedirs(mod_dir)
+ except (IOError, OSError):
+ log.error(
+ 'Cannot create cache module directory {0}. Check '
+ 'permissions.'.format(mod_dir)
+ )
+ fileclient = salt.fileclient.get_file_client(opts)
+ for sub_env in saltenv:
+ log.info(
+ 'Syncing {0} for environment \'{1}\''.format(form, sub_env)
)
- fileclient = salt.fileclient.get_file_client(opts)
- for sub_env in saltenv:
- log.info(
- 'Syncing {0} for environment \'{1}\''.format(form, sub_env)
- )
- cache = []
- log.info(
- 'Loading cache from {0}, for {1})'.format(source, sub_env)
- )
- # Grab only the desired files (.py, .pyx, .so)
- cache.extend(
- fileclient.cache_dir(
- source, sub_env, include_empty=False,
- include_pat=r'E@\.(pyx?|so|zip)$', exclude_pat=None
+ cache = []
+ log.info(
+ 'Loading cache from {0}, for {1})'.format(source, sub_env)
)
- )
- local_cache_dir = os.path.join(
- opts['cachedir'],
- 'files',
- sub_env,
- '_{0}'.format(form)
+ # Grab only the desired files (.py, .pyx, .so)
+ cache.extend(
+ fileclient.cache_dir(
+ source, sub_env, include_empty=False,
+ include_pat=r'E@\.(pyx?|so|zip)$', exclude_pat=None
)
- log.debug('Local cache dir: \'{0}\''.format(local_cache_dir))
- for fn_ in cache:
- relpath = os.path.relpath(fn_, local_cache_dir)
- relname = os.path.splitext(relpath)[0].replace(os.sep, '.')
- if extmod_whitelist and form in extmod_whitelist and relname not in extmod_whitelist[form]:
- continue
- if extmod_blacklist and form in extmod_blacklist and relname in extmod_blacklist[form]:
- continue
- remote.add(relpath)
- dest = os.path.join(mod_dir, relpath)
- log.info('Copying \'{0}\' to \'{1}\''.format(fn_, dest))
- if os.path.isfile(dest):
- # The file is present, if the sum differs replace it
- hash_type = opts.get('hash_type', 'md5')
- src_digest = salt.utils.get_hash(fn_, hash_type)
- dst_digest = salt.utils.get_hash(dest, hash_type)
- if src_digest != dst_digest:
- # The downloaded file differs, replace!
+ )
+ local_cache_dir = os.path.join(
+ opts['cachedir'],
+ 'files',
+ sub_env,
+ '_{0}'.format(form)
+ )
+ log.debug('Local cache dir: \'{0}\''.format(local_cache_dir))
+ for fn_ in cache:
+ relpath = os.path.relpath(fn_, local_cache_dir)
+ relname = os.path.splitext(relpath)[0].replace(os.sep, '.')
+ if extmod_whitelist and form in extmod_whitelist and relname not in extmod_whitelist[form]:
+ continue
+ if extmod_blacklist and form in extmod_blacklist and relname in extmod_blacklist[form]:
+ continue
+ remote.add(relpath)
+ dest = os.path.join(mod_dir, relpath)
+ log.info('Copying \'{0}\' to \'{1}\''.format(fn_, dest))
+ if os.path.isfile(dest):
+ # The file is present, if the sum differs replace it
+ hash_type = opts.get('hash_type', 'md5')
+ src_digest = salt.utils.get_hash(fn_, hash_type)
+ dst_digest = salt.utils.get_hash(dest, hash_type)
+ if src_digest != dst_digest:
+ # The downloaded file differs, replace!
+ shutil.copyfile(fn_, dest)
+ ret.append('{0}.{1}'.format(form, relname))
+ else:
+ dest_dir = os.path.dirname(dest)
+ if not os.path.isdir(dest_dir):
+ os.makedirs(dest_dir)
shutil.copyfile(fn_, dest)
ret.append('{0}.{1}'.format(form, relname))
- else:
- dest_dir = os.path.dirname(dest)
- if not os.path.isdir(dest_dir):
- os.makedirs(dest_dir)
- shutil.copyfile(fn_, dest)
- ret.append('{0}.{1}'.format(form, relname))
- touched = bool(ret)
- if opts['clean_dynamic_modules'] is True:
- current = set(_listdir_recursively(mod_dir))
- for fn_ in current - remote:
- full = os.path.join(mod_dir, fn_)
- if os.path.isfile(full):
- touched = True
- os.remove(full)
- # Cleanup empty dirs
- while True:
- emptydirs = _list_emptydirs(mod_dir)
- if not emptydirs:
- break
- for emptydir in emptydirs:
- touched = True
- shutil.rmtree(emptydir, ignore_errors=True)
- except Exception as exc:
- log.error('Failed to sync {0} module: {1}'.format(form, exc))
- finally:
- os.umask(cumask)
+ touched = bool(ret)
+ if opts['clean_dynamic_modules'] is True:
+ current = set(_listdir_recursively(mod_dir))
+ for fn_ in current - remote:
+ full = os.path.join(mod_dir, fn_)
+ if os.path.isfile(full):
+ touched = True
+ os.remove(full)
+ # Cleanup empty dirs
+ while True:
+ emptydirs = _list_emptydirs(mod_dir)
+ if not emptydirs:
+ break
+ for emptydir in emptydirs:
+ touched = True
+ shutil.rmtree(emptydir, ignore_errors=True)
+ except Exception as exc:
+ log.error('Failed to sync {0} module: {1}'.format(form, exc))
return ret, touched
diff --git a/salt/utils/files.py b/salt/utils/files.py
index 6f4ac70d55..294b01c321 100644
--- a/salt/utils/files.py
+++ b/salt/utils/files.py
@@ -274,20 +274,29 @@ def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
log.trace('Write lock for %s (%s) released', path, lock_fn)
+def get_umask():
+ '''
+ Returns the current umask
+ '''
+ ret = os.umask(0) # pylint: disable=blacklisted-function
+ os.umask(ret) # pylint: disable=blacklisted-function
+ return ret
+
+
@contextlib.contextmanager
def set_umask(mask):
'''
Temporarily set the umask and restore once the contextmanager exits
'''
- if salt.utils.is_windows():
- # Don't attempt on Windows
+ if mask is None or salt.utils.is_windows():
+ # Don't attempt on Windows, or if no mask was passed
yield
else:
try:
- orig_mask = os.umask(mask)
+ orig_mask = os.umask(mask) # pylint: disable=blacklisted-function
yield
finally:
- os.umask(orig_mask)
+ os.umask(orig_mask) # pylint: disable=blacklisted-function
def safe_filename_leaf(file_basename):
diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py
index 35501ca2ba..0ab4face60 100644
--- a/salt/utils/gitfs.py
+++ b/salt/utils/gitfs.py
@@ -41,6 +41,9 @@ import salt.ext.six as six
# Optional per-remote params that can only be used on a per-remote basis, and
# thus do not have defaults in salt/config.py.
PER_REMOTE_ONLY = ('name',)
+# Params which are global only and cannot be overridden for a single remote.
+GLOBAL_ONLY = ()
+
SYMLINK_RECURSE_DEPTH = 100
# Auth support (auth params can be global or per-remote, too)
@@ -307,7 +310,7 @@ class GitProvider(object):
salt.utils.url.strip_proto(saltenv_ptr['mountpoint'])
for key, val in six.iteritems(self.conf):
- if key not in PER_SALTENV_PARAMS:
+ if key not in PER_SALTENV_PARAMS and not hasattr(self, key):
setattr(self, key, val)
for key in PER_SALTENV_PARAMS:
@@ -770,13 +773,13 @@ class GitProvider(object):
'''
Resolve dynamically-set branch
'''
- if self.branch == '__env__':
+ if self.role == 'git_pillar' and self.branch == '__env__':
target = self.opts.get('pillarenv') \
or self.opts.get('environment') \
or 'base'
- return self.opts['{0}_base'.format(self.role)] \
+ return self.base \
if target == 'base' \
- else target
+ else six.text_type(target)
return self.branch
def get_refspecs(self):
@@ -824,7 +827,7 @@ class GitProvider(object):
try:
self.branch, self.url = self.id.split(None, 1)
except ValueError:
- self.branch = self.opts['{0}_branch'.format(self.role)]
+ self.branch = self.conf['branch']
self.url = self.id
else:
self.url = self.id
@@ -1913,7 +1916,8 @@ class GitBase(object):
self.opts['cachedir'], 'file_lists', self.role)
def init_remotes(self, remotes, per_remote_overrides,
- per_remote_only=PER_REMOTE_ONLY):
+ per_remote_only=PER_REMOTE_ONLY,
+ global_only=GLOBAL_ONLY):
'''
Initialize remotes
'''
@@ -1946,7 +1950,9 @@ class GitBase(object):
failhard(self.role)
per_remote_defaults = {}
- for param in override_params:
+ global_values = set(override_params)
+ global_values.update(set(global_only))
+ for param in global_values:
key = '{0}_{1}'.format(self.role, param)
if key not in self.opts:
log.critical(
@@ -1982,6 +1988,9 @@ class GitBase(object):
for saltenv, saltenv_conf in six.iteritems(repo_obj.saltenv):
if 'ref' in saltenv_conf:
ref = saltenv_conf['ref']
+ repo_obj.saltenv_revmap.setdefault(
+ ref, []).append(saltenv)
+
if saltenv == 'base':
# Remove redundant 'ref' config for base saltenv
repo_obj.saltenv[saltenv].pop('ref')
@@ -1996,9 +2005,6 @@ class GitBase(object):
)
# Rewrite 'base' config param
repo_obj.base = ref
- else:
- repo_obj.saltenv_revmap.setdefault(
- ref, []).append(saltenv)
# Build list of all envs defined by ref mappings in the
# per-remote 'saltenv' param. We won't add any matching envs
@@ -2531,13 +2537,17 @@ class GitFS(GitBase):
return fnd
salt.fileserver.wait_lock(lk_fn, dest)
- if os.path.isfile(blobshadest) and os.path.isfile(dest):
+ try:
with salt.utils.fopen(blobshadest, 'r') as fp_:
sha = fp_.read()
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise exc
+
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write('')
for filename in glob.glob(hashes_glob):
@@ -2623,17 +2633,25 @@ class GitFS(GitBase):
load['saltenv'],
'{0}.hash.{1}'.format(relpath,
self.opts['hash_type']))
- if not os.path.isfile(hashdest):
- if not os.path.exists(os.path.dirname(hashdest)):
- os.makedirs(os.path.dirname(hashdest))
- ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type'])
- with salt.utils.fopen(hashdest, 'w+') as fp_:
- fp_.write(ret['hsum'])
- return ret
- else:
+
+ try:
with salt.utils.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
+ except IOError as exc:
+ if exc.errno != errno.ENOENT:
+ raise exc
+
+ try:
+ os.makedirs(os.path.dirname(hashdest))
+ except OSError as exc:
+ if exc.errno != errno.EEXIST:
+ raise exc
+
+ ret['hsum'] = salt.utils.get_hash(path, self.opts['hash_type'])
+ with salt.utils.fopen(hashdest, 'w+') as fp_:
+ fp_.write(ret['hsum'])
+ return ret
def _file_lists(self, load, form):
'''
@@ -2756,8 +2774,7 @@ class GitPillar(GitBase):
if repo.env:
env = repo.env
else:
- base_branch = self.opts['{0}_base'.format(self.role)]
- env = 'base' if repo.branch == base_branch else repo.branch
+ env = 'base' if repo.branch == repo.base else repo.branch
if repo._mountpoint:
if self.link_mountpoint(repo, cachedir):
self.pillar_dirs[repo.linkdir] = env
@@ -2844,6 +2861,9 @@ class WinRepo(GitBase):
def __init__(self, opts, winrepo_dir):
self.role = 'winrepo'
super(WinRepo, self).__init__(opts, cache_root=winrepo_dir)
+ # Need to define this in case we try to reference it before checking
+ # out the repos.
+ self.winrepo_dirs = {}
def checkout(self):
'''
diff --git a/salt/utils/http.py b/salt/utils/http.py
index 70a1c812ba..19ee85c39c 100644
--- a/salt/utils/http.py
+++ b/salt/utils/http.py
@@ -531,8 +531,10 @@ def query(url,
'charset' in res_params and \
not isinstance(result_text, six.text_type):
result_text = result_text.decode(res_params['charset'])
+ if six.PY3 and isinstance(result_text, bytes):
+ result_text = result_text.decode('utf-8')
ret['body'] = result_text
- if 'Set-Cookie' in result_headers.keys() and cookies is not None:
+ if 'Set-Cookie' in result_headers and cookies is not None:
result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
for item in result_cookies:
sess_cookies.set_cookie(item)
@@ -857,12 +859,10 @@ def parse_cookie_header(header):
for cookie in cookies:
name = None
value = None
- for item in cookie:
+ for item in list(cookie):
if item in attribs:
continue
- name = item
- value = cookie[item]
- del cookie[name]
+ value = cookie.pop(item)
# cookielib.Cookie() requires an epoch
if 'expires' in cookie:
@@ -870,7 +870,7 @@ def parse_cookie_header(header):
# Fill in missing required fields
for req in reqd:
- if req not in cookie.keys():
+ if req not in cookie:
cookie[req] = ''
if cookie['version'] == '':
cookie['version'] = 0
diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py
index 9d0e5e8355..06cfd7c879 100644
--- a/salt/utils/jinja.py
+++ b/salt/utils/jinja.py
@@ -63,9 +63,11 @@ class SaltCacheLoader(BaseLoader):
else:
self.searchpath = [path.join(opts['cachedir'], 'files', saltenv)]
log.debug('Jinja search path: %s', self.searchpath)
- self._file_client = None
self.cached = []
self.pillar_rend = pillar_rend
+ self._file_client = None
+ # Instantiate the fileclient
+ self.file_client()
def file_client(self):
'''
diff --git a/salt/utils/master.py b/salt/utils/master.py
index 16e95a40aa..5738efbbaf 100644
--- a/salt/utils/master.py
+++ b/salt/utils/master.py
@@ -298,7 +298,7 @@ class MasterPillarUtil(object):
cached minion data on the master, or by fetching the grains
directly on the minion.
- By default, this function tries hard to get the pillar data:
+ By default, this function tries hard to get the grains data:
- Try to get the cached minion grains if the master
has minion_data_cache: True
- If the grains data for the minion is cached, use it.
@@ -307,6 +307,8 @@ class MasterPillarUtil(object):
'''
minion_grains = {}
minion_ids = self._tgt_to_list()
+ if not minion_ids:
+ return {}
if any(arg for arg in [self.use_cached_grains, self.grains_fallback]):
log.debug('Getting cached minion data.')
cached_minion_grains, cached_minion_pillars = self._get_cached_minion_data(*minion_ids)
diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py
index 1f560fd280..e81b97152e 100644
--- a/salt/utils/napalm.py
+++ b/salt/utils/napalm.py
@@ -238,7 +238,7 @@ def call(napalm_device, method, *args, **kwargs):
# either running in a not-always-alive proxy
# either running in a regular minion
# close the connection when the call is over
- # unless the CLOSE is explicitely set as False
+ # unless the CLOSE is explicitly set as False
napalm_device['DRIVER'].close()
return {
'out': out,
@@ -392,7 +392,7 @@ def proxy_napalm_wrap(func):
else:
# in case the `inherit_napalm_device` is set
# and it also has a non-empty value,
- # the global var `napalm_device` will be overriden.
+ # the global var `napalm_device` will be overridden.
# this is extremely important for configuration-related features
# as all actions must be issued within the same configuration session
# otherwise we risk to open multiple sessions
@@ -418,7 +418,7 @@ def proxy_napalm_wrap(func):
else:
# in case the `inherit_napalm_device` is set
# and it also has a non-empty value,
- # the global var `napalm_device` will be overriden.
+ # the global var `napalm_device` will be overridden.
# this is extremely important for configuration-related features
# as all actions must be issued within the same configuration session
# otherwise we risk to open multiple sessions
diff --git a/salt/utils/parsers.py b/salt/utils/parsers.py
index 97a82a7ecf..3694a38423 100644
--- a/salt/utils/parsers.py
+++ b/salt/utils/parsers.py
@@ -34,6 +34,7 @@ import salt.utils
import salt.utils.args
import salt.utils.xdg
import salt.utils.jid
+import salt.utils.files
from salt.utils import kinds
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.validate.path import is_writeable
@@ -261,6 +262,8 @@ class OptionParser(optparse.OptionParser, object):
)
)
if self._setup_mp_logging_listener_ is True:
+ # Stop logging through the queue
+ log.shutdown_multiprocessing_logging()
# Stop the logging queue listener process
log.shutdown_multiprocessing_logging_listener(daemonizing=True)
if isinstance(msg, six.string_types) and msg and msg[-1] != '\n':
@@ -716,9 +719,8 @@ class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
# verify the default
if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
# Logfile is not using Syslog, verify
- current_umask = os.umask(0o027)
- verify_files([logfile], self.config['user'])
- os.umask(current_umask)
+ with salt.utils.files.set_umask(0o027):
+ verify_files([logfile], self.config['user'])
if logfile is None:
# Use the default setting if the logfile wasn't explicity set
diff --git a/salt/utils/schema.py b/salt/utils/schema.py
index 6f1d824b3a..6469524606 100644
--- a/salt/utils/schema.py
+++ b/salt/utils/schema.py
@@ -731,7 +731,7 @@ class SchemaItem(six.with_metaclass(BaseSchemaItemMeta, object)):
'''
Return the argname value looking up on all possible attributes
'''
- # Let's see if there's a private fuction to get the value
+ # Let's see if there's a private function to get the value
argvalue = getattr(self, '__get_{0}__'.format(argname), None)
if argvalue is not None and callable(argvalue):
argvalue = argvalue()
diff --git a/salt/utils/verify.py b/salt/utils/verify.py
index 75505ccfef..802b37339c 100644
--- a/salt/utils/verify.py
+++ b/salt/utils/verify.py
@@ -28,6 +28,7 @@ from salt.exceptions import SaltClientError, SaltSystemExit, \
CommandExecutionError
import salt.defaults.exitcodes
import salt.utils
+import salt.utils.files
log = logging.getLogger(__name__)
@@ -218,12 +219,11 @@ def verify_env(dirs, user, permissive=False, pki_dir='', skip_extra=False, root_
continue
if not os.path.isdir(dir_):
try:
- cumask = os.umask(18) # 077
- os.makedirs(dir_)
+ with salt.utils.files.set_umask(0o022):
+ os.makedirs(dir_)
# If starting the process as root, chown the new dirs
if os.getuid() == 0:
os.chown(dir_, uid, gid)
- os.umask(cumask)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
diff --git a/salt/utils/win_functions.py b/salt/utils/win_functions.py
index 9f9f1d7a10..fb1feb8901 100644
--- a/salt/utils/win_functions.py
+++ b/salt/utils/win_functions.py
@@ -5,6 +5,8 @@ missing functions in other modules
'''
from __future__ import absolute_import
import platform
+import re
+import ctypes
# Import Salt Libs
from salt.exceptions import CommandExecutionError
@@ -16,6 +18,7 @@ try:
import win32api
import win32net
import win32security
+ from win32con import HWND_BROADCAST, WM_SETTINGCHANGE, SMTO_ABORTIFHUNG
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
@@ -159,3 +162,122 @@ def get_sam_name(username):
return '\\'.join([platform.node()[:15].upper(), username])
username, domain, _ = win32security.LookupAccountSid(None, sid_obj)
return '\\'.join([domain, username])
+
+
+def escape_argument(arg):
+ '''
+ Escape the argument for the cmd.exe shell.
+ See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx
+
+ First we escape the quote chars to produce a argument suitable for
+ CommandLineToArgvW. We don't need to do this for simple arguments.
+
+ Args:
+ arg (str): a single command line argument to escape for the cmd.exe shell
+
+ Returns:
+ str: an escaped string suitable to be passed as a program argument to the cmd.exe shell
+ '''
+ if not arg or re.search(r'(["\s])', arg):
+ arg = '"' + arg.replace('"', r'\"') + '"'
+
+ return escape_for_cmd_exe(arg)
+
+
+def escape_for_cmd_exe(arg):
+ '''
+ Escape an argument string to be suitable to be passed to
+ cmd.exe on Windows
+
+ This method takes an argument that is expected to already be properly
+ escaped for the receiving program to be properly parsed. This argument
+ will be further escaped to pass the interpolation performed by cmd.exe
+ unchanged.
+
+ Any meta-characters will be escaped, removing the ability to e.g. use
+ redirects or variables.
+
+ Args:
+ arg (str): a single command line argument to escape for cmd.exe
+
+ Returns:
+ str: an escaped string suitable to be passed as a program argument to cmd.exe
+ '''
+ meta_chars = '()%!^"<>&|'
+ meta_re = re.compile('(' + '|'.join(re.escape(char) for char in list(meta_chars)) + ')')
+ meta_map = {char: "^{0}".format(char) for char in meta_chars}
+
+ def escape_meta_chars(m):
+ char = m.group(1)
+ return meta_map[char]
+
+ return meta_re.sub(escape_meta_chars, arg)
+
+
+def broadcast_setting_change(message='Environment'):
+ '''
+ Send a WM_SETTINGCHANGE Broadcast to all Windows
+
+ Args:
+
+ message (str):
+ A string value representing the portion of the system that has been
+ updated and needs to be refreshed. Default is ``Environment``. These
+ are some common values:
+
+ - "Environment" : to effect a change in the environment variables
+ - "intl" : to effect a change in locale settings
+ - "Policy" : to effect a change in Group Policy Settings
+ - a leaf node in the registry
+ - the name of a section in the ``Win.ini`` file
+
+ See lParam within msdn docs for
+ `WM_SETTINGCHANGE <https://msdn.microsoft.com/en-us/library/ms725497%28VS.85%29.aspx>`_
+ for more information on Broadcasting Messages.
+
+ See GWL_WNDPROC within msdn docs for
+ `SetWindowLong <https://msdn.microsoft.com/en-us/library/windows/desktop/ms633591(v=vs.85).aspx>`_
+ for information on how to retrieve those messages.
+
+ .. note::
+ This will only affect new processes that aren't launched by services. To
+ apply changes to the path or registry to services, the host must be
+ restarted. The ``salt-minion``, if running as a service, will not see
+ changes to the environment until the system is restarted. Services
+ inherit their environment from ``services.exe`` which does not respond
+ to messaging events. See
+ `MSDN Documentation <https://support.microsoft.com/en-us/help/821761/changes-that-you-make-to-environment-variables-do-not-affect-services>`_
+ for more information.
+
+ CLI Example:
+
+ .. code-block:: python
+
+ import salt.utils.win_functions
+ salt.utils.win_functions.broadcast_setting_change('Environment')
+ '''
+ # Listening for messages sent by this would involve working with the
+ # SetWindowLong function. This can be accessed via win32gui or through
+ # ctypes. You can find examples on how to do this by searching for
+ # `Accessing GWL_WNDPROC` on the internet. Here are some examples of how
+ # this might work:
+ #
+ # # using win32gui
+ # import win32con
+ # import win32gui
+ # old_function = win32gui.SetWindowLong(window_handle, win32con.GWL_WNDPROC, new_function)
+ #
+ # # using ctypes
+ # import ctypes
+ # import win32con
+ # from ctypes import c_long, c_int
+ # user32 = ctypes.WinDLL('user32', use_last_error=True)
+ # WndProcType = ctypes.WINFUNCTYPE(c_int, c_long, c_int, c_int)
+ # new_function = WndProcType
+ # old_function = user32.SetWindowLongW(window_handle, win32con.GWL_WNDPROC, new_function)
+ broadcast_message = ctypes.create_unicode_buffer(message)
+ user32 = ctypes.WinDLL('user32', use_last_error=True)
+ result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0,
+ broadcast_message, SMTO_ABORTIFHUNG,
+ 5000, 0)
+ return result == 1
diff --git a/salt/utils/yamlloader.py b/salt/utils/yamlloader.py
index da0fd8d466..f6932f6109 100644
--- a/salt/utils/yamlloader.py
+++ b/salt/utils/yamlloader.py
@@ -30,7 +30,7 @@ warnings.simplefilter('always', category=DuplicateKeyWarning)
# with code integrated from https://gist.github.com/844388
-class SaltYamlSafeLoader(yaml.SafeLoader, object):
+class SaltYamlSafeLoader(yaml.SafeLoader):
'''
Create a custom YAML loader that uses the custom constructor. This allows
for the YAML loading defaults to be manipulated based on needs within salt
diff --git a/salt/version.py b/salt/version.py
index 6e5d4e74af..f049d84486 100644
--- a/salt/version.py
+++ b/salt/version.py
@@ -10,7 +10,7 @@ import sys
import locale
import platform
-# linux_distribution depreacted in py3.7
+# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution
except ImportError:
@@ -512,6 +512,9 @@ def __discover_version(saltstack_version):
process = subprocess.Popen(
['git', 'describe', '--tags', '--match', 'v[0-9]*', '--always'], **kwargs)
out, err = process.communicate()
+ if six.PY3:
+ out = out.decode()
+ err = err.decode()
out = out.strip()
err = err.strip()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment