
@WaltHP
Created December 19, 2016 21:30
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py
index 1f452a5a6..d76115630 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py
@@ -21,6 +21,8 @@ import six
import string
import time
+from oslo_log import log as logging
+
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder.objects import fields
@@ -29,7 +31,6 @@ from cinder.volume.drivers.ibm.ibm_storage import cryptish
from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient
from cinder.volume.drivers.ibm.ibm_storage import proxy
from cinder.volume.drivers.ibm.ibm_storage import strings
-from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@@ -121,8 +122,7 @@ class DS8KCommonHelper(object):
try:
clear_pass = cryptish.decrypt(self._get_value('san_password'))
except TypeError:
- msg = _LE("san_password")
- raise exception.InvalidParameterValue(msg)
+ raise exception.InvalidParameterValue(err='san_password')
verify = self._get_certificate(self._get_value('san_ip'))
try:
self._client = restclient.RESTScheduler(
@@ -132,8 +132,8 @@ class DS8KCommonHelper(object):
self._connector_obj,
verify)
except restclient.TimeoutException as e:
- msg = (_LE("Can not connect to %(host)s, Exception: %(e)s")
- % {'host': self._get_value('san_ip'), 'e': e})
+ msg = _("Can't connect to %(host)s" %
+ {'host': self._get_value('san_ip')})
raise restclient.APIException(data=msg)
self.backend['rest_version'] = self._get_version()['bundle_version']
msg = _LI("Connection to DS8K storage system %(host)s has been "
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
index 181c88c52..7f042a0ff 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py
@@ -424,10 +424,10 @@ class DS8KProxy(proxy.IBMStorageProxy):
thin_provision = self._helper.get_thin_provision()
if lun.type_thin and thin_provision:
if lun.type_replication:
- msg = (_LE("The primary or the secondary storage "
- "can not support ECKD ESE volume."))
+ msg = _("The primary or the secondary storage "
+ "can not support ECKD ESE volume.")
else:
- msg = _LE("Backend can not support ECKD ESE volume.")
+ msg = _("Backend can not support ECKD ESE volume.")
LOG.error(msg)
raise restclient.APIException(message=msg)
# There is a time gap between find available LSS slot and
@@ -502,8 +502,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
def _ensure_vol_not_fc_target(self, vol_hex_id):
for cp in self._helper.get_flashcopy(vol_hex_id):
if cp['targetvolume']['id'] == vol_hex_id:
- msg = (_LE('Volume %s is currently a target of another '
- 'FlashCopy operation') % vol_hex_id)
+ msg = _('Volume %s is currently a target of another '
+ 'FlashCopy operation' % vol_hex_id)
raise restclient.APIException(data=msg)
@proxy._trace_time
@@ -541,8 +541,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
self._replication.extend_replica(lun, param)
self._replication.create_pprc_pairs(lun)
else:
- msg = (_LE("The volume %s has been failed over, it is "
- "not suggested to extend it.") % lun.ds_id)
+ msg = _("The volume %s has been failed over, it is "
+ "not suggested to extend it." % lun.ds_id)
raise exception.CinderException(data=msg)
else:
self._helper.change_lun(lun.ds_id, param)
@@ -672,10 +672,10 @@ class DS8KProxy(proxy.IBMStorageProxy):
lun = self._replication.delete_replica(lun)
lun = _convert_thin_and_thick(lun, new_type_thin)
else:
- msg = (_LE("The volume %s is in replication relationship, "
- "it is not supported to retype from thin to "
- "thick or vice versus.") % lun.ds_id)
- raise exception.CinderException(data=msg)
+ msg = _("The volume %s is in replication relationship, "
+ "it is not supported to retype from thin to "
+ "thick or vice versus." % lun.ds_id)
+ raise exception.CinderException(msg)
else:
lun = _convert_thin_and_thick(lun, new_type_thin)
if new_type_replication:
@@ -739,8 +739,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
self._clone_group(src_luns, tgt_luns, cg_enabled)
except restclient.APIException as e:
model_update['status'] = fields.GroupStatus.ERROR
- msg = _LE('Failed to create group snapshot. Exception= %s')
- LOG.error(msg, e)
+ msg = _('Failed to create group snapshot.')
+ LOG.exception(msg)
for tgt_lun in tgt_luns:
snapshot_model_update = tgt_lun.get_volume_update()
@@ -830,9 +830,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
self._clone_group(src_luns, tgt_luns, cg_enabled)
except restclient.APIException as e:
model_update['status'] = fields.GroupStatus.ERROR
- msg = _LE("Failed to create group from group snapshot. "
- "Exception= %s")
- LOG.error(msg, e)
+ msg = _("Failed to create group from group snapshot.")
+ LOG.exception(msg)
for tgt_lun in tgt_luns:
volume_model_update = tgt_lun.get_volume_update()
@@ -952,8 +951,8 @@ class DS8KProxy(proxy.IBMStorageProxy):
if secondary_id is None:
secondary_id = backend_id
elif secondary_id != backend_id:
- msg = (_LE('Invalid secondary_backend_id specified. '
- 'Valid backend id is %s.') % backend_id)
+ msg = _('Invalid secondary_backend_id specified. '
+ 'Valid backend id is %s.' % backend_id)
raise exception.InvalidReplicationTarget(message=msg)
LOG.debug("Starting failover to %s.", secondary_id)
@@ -984,7 +983,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
except restclient.APIException as e:
msg = (_LE("Unable to failover host to %(id)s. "
"Exception= %(ex)s")
- % {'id': secondary_id, 'ex': e})
+ % {'id': secondary_id, 'ex': six.text_type(e)})
raise exception.UnableToFailOver(data=msg)
for lun in replicated_luns:
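
The group-snapshot hunks above also switch from LOG.error(msg, e) to LOG.exception(msg). A small sketch of why (standard library logging; illustrative names): inside an except block, LOG.exception() records the active traceback itself, so the exception object no longer needs to be spliced into the message.

import logging

logging.basicConfig()
LOG = logging.getLogger(__name__)

def clone_group():
    raise ValueError("simulated backend failure")

try:
    clone_group()
except ValueError:
    # The traceback is appended automatically; no %s placeholder
    # or exception argument is needed.
    LOG.exception("Failed to create group snapshot.")
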
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py
index 386a6c1f5..844d2d62f 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py
@@ -14,18 +14,20 @@
# under the License.
#
import ast
+import eventlet
import six
import time
+from oslo_log import log as logging
+from oslo_utils import excutils
+
from cinder import exception
-from cinder.i18n import _LE, _LI
+from cinder.i18n import _, _LE, _LI
from cinder.utils import synchronized
import cinder.volume.drivers.ibm.ibm_storage as storage
from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper
from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient
from cinder.volume.drivers.ibm.ibm_storage import proxy
-from oslo_log import log as logging
-from oslo_utils import excutils
LOG = logging.getLogger(__name__)
@@ -49,10 +51,10 @@ class MetroMirrorManager(object):
ports = self._source.get_physical_links(
self._target.backend['storage_wwnn'])
if not ports:
- msg = (_LE("DS8K %(tgt)s is not connected to the DS8K %(src)s!")
- % {'tgt': self._target.backend['storage_wwnn'],
- 'src': self._source.backend['storage_wwnn']})
- raise exception.CinderException(data=msg)
+ msg = _("DS8K %(tgt)s is not connected to the DS8K %(src)s!" %
+ {'tgt': self._target.backend['storage_wwnn'],
+ 'src': self._source.backend['storage_wwnn']})
+ raise exception.CinderException(msg)
pairs = [{
'source_port_id': p['source_port_id'],
@@ -75,10 +77,11 @@ class MetroMirrorManager(object):
invalid_pair = "%s-%s" % (pair['source_port_id'],
pair['target_port_id'])
- msg = (_LE("Invalid port pair: %(invaid)s, vaild port "
- "pair(s) are: %(valid)s")
- % {'invalid': invalid_pair, 'valid': valid_pairs})
- raise exception.CinderException(data=msg)
+ msg = _("Invalid port pair: %(invaid)s, vaild port "
+ "pair(s) are: %(valid)s" %
+ {'invalid': invalid_pair,
+ 'valid': valid_pairs})
+ raise exception.CinderException(msg)
self._source.backend['port_pairs'] = [{
'source_port_id': p['target_port_id'],
'target_port_id': p['source_port_id']
@@ -252,13 +255,13 @@ class MetroMirrorManager(object):
# check the state of the pprc path
LOG.debug("Checking the state of the new PPRC path.")
for retry in range(4):
- time.sleep(2)
+ eventlet.sleep(2)
if self._is_pprc_paths_healthy(pid) == PPRC_PATH_HEALTHY:
break
if retry == 3:
self._source.delete_pprc_path(pid)
- msg = (_LE("Fail to create PPRC path %(src)s:%(tgt)s.")
- % {'src': src_lss, 'tgt': tgt_lss})
+ msg = _("Fail to create PPRC path %(src)s:%(tgt)s." %
+ {'src': src_lss, 'tgt': tgt_lss})
raise restclient.APIException(data=msg)
LOG.debug("Create the new PPRC path successfully.")
@@ -319,10 +322,12 @@ class MetroMirrorManager(object):
continue
vol_pairs.append({
- 'source_volume': str(target_vol_id),
- 'source_system_id': str(self._target.backend['storage_unit']),
- 'target_volume': str(lun.ds_id),
- 'target_system_id': str(self._source.backend['storage_unit'])
+ 'source_volume': six.text_type(target_vol_id),
+ 'source_system_id': six.text_type(
+ self._target.backend['storage_unit']),
+ 'target_volume': six.text_type(lun.ds_id),
+ 'target_system_id': six.text_type(
+ self._source.backend['storage_unit'])
})
target_vol_ids.append(target_vol_id)
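
The str() to six.text_type conversion above normalizes IDs to text on both interpreters: six.text_type is unicode on Python 2 and str on Python 3. A one-liner sketch (assumes six is installed, as it is in this tree):

import six

# '1234' on Python 3, u'1234' on Python 2
print(six.text_type(1234))
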
@@ -399,7 +404,7 @@ class Replication(object):
"%(primary)s, but in secondary backend it is "
"%(secondary)s") %
{'primary': src_conn_type, 'secondary': tgt_conn_type})
- raise exception.CinderException(data=msg)
+ raise exception.CinderException(msg)
# PPRC can not copy from ESE volume to standard volume or vice versus.
if src_conn_type == storage.XIV_CONNECTION_TYPE_FC_ECKD:
src_thin = self._source_helper.get_thin_provision()
@@ -449,18 +454,18 @@ class Replication(object):
state, lun.lss_pair = (
self._mm_manager.find_available_pprc_path(lun.ds_id[0:2]))
if state == PPRC_PATH_UNHEALTHY:
- msg = (_LE("The path(s) for volume %(name)s isn't available "
- "any more, please make sure the state of the path(s) "
- "which source LSS is %(lss)s is success.")
- % {'name': lun.cinder_name, 'lss': lun.ds_id[0:2]})
+ msg = _("The path(s) for volume %(name)s isn't available "
+ "any more, please make sure the state of the path(s) "
+ "which source LSS is %(lss)s is success." %
+ {'name': lun.cinder_name, 'lss': lun.ds_id[0:2]})
raise restclient.APIException(data=msg)
elif state == PPRC_PATH_NOT_EXIST:
pid, node = self._source_helper.get_pool_info(lun.ds_id[0:2])
lss_pair = {'source': (pid, lun.ds_id[0:2])}
lss_pair.update(self.find_new_lss_for_target())
lun.lss_pair = lss_pair
- msg = _LI("Begin to create replication volume, lss_pair is %s.")
- LOG.debug(msg, lun.lss_pair)
+ LOG.debug("Begin to create replication volume, lss_pair is %s." %
+ lun.lss_pair)
lun = self.create_replica(lun, False)
return lun
@@ -489,9 +494,9 @@ class Replication(object):
self._mm_manager.delete_pprc_pairs(lun)
self._delete_replica(lun)
except restclient.APIException as e:
- msg = (_LE('Fail to delete the target volume for volume '
- '%(volume)s, Exception: %(ex)s.')
- % {'volume': lun.ds_id, 'ex': six.text_type(e)})
+ msg = _('Failed to delete the target volume for volume '
+ '%(volume)s, Exception: %(ex)s.' %
+ {'volume': lun.ds_id, 'ex': six.text_type(e)})
raise exception.CinderException(msg)
lun.replication_status = 'disabled'
@@ -537,10 +542,10 @@ class Replication(object):
try:
self._target_helper.update_client()
except restclient.APIException:
- msg = _LE("Can not connect to the primary backend, "
- "please make sure it is back.")
+ msg = _("Can not connect to the primary backend, "
+ "please make sure it is back.")
LOG.error(msg)
- raise exception.UnableToFailOver(message=msg)
+ raise exception.UnableToFailOver(reason=msg)
LOG.debug("Failback starts, backend id is %s.", backend_id)
for lun in luns:
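
The retry loop earlier in this file also moves from time.sleep to eventlet.sleep. A minimal sketch of the pattern (assumes eventlet is available, as it is in a Cinder process; the helper below is illustrative, not from the patch): eventlet.sleep() yields to other green threads, whereas time.sleep() would block the whole service.

import eventlet

def wait_until_healthy(check, retries=4, interval=2):
    for _attempt in range(retries):
        if check():
            return True
        # Cooperative sleep: other green threads keep running.
        eventlet.sleep(interval)
    return False

print(wait_until_healthy(lambda: True))  # True on the first probe
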
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py
index 7badc262e..88daab67b 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py
@@ -26,11 +26,6 @@ from urllib import urlencode
from cinder import exception
from cinder.i18n import _LE
-# disable the warning of InsecurePlatformWarning and SNIMissingWarning
-requests.packages.urllib3.disable_warnings(
- url_exception.InsecurePlatformWarning)
-requests.packages.urllib3.disable_warnings(
- url_exception.SNIMissingWarning)
TOKEN_ERROR_CODES = ('BE7A001B', 'BE7A001A')
# remove BE7A0032 after REST fixed the problem of throwing message
diff --git a/cinder/volume/drivers/ibm/ibm_storage/proxy.py b/cinder/volume/drivers/ibm/ibm_storage/proxy.py
index ec6a0d417..13eee4397 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/proxy.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/proxy.py
@@ -18,13 +18,14 @@ import gettext
import inspect
import platform
+from oslo_log import log as logging
+from oslo_utils import timeutils
+
from cinder.i18n import _, _LE
from cinder import version
from cinder import volume as c_volume
import cinder.volume.drivers.ibm.ibm_storage as storage
from cinder.volume.drivers.ibm.ibm_storage import strings
-from oslo_log import log as logging
-from oslo_utils import timeutils
LOG = logging.getLogger(__name__)
gettext.install('cinder')
@@ -255,9 +256,8 @@ class IBMStorageProxy(object):
returns a string made out of code and status if present, else message
"""
- if getattr(exception, "code", None) is not None \
- and \
- getattr(exception, "status", None) is not None:
+ if (getattr(exception, "code", None) is not None and
+ getattr(exception, "status", None) is not None):
return "Status: '%s', Code: %s" % (
exception.status, exception.code)
return exception.message
@@ -382,14 +382,14 @@ class IBMStorageProxy(object):
@_trace_time
def _read_replication_devices(self):
- '''Read replication devices from configuration
+ """Read replication devices from configuration
Several replication devices are permitted.
If an entry already exists an error is assumed.
The format is:
replication_device = backend_id:vendor-id-1,unique_key:val....
- '''
+ """
if not self.driver:
return
replication_devices = self._get_safely_from_configuration(
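
The proxy.py hunks replace backslash continuations with parenthesized conditions and normalize docstrings to triple double quotes. A tiny sketch of the conditional style, per PEP 8 (illustrative function, not from the patch):

def has_code_and_status(exc):
    # Parentheses carry the continuation; no backslashes needed.
    return (getattr(exc, "code", None) is not None and
            getattr(exc, "status", None) is not None)

class FakeError(Exception):
    code = 42
    status = "Failed"

print(has_code_and_status(FakeError()))  # True
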
diff --git a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py
index 0c8454a12..753bbc34c 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py
@@ -17,6 +17,7 @@ import datetime
import six
import socket
+from oslo_log import log as logging
from pyxcli import client
from pyxcli import errors
from pyxcli.events import events
@@ -26,7 +27,7 @@ from pyxcli import transports
from cinder.volume import qos_specs
from cinder.volume import volume_types
-from cinder.i18n import _LE, _LW, _LI
+from cinder.i18n import _, _LE, _LW, _LI
from cinder import context
from cinder import objects
import cinder.volume.drivers.ibm.ibm_storage as storage
@@ -34,7 +35,6 @@ from cinder.volume.drivers.ibm.ibm_storage import certificate
from cinder.volume.drivers.ibm.ibm_storage import cryptish
from cinder.volume.drivers.ibm.ibm_storage import proxy
from cinder.volume.drivers.ibm.ibm_storage import strings
-from oslo_log import log as logging
OPENSTACK_PRODUCT_NAME = "OpenStack"
PERF_CLASS_NAME_PREFIX = "cinder-qos"
@@ -61,19 +61,8 @@ class Rate(object):
def _schedule_name_from_schedule(self, schedule):
if schedule == '00:00:20':
return 'min_interval'
- return 'cinder_{0}'.format(schedule.replace(':', '_'))
-
-
-async_rates = (
- Rate(rpo=30, schedule='00:00:20'),
- Rate(rpo=60, schedule='00:00:20'),
- Rate(rpo=300, schedule='00:02:00'),
- Rate(rpo=600, schedule='00:05:00'),
- Rate(rpo=3600, schedule='00:15:00'),
- Rate(rpo=7200, schedule='00:30:00'),
- Rate(rpo=14400, schedule='01:00:00'),
- Rate(rpo=43200, schedule='03:00:00'),
-)
+ return ("cinder_%(sched)s" %
+ {'sched': schedule.replace(':', '_')})
class XIVProxy(proxy.IBMStorageProxy):
@@ -81,6 +70,16 @@ class XIVProxy(proxy.IBMStorageProxy):
Supports IBM XIV, Spectrum Accelerate, A9000, A9000R
"""
+ async_rates = (
+ Rate(rpo=30, schedule='00:00:20'),
+ Rate(rpo=60, schedule='00:00:20'),
+ Rate(rpo=300, schedule='00:02:00'),
+ Rate(rpo=600, schedule='00:05:00'),
+ Rate(rpo=3600, schedule='00:15:00'),
+ Rate(rpo=7200, schedule='00:30:00'),
+ Rate(rpo=14400, schedule='01:00:00'),
+ Rate(rpo=43200, schedule='03:00:00'),
+ )
def __init__(self, storage_info, logger, exception,
driver=None, active_backend_id=None):
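
The hunk above moves the module-level async_rates tuple into XIVProxy as a class attribute, and later hunks read it through self.async_rates. A minimal sketch of the idea (illustrative names; Rate is approximated with a namedtuple): a class attribute keeps the table namespaced to the proxy and overridable per subclass or test fixture.

from collections import namedtuple

Rate = namedtuple('Rate', ['rpo', 'schedule'])

class ReplicationPolicy(object):
    # Shared by all instances, but still easy to override
    # in a subclass or a test.
    async_rates = (
        Rate(rpo=30, schedule='00:00:20'),
        Rate(rpo=300, schedule='00:02:00'),
    )

    def supported_rpos(self):
        return [rate.rpo for rate in self.async_rates]

print(ReplicationPolicy().supported_rpos())  # [30, 300]
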
@@ -94,9 +93,9 @@ class XIVProxy(proxy.IBMStorageProxy):
if active_backend_id:
LOG.info(_LI("__init__: active_backend_id: %(id)s"),
{'id': active_backend_id})
- self.ibm_storage_cli = \
- self.meta['ibm_storage_portal'] = \
- self.meta['ibm_storage_iqn'] = None
+ self.ibm_storage_cli = None
+ self.meta['ibm_storage_portal'] = None
+ self.meta['ibm_storage_iqn'] = None
self.ibm_storage_remote_cli = None
self.meta['ibm_storage_fc_targets'] = []
self.meta['storage_version'] = None
@@ -106,14 +105,16 @@ class XIVProxy(proxy.IBMStorageProxy):
def setup(self, context):
"""Connect ssl client."""
LOG.info(strings.SETUP_START)
- msg = "Active backend_id: '%(id)s'." % {'id': self.active_backend_id}
+ msg = _LI("Active backend_id: '%(id)s'." %
+ {'id': self.active_backend_id})
LOG.info(msg)
self.ibm_storage_cli = self._init_xcli(self.active_backend_id)
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI:
- self.meta['ibm_storage_iqn'] = self._call_xiv_xcli("config_get")\
- .as_dict('name')['iscsi_name'].value
+ self.meta['ibm_storage_iqn'] = (
+ self._call_xiv_xcli("config_get").
+ as_dict('name')['iscsi_name'].value)
portals = storage.get_online_iscsi_ports(self.ibm_storage_cli)
if len(portals) == 0:
@@ -123,9 +124,7 @@ class XIVProxy(proxy.IBMStorageProxy):
strings.SETUP_BASE_ERROR % {
'details': strings.SETUP_NO_ISCSI_PORTALS}))
- self.meta['ibm_storage_portal'] = (
- "%s:3260" %
- portals[:1][0])
+ self.meta['ibm_storage_portal'] = "%s:3260" % portals[:1][0]
remote_id = self._get_secondary_backend_id()
if remote_id:
@@ -139,28 +138,27 @@ class XIVProxy(proxy.IBMStorageProxy):
LOG.info(strings.SETUP_SUCCEEDED)
def _get_schedule_from_rpo(self, rpo):
- return [rate for rate in async_rates
+ return [rate for rate in self.async_rates
if rate.rpo == rpo][0].schedule_name
def _get_supported_rpo(self):
- return [rate.rpo for rate in async_rates]
+ return [rate.rpo for rate in self.async_rates]
@proxy._trace_time
def _update_active_schedule_objects(self):
- '''Set schedule objects on active backend.
+ """Set schedule objects on active backend.
The value 00:20:00 is covered in XIV by a pre-defined object named
min_interval.
- '''
+ """
schedules = self._call_xiv_xcli("schedule_list").as_dict('name')
- for rate in async_rates:
+ for rate in self.async_rates:
if rate.schedule == '00:00:20':
continue
name = rate.schedule_name
schedule = schedules.get(name, None)
if schedule:
- msg = 'Exists on local backend %(sch)s' % {'sch': name}
- LOG.debug(msg)
+ LOG.debug('Exists on local backend %(sch)s', {'sch': name})
interval = schedule.get('interval', '')
if interval != rate.schedule:
msg = (strings.UNEXPECTED_INTERVAL_VALUE %
@@ -169,25 +167,25 @@ class XIVProxy(proxy.IBMStorageProxy):
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
else:
- LOG.debug('create %(sch)s' % {'sch': name})
+ LOG.debug('create %(sch)s', {'sch': name})
self._call_xiv_xcli("schedule_create",
schedule=name, type='interval',
interval=rate.schedule)
@proxy._trace_time
def _update_remote_schedule_objects(self):
- '''Set schedule objects on remote backend.
+ """Set schedule objects on remote backend.
The value 00:20:00 is covered in XIV by a pre-defined object named
min_interval.
- '''
+ """
schedules = self._call_remote_xiv_xcli("schedule_list").as_dict('name')
- for rate in async_rates:
+ for rate in self.async_rates:
if rate.schedule == '00:00:20':
continue
name = rate.schedule_name
if schedules.get(name, None):
- LOG.debug('Exists on remote backend %(sch)s' % {'sch': name})
+ LOG.debug('Exists on remote backend %(sch)s', {'sch': name})
interval = schedules.get(name, None)['interval']
if interval != rate.schedule:
msg = (strings.UNEXPECTED_INTERVAL_VALUE %
@@ -205,13 +203,12 @@ class XIVProxy(proxy.IBMStorageProxy):
return
local_ibm_storage_cli = self._init_xcli(strings.PRIMARY_BACKEND_ID)
if not local_ibm_storage_cli:
- LOG.error(_LE('Failed to connect to main backend.'
+ LOG.error(_LE('Failed to connect to main backend. '
'Cannot retrieve main backend system_id'))
return
system_id = local_ibm_storage_cli.cmd.config_get().as_dict(
'name')['system_id'].value
- LOG.debug('system_id: %(id)s' %
- {'id': system_id})
+ LOG.debug('system_id: %(id)s', {'id': system_id})
self.system_id = system_id
@proxy._trace_time
@@ -287,8 +284,8 @@ class XIVProxy(proxy.IBMStorageProxy):
# list is not empty, check if class has the right values
for perf_class in classes_list:
- if not perf_class.max_iops == specs.get('iops', '0') or \
- not perf_class.max_bw == specs.get('bw', '0'):
+ if (not perf_class.max_iops == specs.get('iops', '0') or
+ not perf_class.max_bw == specs.get('bw', '0')):
raise self.meta['exception'].VolumeBackendAPIException(
data=strings.PERF_CLASS_VALUES_ERROR %
{'details': perf_class_name})
@@ -344,9 +341,9 @@ class XIVProxy(proxy.IBMStorageProxy):
if specs:
LOG.debug('_get_replication_info: specs %(specs)s',
{'specs': specs})
- info['enabled'] = specs.get(
- 'replication_enabled', '').upper() \
- in (u'TRUE', strings.METADATA_IS_TRUE)
+ info['enabled'] = (
+ specs.get('replication_enabled', '').upper() in
+ (u'TRUE', strings.METADATA_IS_TRUE))
replication_type = specs.get('replication_type', SYNC).lower()
if replication_type in (u'sync', u'<is> sync'):
info['mode'] = SYNC
@@ -429,8 +426,8 @@ class XIVProxy(proxy.IBMStorageProxy):
self._create_volume(volume)
if cg:
- volume_update['consistencygroup_id'] = \
- volume.get('consistencygroup_id', None)
+ volume_update['consistencygroup_id'] = (
+ volume.get('consistencygroup_id', None))
try:
self._call_xiv_xcli(
"cg_add_vol", vol=volume['name'], cg=cg)
@@ -710,8 +707,7 @@ class XIVProxy(proxy.IBMStorageProxy):
lun_id = self._vol_map_and_get_lun_id(
volume, connector, host)
- meta = \
- {
+ meta = {
'driver_volume_type': connection_type,
'data':
{
@@ -744,13 +740,13 @@ class XIVProxy(proxy.IBMStorageProxy):
modules = set()
for wwpn in fc_targets:
modules.add(wwpn[-2])
- meta['data']['recommended_connections'] = \
- len(modules) * CONNECTIONS_PER_MODULE
+ meta['data']['recommended_connections'] = (
+ len(modules) * CONNECTIONS_PER_MODULE)
meta['data']['target_wwn'] = fc_targets
if fc_targets == []:
fc_targets = all_storage_wwpns
- meta['data']['initiator_target_map'] = \
- self._build_initiator_target_map(fc_targets, connector)
+ meta['data']['initiator_target_map'] = (
+ self._build_initiator_target_map(fc_targets, connector))
LOG.debug(str(meta))
return meta
@@ -808,14 +804,14 @@ class XIVProxy(proxy.IBMStorageProxy):
# The following meta data is provided so that zoning can
# be cleared
- meta = \
- {
+ meta = {
'driver_volume_type': self._get_connection_type(),
'data': {'volume_id': volume['id'], },
}
meta['data']['target_wwn'] = fc_targets
- meta['data']['initiator_target_map'] = \
- self._build_initiator_target_map(fc_targets, connector)
+ meta['data']['initiator_target_map'] = (
+ self._build_initiator_target_map(fc_targets,
+ connector))
self._call_xiv_xcli("host_delete", host=host.name)
if not self._is_iscsi():
return meta
@@ -1168,11 +1164,10 @@ class XIVProxy(proxy.IBMStorageProxy):
{'vol': volume['name']})
active = volume_replication_mgr.is_mirror_active(
resource_id=volume['name'])
- except Exception as e:
+ except Exception:
active = False
state = 'active' if active else 'inactive'
- LOG.debug('Mirroring is %(state)s',
- {'state': state})
+ LOG.debug('Mirroring is %(state)s', {'state': state})
# In case of failback, mirroring must be active
# In case of failover we attempt to move in any condition
@@ -1274,9 +1269,9 @@ class XIVProxy(proxy.IBMStorageProxy):
# Create volume manager for both master and remote
volume_replication_mgr = volume_recovery_manager.VolumeRecoveryManager(
False, self.ibm_storage_cli)
- failover_volume_replication_mgr = \
+ failover_volume_replication_mgr = (
volume_recovery_manager.VolumeRecoveryManager(
- True, self.ibm_storage_remote_cli)
+ True, self.ibm_storage_remote_cli))
# get replication_info for all volumes at once
if len(volumes):
@@ -1431,12 +1426,12 @@ class XIVProxy(proxy.IBMStorageProxy):
self.meta['stat']['QoS_support'] = \
self._check_storage_version_for_qos_support()
- self.meta['stat']['location_info'] =\
+ self.meta['stat']['location_info'] = (
('%(destination)s:%(hostname)s:%(pool)s' %
{'destination': strings.XIV_BACKEND_PREFIX,
'hostname': self.storage_info[storage.FLAG_KEYS['address']],
'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]
- })
+ }))
pools = self._call_xiv_xcli(
"pool_list",
@@ -1503,9 +1498,9 @@ class XIVProxy(proxy.IBMStorageProxy):
vol_src=src_vref['name'],
vol_trg=volume['name'])
except errors.XCLIError as e:
- error = "Fatal: Failed to copy from '%s' to '%s': %s" % \
- (src_vref.get('name', ''), volume.get('name', ''),
- self._get_code_and_status_or_message(e))
+ error = ("Fatal: Failed to copy from '%s' to '%s': %s" %
+ (src_vref.get('name', ''), volume.get('name', ''),
+ self._get_code_and_status_or_message(e)))
LOG.error(error)
self._silent_delete_volume(volume=volume)
raise self._get_exception()(error)
@@ -1536,7 +1531,7 @@ class XIVProxy(proxy.IBMStorageProxy):
A utility method to translate from id
to CG name on the storage
'''
- return "cg_{id}".format(id=id)
+ return "cg_%(id)s" % {'id': id}
def _group_name_from_id(self, id):
'''Get storage group name from id.
@@ -1544,7 +1539,7 @@ class XIVProxy(proxy.IBMStorageProxy):
A utility method to translate from id
to Snapshot Group name on the storage
'''
- return "cgs_{id}".format(id=id)
+ return "cgs_%(id)s" % {'id': id}
def _cg_name_from_volume(self, volume):
'''Get storage CG name from volume.
@@ -1591,7 +1586,7 @@ class XIVProxy(proxy.IBMStorageProxy):
def _volume_name_from_cg_snapshot(self, cgs, vol):
# Note: The string is limited by the storage to 63 characters
- return '{cgs}.{vol}'.format(cgs=cgs, vol=vol)[0:62]
+ return ('%(cgs)s.%(vol)s' % {'cgs': cgs, 'vol': vol})[0:62]
@proxy._trace_time
def create_consistencygroup(self, context, group):
@@ -1608,15 +1603,16 @@ class XIVProxy(proxy.IBMStorageProxy):
pool=self.storage_info[
storage.FLAG_KEYS['storage_pool']]).as_list
except errors.CgNameExistsError as e:
- error = "consistency group %s already exists on backend" % cgname
+ error = _("consistency group %s already exists on backend" %
+ cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgLimitReachedError as e:
- error = "Reached Maximum number of consistency groups"
+ error = _("Reached Maximum number of consistency groups")
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
- error = "Fatal: %s" % self._get_code_and_status_or_message(e)
+ error = _("Fatal: %s" % self._get_code_and_status_or_message(e))
LOG.error(error)
raise self._get_exception()(error)
model_update = {'status': 'available'}
@@ -1662,7 +1658,7 @@ class XIVProxy(proxy.IBMStorageProxy):
LOG.error(
_LE("Creating CG from cgsnapshot failed: %(details)s"),
{'details': self._get_code_and_status_or_message(e)})
- raise e
+ raise
created_volumes = []
try:
groupname = self._group_name_from_cgsnapshot(cgsnapshot)
@@ -1692,7 +1688,7 @@ class XIVProxy(proxy.IBMStorageProxy):
# cleanup and then raise exception
self._silent_cleanup_consistencygroup_from_src(
context, group, created_volumes, cgname)
- raise e
+ raise
elif source_cg and sorted_source_vols:
LOG.debug("Creating from CG %(cg)s .",
@@ -1703,7 +1699,7 @@ class XIVProxy(proxy.IBMStorageProxy):
except Exception as e:
LOG.error(_LE("Creating CG from CG failed: %(details)s"),
{'details': self._get_code_and_status_or_message(e)})
- raise e
+ raise
created_volumes = []
try:
for volume, source in zip(volumes, sorted_source_vols):
@@ -1721,7 +1717,7 @@ class XIVProxy(proxy.IBMStorageProxy):
# cleanup and then raise exception
self._silent_cleanup_consistencygroup_from_src(
context, group, created_volumes, cgname)
- raise e
+ raise
else:
error = 'create_consistencygroup_from_src called without a source'
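
The hunks in create_consistencygroup_from_src replace raise e with a bare raise. A short sketch of the difference (illustrative functions): a bare raise re-raises the active exception with its original traceback on both Python 2 and 3, while raise e loses the original traceback on Python 2.

def risky():
    raise KeyError("boom")

def wrapper():
    try:
        risky()
    except KeyError:
        # ...cleanup would run here...
        raise  # original traceback preserved

try:
    wrapper()
except KeyError:
    pass
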
@@ -1784,25 +1780,25 @@ class XIVProxy(proxy.IBMStorageProxy):
"cg_delete", cg=cgname).as_list
model_update['status'] = 'deleted'
except errors.CgDoesNotExistError as e:
- error = "consistency group %s does not exist " \
- "on backend" % cgname
+ error = _("consistency group %s does not exist on backend" %
+ cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgHasMirrorError as e:
- error = "consistency group %s is being mirrored " % cgname
+ error = _("consistency group %s is being mirrored " % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgNotEmptyError as e:
- error = "consistency group %s is not empty " % cgname
+ error = _("consistency group %s is not empty " % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgBadNameError as e:
- error = "consistency group %s does not exist " % cgname
+ error = _("consistency group %s does not exist " % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
- error = "Fatal: %s. CG: %s" \
- % self._get_code_and_status_or_message(e), cgname
+ error = _("Fatal: %s. CG: %s" %
+ self._get_code_and_status_or_message(e), cgname)
LOG.error(error)
raise self._get_exception()(error)
return model_update, volumes_model_update
@@ -1863,7 +1859,7 @@ class XIVProxy(proxy.IBMStorageProxy):
self._call_xiv_xcli(
"cg_remove_vol", vol=volume['name'])
except Exception:
- LOG.debug("cg_remove_vol(%s) failed" % volume['name'])
+ LOG.debug("cg_remove_vol(%s) failed", volume['name'])
if remove_volumes:
cgname = self._cg_name_from_group(group)
@@ -1872,8 +1868,8 @@ class XIVProxy(proxy.IBMStorageProxy):
self._call_xiv_xcli(
"cg_add_vol", vol=volume['name'], cg=cgname)
except Exception:
- LOG.debug("cg_add_vol(%s, %s) failed" %
- volume['name'], cgname)
+ LOG.debug("cg_add_vol(%(name)s, %(cgname)s) failed",
+ {'name': volume['name'], 'cgname': cgname})
@proxy._trace_time
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
@@ -1891,44 +1887,44 @@ class XIVProxy(proxy.IBMStorageProxy):
"cg_snapshots_create", cg=cgname,
snap_group=groupname).as_list
except errors.CgDoesNotExistError as e:
- error = _LE("Consistency group %s does not exist on backend") % \
- cgname
+ error = _("Consistency group %s does not exist on backend" %
+ cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgBadNameError as e:
- error = _LE("Consistency group %s has an illegal name") % cgname
+ error = _("Consistency group %s has an illegal name" % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupDoesNotExistError as e:
- error = _LE("Snapshot group %s has an illegal name") % cgname
+ error = _("Snapshot group %s has an illegal name" % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.PoolSnapshotLimitReachedError as e:
- error = _LE("Reached maximum snapshots allocation size")
+ error = _("Reached maximum snapshots allocation size")
LOG.error(error)
raise self._get_exception()(error)
except errors.CgEmptyError as e:
- error = _LE("Consistency group %s is empty") % cgname
+ error = _("Consistency group %s is empty" % cgname)
LOG.error(error)
raise self._get_exception()(error)
except (errors.MaxVolumesReachedError,
errors.DomainMaxVolumesReachedError) as e:
- error = _LE("Reached Maximum number of volumes")
+ error = _("Reached Maximum number of volumes")
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupIsReservedError as e:
- error = _LE("Consistency group %s name is reserved") % cgname
+ error = _("Consistency group %s name is reserved" % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupAlreadyExistsError as e:
- error = _LE("Snapshot group %s already exists") % groupname
+ error = _("Snapshot group %s already exists" % groupname)
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
- error = _LE("Fatal: CG %(cg)s, Group %(group)s. %(err)s") % \
- {'cg': cgname,
- 'group': groupname,
- 'err': self._get_code_and_status_or_message(e)}
+ error = _("Fatal: CG %(cg)s, Group %(group)s. %(err)s" %
+ {'cg': cgname,
+ 'group': groupname,
+ 'err': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
@@ -2031,10 +2027,11 @@ class XIVProxy(proxy.IBMStorageProxy):
host_bunch = self._get_bunch_from_host(connector)
except Exception as e:
details = self._get_code_and_status_or_message(e)
- raise self._get_exception()(
- "%s %s" % (
- storage.XIV_LOG_PREFIX,
- strings.INVALID_CONNECTOR_INFO % {'details': details}))
+ ex_details = (strings.INVALID_CONNECTOR_INFO %
+ {'details': details})
+ raise self._get_exception()("%(prefix)s %(ex_details)s" %
+ {'prefix': storage.XIV_LOG_PREFIX,
+ 'ex_details': ex_details})
host = []
chap = None
all_hosts = self._call_xiv_xcli("host_list").as_list
@@ -2105,7 +2102,7 @@ class XIVProxy(proxy.IBMStorageProxy):
"""Check on chap state and define host accordingly."""
chap_name = None
chap_secret = None
- if (self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI) \
- and (self._get_chap_type() == storage.CHAP_ENABLED):
+ if (self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI
+ and self._get_chap_type() == storage.CHAP_ENABLED):
host_bunch = dict({'name': host, 'chap': None, })
chap = self._create_chap(host=host_bunch)
@@ -2313,7 +2310,7 @@ class XIVProxy(proxy.IBMStorageProxy):
self._call_xiv_xcli(
"mapping_list",
host=host['name']).as_list]
- for lun_id in xrange(MIN_LUNID, MAX_LUNID):
+ for lun_id in six.moves.xrange(MIN_LUNID, MAX_LUNID):
if lun_id not in used_luns:
self._call_xiv_xcli(
"map_vol",
@@ -2334,19 +2331,19 @@ class XIVProxy(proxy.IBMStorageProxy):
"""
target_wwpns = []
- target_wwpns += \
+ target_wwpns += (
[t.wwpn for t in
self._call_xiv_xcli("fc_port_list") if
t.wwpn != '0000000000000000' and
t.role == 'Target' and
- t.port_state == 'Online']
+ t.port_state == 'Online'])
fc_targets = list(set(target_wwpns))
fc_targets.sort(self._sort_last_digit)
LOG.debug("fc_targets : %s" % fc_targets)
return fc_targets
def _sort_last_digit(self, a, b):
- return cmp(a[-1:], b[-1:])
+ return ((a[-1:] > b[-1:]) - (a[-1:] < b[-1:]))
@proxy._trace_time
def _get_xcli(self, xcli, backend_id):
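
The _sort_last_digit fix above uses the standard cmp() replacement idiom: Python 3 removed the cmp() builtin, and (x > y) - (x < y) reproduces its -1/0/1 contract. A sketch (illustrative WWPN values; note that on Python 3 list.sort() also no longer accepts a raw comparison function, so functools.cmp_to_key would be needed at the call site as well):

from functools import cmp_to_key

def cmp_last_digit(a, b):
    return (a[-1:] > b[-1:]) - (a[-1:] < b[-1:])

wwpns = ['5001738027810142', '5001738027810141']
print(sorted(wwpns, key=cmp_to_key(cmp_last_digit)))
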
@@ -2427,7 +2424,7 @@ class XIVProxy(proxy.IBMStorageProxy):
@proxy._trace_time
def _init_xcli(self, backend_id=strings.PRIMARY_BACKEND_ID):
- """Initilize XCLI connection
+ """Initilize XCLI connection.
returns an XCLIClient object
"""
@@ -2436,21 +2433,26 @@ class XIVProxy(proxy.IBMStorageProxy):
address, user, password = self._get_connection_params(backend_id)
except Exception as e:
details = self._get_code_and_status_or_message(e)
- LOG.error(strings.SETUP_BASE_ERROR, {'details': details})
+ ex_details = strings.SETUP_BASE_ERROR % {'details': details}
+ LOG.error(ex_details)
raise self.meta['exception'].InvalidParameterValue(
- "%s %s" % (storage.XIV_LOG_PREFIX, strings.SETUP_BASE_ERROR %
- {'details': details}))
+ _("%(prefix)s %(ex_details)s" % {
+ 'prefix': storage.XIV_LOG_PREFIX,
+ 'ex_details': ex_details}))
self._verify_xiv_flags(address, user, password)
try:
clear_pass = cryptish.decrypt(password)
except TypeError:
+ ex_details = (strings.SETUP_BASE_ERROR %
+ {'details': strings.SETUP_INVALID_ENCRYPTION})
LOG.error(strings.SETUP_BASE_ERROR,
{'details': strings.SETUP_INVALID_ENCRYPTION})
raise self.meta['exception'].InvalidParameterValue(
- "%s %s" % (storage.XIV_LOG_PREFIX, strings.SETUP_BASE_ERROR %
- {'details': strings.SETUP_INVALID_ENCRYPTION}))
+ _("%(prefix)s %(ex_details)s" % {
+ 'prefix': storage.XIV_LOG_PREFIX,
+ 'ex_details': ex_details}))
certs = certificate.CertificateCollector()
path = certs.collect_certificate()
@@ -2470,7 +2472,7 @@ class XIVProxy(proxy.IBMStorageProxy):
'address': address}
LOG.error(strings.SETUP_BASE_ERROR,
{'details': err_msg})
- raise self.meta['exception'].HostNotFound(err_msg)
+ raise self.meta['exception'].HostNotFound(host=err_msg)
except Exception as er:
err_msg = strings.SETUP_UNKNOWN_ERROR % {'error': er}
LOG.error(strings.SETUP_BASE_ERROR,