Created
December 9, 2016 00:58
-
-
Save hemna/c04ec27ec9a73559379d193a005ca6de to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/cinder/volume/drivers/ibm/ibm_storage/__init__.py b/cinder/volume/drivers/ibm/ibm_storage/__init__.py | |
index 498521f..317e0ce 100644 | |
--- a/cinder/volume/drivers/ibm/ibm_storage/__init__.py | |
+++ b/cinder/volume/drivers/ibm/ibm_storage/__init__.py | |
@@ -13,7 +13,6 @@ | |
# License for the specific language governing permissions and limitations | |
# under the License. | |
# | |
-import neobunch | |
BLOCKS_PER_17_GIGABYTES = 33554432.0 | |
XIV_LOG_PREFIX = "[IBM XIV STORAGE]:" | |
@@ -26,7 +25,7 @@ STORAGE_DRIVER_XIV = 'xiv' | |
STORAGE_DRIVER_DS8K = 'ds8k' | |
-CONF_KEYS = neobunch.NeoBunch( | |
+CONF_KEYS = dict( | |
driver="volume_driver", | |
proxy="proxy", | |
user="san_login", | |
@@ -42,7 +41,7 @@ CONF_KEYS = neobunch.NeoBunch( | |
system_id='system_id', | |
replication_device='replication_device' | |
) | |
-CONF_BACKEND_KEYS = neobunch.NeoBunch( | |
+CONF_BACKEND_KEYS = dict( | |
user="san_login", | |
password="san_password", | |
storage_pool="san_clustername", | |
@@ -51,7 +50,7 @@ CONF_BACKEND_KEYS = neobunch.NeoBunch( | |
connection_type="connection_type", | |
management_ips="management_ips", | |
) | |
-FLAG_KEYS = neobunch.NeoBunch( | |
+FLAG_KEYS = dict( | |
user="user", | |
password="password", | |
storage_pool="vol_pool", | |
@@ -60,7 +59,7 @@ FLAG_KEYS = neobunch.NeoBunch( | |
bypass_connection_check="XIV_BYPASS_CONNECTION_CHECK", | |
management_ips="management_ips" | |
) | |
-METADATA_KEYS = neobunch.NeoBunch( | |
+METADATA_KEYS = dict( | |
ibm_storage_version='openstack_ibm_storage_driver_version', | |
openstack_version='openstack_version', | |
pool_host_key='openstack_compute_node_%(hostname)s', | |
@@ -107,7 +106,7 @@ def get_online_iscsi_ports(ibm_storage_cli): | |
"""Returns online iscsi ports""" | |
iscsi_ports = [ | |
- neobunch.NeoBunch( | |
+ dict( | |
{ | |
'ip': p.address, | |
# ipinterface_list returns ports field in Gen3, and | |
@@ -118,7 +117,7 @@ def get_online_iscsi_ports(ibm_storage_cli): | |
if p.type == 'iSCSI'] | |
iscsi_connected_ports = [ | |
- neobunch.NeoBunch( | |
+ dict( | |
{ | |
'port': p.index, | |
'module': p.module_id | |
@@ -129,8 +128,8 @@ def get_online_iscsi_ports(ibm_storage_cli): | |
for ip in iscsi_ports: | |
if len([ | |
p for p in iscsi_connected_ports | |
- if p.port == ip.port and p.module == ip.module | |
+ if p['port'] == ip['port'] and p['module'] == ip['module'] | |
]) > 0: | |
- to_return += [ip.ip] | |
+ to_return += [ip['ip']] | |
return to_return | |
diff --git a/cinder/volume/drivers/ibm/ibm_storage/proxy.py b/cinder/volume/drivers/ibm/ibm_storage/proxy.py | |
index cd9b077..65cf112 100644 | |
--- a/cinder/volume/drivers/ibm/ibm_storage/proxy.py | |
+++ b/cinder/volume/drivers/ibm/ibm_storage/proxy.py | |
@@ -18,8 +18,6 @@ import gettext | |
import inspect | |
import platform | |
-import neobunch | |
- | |
from cinder.i18n import _LE | |
from cinder import version | |
from cinder import volume as c_volume | |
@@ -80,12 +78,12 @@ class IBMStorageProxy(object): | |
"""Initialize Proxy.""" | |
self.storage_info = storage_info | |
- self.meta = neobunch.NeoBunch() | |
+ self.meta = dict() | |
self.logger = logger | |
- self.meta.exception = exception | |
- self.meta.openstack_version = "cinder-%s" % version.version_string() | |
- self.meta.stat = None | |
+ self.meta['exception'] = exception | |
+ self.meta['openstack_version'] = "cinder-%s" % version.version_string() | |
+ self.meta['stat'] = None | |
self.driver = driver | |
self.full_version = "%(title)s (v%(version)s)" % { | |
'title': strings.TITLE, | |
@@ -93,9 +91,9 @@ class IBMStorageProxy(object): | |
self.active_backend_id = active_backend_id | |
self.targets = {} | |
self._read_replication_devices() | |
- self.meta.bypass_connection_check = ( | |
+ self.meta['bypass_connection_check'] = ( | |
self._get_safely_from_configuration( | |
- storage.FLAG_KEYS.bypass_connection_check, False)) | |
+ storage.FLAG_KEYS['bypass_connection_check'], False)) | |
@_trace_time | |
def setup(self, context): | |
@@ -169,9 +167,9 @@ class IBMStorageProxy(object): | |
@_trace_time | |
def get_volume_stats(self, refresh=False): | |
"""get volume stats.""" | |
- if self.meta.stat is None or refresh: | |
+ if self.meta['stat'] is None or refresh: | |
self._update_stats() | |
- return self.meta.stat | |
+ return self.meta['stat'] | |
@_trace_time | |
def _update_stats(self): | |
@@ -213,7 +211,7 @@ class IBMStorageProxy(object): | |
pass | |
@_trace_time | |
- def _get_bunch_from_host( | |
+ def _get_host_info( | |
self, connector, host_id=0, host_name=None, chap=None): | |
"""Get's a Bunch describing a host""" | |
if not host_name: | |
@@ -225,14 +223,13 @@ class IBMStorageProxy(object): | |
if len(wwpns) == 0 and "wwnns" in connector: | |
wwpns = connector.get("wwns", []) | |
- return neobunch.NeoBunch( | |
- { | |
+ return { | |
'name': current_host_name, | |
'initiator': initiator, | |
'id': host_id, | |
'wwpns': wwpns, | |
'chap': chap, | |
- }) | |
+ } | |
@_trace_time | |
def _get_os_type(self): | |
@@ -249,7 +246,7 @@ class IBMStorageProxy(object): | |
def _get_exception(self): | |
"""Get's Cinder exception""" | |
- return self.meta.exception.CinderException | |
+ return self.meta['exception'].CinderException | |
def _get_code_and_status_or_message(self, exception): | |
"""Returns status message | |
@@ -277,7 +274,7 @@ class IBMStorageProxy(object): | |
:returns: iscsi|fibre_channel | |
""" | |
return self._get_safely_from_configuration( | |
- storage.CONF_KEYS.connection_type, | |
+ storage.CONF_KEYS['connection_type'], | |
default=storage.XIV_CONNECTION_TYPE_ISCSI) | |
def _is_iscsi(self): | |
@@ -288,7 +285,7 @@ class IBMStorageProxy(object): | |
def _get_management_ips(self): | |
"""Gets the management IP addresses from conf""" | |
return self._get_safely_from_configuration( | |
- storage.CONF_KEYS.management_ips, | |
+ storage.CONF_KEYS['management_ips'], | |
default='') | |
def _get_chap_type(self): | |
@@ -297,9 +294,9 @@ class IBMStorageProxy(object): | |
:returns: disabled|enabled | |
""" | |
LOG.debug("_get_chap_type chap: %(chap)s", | |
- {'chap': storage.CONF_KEYS.chap}) | |
+ {'chap': storage.CONF_KEYS['chap']}) | |
return self._get_safely_from_configuration( | |
- storage.CONF_KEYS.chap, | |
+ storage.CONF_KEYS['chap'], | |
default=storage.CHAP_NONE) | |
def _get_safely_from_configuration(self, key, default=None): | |
diff --git a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py | |
index 15cd302..d361965 100644 | |
--- a/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py | |
+++ b/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py | |
@@ -17,7 +17,6 @@ import datetime | |
import six | |
import socket | |
-import neobunch | |
from pyxcli import client | |
from pyxcli import errors | |
from pyxcli.events import events | |
@@ -341,18 +340,18 @@ class XIVProxy(proxy.IBMStorageProxy): | |
return self._get_qos_specs(type_id) | |
def _get_replication_info(self, specs): | |
- info = neobunch.NeoBunch({'enabled': False, 'mode': None, 'rpo': 0}) | |
+ info = {'enabled': False, 'mode': None, 'rpo': 0} | |
if specs: | |
LOG.debug('_get_replication_info: specs %(specs)s', | |
{'specs': specs}) | |
- info.enabled = specs.get( | |
+ info['enabled'] = specs.get( | |
'replication_enabled', '').upper() \ | |
in (u'TRUE', strings.METADATA_IS_TRUE) | |
replication_type = specs.get('replication_type', SYNC).lower() | |
if replication_type in (u'sync', u'<is> sync'): | |
- info.mode = SYNC | |
+ info['mode'] = SYNC | |
elif replication_type in (u'async', u'<is> async'): | |
- info.mode = ASYNC | |
+ info['mode'] = ASYNC | |
else: | |
msg = ('_get_replication_info: invalid ' | |
'replication type %(info)s' % | |
@@ -361,15 +360,15 @@ class XIVProxy(proxy.IBMStorageProxy): | |
raise self._get_exception()( | |
message=strings.REPLICA_INVALID_MODE % | |
{'mode': replication_type}) | |
- info.rpo = int(specs.get('rpo', u'<is> 0')[5:]) | |
- if info.rpo and info.rpo not in self._get_supported_rpo(): | |
+ info['rpo'] = int(specs.get('rpo', u'<is> 0')[5:]) | |
+ if info['rpo'] and info['rpo'] not in self._get_supported_rpo(): | |
msg = ('_get_replication_info: invalid ' | |
'replication type %(info)s' | |
'_get_replication_info: invalid rpo %(rpo)s' % | |
- {'info': info, 'rpo': info.rpo}) | |
+ {'info': info, 'rpo': info['rpo']}) | |
LOG.error(msg) | |
raise self._get_exception()( | |
- message=strings.REPLICA_INVALID_RPO % {'rpo': info.rpo}) | |
+ message=strings.REPLICA_INVALID_RPO % {'rpo': info['rpo']}) | |
msg = ('_get_replication_info: info %(info)s' % | |
{'info': info}) | |
LOG.debug(msg) | |
@@ -382,7 +381,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
try: | |
self._call_xiv_xcli( | |
"vol_create", vol=volume['name'], size_blocks=size, | |
- pool=self.storage_info[storage.FLAG_KEYS.storage_pool]) | |
+ pool=self.storage_info[storage.FLAG_KEYS['storage_pool']]) | |
except errors.SystemOutOfSpaceError: | |
msg = (strings.CREATE_VOLUME_BASE_ERROR, | |
{'details': strings.CREATE_VOLUME_SYSTEM_OUT_OF_SPACE}) | |
@@ -392,7 +391,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
{'details': strings.CREATE_VOLUME_SYSTEM_OUT_OF_SPACE}) | |
except errors.PoolOutOfSpaceError: | |
err = strings.CREATE_VOLUME_POOL_OUT_OF_SPACE % { | |
- 'pool': self.storage_info[storage.FLAG_KEYS.storage_pool]} | |
+ 'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]} | |
msg = (strings.CREATE_VOLUME_BASE_ERROR, | |
{'details': err}) | |
LOG.error(msg) | |
@@ -1238,7 +1237,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
"to fail back again.") | |
LOG.info(msg) | |
return self.active_backend_id, volume_update_list | |
- pool_slave = self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
pool_master = self._get_target_params( | |
self.active_backend_id)['san_clustername'] | |
goal_status = 'available' | |
@@ -1256,7 +1255,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
LOG.error(msg) | |
raise self.meta.exception.VolumeBackendAPIException( | |
data=msg) | |
- pool_master = self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
try: | |
pool_slave = self._get_target_params( | |
secondary_id)['san_clustername'] | |
@@ -1265,7 +1264,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
LOG.error(msg) | |
raise self.meta.exception.VolumeBackendAPIException( | |
data=msg) | |
- pool_master = self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
goal_status = objects.fields.ReplicationStatus.FAILED_OVER | |
# connnect xcli to secondary storage according to backend_id by | |
@@ -1359,7 +1358,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
if (dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host): | |
return False | |
- pool_name = self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
# if pool is different. else - we're on the same pool and retype is ok. | |
if (pool_name != dest_pool): | |
@@ -1422,8 +1421,8 @@ class XIVProxy(proxy.IBMStorageProxy): | |
self.meta.stat["volume_backend_name"] = backend_name or \ | |
'%s_%s_%s_%s' % ( | |
strings.XIV_BACKEND_PREFIX, | |
- self.storage_info[storage.FLAG_KEYS.address], | |
- self.storage_info[storage.FLAG_KEYS.storage_pool], | |
+ self.storage_info[storage.FLAG_KEYS['address']], | |
+ self.storage_info[storage.FLAG_KEYS['storage_pool']], | |
connection_type) | |
self.meta.stat["vendor_name"] = 'IBM' | |
self.meta.stat["driver_version"] = self.full_version | |
@@ -1435,17 +1434,17 @@ class XIVProxy(proxy.IBMStorageProxy): | |
self.meta.stat['location_info'] =\ | |
('%(destination)s:%(hostname)s:%(pool)s' % | |
{'destination': strings.XIV_BACKEND_PREFIX, | |
- 'hostname': self.storage_info[storage.FLAG_KEYS.address], | |
- 'pool': self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ 'hostname': self.storage_info[storage.FLAG_KEYS['address']], | |
+ 'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
}) | |
pools = self._call_xiv_xcli( | |
"pool_list", | |
- pool=self.storage_info[storage.FLAG_KEYS.storage_pool]).as_list | |
+ pool=self.storage_info[storage.FLAG_KEYS['storage_pool']]).as_list | |
if len(pools) != 1: | |
LOG.error( | |
_LE("_update_stats: Pool %(pool)s not available on storage"), | |
- {'pool': self.storage_info[storage.FLAG_KEYS.storage_pool]}) | |
+ {'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]}) | |
return | |
pool = pools[0] | |
@@ -1606,7 +1605,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
try: | |
self._call_xiv_xcli( | |
"cg_create", cg=cgname, | |
- pool=self.storage_info[storage.FLAG_KEYS.storage_pool]).as_list | |
+ pool=self.storage_info[storage.FLAG_KEYS['storage_pool']]).as_list | |
except errors.CgNameExistsError as e: | |
error = "consistency group %s already exists on backend" % cgname | |
LOG.error(error) | |
@@ -2009,12 +2008,12 @@ class XIVProxy(proxy.IBMStorageProxy): | |
""" | |
if host: | |
- if host.chap: | |
- chap_name = host.chap[0] | |
+ if host['chap']: | |
+ chap_name = host['chap'][0] | |
LOG.debug("_create_chap: %(chap_name)s ", | |
{'chap_name': chap_name}) | |
else: | |
- chap_name = host.name | |
+ chap_name = host['name'] | |
else: | |
LOG.info(_LI("_create_chap: host missing!!!")) | |
chap_name = "12345678901234" | |
@@ -2028,7 +2027,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
"""Returns a host looked up via initiator.""" | |
try: | |
- host_bunch = self._get_bunch_from_host(connector) | |
+ host_info = self._get_host_info(connector) | |
except Exception as e: | |
details = self._get_code_and_status_or_message(e) | |
raise self._get_exception()( | |
@@ -2040,11 +2039,11 @@ class XIVProxy(proxy.IBMStorageProxy): | |
all_hosts = self._call_xiv_xcli("host_list").as_list | |
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: | |
host = [host_obj for host_obj in all_hosts | |
- if host_bunch.initiator in host_obj.iscsi_ports.split(',')] | |
+ if host_info['initiator'] in host_obj.iscsi_ports.split(',')] | |
else: | |
if 'wwpns' in connector: | |
- if len(host_bunch.wwpns) > 0: | |
- wwpn_set = set([wwpn.lower() for wwpn in host_bunch.wwpns]) | |
+ if len(host_info['wwpns']) > 0: | |
+ wwpn_set = set([wwpn.lower() for wwpn in host_info['wwpns']]) | |
host = [host for host in all_hosts if | |
len(wwpn_set.intersection(host.get( | |
'fc_ports', '').lower().split(','))) > 0] | |
@@ -2057,7 +2056,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
self._generate_chap_secret(host[0].iscsi_chap_name)) | |
LOG.debug("_get_host: chap_name %(chap_name)s ", | |
{'chap_name': host[0].iscsi_chap_name}) | |
- return self._get_bunch_from_host( | |
+ return self._get_host_info( | |
connector, host[0].id, host[0].name, chap) | |
LOG.debug("_get_host: returns None") | |
@@ -2105,8 +2104,8 @@ class XIVProxy(proxy.IBMStorageProxy): | |
chap_secret = None | |
if (self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI) \ | |
and (self._get_chap_type() == storage.CHAP_ENABLED): | |
- host_bunch = neobunch.NeoBunch({'name': host, 'chap': None, }) | |
- chap = self._create_chap(host=host_bunch) | |
+ host_info = {'name': host, 'chap': None, } | |
+ chap = self._create_chap(host=host_info) | |
chap_name = chap[0] | |
chap_secret = chap[1] | |
LOG.debug("_define_host_according_to_chap: %(name)s : %(secret)s", | |
@@ -2117,20 +2116,20 @@ class XIVProxy(proxy.IBMStorageProxy): | |
chap_secret=chap_secret, | |
domain_name=in_domain) | |
- def _define_ports(self, host_bunch): | |
+ def _define_ports(self, host_info): | |
"""Defines ports in XIV.""" | |
fc_targets = [] | |
- LOG.debug(host_bunch.name) | |
+ LOG.debug(host_info['name']) | |
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: | |
- self._define_iscsi(host_bunch) | |
+ self._define_iscsi(host_info) | |
else: | |
- fc_targets = self._define_fc(host_bunch) | |
+ fc_targets = self._define_fc(host_info) | |
fc_targets = list(set(fc_targets)) | |
fc_targets.sort(self._sort_last_digit) | |
return fc_targets | |
def _get_pool_domain(self, connector): | |
- pool_name = self.storage_info[storage.FLAG_KEYS.storage_pool] | |
+ pool_name = self.storage_info[storage.FLAG_KEYS['storage_pool']] | |
LOG.debug("pool name from configuration: %s" % pool_name) | |
domain = None | |
try: | |
@@ -2145,17 +2144,17 @@ class XIVProxy(proxy.IBMStorageProxy): | |
def _define_host(self, connector): | |
"""Defines a host in XIV.""" | |
domain = self._get_pool_domain(connector) | |
- host_bunch = self._get_bunch_from_host(connector) | |
+ host_info = self._get_host_info(connector) | |
host = self._call_xiv_xcli( | |
- "host_list", host=host_bunch.name).as_list | |
+ "host_list", host=host_info['name']).as_list | |
connection_type = self._get_connection_type() | |
if len(host) == 0: | |
LOG.debug("Non existing host, defining") | |
host = self._define_host_according_to_chap( | |
- host=host_bunch.name, in_domain=domain) | |
- host_bunch = self._get_bunch_from_host(connector, host.id) | |
+ host=host_info['name'], in_domain=domain) | |
+ host_info = self._get_host_info(connector, host.id) | |
else: | |
- host_bunch = self._get_bunch_from_host(connector, host[0].id) | |
+ host_info = self._get_host_info(connector, host[0].id) | |
LOG.debug("Generating hostname for connector %(conn)s" % | |
{'conn': connector}) | |
generated_hostname = storage.get_host_or_create_from_iqn( | |
@@ -2169,19 +2168,19 @@ class XIVProxy(proxy.IBMStorageProxy): | |
in_domain=domain) | |
else: | |
host = generated_host[0] | |
- host_bunch = self._get_bunch_from_host( | |
+ host_info = self._get_host_info( | |
connector, host.id, host_name=generated_hostname) | |
- LOG.debug("The host_bunch : %s" % host_bunch) | |
- return host_bunch | |
+ LOG.debug("The host_info : %s" % host_info) | |
+ return host_info | |
@proxy._trace_time | |
- def _define_fc(self, host_bunch): | |
+ def _define_fc(self, host_info): | |
"""Define FC Connectivity.""" | |
fc_targets = [] | |
- if len(host_bunch.wwpns) > 0: | |
+ if len(host_info['wwpns']) > 0: | |
connected_wwpns = [] | |
- for wwpn in host_bunch.wwpns: | |
+ for wwpn in host_info['wwpns']: | |
component_ids = list(set( | |
[p.component_id for p in | |
self._call_xiv_xcli( | |
@@ -2200,7 +2199,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
fc_targets += wwpn_fc_target_lists | |
LOG.debug("adding fc port %s" % wwpn) | |
self._call_xiv_xcli( | |
- "host_add_port", host=host_bunch.name, | |
+ "host_add_port", host=host_info['name'], | |
fcaddress=wwpn) | |
if len(connected_wwpns) == 0: | |
LOG.error(strings.CONNECTIVITY_FC_NO_TARGETS) | |
@@ -2211,13 +2210,13 @@ class XIVProxy(proxy.IBMStorageProxy): | |
return fc_targets | |
@proxy._trace_time | |
- def _define_iscsi(self, host_bunch): | |
+ def _define_iscsi(self, host_info): | |
"""Add iscsi ports.""" | |
- if host_bunch.initiator: | |
+ if host_info['initiator']: | |
LOG.debug("adding iscsi") | |
self._call_xiv_xcli( | |
- "host_add_port", host=host_bunch.name, | |
- iscsi_name=host_bunch.initiator) | |
+ "host_add_port", host=host_info['name'], | |
+ iscsi_name=host_info['initiator']) | |
else: | |
msg = _LE("No iSCSI initiator found!") | |
LOG.error(msg) | |
@@ -2229,7 +2228,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
LOG.debug("send event SERVICE_STARTED") | |
service_start_evnt_prop = { | |
"openstack_version": self.meta.openstack_version, | |
- "pool_name": self.storage_info[storage.FLAG_KEYS.storage_pool]} | |
+ "pool_name": self.storage_info[storage.FLAG_KEYS['storage_pool']]} | |
ev_mgr = events.EventsManager(self.ibm_storage_cli, | |
OPENSTACK_PRODUCT_NAME, | |
self.full_version) | |
@@ -2242,7 +2241,7 @@ class XIVProxy(proxy.IBMStorageProxy): | |
compute_host_name = socket.getfqdn() | |
vol_attach_evnt_prop = { | |
"openstack_version": self.meta.openstack_version, | |
- "pool_name": self.storage_info[storage.FLAG_KEYS.storage_pool], | |
+ "pool_name": self.storage_info[storage.FLAG_KEYS['storage_pool']], | |
"compute_hostname": compute_host_name} | |
ev_mgr = events.EventsManager(self.ibm_storage_cli, | |
@@ -2400,11 +2399,11 @@ class XIVProxy(proxy.IBMStorageProxy): | |
if not backend_id or backend_id == strings.PRIMARY_BACKEND_ID: | |
if self._get_management_ips(): | |
address = [e.strip(" ") for e in self.storage_info[ | |
- storage.FLAG_KEYS.management_ips].split(",")] | |
+ storage.FLAG_KEYS['management_ips']].split(",")] | |
else: | |
- address = self.storage_info[storage.FLAG_KEYS.address] | |
- user = self.storage_info[storage.FLAG_KEYS.user] | |
- password = self.storage_info[storage.FLAG_KEYS.password] | |
+ address = self.storage_info[storage.FLAG_KEYS['address']] | |
+ user = self.storage_info[storage.FLAG_KEYS['user']] | |
+ password = self.storage_info[storage.FLAG_KEYS['password']] | |
else: | |
params = self._get_target_params(backend_id) | |
if not params: |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment