Skip to content

Instantly share code, notes, and snippets.

@patrick-east
Created May 5, 2016 18:50
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save patrick-east/e6b4d0dc69ae357c00bcfe802dc1cc81 to your computer and use it in GitHub Desktop.
# Copyright (c) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common import credentials_factory as common_creds
from tempest import config
from pure_scale_tempest_plugin.services import manage_snapshot_client
from pure_scale_tempest_plugin.services import replication_client
CONF = config.CONF
class Manager:
    """Aggregates the Pure scale plugin service clients for a tempest run."""

    def __init__(self, base_manager):
        """Build plugin clients from an existing tempest client manager.

        :param base_manager: a tempest clients.Manager providing the auth
            provider and default request parameters.
        """
        client_args = {
            'service': CONF.volume.catalog_type,
            'region': CONF.volume.region or CONF.identity.region,
            'endpoint_type': CONF.volume.endpoint_type,
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout,
        }
        client_args.update(base_manager.default_params)
        auth = base_manager.auth_provider
        self.manage_snapshot_client = (
            manage_snapshot_client.ManageSnapshotClient(auth, **client_args))
        # The replication client additionally takes the failover timeout.
        client_args['failover_timeout'] = CONF.pure_scale.failover_timeout
        self.replication_client = replication_client.ReplicationVolumesClient(
            auth, **client_args)
# Copyright (c) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.lib.common import rest_client
from tempest.services.volume.base import base_snapshots_client
class ManageSnapshotClient(base_snapshots_client.BaseSnapshotsClient):
    """Snapshot client extended with the os-snapshot-manage API."""

    def __init__(self, auth_provider, service, region,
                 default_volume_size=1, **kwargs):
        self.api_version = "v2"
        super(ManageSnapshotClient, self).__init__(
            auth_provider, service, region, **kwargs)
        self.default_volume_size = default_volume_size

    def manage_snapshot(self, volume_id, snapshot_ref):
        """Bring an existing backend snapshot under Cinder management.

        :param volume_id: id of the Cinder volume that owns the snapshot.
        :param snapshot_ref: backend-specific reference dict identifying
            the existing snapshot.
        :returns: ResponseBody wrapping the managed snapshot.
        """
        request = json.dumps({
            'snapshot': {
                'volume_id': volume_id,
                'ref': snapshot_ref,
            }
        })
        resp, resp_body = self.post('os-snapshot-manage', request)
        return rest_client.ResponseBody(resp, json.loads(resp_body))
# Copyright (c) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time

from oslo_log import log

from tempest import config
from tempest.lib import exceptions
from tempest.lib.common import rest_client
from tempest.services.volume.base import base_volumes_client
CONF = config.CONF
LOG = log.getLogger(__name__)
class ReplicationVolumesClient(base_volumes_client.BaseVolumesClient):
    """Volumes client with helpers for cinder replication v2.1 failover."""

    def __init__(self, auth_provider, service, region,
                 failover_timeout=1800, **kwargs):
        """:param failover_timeout: max seconds to wait for a failover."""
        self.api_version = "v2"
        self.failover_timeout = failover_timeout
        super(ReplicationVolumesClient, self).__init__(
            auth_provider, service, region, **kwargs)

    def get_capabilities(self, host):
        """Return the backend capabilities reported for a cinder host."""
        url = '/capabilities/%s' % host
        resp, body = self.get(url)
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def _get_services(self):
        # Raw list of all cinder services known to the API.
        resp, body = self.get('/os-services')
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return body['services']

    def get_replication_status(self, host):
        """Return the replication_status of *host*, or None if unknown."""
        for service in self._get_services():
            if service['host'] == host:
                return service.get('replication_status', None)
        return None

    def list_volume_services(self):
        """Return only the cinder-volume services."""
        return [service for service in self._get_services()
                if service['binary'] == 'cinder-volume']

    def failover_host(self, host, secondary_id=None):
        """Request a replication failover of *host*.

        :param secondary_id: optional replication backend id; pass
            'default' to fail back to the primary backend.
        """
        body = {'host': host}
        if secondary_id:
            body['backend_id'] = secondary_id
        resp, body = self.put('/os-services/failover_host', json.dumps(body))
        self.expected_success(202, resp.status)
        return rest_client.ResponseBody(resp, body)

    def wait_for_failover(self, host):
        """Poll until *host* leaves the 'failing-over' state.

        :raises exceptions.TimeoutException: if the host is still failing
            over after ``self.failover_timeout`` seconds.
        """
        start_time = time.time()
        status = self.get_replication_status(host)
        while status == 'failing-over':
            time.sleep(self.build_interval)
            status = self.get_replication_status(host)
            elapsed = time.time() - start_time
            if elapsed > self.failover_timeout:
                # Bug fix: the original referenced an unimported
                # 'exceptions' module (NameError on timeout) and reported
                # self.build_timeout instead of the failover timeout
                # actually being enforced.
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for failover of host %s, '
                           'status is %s.' %
                           (self.failover_timeout, host, status))
                raise exceptions.TimeoutException(message)
        # Note: the original re-fetched the status once more after the
        # loop and discarded it; that redundant API call is removed.
# Copyright (c) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import purestorage
import time
from oslo_log import log
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
from pure_scale_tempest_plugin import clients_scale
from pure_scale_tempest_plugin.services import manage_snapshot_client
from pure_scale_tempest_plugin.services import replication_client
CONF = config.CONF
LOG = log.getLogger(__name__)
MAX_VOLUME_COUNT = 2
MAX_SNAPSHOT_COUNT = 2
class TestReplicationv2_1(base.BaseVolumeAdminTest):
    """Exercise cinder replication v2.1 failover against a FlashArray."""

    @classmethod
    def setup_clients(cls):
        cls._api_version = 2
        super(TestReplicationv2_1, cls).setup_clients()
        manager = clients_scale.Manager(cls.os_adm)
        cls.replication_client = manager.replication_client
        # Direct array connection used to create out-of-band snapshots.
        cls.flash_array = purestorage.FlashArray(
            CONF.pure_scale.san_ip,
            api_token=CONF.pure_scale.api_token
        )

    @classmethod
    def resource_setup(cls):
        super(TestReplicationv2_1, cls).resource_setup()
        # Create our replication-enabled volume type.
        vol_type_name = data_utils.rand_name('Volume-type-repl')
        cls.volume_type = cls.volume_types_client.create_volume_type(
            name=vol_type_name)['volume_type']
        extra_specs = {'replication_enabled': '<is> True'}
        cls.volume_types_client.create_volume_type_extra_specs(
            cls.volume_type['id'], extra_specs)
        # Create test volumes
        for i in range(MAX_VOLUME_COUNT):
            cls.create_volume(volume_type=cls.volume_type['id'])
        # Create the snapshots, we're going to put them all on a single
        # volume. Cinder won't know about them, they are only on the
        # FlashArray.
        cls.snap_vol = cls.volumes[0]
        vols = cls.flash_array.list_volumes()
        cls.snap_vol_name = None
        for vol in vols:
            if cls.snap_vol['id'] in vol['name']:
                cls.snap_vol_name = vol['name']
                break
        for i in range(MAX_SNAPSHOT_COUNT):
            suffix = 'scale-test-bulk-' + str(i)
            cls.flash_array.create_snapshot(cls.snap_vol_name, suffix=suffix)

    @classmethod
    def resource_cleanup(cls):
        # Our base class will cleanup volumes and snapshots
        super(TestReplicationv2_1, cls).resource_cleanup()
        # Delete the volume type
        cls.volume_types_client.delete_volume_type(cls.volume_type['id'])
        vols = cls.flash_array.list_volumes()
        for vol in vols:
            try:
                cls.flash_array.eradicate_volume(vol['name'])
            except purestorage.PureHTTPError:
                # Best effort: the volume may already be gone.
                pass

    def test_failover_replication(self):
        """Make sure that failover can succeed with full volumes."""
        # Pick a host with replication enabled to fail over.
        c_vols = self.replication_client.list_volume_services()
        self.assertIsNotNone(c_vols)
        repl_host = None
        for c_vol in c_vols:
            if c_vol.get('replication_status', None) == 'enabled':
                repl_host = c_vol['host']
                break  # the first replication-enabled host is enough
        self.assertIsNotNone(repl_host)
        # Start the failover
        self.replication_client.failover_host(repl_host)
        self.replication_client.wait_for_failover(repl_host)
        repl_status = self.replication_client.get_replication_status(repl_host)
        self.assertEqual(repl_status, 'failed-over')
        # TODO(patrick): We should, in the future, check each volume to make
        # sure they were all correctly failed over, this test is focused only
        # on making sure that we don't hit a timeout or are unable to complete
        # the driver operation successfully.
        # Put ourselves back to non-failed-over
        self.replication_client.failover_host(repl_host, 'default')
        self.replication_client.wait_for_failover(repl_host)
        repl_status = self.replication_client.get_replication_status(repl_host)
        # Bug fix: the original asserted the stale loop variable 'status',
        # which trivially passed and never verified the fail-back result.
        self.assertEqual(repl_status, 'enabled')
# Copyright (c) 2016 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import purestorage
import time
from oslo_log import log
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
from pure_scale_tempest_plugin import clients_scale
from pure_scale_tempest_plugin.services import manage_snapshot_client
from pure_scale_tempest_plugin.services import replication_client
CONF = config.CONF
LOG = log.getLogger(__name__)
MAX_VOLUME_COUNT = 2
MAX_SNAPSHOT_COUNT = 2
class TestManageSnapshot(base.BaseVolumeAdminTest):
    """Exercise os-snapshot-manage with many pre-existing array snapshots."""

    @classmethod
    def setup_clients(cls):
        cls._api_version = 2
        super(TestManageSnapshot, cls).setup_clients()
        manager = clients_scale.Manager(cls.os_adm)
        cls.replication_client = manager.replication_client
        cls.manage_snapshot_client = manager.manage_snapshot_client
        # Direct array connection used to create out-of-band snapshots.
        cls.flash_array = purestorage.FlashArray(
            CONF.pure_scale.san_ip,
            api_token=CONF.pure_scale.api_token
        )

    @classmethod
    def resource_setup(cls):
        super(TestManageSnapshot, cls).resource_setup()
        # Snapshots managed into Cinder by tests; cleaned up here rather
        # than by the base class.
        cls.admin_snapshots = []
        # Create test volumes
        for i in range(MAX_VOLUME_COUNT):
            cls.create_volume()
        # Create the snapshots, we're going to put them all on a single
        # volume. Cinder won't know about them, they are only on the
        # FlashArray.
        cls.snap_vol = cls.volumes[0]
        vols = cls.flash_array.list_volumes()
        cls.snap_vol_name = None
        for vol in vols:
            if cls.snap_vol['id'] in vol['name']:
                cls.snap_vol_name = vol['name']
                break
        for i in range(MAX_SNAPSHOT_COUNT):
            suffix = 'scale-test-bulk-' + str(i)
            cls.flash_array.create_snapshot(cls.snap_vol_name, suffix=suffix)

    @classmethod
    def resource_cleanup(cls):
        # Delete the snapshots this class managed into Cinder, then poll
        # until they are all reported gone.
        for snapshot in cls.admin_snapshots:
            cls.manage_snapshot_client.delete_snapshot(snapshot['id'])
        while cls.admin_snapshots:
            time.sleep(1)
            cls.admin_snapshots = [
                snap for snap in cls.admin_snapshots if
                not cls.manage_snapshot_client.is_resource_deleted(snap['id'])
            ]
        # Our base class will cleanup volumes and snapshots
        super(TestManageSnapshot, cls).resource_cleanup()
        vols = cls.flash_array.list_volumes()
        for vol in vols:
            try:
                cls.flash_array.eradicate_volume(vol['name'])
            except purestorage.PureHTTPError:
                # Best effort: the volume may already be gone.
                pass

    def test_manage_snapshot(self):
        """Manage a snapshot on a volume with max snapshots.

        The driver needs to iterate through all of the snaps, make sure
        we aren't timing out or choking on the huge list of snapshots
        in the response from the API.
        """
        # First pick a snapshot we want to manage
        snap = self.flash_array.get_volume(self.snap_vol_name, snap=True)[-1]
        # Now manage it...
        snap_ref = {'name': snap['name']}
        snapshot = self.manage_snapshot_client.manage_snapshot(
            self.snap_vol['id'],
            snap_ref
        )['snapshot']
        # Bug fix: the original only guarded the cleanup registration with
        # 'if snapshot:' and then dereferenced snapshot['id'] anyway, which
        # would crash with a TypeError on a falsy result. Fail cleanly
        # instead.
        self.assertTrue(snapshot)
        # Add to our class snapshots so it will be cleaned up correctly
        self.admin_snapshots.append(snapshot)
        self.manage_snapshot_client.wait_for_snapshot_status(snapshot['id'],
                                                             'available')
        resp = self.manage_snapshot_client.show_snapshot(snapshot['id'])
        snapshot = resp['snapshot']
        self.assertEqual(snapshot['status'], 'available')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment