commit e1e289ca8b12e3f6dfd9982d2cc5946104283ca7 Author: andrei.perepiolkin Date: Mon May 11 20:01:30 2020 +0300 Add Cinder driver for Open-E JovianDSS data storage Added support for the Open-E JovianDSS data storage. The driver supports the Open-E disaster recovery feature and cascade volume deletion in addition to all required functions. Implements: bp open-e-joviandss-driver Change-Id: I72513ec2100f1f4cb7e3fdb57e69243aa01dba38 diff --git a/cinder/opts.py b/cinder/opts.py index b63a9d2..24b88c2 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -131,6 +131,8 @@ from cinder.volume.drivers.nexenta import options as \ cinder_volume_drivers_nexenta_options from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble +from cinder.volume.drivers.open_e import options as \ + cinder_volume_drivers_open_e_options from cinder.volume.drivers.prophetstor import options as \ cinder_volume_drivers_prophetstor_options from cinder.volume.drivers import pure as cinder_volume_drivers_pure @@ -260,6 +262,9 @@ def list_opts(): instorage_mcs_opts, cinder_volume_drivers_inspur_instorage_instorageiscsi. instorage_mcs_iscsi_opts, + cinder_volume_drivers_open_e_options.jdss_connection_opts, + cinder_volume_drivers_open_e_options.jdss_iscsi_opts, + cinder_volume_drivers_open_e_options.jdss_volume_opts, cinder_volume_drivers_sandstone_sdsdriver.sds_opts, cinder_volume_drivers_veritas_access_veritasiscsi.VA_VOL_OPTS, cinder_volume_manager.volume_manager_opts, diff --git a/cinder/tests/unit/volume/drivers/open_e/__init__.py b/cinder/tests/unit/volume/drivers/open_e/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder/tests/unit/volume/drivers/open_e/test_iscsi.py b/cinder/tests/unit/volume/drivers/open_e/test_iscsi.py new file mode 100644 index 0000000..c2c8d7b --- /dev/null +++ b/cinder/tests/unit/volume/drivers/open_e/test_iscsi.py @@ -0,0 +1,1461 @@ +# Copyright (c) 2020 Open-E, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +import re +from unittest import mock + +from oslo_utils import units as o_units + +from cinder import context +from cinder import exception +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.tests.unit import test +from cinder.volume.drivers.open_e import iscsi +from cinder.volume.drivers.open_e.jovian_common import exception as jexc +from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom + + +UUID_1 = '12345678-1234-1234-1234-000000000001' +UUID_2 = '12345678-1234-1234-1234-000000000002' +UUID_3 = '12345678-1234-1234-1234-000000000003' +UUID_4 = '12345678-1234-1234-1234-000000000004' + +CONFIG_OK = { + 'san_hosts': ['192.168.0.2'], + 'san_api_port': 82, + 'driver_use_ssl': 'https', + 'jovian_rest_send_repeats': 3, + 'jovian_recovery_delay': 60, + 'jovian_user': 'admin', + 'jovian_password': 'password', + 'jovian_ignore_tpath': [], + 'target_port': 3260, + 'jovian_pool': 'Pool-0', + 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', + 'chap_password_len': 12, + 'san_thin_provision': False, + 'jovian_block_size': '128K' +} + +CONFIG_BLOCK_SIZE = { + 'san_hosts': ['192.168.0.2'], + 'san_api_port': 82, + 'driver_use_ssl': 'https', + 'jovian_rest_send_repeats': 3, + 'jovian_recovery_delay': 60, + 'jovian_user': 'admin', + 'jovian_password': 'password', + 'jovian_ignore_tpath': [], + 'target_port': 3260, + 'jovian_pool': 'Pool-0', + 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', + 'chap_password_len': 12, + 'san_thin_provision': False, + 'jovian_block_size': '64K' +} + +CONFIG_BACKEND_NAME = { + 'san_hosts': ['192.168.0.2'], + 'san_api_port': 82, + 'driver_use_ssl': 'https', + 'jovian_rest_send_repeats': 3, + 'jovian_recovery_delay': 60, + 'jovian_user': 'admin', + 'jovian_password': 'password', + 'jovian_ignore_tpath': [], + 'target_port': 3260, + 'jovian_pool': 'Pool-0', + 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', + 'chap_password_len': 12, + 'san_thin_provision': False, + 'volume_backend_name': 'JovianDSS', + 'reserved_percentage': 10, + 'jovian_block_size': '128K' +} + +CONFIG_MULTI_HOST = { + 'san_hosts': ['192.168.0.2', '192.168.0.3'], + 'san_api_port': 82, + 'driver_use_ssl': 'https', + 'jovian_rest_send_repeats': 3, + 'jovian_recovery_delay': 60, + 'jovian_user': 'admin', + 'jovian_password': 'password', + 'jovian_ignore_tpath': [], + 'target_port': 3260, + 'jovian_pool': 'Pool-0', + 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', + 'chap_password_len': 12, + 'san_thin_provision': False, + 'volume_backend_name': 'JovianDSS', + 'reserved_percentage': 10, + 'jovian_block_size': '128K' +} + +SNAPSHOTS_CASCADE_1 = [ + {"name": jcom.sname(UUID_1), + "clones": "Pool-0/" + jcom.sname(UUID_1)}, + {"name": jcom.sname(UUID_2), + "clones": "Pool-0/" + jcom.sname(UUID_2)}, + {"name": jcom.sname(UUID_3), + "clones": "Pool-0/" + jcom.sname(UUID_3)}] + +SNAPSHOTS_CASCADE_2 = [ + {"name": jcom.sname(UUID_1), + "clones": "Pool-0/" + jcom.sname(UUID_1)}, + {"name": jcom.vname(UUID_2), + "clones": "Pool-0/" + jcom.vname(UUID_2)}, + {"name": jcom.sname(UUID_3), + "clones": "Pool-0/" + jcom.sname(UUID_3)}] + +SNAPSHOTS_CASCADE_3 = [ + {"name": jcom.vname(UUID_4), + "clones": "Pool-0/" + jcom.vname(UUID_4)}] + +SNAPSHOTS_EMPTY = [] + +SNAPSHOTS_CLONE = [ + {"name": jcom.vname(UUID_1), + "clones": "Pool-0/" + jcom.vname(UUID_1)}] + +SNAPSHOTS_GARBAGE = [ + {"name": jcom.sname(UUID_1), + "clones": "Pool-0/" + jcom.vname(UUID_2)}, + {"name": jcom.sname(UUID_2), + "clones": ""}] + +SNAPSHOTS_RECURSIVE_1 = [ + {"name": 
jcom.sname(UUID_1), + "clones": "Pool-0/" + jcom.sname(UUID_1)}, + {"name": jcom.sname(UUID_2), + "clones": "Pool-0/" + jcom.hidden(UUID_2)}] + +SNAPSHOTS_RECURSIVE_CHAIN_1 = [ + {"name": jcom.sname(UUID_3), + "clones": "Pool-0/" + jcom.hidden(UUID_3)}] + +SNAPSHOTS_RECURSIVE_CHAIN_2 = [ + {"name": jcom.vname(UUID_2), + "clones": "Pool-0/" + jcom.hidden(UUID_2)}] + + +def get_jdss_exceptions(): + + out = [jexc.JDSSException(reason="Testing"), + jexc.JDSSRESTException(request="ra request", reason="Testing"), + jexc.JDSSRESTProxyException(host="test_host", reason="Testing"), + jexc.JDSSResourceNotFoundException(res="test_resource"), + jexc.JDSSVolumeNotFoundException(volume="test_volume"), + jexc.JDSSSnapshotNotFoundException(snapshot="test_snapshot"), + jexc.JDSSResourceExistsException(res="test_resource"), + jexc.JDSSSnapshotExistsException(snapshot="test_snapshot"), + jexc.JDSSVolumeExistsException(volume="test_volume"), + jexc.JDSSSnapshotIsBusyException(snapshot="test_snapshot")] + + return out + + +def fake_safe_get(value): + return CONFIG_OK[value] + + +class TestOpenEJovianDSSDriver(test.TestCase): + + def get_driver(self, config): + ctx = context.get_admin_context() + + cfg = mock.Mock() + cfg.append_config_values.return_value = None + cfg.safe_get = lambda val: config[val] + + jdssd = iscsi.JovianISCSIDriver() + jdssd.configuration = cfg + jdssd.do_setup(ctx) + jdssd.ra = mock.Mock() + return jdssd, ctx + + def start_patches(self, patches): + for p in patches: + p.start() + + def stop_patches(self, patches): + for p in patches: + p.stop() + + def test_get_provider_location(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + host = CONFIG_OK["san_hosts"][0] + port = CONFIG_OK["target_port"] + target_name = CONFIG_OK["target_prefix"] + UUID_1 + patches = [mock.patch.object( + jdssd.ra, + "get_active_host", + return_value=host)] + out = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + self.start_patches(patches) + self.assertEqual(out, jdssd._get_provider_location(UUID_1)) + self.stop_patches(patches) + + def test_create_volume(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vol.size = 1 + host = CONFIG_OK["san_hosts"][0] + port = CONFIG_OK["target_port"] + + target_name = CONFIG_OK["target_prefix"] + UUID_1 + + jdssd.ra.create_lun.return_value = None + jdssd.ra.get_active_host.return_value = host + + ret = jdssd.create_volume(vol) + + location = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + self.assertEqual(location, ret['provider_location']) + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) + + def test_create_volume_small_block(self): + + jdssd, ctx = self.get_driver(CONFIG_BLOCK_SIZE) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vol.size = 1 + host = CONFIG_OK["san_hosts"][0] + port = CONFIG_OK["target_port"] + + target_name = CONFIG_OK["target_prefix"] + UUID_1 + vname = jcom.vname(UUID_1) + + jdssd.ra.create_lun.return_value = None + jdssd.ra.get_active_host.return_value = host + + ret = jdssd.create_volume(vol) + + jdssd.ra.create_lun.assert_called_once_with( + vname, o_units.Gi, sparse=False, block_size="64K") + + location = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + self.assertEqual(location, 
ret['provider_location']) + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) + + def test_hide_object(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vname = jcom.vname(UUID_1) + + jdssd.ra.modify_lun.return_value = None + jdssd._hide_object(vname) + + hidden_volume = {"name": jcom.hidden(UUID_1)} + jdssd.ra.modify_lun.assert_called_once_with(vname, hidden_volume) + + def test_clean_garbage_snapshots(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + o_vname = jcom.vname(UUID_1) + o_snaps = SNAPSHOTS_GARBAGE.copy() + + jdssd.ra.delete_snapshot.return_value = None + jdssd._clean_garbage_snapshots(o_vname, o_snaps) + jdssd.ra.delete_snapshot.assert_called_once_with( + o_vname, + SNAPSHOTS_GARBAGE[1]["name"]) + # Test exception handling + for exc in get_jdss_exceptions(): + o_snaps = SNAPSHOTS_GARBAGE.copy() + jdssd.ra.delete_snapshot.side_effect = exc + try: + jdssd._clean_garbage_snapshots(o_vname, o_snaps) + except Exception as err: + self.assertIsInstance(err, exception.VolumeBackendAPIException) + + def test_cascade_volume_delete_snapshots(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + o_vname = jcom.vname(UUID_1) + + # Volume with 3 snapshots and no descendants + # We should delete snapshots and then call for volume deletion + o_snaps = SNAPSHOTS_CASCADE_1.copy() + + jdssd.ra.modify_lun.return_value = None + jdssd.ra.delete_snapshot.return_value = None + jdssd.ra.get_snapshots.side_effect = [ + SNAPSHOTS_EMPTY, + SNAPSHOTS_EMPTY, + SNAPSHOTS_EMPTY] + + with mock.patch.object(jdssd, "_gc_delete", return_value=None) as gc: + jdssd._cascade_volume_delete(o_vname, o_snaps) + gc.assert_called_once_with(o_vname) + delete_snapshot_expected = [ + mock.call(o_vname, + SNAPSHOTS_CASCADE_1[0]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True), + mock.call(o_vname, + SNAPSHOTS_CASCADE_1[1]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True), + mock.call(o_vname, + SNAPSHOTS_CASCADE_1[2]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True)] + jdssd.ra.delete_snapshot.assert_has_calls(delete_snapshot_expected) + + def test_cascade_volume_delete_with_clone(self): + # Volume with 2 snapshots and 1 clone + # We should delete snapshots and then call for volume hiding + jdssd, ctx = self.get_driver(CONFIG_OK) + o_vname = jcom.vname(UUID_1) + o_snaps = SNAPSHOTS_CASCADE_2.copy() + + jdssd.ra.modify_lun.return_value = None + jdssd.ra.delete_snapshot.return_value = None + jdssd.ra.get_snapshots.side_effect = [ + SNAPSHOTS_EMPTY, + SNAPSHOTS_EMPTY] + + fake_gc = mock.Mock() + fake_hide_object = mock.Mock() + gc = mock.patch.object(jdssd, "_gc_delete", new=fake_gc) + gc.start() + hide = mock.patch.object(jdssd, "_hide_object", new=fake_hide_object) + hide.start() + jdssd._cascade_volume_delete(o_vname, o_snaps) + jdssd._hide_object.assert_called_once_with(o_vname) + hide.stop() + jdssd._gc_delete.assert_not_called() + gc.stop() + delete_snapshot_expected = [ + mock.call(o_vname, + SNAPSHOTS_CASCADE_2[0]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True), + mock.call(o_vname, + SNAPSHOTS_CASCADE_2[2]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True)] + jdssd.ra.delete_snapshot.assert_has_calls(delete_snapshot_expected) + + def 
test_cascade_volume_delete_snapshot_clone(self): + # Volume with 3 snapshots and 1 clone of a snapshot + # We should delete childless snapshots + # and then call for volume deletion + + jdssd, ctx = self.get_driver(CONFIG_OK) + o_vname = jcom.vname(UUID_1) + o_snaps = SNAPSHOTS_CASCADE_1.copy() + + jdssd.ra.modify_lun.return_value = None + jdssd.ra.delete_snapshot.return_value = None + jdssd.ra.get_snapshots.side_effect = [ + SNAPSHOTS_EMPTY, + SNAPSHOTS_CASCADE_3.copy(), + SNAPSHOTS_EMPTY] + get_snapshots = [ + mock.call(SNAPSHOTS_CASCADE_1[0]['name']), + mock.call(SNAPSHOTS_CASCADE_1[1]['name']), + mock.call(SNAPSHOTS_CASCADE_1[2]['name']) + ] + hide_object_expected = [ + mock.call(SNAPSHOTS_CASCADE_1[1]["name"]), + mock.call(o_vname)] + + fake_gc = mock.Mock() + fake_hide_object = mock.Mock() + gc = mock.patch.object(jdssd, "_gc_delete", new=fake_gc) + gc.start() + hide = mock.patch.object(jdssd, "_hide_object", new=fake_hide_object) + hide.start() + jdssd._cascade_volume_delete(o_vname, o_snaps) + jdssd._hide_object.assert_has_calls(hide_object_expected) + hide.stop() + jdssd._gc_delete.assert_not_called() + gc.stop() + jdssd.ra.get_snapshots.assert_has_calls(get_snapshots) + + delete_snapshot_expected = [ + mock.call(o_vname, + SNAPSHOTS_CASCADE_2[0]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True), + mock.call(o_vname, + SNAPSHOTS_CASCADE_2[2]["name"], + recursively_children=True, + recursively_dependents=True, + force_umount=True)] + jdssd.ra.delete_snapshot.assert_has_calls(delete_snapshot_expected) + + def test_delete_volume_with_snapshots(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vname = jcom.vname(UUID_1) + jdssd.ra.get_snapshots.side_effect = [SNAPSHOTS_CASCADE_1.copy()] + + patches = [mock.patch.object(jdssd, "_cascade_volume_delete"), + mock.patch.object(jdssd, "_gc_delete"), + mock.patch.object(jdssd, "_hide_object")] + + self.start_patches(patches) + + jdssd.delete_volume(vol, cascade=False) + jdssd._gc_delete.assert_not_called() + jdssd._cascade_volume_delete.assert_not_called() + jdssd._hide_object.assert_called_once_with(vname) + + self.stop_patches(patches) + + jdssd.ra.get_snapshots.assert_called_once_with(vname) + + def test_delete_volume_without_snapshots(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vname = jcom.vname(UUID_1) + jdssd.ra.get_snapshots.side_effect = [SNAPSHOTS_EMPTY.copy()] + + patches = [mock.patch.object(jdssd, "_cascade_volume_delete"), + mock.patch.object(jdssd, "_gc_delete"), + mock.patch.object(jdssd, "_hide_object")] + + self.start_patches(patches) + + jdssd.delete_volume(vol, cascade=False) + jdssd._gc_delete.assert_called_once_with(vname) + jdssd._cascade_volume_delete.assert_not_called() + jdssd._hide_object.assert_not_called() + + self.stop_patches(patches) + + jdssd.ra.get_snapshots.assert_called_once_with(vname) + + def test_delete_volume_exceptions(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + + patches = [mock.patch.object(jdssd, "_cascade_volume_delete"), + mock.patch.object(jdssd, "_gc_delete"), + mock.patch.object(jdssd, "_hide_object")] + + self.start_patches(patches) + + for exc in get_jdss_exceptions(): + jdssd.ra.get_snapshots.side_effect = exc + try: + jdssd.delete_volume(vol, cascade=False) + except Exception as err: + self.assertIsInstance(err, exception.VolumeBackendAPIException) + + 
jdssd._gc_delete.assert_not_called() + jdssd._cascade_volume_delete.assert_not_called() + jdssd._hide_object.assert_not_called() + + self.stop_patches(patches) + + def test_gc_delete_not_clone(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + + get_lun_resp = {'vscan': None, + 'full_name': 'Pool-0/' + jcom.vname(UUID_1), + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + + jdssd.ra.get_lun.return_value = get_lun_resp + jdssd.ra.delete_lun.return_value = None + + patches = [mock.patch.object(jdssd, "_delete_back_recursively")] + + self.start_patches(patches) + + jdssd._gc_delete(jcom.vname(UUID_1)) + + jdssd._delete_back_recursively.assert_not_called() + jdssd.ra.delete_lun.assert_called_once_with(jcom.vname(UUID_1)) + + self.stop_patches(patches) + + def test_gc_delete_is_clone(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + origin = "Pool-0/{vorig}@{sorig}".format(vorig=jcom.vname(UUID_1), + sorig=jcom.vname(UUID_2)) + + get_lun_resp = {'origin': origin, + 'vscan': None, + 'full_name': 'Pool-0/' + jcom.vname(UUID_2), + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': True, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + + jdssd.ra.get_lun.return_value = get_lun_resp + jdssd.ra.delete_lun.return_value = None + + patches = [mock.patch.object(jdssd, "_delete_back_recursively")] + + self.start_patches(patches) + + jdssd._gc_delete(jcom.vname(UUID_1)) + + jdssd._delete_back_recursively.assert_called_once_with( + jcom.vname(UUID_1), jcom.vname(UUID_2)) + jdssd.ra.delete_lun.assert_not_called() + + self.stop_patches(patches) + + def test_delete_back_recursively_res_active(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + opvname = jcom.vname(UUID_1) + opsname = jcom.sname(UUID_2) + + jdssd._delete_back_recursively(opvname, opsname) + + jdssd.ra.delete_snapshot.assert_called_once_with( + opvname, + opsname, + recursively_children=True, + recursively_dependents=True, + force_umount=True) + + def test_delete_back_recursively_hidden_have_snapshots(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + opvname = jcom.hidden(UUID_1) + opsname = jcom.sname(UUID_2) + + jdssd.ra.get_snapshots.return_value = SNAPSHOTS_RECURSIVE_1.copy() + + jdssd._delete_back_recursively(opvname, opsname) + + jdssd.ra.delete_snapshot.assert_called_once_with( + opvname, + opsname, + recursively_children=True, + recursively_dependents=True, + force_umount=True) + + def test_delete_back_recursively_single_snapshot(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + opvname = jcom.hidden(UUID_2) + opsname = jcom.sname(UUID_3) + + jdssd.ra.get_snapshots.side_effect = [ + SNAPSHOTS_RECURSIVE_CHAIN_1.copy(), + SNAPSHOTS_RECURSIVE_CHAIN_2.copy()] + + origin = "Pool-0/{vorig}@{sorig}".format(vorig=jcom.vname(UUID_1), + sorig=jcom.vname(UUID_2)) + get_lun_resp = {'origin': origin, + 'vscan': None, + 'full_name': 'Pool-0/' + jcom.hidden(UUID_2), + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': True, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + 
jdssd.ra.get_lun.return_value = get_lun_resp + jdssd._delete_back_recursively(opvname, opsname) + + jdssd.ra.delete_snapshot.assert_called_once_with( + jcom.vname(UUID_1), + jcom.vname(UUID_2), + recursively_children=True, + recursively_dependents=True, + force_umount=True) + + get_snapshots_expected = [mock.call(opvname)] + jdssd.ra.get_snapshots.assert_has_calls(get_snapshots_expected) + + def test_extend_volume(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + jdssd.ra.extend_lun.return_value = None + jdssd.extend_volume(vol, 2) + + jdssd.ra.extend_lun.assert_called_once_with( + jcom.vname(UUID_1), + 2147483648) + + def test_extend_volume_exceptions(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + + for exc in get_jdss_exceptions(): + jdssd.ra.extend_lun.side_effect = exc + try: + jdssd.extend_volume(vol, 2) + except Exception as err: + self.assertIsInstance(err, exception.VolumeBackendAPIException) + + def test_clone_object(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + origin = jcom.vname(UUID_1) + clone = jcom.vname(UUID_2) + + jdssd.ra.create_snapshot.return_value = None + jdssd.ra.create_volume_from_snapshot.return_value = None + + jdssd._clone_object(origin, clone) + jdssd.ra.create_snapshot.assert_called_once_with(origin, clone) + jdssd.ra.create_volume_from_snapshot.assert_called_once_with( + clone, + clone, + origin, + sparse=False) + + def test_clone_object_dne(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + calls = [] + origin = jcom.vname(UUID_1) + clone = jcom.vname(UUID_2) + calls.append(mock.call(origin, clone)) + + jdssd.ra.create_snapshot.side_effect = ( + jexc.JDSSResourceNotFoundException(res=origin)) + + self.assertRaises(exception.VolumeNotFound, + jdssd._clone_object, origin, clone) + + origin = jcom.sname(UUID_1) + calls.append(mock.call(origin, clone)) + + self.assertRaises(exception.SnapshotNotFound, + jdssd._clone_object, origin, clone) + jdssd.ra.create_snapshot.assert_has_calls(calls) + + def test_clone_object_exists(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + origin = jcom.vname(UUID_1) + clone = jcom.vname(UUID_2) + + jdssd.ra.create_snapshot.side_effect = ( + jexc.JDSSSnapshotExistsException(snapshot=clone)) + + jdssd.ra.delete_snapshot.side_effect = ( + jexc.JDSSSnapshotIsBusyException(snapshot=clone)) + + self.assertRaises(exception.Duplicate, + jdssd._clone_object, origin, clone) + jdssd.ra.delete_snapshot.assert_called_once_with(origin, clone) + jdssd.ra.create_snapshot.assert_called_once_with(origin, clone) + + def test_clone_object_volume_exists(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + origin = jcom.vname(UUID_1) + clone = jcom.vname(UUID_2) + + jdssd.ra.create_snapshot.return_value = None + jdssd.ra.create_volume_from_snapshot.side_effect = ( + jexc.JDSSVolumeExistsException(volume=clone)) + + self.assertRaises(exception.Duplicate, + jdssd._clone_object, origin, clone) + jdssd.ra.create_snapshot.assert_called_once_with(origin, clone) + jdssd.ra.create_volume_from_snapshot.assert_called_once_with( + clone, + clone, + origin, + sparse=CONFIG_OK['san_thin_provision']) + + def test_create_cloned_volume(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + origin_vname = jcom.vname(UUID_1) + clone_vname = jcom.vname(UUID_2) + + orig_vol = fake_volume.fake_volume_obj(ctx) + orig_vol.id = UUID_1 + orig_vol.size = 1 + + clone_vol = fake_volume.fake_volume_obj(ctx) + clone_vol.id = UUID_2 + clone_vol.size = 1 + + host = CONFIG_OK["san_hosts"][0] + port 
= CONFIG_OK["target_port"] + target_name = CONFIG_OK["target_prefix"] + UUID_2 + + location = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + + patches = [ + mock.patch.object(jdssd, "_clone_object", return_value=None), + mock.patch.object(jdssd, "extend_volume", return_value=None), + mock.patch.object( + jdssd, + "_get_provider_location", + return_value=location), + mock.patch.object( + jdssd, + "_get_provider_auth", + return_value=cred_format)] + + jdssd.ra.get_lun.return_value = { + 'vscan': None, + 'full_name': 'Pool-0/' + UUID_2, + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + + self.start_patches(patches) + + ret = jdssd.create_cloned_volume(clone_vol, orig_vol) + + jdssd.extend_volume.assert_not_called() + jdssd._clone_object.assert_called_once_with(origin_vname, clone_vname) + self.stop_patches(patches) + + jdssd.ra.get_lun.assert_called_once_with(jcom.vname(clone_vol.id)) + self.assertEqual(location, ret['provider_location']) + self.assertEqual(cred_format, ret['provider_auth']) + + def test_create_volume_from_snapshot(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + origin_sname = jcom.sname(UUID_1) + + clone_vname = jcom.vname(UUID_2) + + orig_snap = fake_snapshot.fake_snapshot_obj(ctx) + orig_snap.id = UUID_1 + + clone_vol = fake_volume.fake_volume_obj(ctx) + clone_vol.id = UUID_2 + clone_vol.size = 1 + + host = CONFIG_OK["san_hosts"][0] + port = CONFIG_OK["target_port"] + target_name = CONFIG_OK["target_prefix"] + UUID_2 + + location = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + + patches = [ + mock.patch.object(jdssd, "_clone_object", return_value=None), + mock.patch.object(jdssd, "extend_volume", return_value=None), + mock.patch.object( + jdssd, + "_get_provider_location", + return_value=location), + mock.patch.object( + jdssd, + "_get_provider_auth", + return_value=cred_format)] + + jdssd.ra.get_lun.return_value = { + 'vscan': None, + 'full_name': 'Pool-0/' + UUID_2, + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + + self.start_patches(patches) + + ret = jdssd.create_volume_from_snapshot(clone_vol, orig_snap) + + jdssd.extend_volume.assert_not_called() + jdssd._clone_object.assert_called_once_with(origin_sname, clone_vname) + self.stop_patches(patches) + + jdssd.ra.get_lun.assert_called_once_with(jcom.vname(clone_vol.id)) + self.assertEqual(location, ret['provider_location']) + self.assertEqual(cred_format, ret['provider_auth']) + + def test_create_volume_from_snapshot_extend(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + origin_sname = jcom.sname(UUID_1) + + clone_vname = jcom.vname(UUID_2) + + orig_snap = fake_snapshot.fake_snapshot_obj(ctx) + orig_snap.id = UUID_1 + + clone_vol = fake_volume.fake_volume_obj(ctx) + clone_vol.id = UUID_2 + clone_vol.size = 2 + + host = CONFIG_OK["san_hosts"][0] + port = 
CONFIG_OK["target_port"] + target_name = CONFIG_OK["target_prefix"] + UUID_2 + + location = '{host}:{port},1 {name} 0'.format( + host=host, + port=port, + name=target_name + ) + + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + + patches = [ + mock.patch.object(jdssd, "_clone_object", return_value=None), + mock.patch.object(jdssd, "extend_volume", return_value=None), + mock.patch.object( + jdssd, + "_get_provider_location", + return_value=location), + mock.patch.object( + jdssd, + "_get_provider_auth", + return_value=cred_format)] + + jdssd.ra.get_lun.return_value = { + 'vscan': None, + 'full_name': 'Pool-0/' + UUID_2, + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'} + + self.start_patches(patches) + + ret = jdssd.create_volume_from_snapshot(clone_vol, orig_snap) + + jdssd.extend_volume.assert_called_once_with(clone_vol, clone_vol.size) + jdssd._clone_object.assert_called_once_with(origin_sname, clone_vname) + self.stop_patches(patches) + + jdssd.ra.get_lun.assert_called_once_with(jcom.vname(clone_vol.id)) + self.assertEqual(location, ret['provider_location']) + self.assertEqual(cred_format, ret['provider_auth']) + + def test_create_snapshot(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + vname = jcom.vname(UUID_1) + + sname = jcom.sname(UUID_2) + + snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_2) + snap.volume_id = UUID_1 + + patches = [ + mock.patch.object(jdssd, "_clone_object", return_value=None)] + + self.start_patches(patches) + + jdssd.create_snapshot(snap) + + jdssd._clone_object.assert_called_once_with(vname, sname) + self.stop_patches(patches) + + def test_delete_snapshot_no_child(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + sname = jcom.sname(UUID_2) + + snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_2) + + jdssd.ra.get_snapshots.return_value = SNAPSHOTS_EMPTY + patches = [ + mock.patch.object( + jdssd, + "_clean_garbage_snapshots", + return_value=SNAPSHOTS_EMPTY), + mock.patch.object(jdssd, "_clone_object", return_value=None), + mock.patch.object(jdssd, "_hide_object", return_value=None), + mock.patch.object(jdssd, "_gc_delete", return_value=None)] + + self.start_patches(patches) + + jdssd.create_snapshot(snap) + + jdssd.delete_snapshot(snap) + jdssd._gc_delete.assert_called_once_with(sname) + jdssd._hide_object.assert_not_called() + jdssd._clean_garbage_snapshots.assert_called_once_with( + sname, + SNAPSHOTS_EMPTY) + self.stop_patches(patches) + + def test_delete_snapshot_has_clone(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + sname = jcom.sname(UUID_2) + + snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_2) + + jdssd.ra.get_snapshots.return_value = SNAPSHOTS_EMPTY + patches = [ + mock.patch.object( + jdssd, + "_clean_garbage_snapshots", + return_value=SNAPSHOTS_CLONE), + mock.patch.object(jdssd, "_clone_object", return_value=None), + mock.patch.object(jdssd, "_hide_object", return_value=None), + mock.patch.object(jdssd, "_gc_delete", return_value=None)] + + self.start_patches(patches) + + jdssd.create_snapshot(snap) + + jdssd.delete_snapshot(snap) + jdssd._gc_delete.assert_not_called() + jdssd._hide_object.assert_called_once_with(sname) + jdssd._clean_garbage_snapshots.assert_called_once_with( + sname, + SNAPSHOTS_EMPTY) + self.stop_patches(patches) + + def 
test_local_path(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_1) + + self.assertRaises(NotImplementedError, jdssd.local_path, vol) + + def test_get_provider_auth(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + auth = jdssd._get_provider_auth() + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=CONFIG_OK['chap_password_len']) + self.assertIsNotNone(re.match(cred_format, auth)) + + def test_get_provider_auth_long(self): + long_pass_config = CONFIG_OK.copy() + long_pass_config['chap_password_len'] = 16 + jdssd, ctx = self.get_driver(long_pass_config) + + auth = jdssd._get_provider_auth() + cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " + "[0-9,a-z,A-Z]{{{pass_len}}}").format( + name_len=8, + pass_len=16) + self.assertIsNotNone(re.match(cred_format, auth)) + + def test_create_export(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + + patches = [ + mock.patch.object( + jdssd, + "_create_target_volume", + return_value=None), + mock.patch.object( + jdssd, + "_get_provider_location", + return_value='provider_location')] + + self.start_patches(patches) + + ret = jdssd.create_export(ctx, vol, "connector") + jdssd._create_target_volume.assert_called_once_with(vol) + + self.stop_patches(patches) + + self.assertEqual('provider_location', ret["provider_location"]) + + def test_ensure_export(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + + patches = [ + mock.patch.object( + jdssd, + "_ensure_target_volume", + return_value=None)] + + self.start_patches(patches) + + jdssd.ensure_export(ctx, vol) + jdssd._ensure_target_volume.assert_called_once_with(vol) + + self.stop_patches(patches) + + def test_remove_export(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + vol = fake_volume.fake_volume_obj(ctx, id=UUID_1) + + patches = [ + mock.patch.object( + jdssd, + "_remove_target_volume", + return_value=None)] + + self.start_patches(patches) + + jdssd.remove_export(ctx, vol) + jdssd._remove_target_volume.assert_called_once_with(vol) + + self.stop_patches(patches) + + def test_update_volume_stats(self): + jdssd, ctx = self.get_driver(CONFIG_BACKEND_NAME) + + location_info = 'JovianISCSIDriver:192.168.0.2:Pool-0' + correct_out = { + 'vendor_name': 'Open-E', + 'driver_version': "1.0.0", + 'storage_protocol': 'iSCSI', + 'total_capacity_gb': 100, + 'free_capacity_gb': 50, + 'reserved_percentage': 10, + 'volume_backend_name': CONFIG_BACKEND_NAME['volume_backend_name'], + 'QoS_support': False, + 'location_info': location_info + } + jdssd.ra.get_pool_stats.return_value = { + 'size': 100 * o_units.Gi, + 'available': 50 * o_units.Gi} + jdssd.ra.get_active_host.return_value = CONFIG_OK['san_hosts'] + jdssd._update_volume_stats() + + self.assertEqual(correct_out, jdssd._stats) + + def test_create_target(self): + + jdssd, ctx = self.get_driver(CONFIG_BACKEND_NAME) + + target_name = CONFIG_OK['target_prefix'] + UUID_1 + jdssd.ra.create_target.return_value = None + jdssd._create_target(target_name, use_chap=True) + + jdssd.ra.create_target.assert_called_once_with( + target_name, use_chap=True) + + jdssd.ra.create_target.side_effect = jexc.JDSSResourceExistsException( + res=target_name) + + self.assertRaises(exception.Duplicate, + jdssd._create_target, + target_name, + use_chap=True) + + def test_attach_target_volume(self): + + jdssd, ctx = 
self.get_driver(CONFIG_BACKEND_NAME) + + target_name = CONFIG_OK['target_prefix'] + UUID_1 + vname = jcom.vname(UUID_1) + + jdssd.ra.attach_target_vol.return_value = None + jdssd.ra.delete_target.return_value = None + + jdssd._attach_target_volume(target_name, vname) + + jdssd.ra.attach_target_vol.assert_called_once_with( + target_name, vname) + jdssd.ra.delete_target.assert_not_called() + + ex = jexc.JDSSResourceExistsException(res=target_name) + jdssd.ra.attach_target_vol.side_effect = ex + + self.assertRaises(exception.VolumeBackendAPIException, + jdssd._attach_target_volume, + target_name, + vname) + jdssd.ra.delete_target.assert_called_once_with(target_name) + + def test_set_target_credentials(self): + jdssd, ctx = self.get_driver(CONFIG_BACKEND_NAME) + + target_name = CONFIG_BACKEND_NAME['target_prefix'] + UUID_1 + cred = {'name': 'user_name', 'password': '123456789012'} + + jdssd.ra.create_target_user.return_value = None + jdssd.ra.delete_target.return_value = None + + jdssd._set_target_credentials(target_name, cred) + + jdssd.ra.create_target_user.assert_called_once_with( + target_name, cred) + jdssd.ra.delete_target.assert_not_called() + + ex = jexc.JDSSResourceExistsException(res=target_name) + jdssd.ra.create_target_user.side_effect = ex + + self.assertRaises(exception.VolumeBackendAPIException, + jdssd._set_target_credentials, + target_name, + cred) + jdssd.ra.delete_target.assert_called_once_with(target_name) + + def test_create_target_volume(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + cred = {'name': 'user_name', 'password': '123456789012'} + + patches = [ + mock.patch.object(jdssd, "_create_target"), + mock.patch.object(jdssd, "_attach_target_volume"), + mock.patch.object(jdssd, "_set_target_credentials")] + + self.start_patches(patches) + jdssd._create_target_volume(vol) + jdssd._create_target.assert_called_once_with(target_name, True) + jdssd._attach_target_volume.assert_called_once_with( + target_name, jcom.vname(UUID_1)) + jdssd._set_target_credentials.assert_called_once_with( + target_name, cred) + self.stop_patches(patches) + + def test_ensure_target_volume(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + cred = {'name': 'user_name'} + + patches = [ + mock.patch.object(jdssd, "_create_target"), + mock.patch.object(jdssd, "_attach_target_volume"), + mock.patch.object(jdssd, "_set_target_credentials")] + + jdssd.ra.is_target.return_value = True + jdssd.ra.is_target_lun.return_value = True + jdssd.ra.get_target_user.return_value = [cred] + + self.start_patches(patches) + + jdssd._ensure_target_volume(vol) + + jdssd.ra.is_target.assert_called_once_with(target_name) + jdssd.ra.is_target_lun.assert_called_once_with(target_name, UUID_1) + + jdssd.ra.get_target_user.assert_called_once_with(target_name) + + jdssd.ra.delete_target_user.assert_not_called() + jdssd._set_target_credentials.assert_not_called() + self.stop_patches(patches) + + def test_ensure_target_volume_not_attached(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vname = jcom.vname(UUID_1) + target_name = CONFIG_OK['target_prefix'] + UUID_1 + 
vol.provider_auth = 'chap user_name 123456789012' + + cred = {'name': 'user_name'} + + patches = [ + mock.patch.object(jdssd, "_create_target"), + mock.patch.object(jdssd, "_attach_target_volume"), + mock.patch.object(jdssd, "_set_target_credentials")] + + jdssd.ra.is_target.return_value = True + jdssd.ra.is_target_lun.return_value = False + jdssd.ra.get_target_user.return_value = [cred] + + self.start_patches(patches) + + jdssd._ensure_target_volume(vol) + + jdssd.ra.is_target.assert_called_once_with(target_name) + jdssd.ra.is_target_lun.assert_called_once_with(target_name, UUID_1) + + jdssd._attach_target_volume.assert_called_once_with( + target_name, vname) + jdssd.ra.get_target_user.assert_called_once_with(target_name) + + jdssd.ra.delete_target_user.assert_not_called() + jdssd._set_target_credentials.assert_not_called() + self.stop_patches(patches) + + def test_ensure_target_volume_no_target(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + cred = {'name': 'user_name'} + + patches = [ + mock.patch.object(jdssd, "_create_target_volume"), + mock.patch.object(jdssd, "_attach_target_volume"), + mock.patch.object(jdssd, "_set_target_credentials")] + + jdssd.ra.is_target.return_value = False + jdssd.ra.get_target_user.return_value = cred['name'] + + self.start_patches(patches) + + jdssd._ensure_target_volume(vol) + + jdssd.ra.is_target.assert_called_once_with(target_name) + jdssd._create_target_volume.assert_called_once_with(vol) + + jdssd.ra.is_target_lun.assert_not_called() + + self.stop_patches(patches) + + def test_remove_target_volume(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + + jdssd.ra.detach_target_vol.return_value = None + jdssd.ra.delete_target.return_value = None + + jdssd._remove_target_volume(vol) + + jdssd.ra.detach_target_vol.assert_called_once_with(target_name, + jcom.vname(UUID_1)) + jdssd.ra.delete_target.assert_called_with(target_name) + + def test_remove_target_volume_no_target(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + + jdssd.ra.detach_target_vol.return_value = None + jdssd.ra.detach_target_vol.side_effect = ( + jexc.JDSSResourceNotFoundException(res=target_name)) + jdssd.ra.delete_target.return_value = None + + jdssd._remove_target_volume(vol) + + jdssd.ra.detach_target_vol.assert_called_once_with(target_name, + jcom.vname(UUID_1)) + jdssd.ra.delete_target.assert_called_with(target_name) + + def test_remove_target_volume_fail_to_detach(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + + jdssd.ra.detach_target_vol.side_effect = ( + jexc.JDSSRESTException(reason='running test', request='test')) + jdssd.ra.delete_target.return_value = None + + self.assertRaises(exception.VolumeBackendAPIException, + jdssd._remove_target_volume, vol) + + jdssd.ra.detach_target_vol.assert_called_once_with( + target_name, jcom.vname(UUID_1)) + jdssd.ra.delete_target.assert_not_called() + + def test_remove_target_volume_fail_to_delete(self): + + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = 
fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + target_name = CONFIG_OK['target_prefix'] + UUID_1 + + jdssd.ra.detach_target_vol.return_value = None + jdssd.ra.delete_target.side_effect = ( + jexc.JDSSRESTException(reason='running test', request='test')) + + self.assertRaises(exception.VolumeBackendAPIException, + jdssd._remove_target_volume, vol) + + jdssd.ra.detach_target_vol.assert_called_once_with(target_name, + jcom.vname(UUID_1)) + jdssd.ra.delete_target.assert_called_with(target_name) + + def test_get_iscsi_properties(self): + jdssd, ctx = self.get_driver(CONFIG_OK) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + connector = {'multipath': True} + + target_name = CONFIG_OK['target_prefix'] + UUID_1 + ret = jdssd._get_iscsi_properties(vol, connector) + expected = {'auth_method': 'chap', + 'auth_password': '123456789012', + 'auth_username': 'user_name', + 'target_discovered': False, + 'target_iqns': [target_name], + 'target_lun': 0, + 'target_luns': [0], + 'target_portals': ['192.168.0.2:3260']} + self.assertEqual(expected, ret) + + def test_get_iscsi_properties_multipath(self): + jdssd, ctx = self.get_driver(CONFIG_MULTI_HOST) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + connector = {'multipath': True} + + target_name = CONFIG_OK['target_prefix'] + UUID_1 + ret = jdssd._get_iscsi_properties(vol, connector) + expected = {'auth_method': 'chap', + 'auth_password': '123456789012', + 'auth_username': 'user_name', + 'target_discovered': False, + 'target_iqns': [target_name, target_name], + 'target_lun': 0, + 'target_luns': [0, 0], + 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} + self.assertEqual(expected, ret) + + def test_initialize_connection(self): + + jdssd, ctx = self.get_driver(CONFIG_MULTI_HOST) + + vol = fake_volume.fake_volume_obj(ctx) + vol.id = UUID_1 + vol.provider_auth = 'chap user_name 123456789012' + + connector = {'multipath': True, 'ip': '172.16.0.2'} + + target_name = CONFIG_OK['target_prefix'] + UUID_1 + + properties = {'auth_method': 'chap', + 'auth_password': '123456789012', + 'auth_username': 'user_name', + 'target_discovered': False, + 'target_iqns': [target_name, target_name], + 'target_lun': 0, + 'target_luns': [0, 0], + 'target_portals': ['192.168.0.2:3260', + '192.168.0.3:3260']} + + con_info = { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + jdssd.ra.activate_target.return_value = None + + ret = jdssd.initialize_connection(vol, connector) + + self.assertEqual(con_info, ret) diff --git a/cinder/tests/unit/volume/drivers/open_e/test_rest.py b/cinder/tests/unit/volume/drivers/open_e/test_rest.py new file mode 100644 index 0000000..4dba899 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/open_e/test_rest.py @@ -0,0 +1,997 @@ +# Copyright (c) 2020 Open-E, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from oslo_utils import units as o_units + +from cinder import context +from cinder import exception +from cinder.tests.unit import test +from cinder.volume.drivers.open_e.jovian_common import exception as jexc +from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom +from cinder.volume.drivers.open_e.jovian_common import rest + +UUID_1 = '12345678-1234-1234-1234-000000000001' +UUID_2 = '12345678-1234-1234-1234-000000000002' + +CONFIG_OK = { + 'san_hosts': ['192.168.0.2'], + 'san_api_port': 82, + 'driver_use_ssl': 'https', + 'jovian_rest_send_repeats': 3, + 'jovian_recovery_delay': 60, + 'san_login': 'admin', + 'san_password': 'password', + 'jovian_ignore_tpath': [], + 'target_port': 3260, + 'jovian_pool': 'Pool-0', + 'iscsi_target_prefix': 'iqn.2020-04.com.open-e.cinder:', + 'chap_password_len': 12, + 'san_thin_provision': False, + 'jovian_block_size': '128K' +} + + +def fake_safe_get(value): + return CONFIG_OK[value] + + +class TestOpenEJovianRESTAPI(test.TestCase): + + def get_rest(self, config): + ctx = context.get_admin_context() + + cfg = mock.Mock() + cfg.append_config_values.return_value = None + cfg.safe_get = lambda val: config[val] + cfg.get = lambda val, default: config[val] + jdssr = rest.JovianRESTAPI(cfg) + jdssr.rproxy = mock.Mock() + return jdssr, ctx + + def start_patches(self, patches): + for p in patches: + p.start() + + def stop_patches(self, patches): + for p in patches: + p.stop() + + def test_get_active_host(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + + jrest.rproxy.get_active_host.return_value = "test_data" + + ret = jrest.get_active_host() + + self.assertEqual("test_data", ret) + + def test_is_pool_exists(self): + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'code': 200, + 'error': None} + + jrest.rproxy.pool_request.return_value = resp + self.assertTrue(jrest.is_pool_exists()) + + err = {'errorid': 12} + resp = {'code': 404, + 'error': err} + jrest.rproxy.pool_request.return_value = resp + self.assertFalse(jrest.is_pool_exists()) + + pool_request_expected = [ + mock.call('GET', ''), + mock.call('GET', '')] + + jrest.rproxy.pool_request.assert_has_calls(pool_request_expected) + + def get_iface_info(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = { + 'code': 200, + 'error': None} + jrest.rproxy.pool_request.return_value = resp + self.assertTrue(jrest.is_pool_exists()) + + def test_get_luns(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': [{ + 'vscan': None, + 'full_name': 'pool-0/' + UUID_1, + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'}], + 'error': None, + 'code': 200} + jrest.rproxy.pool_request.return_value = resp + self.assertEqual(resp['data'], jrest.get_luns()) + + err = {'errorid': 12, 'message': 'test failure'} + resp = {'code': 404, + 'data': None, + 'error': err} + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSRESTException, jrest.get_luns) + + get_luns_expected = [ + mock.call('GET', "/volumes"), + mock.call('GET', "/volumes")] + + jrest.rproxy.pool_request.assert_has_calls(get_luns_expected) + + def test_create_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': { + 'vscan': None, + 'full_name': 'pool-0/' + jcom.vname(UUID_1), + 'userrefs': None, + 'primarycache': 'all', + 'logbias': 'latency', + 'creation': '1591543140', + 'sync': 
'always', + 'is_clone': False, + 'dedup': 'off', + 'sharenfs': None, + 'receive_resume_token': None, + 'volsize': '1073741824'}, + 'error': None, + 'code': 200} + + jbody = { + 'name': jcom.vname(UUID_1), + 'size': "1073741824", + 'sparse': False + } + + jbody_sparse = { + 'name': jcom.vname(UUID_1), + 'size': "1073741824", + 'sparse': True + } + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.create_lun(jcom.vname(UUID_1), o_units.Gi)) + + err = {'errno': '5', 'message': 'test failure'} + resp = {'code': 404, + 'data': None, + 'error': err} + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSRESTException, + jrest.create_lun, + jcom.vname(UUID_1), + o_units.Gi, + sparse=True) + + addr = "/volumes" + create_lun_expected = [ + mock.call('POST', addr, json_data=jbody), + mock.call('POST', addr, json_data=jbody_sparse)] + + jrest.rproxy.pool_request.assert_has_calls(create_lun_expected) + + def test_extend_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': None, + 'error': None, + 'code': 201} + + jbody = { + 'size': "2147483648", + } + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.extend_lun(jcom.vname(UUID_1), 2 * o_units.Gi)) + + err = {'message': 'test failure'} + resp = {'code': 500, + 'data': None, + 'error': err} + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSRESTException, + jrest.extend_lun, + jcom.vname(UUID_1), + 2 * o_units.Gi) + + addr = "/volumes/" + jcom.vname(UUID_1) + create_lun_expected = [ + mock.call('PUT', addr, json_data=jbody), + mock.call('PUT', addr, json_data=jbody)] + + jrest.rproxy.pool_request.assert_has_calls(create_lun_expected) + + def test_is_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': { + "vscan": None, + "full_name": "pool-0/" + jcom.vname(UUID_1), + "userrefs": None, + "primarycache": "all", + "logbias": "latency", + "creation": "1591543140", + "sync": "always", + "is_clone": False, + "dedup": "off", + "sharenfs": None, + "receive_resume_token": None, + "volsize": "1073741824"}, + 'error': None, + 'code': 200} + + jrest.rproxy.pool_request.return_value = resp + self.assertTrue(jrest.is_lun(jcom.vname(UUID_1))) + + err = {'errno': 1, + 'message': ('Zfs resource: Pool-0/' + jcom.vname(UUID_1) + + ' not found in this collection.')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertEqual(False, jrest.is_lun(jcom.vname(UUID_1))) + + jrest.rproxy.pool_request.side_effect = ( + jexc.JDSSRESTProxyException(host='test_host', reason='test')) + + self.assertRaises(jexc.JDSSRESTProxyException, + jrest.is_lun, + 'v_' + UUID_1) + + def test_get_lun(self): + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': {"vscan": None, + "full_name": "pool-0/v_" + UUID_1, + "userrefs": None, + "primarycache": "all", + "logbias": "latency", + "creation": "1591543140", + "sync": "always", + "is_clone": False, + "dedup": "off", + "sharenfs": None, + "receive_resume_token": None, + "volsize": "1073741824"}, + 'error': None, + 'code': 200} + + jrest.rproxy.pool_request.return_value = resp + self.assertEqual(resp['data'], jrest.get_lun('v_' + UUID_1)) + + err = {'errno': 1, + 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + + ' not found in this collection.')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.get_lun, + 'v_' + UUID_1) + + 
+ err = {'errno': 10, + 'message': ('Test error')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSException, jrest.get_lun, 'v_' + UUID_1) + + def test_modify_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': None, + 'error': None, + 'code': 201} + req = {'name': 'v_' + UUID_2} + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.modify_lun('v_' + UUID_1, prop=req)) + + err = {'errno': 1, + 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + + ' not found in this collection.')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.modify_lun, + 'v_' + UUID_1, + prop=req) + + err = {'errno': 10, + 'message': ('Test error')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertRaises(jexc.JDSSException, + jrest.modify_lun, + 'v_' + UUID_1, + prop=req) + + addr = "/volumes/v_" + UUID_1 + modify_lun_expected = [ + mock.call('PUT', addr, json_data=req), + mock.call('PUT', addr, json_data=req), + mock.call('PUT', addr, json_data=req)] + + jrest.rproxy.pool_request.assert_has_calls(modify_lun_expected) + + def test_make_readonly_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + resp = {'data': None, + 'error': None, + 'code': 201} + req = {'property_name': 'readonly', 'property_value': 'on'} + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.modify_lun('v_' + UUID_1, prop=req)) + + addr = "/volumes/v_" + UUID_1 + modify_lun_expected = [mock.call('PUT', addr, json_data=req)] + + jrest.rproxy.pool_request.assert_has_calls(modify_lun_expected) + + def test_delete_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + + # Delete OK + resp = {'data': None, + 'error': None, + 'code': 204} + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.delete_lun('v_' + UUID_1)) + addr = "/volumes/v_" + UUID_1 + delete_lun_expected = [mock.call('DELETE', addr)] + jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) + # No volume to delete + err = {'errno': 1, + 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + + ' not found in this collection.')} + resp = {'code': 500, + 'data': None, + 'error': err} + + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone(jrest.delete_lun('v_' + UUID_1)) + + delete_lun_expected += [mock.call('DELETE', addr)] + + jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) + + # Volume has snapshots + msg = ("cannot destroy 'Pool-0/{vol}': volume has children\nuse '-r'" + " to destroy the following datasets:\nPool-0/{vol}@s1") + msg = msg.format(vol='v_' + UUID_1) + + url = "http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/" + UUID_1 + err = {"class": "zfslib.wrap.zfs.ZfsCmdError", + "errno": 1000, + "message": msg, + "url": url} + + resp = { + 'code': 500, + 'data': None, + 'error': err} + + delete_lun_expected += [mock.call('DELETE', addr)] + jrest.rproxy.pool_request.return_value = resp + self.assertRaises( + exception.VolumeIsBusy, + jrest.delete_lun, + 'v_' + UUID_1) + + jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) + + def test_delete_lun_args(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + addr = "/volumes/v_" + UUID_1 + + # Delete OK + 
resp = {'data': None, + 'error': None, + 'code': 204} + req = {'recursively_children': True, + 'recursively_dependents': True, + 'force_umount': True} + + delete_lun_expected = [mock.call('DELETE', addr, json_data=req)] + jrest.rproxy.pool_request.return_value = resp + self.assertIsNone( + jrest.delete_lun('v_' + UUID_1, + recursively_children=True, + recursively_dependents=True, + force_umount=True)) + + jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) + + def test_is_target(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets/{}'.format(tname) + data = {'incoming_users_active': True, + 'name': tname, + 'allow_ip': [], + 'outgoing_user': None, + 'active': True, + 'conflicted': False, + 'deny_ip': []} + + resp = {'data': data, + 'error': None, + 'code': 200} + + is_target_expected = [mock.call('GET', addr)] + jrest.rproxy.pool_request.return_value = resp + self.assertTrue(jrest.is_target(tname)) + + msg = "Target {} not exists.".format(tname) + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets/{target}") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port'], + target=tname) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + is_target_expected += [mock.call('GET', addr)] + jrest.rproxy.pool_request.return_value = resp + self.assertEqual(False, jrest.is_target(tname)) + + jrest.rproxy.pool_request.assert_has_calls(is_target_expected) + + def test_create_target(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # Create OK + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets' + data = {'incoming_users_active': True, + 'name': tname, + 'allow_ip': [], + 'outgoing_user': None, + 'active': True, + 'conflicted': False, + 'deny_ip': []} + + resp = {'data': data, + 'error': None, + 'code': 201} + + req = {'name': tname, + 'active': True, + 'incoming_users_active': True} + + jrest.rproxy.pool_request.return_value = resp + create_target_expected = [mock.call('POST', addr, json_data=req)] + self.assertIsNone(jrest.create_target(tname)) + + # Target exists + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets' + data = {'incoming_users_active': True, + 'name': tname, + 'allow_ip': [], + 'outgoing_user': None, + 'active': True, + 'conflicted': False, + 'deny_ip': []} + + resp = {'data': data, + 'error': None, + 'code': 201} + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port']) + msg = "Target with name {} is already present on Pool-0.".format(tname) + + err = {"class": "opene.san.target.base.iscsi.TargetNameConflictError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 409} + + jrest.rproxy.pool_request.return_value = resp + create_target_expected += [mock.call('POST', addr, json_data=req)] + + self.assertRaises(jexc.JDSSResourceExistsException, + jrest.create_target, tname) + + # Unknown error + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = "/san/iscsi/targets" + + resp = {'data': data, + 'error': None, + 'code': 500} + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port']) + + msg = "Target with name {} faced some fatal failure.".format(tname) + + 
err = {"class": "some test error", + "message": msg, + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + create_target_expected += [mock.call('POST', addr, json_data=req)] + + self.assertRaises(jexc.JDSSException, + jrest.create_target, tname) + + jrest.rproxy.pool_request.assert_has_calls(create_target_expected) + + def test_delete_target(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # Delete OK + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets/{}'.format(tname) + + resp = {'data': None, + 'error': None, + 'code': 204} + + jrest.rproxy.pool_request.return_value = resp + delete_target_expected = [mock.call('DELETE', addr)] + self.assertIsNone(jrest.delete_target(tname)) + + # Delete no such target + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port']) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": "Target {} not exists.".format(tname), + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + delete_target_expected += [mock.call('DELETE', addr)] + + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.delete_target, tname) + # Delete unknown error + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + delete_target_expected += [mock.call('DELETE', addr)] + + self.assertRaises(jexc.JDSSException, + jrest.delete_target, tname) + + jrest.rproxy.pool_request.assert_has_calls(delete_target_expected) + + def test_create_target_user(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # Modify OK + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets/{}/incoming-users'.format(tname) + + chap_cred = {"name": "chapuser", + "password": "123456789012"} + resp = {'data': None, + 'error': None, + 'code': 201} + + jrest.rproxy.pool_request.return_value = resp + expected = [mock.call('POST', addr, json_data=chap_cred)] + self.assertIsNone(jrest.create_target_user(tname, chap_cred)) + + # No such target + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port']) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": "Target {} not exists.".format(tname), + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + expected += [mock.call('POST', addr, json_data=chap_cred)] + + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.create_target_user, tname, chap_cred) + # Unknown error + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + expected += [mock.call('POST', addr, json_data=chap_cred)] + + self.assertRaises(jexc.JDSSException, + jrest.create_target_user, tname, chap_cred) + + jrest.rproxy.pool_request.assert_has_calls(expected) + + def test_get_target_user(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # Get OK + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + addr = '/san/iscsi/targets/{}/incoming-users'.format(tname) + + chap_users = {"name": 
"chapuser"} + + resp = {'data': chap_users, + 'error': None, + 'code': 200} + + jrest.rproxy.pool_request.return_value = resp + get_target_user_expected = [mock.call('GET', addr)] + self.assertEqual(chap_users, jrest.get_target_user(tname)) + + # No such target + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port']) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": "Target {} not exists.".format(tname), + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + get_target_user_expected += [mock.call('GET', addr)] + + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.get_target_user, tname) + # Unknown error + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + get_target_user_expected += [mock.call('GET', addr)] + + self.assertRaises(jexc.JDSSException, + jrest.get_target_user, tname) + + jrest.rproxy.pool_request.assert_has_calls(get_target_user_expected) + + def test_delete_target_user(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # Delete OK + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + user = "chapuser" + addr = '/san/iscsi/targets/{}/incoming-users/chapuser'.format(tname) + + resp = {'data': None, + 'error': None, + 'code': 204} + + jrest.rproxy.pool_request.return_value = resp + delete_target_user_expected = [mock.call('DELETE', addr)] + self.assertIsNone(jrest.delete_target_user(tname, user)) + + # No such user + + url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" + "san/iscsi/targets/{tname}/incoming-user/{chapuser}") + url = url.format(addr=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port'], + tname=tname, + chapuser=user) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": "User {} not exists.".format(user), + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + delete_target_user_expected += [mock.call('DELETE', addr)] + + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.delete_target_user, tname, user) + # Unknown error + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + delete_target_user_expected += [mock.call('DELETE', addr)] + + self.assertRaises(jexc.JDSSException, + jrest.delete_target_user, tname, user) + + jrest.rproxy.pool_request.assert_has_calls(delete_target_user_expected) + + def test_is_target_lun(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # lun present + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + vname = jcom.vname(UUID_1) + addr = '/san/iscsi/targets/{target}/luns/{lun}'.format( + target=tname, lun=vname) + data = { + "block_size": 512, + "device_handler": "vdisk_fileio", + "lun": 0, + "mode": "wt", + "name": vname, + "prod_id": "Storage", + "scsi_id": "99e2c883331edf87"} + resp = {'data': data, + 'error': None, + 'code': 200} + + jrest.rproxy.pool_request.return_value = resp + is_target_lun_expected = [mock.call('GET', addr)] + self.assertTrue(jrest.is_target_lun(tname, vname)) + + url = "http://{ip}:{port}/api/v3/pools/Pool-0{addr}" + url = url.format(ip=CONFIG_OK['san_hosts'][0], + port=CONFIG_OK['san_api_port'], + 
tname=tname, + addr=addr) + msg = "volume name {lun} is not attached to target {target}" + msg = msg.format(lun=vname, target=tname) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + is_target_lun_expected += [mock.call('GET', addr)] + + self.assertEqual(False, jrest.is_target_lun(tname, vname)) + + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + is_target_lun_expected += [mock.call('GET', addr)] + + self.assertRaises(jexc.JDSSException, + jrest.is_target_lun, tname, vname) + + jrest.rproxy.pool_request.assert_has_calls(is_target_lun_expected) + + def test_attach_target_vol(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # attach ok + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + vname = jcom.vname(UUID_1) + + addr = '/san/iscsi/targets/{}/luns'.format(tname) + jbody = {"name": vname, "lun": 0} + + data = {"block_size": 512, + "device_handler": "vdisk_fileio", + "lun": 0, + "mode": "wt", + "name": vname, + "prod_id": "Storage", + "scsi_id": "99e2c883331edf87"} + + resp = {'data': data, + 'error': None, + 'code': 201} + + jrest.rproxy.pool_request.return_value = resp + attach_target_vol_expected = [ + mock.call('POST', addr, json_data=jbody)] + self.assertIsNone(jrest.attach_target_vol(tname, vname)) + + # lun attached already + url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) + msg = 'Volume /dev/Pool-0/{} is already used.'.format(vname) + err = {"class": "opene.exceptions.ItemConflictError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 409} + + jrest.rproxy.pool_request.return_value = resp + attach_target_vol_expected += [ + mock.call('POST', addr, json_data=jbody)] + self.assertRaises(jexc.JDSSResourceExistsException, + jrest.attach_target_vol, tname, vname) + + # no such target + url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) + msg = 'Target {} not exists.'.format(vname) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + attach_target_vol_expected += [ + mock.call('POST', addr, json_data=jbody)] + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.attach_target_vol, tname, vname) + + # error unknown + url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) + msg = 'Target {} not exists.'.format(vname) + + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 123} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + attach_target_vol_expected += [ + mock.call('POST', addr, json_data=jbody)] + self.assertRaises(jexc.JDSSException, + jrest.attach_target_vol, tname, vname) + jrest.rproxy.pool_request.assert_has_calls(attach_target_vol_expected) + + def test_detach_target_vol(self): + + jrest, ctx = self.get_rest(CONFIG_OK) + # detach target vol ok + tname = CONFIG_OK['iscsi_target_prefix'] + UUID_1 + vname = jcom.vname(UUID_1) + + addr = '/san/iscsi/targets/{tar}/luns/{vol}'.format( + tar=tname, vol=vname) + + resp = {'data': None, + 'error': None, + 'code': 204} + + jrest.rproxy.pool_request.return_value = resp + 
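+        # All REST tests follow one pattern: stub rproxy.pool_request with
+        # the next reply, call the method under test, and append the call
+        # that should have been issued, e.g.
+        #     expected += [mock.call('DELETE', addr)]
+        # A single assert_has_calls at the end verifies the order across all
+        # scenarios.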
detach_target_vol_expected = [ + mock.call('DELETE', addr)] + self.assertIsNone(jrest.detach_target_vol(tname, vname)) + + # no such target + url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) + msg = 'Target {} not exists.'.format(vname) + err = {"class": "opene.exceptions.ItemNotFoundError", + "message": msg, + "url": url} + + resp = {'data': None, + 'error': err, + 'code': 404} + + jrest.rproxy.pool_request.return_value = resp + detach_target_vol_expected += [ + mock.call('DELETE', addr)] + self.assertRaises(jexc.JDSSResourceNotFoundException, + jrest.detach_target_vol, tname, vname) + + # error unknown + url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) + msg = 'Target {} not exists.'.format(vname) + + err = {"class": "some test error", + "message": "test error message", + "url": url, + "errno": 125} + + resp = {'data': None, + 'error': err, + 'code': 500} + + jrest.rproxy.pool_request.return_value = resp + detach_target_vol_expected += [ + mock.call('DELETE', addr)] + self.assertRaises(jexc.JDSSException, + jrest.detach_target_vol, tname, vname) + jrest.rproxy.pool_request.assert_has_calls(detach_target_vol_expected) diff --git a/cinder/volume/drivers/open_e/__init__.py b/cinder/volume/drivers/open_e/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder/volume/drivers/open_e/iscsi.py b/cinder/volume/drivers/open_e/iscsi.py new file mode 100644 index 0000000..4e82bb4 --- /dev/null +++ b/cinder/volume/drivers/open_e/iscsi.py @@ -0,0 +1,975 @@ +# Copyright (c) 2020 Open-E, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""iSCSI volume driver for JovianDSS driver.""" +import math +import string + +from oslo_log import log as logging +from oslo_utils import units as o_units + +from cinder import exception +from cinder.i18n import _ +from cinder import interface +from cinder.volume import driver +from cinder.volume.drivers.open_e.jovian_common import exception as jexc +from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom +from cinder.volume.drivers.open_e.jovian_common import rest +from cinder.volume.drivers.open_e import options +from cinder.volume.drivers.san import san +from cinder.volume import volume_utils + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class JovianISCSIDriver(driver.ISCSIDriver): + """Executes volume driver commands on Open-E JovianDSS V7. + + Version history: + + .. 
code-block:: none + + 1.0.0 - Open-E JovianDSS driver with basic functionality + """ + + # ThirdPartySystems wiki page + CI_WIKI_NAME = "Open-E_JovianDSS_CI" + VERSION = "1.0.0" + + def __init__(self, *args, **kwargs): + super(JovianISCSIDriver, self).__init__(*args, **kwargs) + + self._stats = None + self._pool = 'Pool-0' + self.jovian_iscsi_target_portal_port = "3260" + self.jovian_target_prefix = 'iqn.2020-04.com.open-e.cinder:' + self.jovian_chap_pass_len = 12 + self.jovian_sparse = False + self.jovian_ignore_tpath = None + self.jovian_hosts = None + self.ra = None + + @property + def backend_name(self): + """Return backend name.""" + backend_name = None + if self.configuration: + backend_name = self.configuration.safe_get('volume_backend_name') + if not backend_name: + backend_name = self.__class__.__name__ + return backend_name + + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + self.configuration.append_config_values( + options.jdss_connection_opts) + self.configuration.append_config_values( + options.jdss_iscsi_opts) + self.configuration.append_config_values( + options.jdss_volume_opts) + self.configuration.append_config_values(san.san_opts) + + self._pool = self.configuration.safe_get('jovian_pool') + self.jovian_iscsi_target_portal_port = self.configuration.safe_get( + 'target_port') + + self.jovian_target_prefix = self.configuration.safe_get( + 'target_prefix') + self.jovian_chap_pass_len = self.configuration.safe_get( + 'chap_password_len') + self.block_size = ( + self.configuration.safe_get('jovian_block_size')) + self.jovian_sparse = ( + self.configuration.safe_get('san_thin_provision')) + self.jovian_ignore_tpath = self.configuration.get( + 'jovian_ignore_tpath', None) + self.jovian_hosts = self.configuration.safe_get( + 'san_hosts') + self.ra = rest.JovianRESTAPI(self.configuration) + + def check_for_setup_error(self): + """Verify that the pool exists.""" + if len(self.jovian_hosts) == 0: + msg = _("No hosts provided in configuration") + raise exception.VolumeDriverException(msg) + + if not self.ra.is_pool_exists(): + msg = (_("Unable to identify pool %s") % self._pool) + raise exception.VolumeDriverException(msg) + + def _get_target_name(self, volume_name): + """Return iSCSI target name to access volume.""" + return '%s%s' % (self.jovian_target_prefix, volume_name) + + def _get_active_ifaces(self): + """Return list of ip addreses for iSCSI connection""" + + return self.jovian_hosts + + def create_volume(self, volume): + """Create a volume. + + :param volume: volume reference + :return: model update dict for volume reference + """ + vname = jcom.vname(volume.id) + LOG.debug('creating volume %s.', vname) + + provider_location = self._get_provider_location(volume.id) + provider_auth = self._get_provider_auth() + + try: + self.ra.create_lun(vname, + volume.size * o_units.Gi, + sparse=self.jovian_sparse, + block_size=self.block_size) + + except jexc.JDSSException as ex: + LOG.error("Create volume error. 
Reason: %(err)s",
+                      {"err": ex})
+            raise exception.VolumeBackendAPIException(
+                _('Failed to create volume %s.') % volume.id)
+        ret = {}
+        if provider_auth is not None:
+            ret['provider_auth'] = provider_auth
+
+        ret['provider_location'] = provider_location
+
+        return ret
+
+    def _hide_object(self, vname):
+        """Mark volume/snapshot as hidden
+
+        :param vname: physical volume name
+        """
+        rename = {'name': jcom.hidden(vname)}
+        try:
+            self.ra.modify_lun(vname, rename)
+        except jexc.JDSSException as err:
+            msg = _('Failure in hiding {object}, err: {error},'
+                    ' object has to be removed manually')
+            emsg = msg.format(object=vname, error=err)
+            LOG.warning(emsg)
+            raise exception.VolumeBackendAPIException(emsg)
+
+    def _clean_garbage_snapshots(self, vname, snapshots):
+        """Delete physical snapshots that have no descendants"""
+        garbage = []
+        for snap in snapshots:
+            if snap['clones'] == '':
+                try:
+                    self.ra.delete_snapshot(vname, snap['name'])
+                except jexc.JDSSException as err:
+                    args = {'obj': jcom.idname(vname), 'err': err}
+                    msg = (_("Unable to clean garbage for "
+                             "%(obj)s: %(err)s") % args)
+                    raise exception.VolumeBackendAPIException(msg)
+                garbage.append(snap)
+        for snap in garbage:
+            snapshots.remove(snap)
+
+        return snapshots
+
+    def _cascade_volume_delete(self, o_vname, o_snaps):
+        """Delete volume, or hide it if it is busy
+
+        Goes over the snapshots and deletes them if possible;
+        calls for recursive volume deletion if the volume has no children.
+        """
+        vsnaps = []
+        deletable = True
+
+        for snap in o_snaps:
+            if jcom.is_snapshot(snap['name']):
+                vsnaps += [(snap['name'],
+                            jcom.full_name_volume(snap['clones']))]
+
+        active_vsnaps = [vs for vs in vsnaps if jcom.is_hidden(vs[1]) is False]
+
+        # If the volume has clones or hidden snapshots it should be hidden
+        if len(active_vsnaps) < len(o_snaps):
+            deletable = False
+
+        for vsnap in active_vsnaps:
+            psnap = []
+            try:
+                psnap = self.ra.get_snapshots(vsnap[1])
+            except jexc.JDSSException:
+                msg = (_('Failure in acquiring snapshot for %s.') % vsnap[1])
+                raise exception.VolumeBackendAPIException(msg)
+
+            try:
+                psnap = self._clean_garbage_snapshots(vsnap[1], psnap)
+            except exception.VolumeBackendAPIException as err:
+                msg = (_('Failure in cleaning garbage snapshots %(snap)s'
+                         ' for volume %(vol)s, %(err)s')
+                       % {'snap': psnap, 'vol': vsnap[1], 'err': err})
+                raise exception.VolumeBackendAPIException(msg)
+            if len(psnap) > 0:
+                deletable = False
+                self._hide_object(vsnap[1])
+            else:
+                try:
+                    self.ra.delete_snapshot(o_vname,
+                                            vsnap[0],
+                                            recursively_children=True,
+                                            recursively_dependents=True,
+                                            force_umount=True)
+                except jexc.JDSSException as err:
+                    LOG.warning('Failure during deletion of physical '
+                                'snapshot %s, err: %s', vsnap[0], err)
+                    msg = (_('Failure during deletion of virtual snapshot '
+                             '%s') % vsnap[1])
+                    raise exception.VolumeBackendAPIException(msg)
+
+        if deletable:
+            self._gc_delete(o_vname)
+        else:
+            self._hide_object(o_vname)
+
+    def delete_volume(self, volume, cascade=False):
+        """Delete volume
+
+        :param volume: volume reference
+        :param cascade: remove snapshots of a volume as well
+        """
+        vname = jcom.vname(volume.id)
+
+        LOG.debug('deleting volume %s', vname)
+
+        snapshots = None
+        try:
+            snapshots = self.ra.get_snapshots(vname)
+        except jexc.JDSSResourceNotFoundException:
+            LOG.debug('volume %s does not exist, it was already '
+                      'deleted', vname)
+            return
+        except jexc.JDSSException as err:
+            raise exception.VolumeBackendAPIException(err)
+
+        snapshots = self._clean_garbage_snapshots(vname, snapshots)
+
+        if cascade:
+            self._cascade_volume_delete(vname, snapshots)
+        else:
+            if len(snapshots) > 0:
+                self._hide_object(vname)
+            else:
+                self._gc_delete(vname)
+
+    def _gc_delete(self, vname):
+        """Delete volume and its hidden parents
+
+        Deletes the volume by walking back to the first active
+        parent and calling recursive deletion on the storage side
+        """
+        vol = None
+        try:
+            vol = self.ra.get_lun(vname)
+        except jexc.JDSSResourceNotFoundException:
+            LOG.debug('volume %s does not exist, it was already '
+                      'deleted.', vname)
+            return
+        except jexc.JDSSException as err:
+            raise exception.VolumeBackendAPIException(err)
+
+        if vol['is_clone']:
+            self._delete_back_recursively(jcom.origin_volume(vol['origin']),
+                                          jcom.origin_snapshot(vol['origin']))
+        else:
+            try:
+                self.ra.delete_lun(vname)
+            except jexc.JDSSRESTException as err:
+                LOG.debug(
+                    "Unable to delete physical volume %(volume)s "
+                    "with error %(err)s.", {
+                        "volume": vname,
+                        "err": err})
+                raise exception.SnapshotIsBusy(err)
+
+    def _delete_back_recursively(self, opvname, opsname):
+        """Delete snapshot by removing its oldest removable parent
+
+        Checks if the source volume for this snapshot is hidden:
+        if it is hidden and has no other descendants, it calls itself on its
+        source snapshot if such exists, or deletes it;
+        if it is not hidden, it triggers deletion of the snapshot
+
+        :param opvname: origin physical volume name
+        :param opsname: origin physical snapshot name
+        """
+
+        if jcom.is_hidden(opvname):
+            # Resource is hidden
+            snaps = []
+            try:
+                snaps = self.ra.get_snapshots(opvname)
+            except jexc.JDSSResourceNotFoundException:
+                LOG.debug('Unable to get physical snapshots related to'
+                          ' physical volume %s, volume does not exist',
+                          opvname)
+                return
+            except jexc.JDSSException as err:
+                raise exception.VolumeBackendAPIException(err)
+
+            snaps = self._clean_garbage_snapshots(opvname, snaps)
+
+            if len(snaps) > 1:
+                # opvname has active snapshots and cannot be deleted,
+                # that is why we delete only the branch related to opsname
+                try:
+                    self.ra.delete_snapshot(opvname,
+                                            opsname,
+                                            recursively_children=True,
+                                            recursively_dependents=True,
+                                            force_umount=True)
+                except jexc.JDSSException as err:
+                    raise exception.VolumeBackendAPIException(err)
+            else:
+                vol = None
+                try:
+                    vol = self.ra.get_lun(opvname)
+
+                except jexc.JDSSResourceNotFoundException:
+                    LOG.debug('volume %s does not exist, it was already '
+                              'deleted.', opvname)
+                    return
+                except jexc.JDSSException as err:
+                    raise exception.VolumeBackendAPIException(err)
+
+                if vol['is_clone']:
+                    self._delete_back_recursively(
+                        jcom.origin_volume(vol['origin']),
+                        jcom.origin_snapshot(vol['origin']))
+                else:
+                    try:
+                        self.ra.delete_lun(opvname,
+                                           recursively_children=True,
+                                           recursively_dependents=True,
+                                           force_umount=True)
+                    except jexc.JDSSResourceNotFoundException:
+                        LOG.debug('volume %s does not exist, it was already '
+                                  'deleted.', opvname)
+                        return
+                    except jexc.JDSSException as err:
+                        raise exception.VolumeBackendAPIException(err)
+        else:
+            # Resource is active
+            try:
+                self.ra.delete_snapshot(opvname,
+                                        opsname,
+                                        recursively_children=True,
+                                        recursively_dependents=True,
+                                        force_umount=True)
+            except jexc.JDSSException as err:
+                raise exception.VolumeBackendAPIException(err)
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume.
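+
+        A worked example (sizes are illustrative): extending a volume to
+        2 GiB results in ``self.ra.extend_lun('v_<id>', 2 * o_units.Gi)``,
+        i.e. a ``PUT /volumes/v_<id>`` request with the new size in bytes.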
+
+        :param volume: volume reference
+        :param new_size: volume new size in GB
+        """
+        LOG.debug("Extend volume %s", volume.id)
+
+        try:
+            self.ra.extend_lun(jcom.vname(volume.id),
+                               new_size * o_units.Gi)
+        except jexc.JDSSException:
+            raise exception.VolumeBackendAPIException(
+                _('Failed to extend volume %s.') % volume.id)
+
+    def _clone_object(self, oname, coname):
+        """Create a clone of the specified object
+
+        :param: oname: name of the object to clone
+        :param: coname: name of the new clone
+        """
+        LOG.debug('cloning %(oname)s to %(coname)s', {
+            "oname": oname,
+            "coname": coname})
+
+        try:
+            self.ra.create_snapshot(oname, coname)
+        except jexc.JDSSSnapshotExistsException:
+            try:
+                self.ra.delete_snapshot(oname, coname)
+            except jexc.JDSSSnapshotIsBusyException:
+                raise exception.Duplicate()
+            except jexc.JDSSException:
+                raise exception.VolumeBackendAPIException(
+                    (_("Unable to create volume %s.") % coname))
+        except jexc.JDSSResourceNotFoundException:
+            if jcom.is_volume(oname):
+                raise exception.VolumeNotFound(volume_id=jcom.idname(oname))
+            raise exception.SnapshotNotFound(snapshot_id=jcom.idname(oname))
+
+        except jexc.JDSSException as err:
+            args = {'snapshot': coname,
+                    'object': oname,
+                    'err': err}
+            msg = (_('Failed to create tmp snapshot %(snapshot)s '
+                     'for object %(object)s: %(err)s') % args)
+            raise exception.VolumeBackendAPIException(msg)
+
+        try:
+            self.ra.create_volume_from_snapshot(
+                coname,
+                coname,
+                oname,
+                sparse=self.jovian_sparse)
+        except jexc.JDSSVolumeExistsException:
+            raise exception.Duplicate()
+        except jexc.JDSSException as err:
+            try:
+                self.ra.delete_snapshot(oname,
+                                        coname,
+                                        recursively_children=True,
+                                        recursively_dependents=True,
+                                        force_umount=True)
+            except jexc.JDSSException as terr:
+                LOG.warning("Because of %s physical snapshot %s of volume"
+                            " %s has to be removed manually",
+                            terr,
+                            coname,
+                            oname)
+
+            raise exception.VolumeBackendAPIException(
+                _("Unable to create volume {vol} because of {err}.").format(
+                    vol=coname, err=err))
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of the specified volume.
+
+        :param volume: new volume reference
+        :param src_vref: source volume reference
+        """
+        cvname = jcom.vname(volume.id)
+
+        vname = jcom.vname(src_vref.id)
+
+        LOG.debug('clone volume %(id)s to %(id_clone)s', {
+            "id": src_vref.id,
+            "id_clone": volume.id})
+
+        self._clone_object(vname, cvname)
+
+        clone_size = 0
+
+        try:
+            clone_size = int(self.ra.get_lun(cvname)['volsize'])
+        except jexc.JDSSException:
+
+            self._delete_back_recursively(vname, cvname)
+            raise exception.VolumeBackendAPIException(
+                _("Failure in cloning volume {vol} to {clone}.").format(
+                    vol=src_vref.id, clone=volume.id))
+
+        try:
+            if int(clone_size) < o_units.Gi * int(volume.size):
+                self.extend_volume(volume, int(volume.size))
+
+        except exception.VolumeBackendAPIException:
+            # If the volume can't be set to a proper size, make sure to
+            # clean it up before failing
+            try:
+                self._delete_back_recursively(cvname, cvname)
+            except exception.VolumeBackendAPIException as err:
+                LOG.warning("Because of %s physical snapshot %s of volume"
+                            " %s has to be removed manually",
+                            err,
+                            cvname,
+                            vname)
+            raise
+
+        provider_location = self._get_provider_location(volume.id)
+        provider_auth = self._get_provider_auth()
+
+        ret = {}
+        if provider_auth:
+            ret['provider_auth'] = provider_auth
+
+        ret['provider_location'] = provider_location
+
+        return ret
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create a volume from a snapshot.
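+
+        Flow sketch (names produced by the jcom helpers): snapshot
+        ``s_<snapshot.id>`` is cloned into volume ``v_<volume.id>`` via
+        ``_clone_object``, and the clone is extended afterwards if it turns
+        out smaller than the requested size.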
+
+        If volume_type extra specs include 'replication: True',
+        the driver needs to create a volume replica (secondary),
+        and set up replication between the newly created volume and
+        the secondary volume.
+        """
+        LOG.debug('create volume %(vol)s from snapshot %(snap)s', {
+            'vol': volume.id,
+            'snap': snapshot.id})
+
+        cvname = jcom.vname(volume.id)
+        sname = jcom.sname(snapshot.id)
+
+        self._clone_object(sname, cvname)
+
+        clone_size = 0
+
+        try:
+            clone_size = int(self.ra.get_lun(cvname)['volsize'])
+        except jexc.JDSSException:
+
+            self._delete_back_recursively(sname, cvname)
+            raise exception.VolumeBackendAPIException(
+                _("Failure in cloning snapshot {snap} to {clone}.").format(
+                    snap=snapshot.id, clone=volume.id))
+
+        try:
+            if clone_size < o_units.Gi * int(volume.size):
+                self.extend_volume(volume, int(volume.size))
+        except exception.VolumeBackendAPIException:
+            # If the volume can't be set to a proper size, make sure to
+            # clean it up before failing
+            try:
+                self._delete_back_recursively(cvname, cvname)
+            except exception.VolumeBackendAPIException as ierr:
+                msg = ("Hidden snapshot %s of volume %s "
+                       "has to be removed manually, "
+                       "as automatic removal failed: %s")
+                LOG.warning(msg, cvname, sname, ierr)
+            raise
+
+        provider_location = self._get_provider_location(volume.id)
+        provider_auth = self._get_provider_auth()
+
+        ret = {}
+        if provider_auth is not None:
+            ret['provider_auth'] = provider_auth
+
+        ret['provider_location'] = provider_location
+
+        return ret
+
+    def create_snapshot(self, snapshot):
+        """Create snapshot of existing volume.
+
+        :param snapshot: snapshot reference
+        """
+        LOG.debug('create snapshot %(snap)s for volume %(vol)s', {
+            'snap': snapshot.id,
+            'vol': snapshot.volume_id})
+
+        vname = jcom.vname(snapshot.volume_id)
+        sname = jcom.sname(snapshot.id)
+
+        self._clone_object(vname, sname)
+
+        try:
+            self.ra.make_readonly_lun(sname)
+        except jexc.JDSSException as err:
+            # Name of the snapshot should be the same as the name of the
+            # volume that is going to be created from it
+            self._delete_back_recursively(vname, sname)
+            raise exception.VolumeBackendAPIException(err)
+
+    def delete_snapshot(self, snapshot):
+        """Delete snapshot of existing volume.
+
+        :param snapshot: snapshot reference
+        """
+        sname = jcom.sname(snapshot.id)
+
+        LOG.debug('deleting snapshot %s.', sname)
+
+        snapshots = None
+        try:
+            snapshots = self.ra.get_snapshots(sname)
+        except jexc.JDSSResourceNotFoundException:
+            LOG.debug('physical volume %s does not exist, it was already '
+                      'deleted.', sname)
+            return
+        except jexc.JDSSException as err:
+            raise exception.VolumeBackendAPIException(err)
+
+        snapshots = self._clean_garbage_snapshots(sname, snapshots)
+
+        if len(snapshots) > 0:
+            self._hide_object(sname)
+        else:
+            self._gc_delete(sname)
+
+    def _get_provider_auth(self):
+        """Get provider authentication for the volume.
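+
+        Example of the returned string (the credentials are randomly
+        generated, the values below are illustrative only):
+        ``CHAP AbCdEfGh Ab1Cd2Ef3Gh4``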
+
+        :return: string of auth method and credentials
+        """
+        chap_user = volume_utils.generate_password(
+            length=8,
+            symbolgroups=(string.ascii_lowercase +
+                          string.ascii_uppercase))
+
+        chap_password = volume_utils.generate_password(
+            length=self.jovian_chap_pass_len,
+            symbolgroups=(string.ascii_lowercase +
+                          string.ascii_uppercase + string.digits))
+
+        return 'CHAP {user} {passwd}'.format(
+            user=chap_user, passwd=chap_password)
+
+    def _get_provider_location(self, volume_name):
+        """Return volume iscsiadm-formatted provider location string."""
+        return '{host}:{port},1 {name} 0'.format(
+            host=self.ra.get_active_host(),
+            port=self.jovian_iscsi_target_portal_port,
+            name=self._get_target_name(volume_name)
+        )
+
+    def create_export(self, _ctx, volume, connector):
+        """Create new export for zvol.
+
+        :param volume: reference of volume to be exported
+        :return: iscsiadm-formatted provider location string
+        """
+        LOG.debug("create export for volume: %s.", volume.id)
+
+        self._create_target_volume(volume)
+
+        return {'provider_location': self._get_provider_location(volume.id)}
+
+    def ensure_export(self, _ctx, volume):
+        """Recreate parts of export if necessary.
+
+        :param volume: reference of volume to be exported
+        """
+        LOG.debug("ensure export for volume: %s.", volume.id)
+        self._ensure_target_volume(volume)
+
+    def remove_export(self, _ctx, volume):
+        """Destroy all resources created to export zvol.
+
+        :param volume: reference of volume to be unexported
+        """
+        LOG.debug("remove_export for volume: %s.", volume.id)
+
+        self._remove_target_volume(volume)
+
+    def _update_volume_stats(self):
+        """Retrieve stats info."""
+        LOG.debug('Updating volume stats')
+
+        pool_stats = self.ra.get_pool_stats()
+        total_capacity = math.floor(int(pool_stats["size"]) / o_units.Gi)
+        free_capacity = math.floor(int(pool_stats["available"]) / o_units.Gi)
+
+        reserved_percentage = (
+            self.configuration.safe_get('reserved_percentage'))
+
+        if total_capacity is None:
+            total_capacity = 'unknown'
+        if free_capacity is None:
+            free_capacity = 'unknown'
+
+        location_info = '%(driver)s:%(host)s:%(volume)s' % {
+            'driver': self.__class__.__name__,
+            'host': self.ra.get_active_host()[0],
+            'volume': self._pool
+        }
+
+        self._stats = {
+            'vendor_name': 'Open-E',
+            'driver_version': self.VERSION,
+            'storage_protocol': 'iSCSI',
+            'total_capacity_gb': total_capacity,
+            'free_capacity_gb': free_capacity,
+            'reserved_percentage': int(reserved_percentage),
+            'volume_backend_name': self.backend_name,
+            'QoS_support': False,
+            'location_info': location_info
+        }
+
+        LOG.debug('Total capacity: %d, '
+                  'Free %d.',
+                  self._stats['total_capacity_gb'],
+                  self._stats['free_capacity_gb'])
+
+    def _create_target(self, target_name, use_chap=True):
+        """Create target and handle exceptions
+
+        Tries to create a target.
+        :param target_name: name of target
+        :param use_chap: flag for using chap
+        """
+        try:
+            self.ra.create_target(target_name,
+                                  use_chap=use_chap)
+
+        except jexc.JDSSResourceExistsException:
+            raise exception.Duplicate()
+        except jexc.JDSSException as ex:
+
+            msg = (_('Unable to create target %(target)s '
+                     'because of %(error)s.') % {'target': target_name,
+                                                 'error': ex})
+            raise exception.VolumeBackendAPIException(msg)
+
+    def _attach_target_volume(self, target_name, vname):
+        """Attach volume to target and handle exceptions
+
+        Tries to attach the volume to a specific target.
+        In case of failure the target is removed.
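+        For example, for a volume with id ``<uuid>`` the call is roughly
+        ``self.ra.attach_target_vol('iqn.2020-04.com.open-e.cinder:<uuid>',
+        'v_<uuid>')`` (illustrative; names depend on configuration).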
+        :param target_name: name of target
+        :param vname: physical volume name
+        """
+        try:
+            self.ra.attach_target_vol(target_name, vname)
+        except jexc.JDSSException as ex:
+            msg = ('Unable to attach volume to target {target} '
+                   'because of {error}.')
+            emsg = msg.format(target=target_name, error=ex)
+            LOG.debug(emsg)
+            try:
+                self.ra.delete_target(target_name)
+            except jexc.JDSSException:
+                pass
+            raise exception.VolumeBackendAPIException(emsg)
+
+    def _set_target_credentials(self, target_name, cred):
+        """Set CHAP configuration for target and handle exceptions
+
+        Tries to set CHAP credentials for a specific target.
+        In case of failure the target is removed.
+        :param target_name: name of target
+        :param cred: CHAP user name and password
+        """
+        try:
+            self.ra.create_target_user(target_name, cred)
+
+        except jexc.JDSSException as ex:
+            try:
+                self.ra.delete_target(target_name)
+            except jexc.JDSSException:
+                pass
+
+            err_msg = (('Unable to create user %(user)s '
+                        'for target %(target)s '
+                        'because of %(error)s.') % {
+                            'target': target_name,
+                            'user': cred['name'],
+                            'error': ex})
+
+            LOG.debug(err_msg)
+
+            raise exception.VolumeBackendAPIException(err_msg)
+
+    def _create_target_volume(self, volume):
+        """Create target and attach volume to it
+
+        :param volume: volume reference
+        :return:
+        """
+        LOG.debug("create target and attach volume %s to it", volume.id)
+
+        target_name = self.jovian_target_prefix + volume.id
+        vname = jcom.vname(volume.id)
+
+        auth = volume.provider_auth
+
+        if not auth:
+            msg = _("Volume {} is missing provider_auth").format(volume.id)
+            raise exception.VolumeDriverException(msg)
+
+        (__, auth_username, auth_secret) = auth.split()
+        chap_cred = {"name": auth_username,
+                     "password": auth_secret}
+
+        # Create target
+        self._create_target(target_name, True)
+
+        # Attach volume
+        self._attach_target_volume(target_name, vname)
+
+        # Set credentials
+        self._set_target_credentials(target_name, chap_cred)
+
+    def _ensure_target_volume(self, volume):
+        """Check that target is configured properly and volume is attached
+
+        param: volume: volume structure
+        """
+        LOG.debug("ensure volume %s assigned to a proper target", volume.id)
+
+        target_name = self.jovian_target_prefix + volume.id
+
+        auth = volume.provider_auth
+
+        if not auth:
+            msg = _("volume {} is missing provider_auth").format(volume.id)
+            raise exception.VolumeDriverException(msg)
+
+        (__, auth_username, auth_secret) = auth.split()
+        chap_cred = {"name": auth_username,
+                     "password": auth_secret}
+
+        if not self.ra.is_target(target_name):
+            self._create_target_volume(volume)
+            return
+
+        if not self.ra.is_target_lun(target_name, volume.id):
+            vname = jcom.vname(volume.id)
+            self._attach_target_volume(target_name, vname)
+
+        try:
+            users = self.ra.get_target_user(target_name)
+            if len(users) == 1:
+                if users[0]['name'] == chap_cred['name']:
+                    return
+            # Drop whatever credentials are present and set the expected ones
+            for user in users:
+                self.ra.delete_target_user(
+                    target_name,
+                    user['name'])
+            self._set_target_credentials(target_name, chap_cred)
+
+        except jexc.JDSSException as err:
+            self.ra.delete_target(target_name)
+            raise exception.VolumeBackendAPIException(err)
+
+    def _remove_target_volume(self, volume):
+        """_remove_target_volume
+
+        Ensure that the volume is not attached to a target and the target
+        does not exist.
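+
+        Teardown order mirrors setup in reverse (sketch): first
+        ``detach_target_vol`` removes the LUN from the target, then
+        ``delete_target`` drops the target itself; a missing resource at
+        either step is tolerated.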
+ """ + target_name = self.jovian_target_prefix + volume.id + LOG.debug("remove export") + LOG.debug("detach volume:%(vol)s from target:%(targ)s.", { + 'vol': volume, + 'targ': target_name}) + + try: + self.ra.detach_target_vol(target_name, jcom.vname(volume.id)) + except jexc.JDSSResourceNotFoundException as ex: + LOG.debug('failed to remove resource %(t)s because of %(err)s', { + 't': target_name, + 'err': ex.args[0]}) + except jexc.JDSSException as ex: + LOG.debug('failed to Terminate_connection for target %(targ)s' + 'because of: %(err)s', { + 'targ': target_name, + 'err': ex.args[0]}) + raise exception.VolumeBackendAPIException(ex) + + LOG.debug("delete target: %s", target_name) + + try: + self.ra.delete_target(target_name) + except jexc.JDSSResourceNotFoundException as ex: + LOG.debug('failed to remove resource %(target)s because ' + 'of %(err)s', {'target': target_name, + 'err': ex.args[0]}) + + except jexc.JDSSException as ex: + LOG.debug('Failed to Terminate_connection for target %(targ)s' + 'because of: %(err)s', { + 'targ': target_name, + 'err': ex.args[0]}) + + raise exception.VolumeBackendAPIException(ex) + + def _get_iscsi_properties(self, volume, connector): + """Return dict according to cinder/driver.py implementation. + + :param volume: + :return: + """ + tname = self.jovian_target_prefix + volume.id + iface_info = [] + multipath = connector.get('multipath', False) + if multipath: + iface_info = self._get_active_ifaces() + if not iface_info: + raise exception.InvalidConfigurationValue( + _('No available interfaces ' + 'or config excludes them')) + + iscsi_properties = dict() + + if multipath: + iscsi_properties['target_iqns'] = [] + iscsi_properties['target_portals'] = [] + iscsi_properties['target_luns'] = [] + LOG.debug('tpaths %s.', iface_info) + for iface in iface_info: + iscsi_properties['target_iqns'].append( + self.jovian_target_prefix + + volume.id) + iscsi_properties['target_portals'].append( + iface + + ":" + + str(self.jovian_iscsi_target_portal_port)) + iscsi_properties['target_luns'].append(0) + else: + iscsi_properties['target_iqn'] = tname + iscsi_properties['target_portal'] = ( + self.ra.get_active_host() + + ":" + + str(self.jovian_iscsi_target_portal_port)) + + iscsi_properties['target_discovered'] = False + + auth = volume.provider_auth + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + iscsi_properties['auth_method'] = auth_method + iscsi_properties['auth_username'] = auth_username + iscsi_properties['auth_password'] = auth_secret + + iscsi_properties['target_lun'] = 0 + return iscsi_properties + + def initialize_connection(self, volume, connector): + """Initialize the connection and returns connection info. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + the format of the driver data is defined in smis_get_iscsi_properties. + Example return value: + .. 
code-block:: json + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': '12345678-1234-1234-1234-123456789012', + } + } + """ + iscsi_properties = self._get_iscsi_properties(volume, connector) + + LOG.debug("initialize_connection for %(volume)s %(ip)s.", + {'volume': volume.id, + 'ip': connector['ip']}) + + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties, + } + + def terminate_connection(self, volume, connector, force=False, **kwargs): + """terminate_connection + + """ + + LOG.debug("terminate connection for %(volume)s ", + {'volume': volume.id}) diff --git a/cinder/volume/drivers/open_e/jovian_common/__init__.py b/cinder/volume/drivers/open_e/jovian_common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder/volume/drivers/open_e/jovian_common/exception.py b/cinder/volume/drivers/open_e/jovian_common/exception.py new file mode 100644 index 0000000..dbc1eb7 --- /dev/null +++ b/cinder/volume/drivers/open_e/jovian_common/exception.py @@ -0,0 +1,82 @@ +# Copyright (c) 2020 Open-E, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import exception +from cinder.i18n import _ + + +class JDSSException(exception.VolumeDriverException): + """Unknown error""" + message = _("%(reason)s") + + +class JDSSRESTException(JDSSException): + """Unknown communication error""" + + message = _("JDSS REST request %(request)s faild: %(reason)s.") + + +class JDSSRESTProxyException(JDSSException): + """Connection with host failed""" + + message = _("JDSS connection with %(host)s failed: %(reason)s.") + + +class JDSSResourceNotFoundException(JDSSException): + """Resource does not exist""" + + message = _("JDSS resource %(res)s DNE.") + + +class JDSSVolumeNotFoundException(JDSSResourceNotFoundException): + """Volume does not exist""" + + message = _("JDSS volume %(volume)s DNE.") + + +class JDSSSnapshotNotFoundException(JDSSResourceNotFoundException): + """Snapshot does not exist""" + + message = _("JDSS snapshot %(snapshot)s DNE.") + + +class JDSSResourceExistsException(JDSSException): + """Resource with specified id exists""" + + message = _("JDSS resource with id %(res)s exists.") + + +class JDSSSnapshotExistsException(JDSSResourceExistsException): + """Snapshot with the same id exists""" + + message = _("JDSS snapshot %(snapshot)s already exists.") + + +class JDSSVolumeExistsException(JDSSResourceExistsException): + """Volume with same id exists""" + + message = _("JDSS volume %(volume)s already exists.") + + +class JDSSResourceIsBusyException(JDSSException): + """Resource have dependents""" + + message = _("JDSS resource %(res)s is busy.") + + +class JDSSSnapshotIsBusyException(JDSSResourceIsBusyException): + """Snapshot have dependent clones""" + + message = _("JDSS snapshot %(snapshot)s is busy.") diff --git a/cinder/volume/drivers/open_e/jovian_common/jdss_common.py 
b/cinder/volume/drivers/open_e/jovian_common/jdss_common.py
new file mode 100644
index 0000000..9a86d7a
--- /dev/null
+++ b/cinder/volume/drivers/open_e/jovian_common/jdss_common.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2020 Open-E, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import exception
+from cinder.i18n import _
+
+
+def is_volume(name):
+    """Return True if the name belongs to a volume"""
+
+    return name.startswith("v_")
+
+
+def is_snapshot(name):
+    """Return True if the name belongs to a snapshot"""
+
+    return name.startswith("s_")
+
+
+def idname(name):
+    """Extract id from physical object name"""
+
+    if name.startswith(('s_', 'v_', 't_')):
+        return name[2:]
+
+    msg = _('Object name %s is incorrect') % name
+    raise exception.VolumeBackendAPIException(message=msg)
+
+
+def vname(name):
+    """Convert id into volume name"""
+
+    if name.startswith("v_"):
+        return name
+
+    if name.startswith('s_'):
+        msg = _('Attempt to use snapshot %s as a volume') % name
+        raise exception.VolumeBackendAPIException(message=msg)
+
+    if name.startswith('t_'):
+        msg = _('Attempt to use deleted object %s as a volume') % name
+        raise exception.VolumeBackendAPIException(message=msg)
+
+    return 'v_' + name
+
+
+def sname(name):
+    """Convert id into snapshot name"""
+
+    if name.startswith('s_'):
+        return name
+
+    if name.startswith('v_'):
+        msg = _('Attempt to use volume %s as a snapshot') % name
+        raise exception.VolumeBackendAPIException(message=msg)
+
+    if name.startswith('t_'):
+        msg = _('Attempt to use deleted object %s as a snapshot') % name
+        raise exception.VolumeBackendAPIException(message=msg)
+
+    return 's_' + name
+
+
+def is_hidden(name):
+    """Check if object is hidden"""
+
+    if len(name) < 2:
+        return False
+    if name.startswith('t_'):
+        return True
+    return False
+
+
+def origin_snapshot(origin_str):
+    """Extract original physical snapshot name from origin record"""
+
+    return origin_str.split("@")[1]
+
+
+def origin_volume(origin_str):
+    """Extract original physical volume name from origin record"""
+
+    return origin_str.split("@")[0].split("/")[1]
+
+
+def full_name_volume(name):
+    """Get volume name from its full name <pool>/<volume>"""
+
+    return name.split('/')[1]
+
+
+def hidden(name):
+    """Get hidden version of a name"""
+
+    if len(name) < 2:
+        raise exception.VolumeDriverException("Incorrect volume name")
+
+    if name[:2] == 'v_' or name[:2] == 's_':
+        return 't_' + name[2:]
+    return 't_' + name
diff --git a/cinder/volume/drivers/open_e/jovian_common/rest.py b/cinder/volume/drivers/open_e/jovian_common/rest.py
new file mode 100644
index 0000000..f04d277
--- /dev/null
+++ b/cinder/volume/drivers/open_e/jovian_common/rest.py
@@ -0,0 +1,893 @@
+# Copyright (c) 2020 Open-E, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""REST cmd interoperation class for JovianDSS driver.""" +import re + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _ +from cinder.volume.drivers.open_e.jovian_common import exception as jexc +from cinder.volume.drivers.open_e.jovian_common import rest_proxy + +LOG = logging.getLogger(__name__) + + +class JovianRESTAPI(object): + """Jovian REST API proxy.""" + + def __init__(self, config): + + self.target_p = config.get('iscsi_target_prefix', + 'iqn.2020-04.com.open-e.cinder:') + self.pool = config.safe_get('jovian_pool') + self.rproxy = rest_proxy.JovianRESTProxy(config) + + self.resource_dne_msg = ( + re.compile(r'^Zfs resource: .* not found in this collection\.$')) + + def _general_error(self, url, resp): + reason = "Request {} failure".format(url) + if 'error' in resp: + + eclass = resp.get('class', 'Unknown') + code = resp.get('code', 'Unknown') + msg = resp.get('message', 'Unknown') + + reason = ("Request to {url} failed with code:%{code} " + "of type:{eclass} reason:{message}") + reason = reason.format(eclass=eclass, + code=code, + message=msg, + url=url) + raise jexc.JDSSException(reason=reason) + + def get_active_host(self): + """Return address of currently used host.""" + return self.rproxy.get_active_host() + + def is_pool_exists(self): + """is_pool_exists. + + GET + /pools/ + + :param pool_name: + :return: Bool + """ + req = "" + LOG.debug("check pool") + + resp = self.rproxy.pool_request('GET', req) + + if resp["code"] == 200 and not resp["error"]: + return True + + return False + + def get_iface_info(self): + """get_iface_info + + GET + /network/interfaces + :return list of internet ifaces + """ + req = '/network/interfaces' + + LOG.debug("get network interfaces") + + resp = self.rproxy.request('GET', req) + if (resp['error'] is None) and (resp['code'] == 200): + return resp['data'] + raise jexc.JDSSRESTException(resp['error']['message']) + + def get_luns(self): + """get_all_pool_volumes. + + GET + /pools//volumes + :param pool_name + :return list of all pool volumes + """ + req = '/volumes' + + LOG.debug("get all volumes") + resp = self.rproxy.pool_request('GET', req) + + if resp['error'] is None and resp['code'] == 200: + return resp['data'] + raise jexc.JDSSRESTException(resp['error']['message']) + + def create_lun(self, volume_name, volume_size, sparse=False, + block_size=None): + """create_volume. + + POST + .../volumes + + :param volume_name: + :param volume_size: + :return: + """ + volume_size_str = str(volume_size) + jbody = { + 'name': volume_name, + 'size': volume_size_str, + 'sparse': sparse + } + if block_size: + jbody['blocksize'] = block_size + + req = '/volumes' + + LOG.debug("create volume %s", str(jbody)) + resp = self.rproxy.pool_request('POST', req, json_data=jbody) + + if not resp["error"] and resp["code"] in (200, 201): + return + + if resp["error"] is not None: + if resp["error"]["errno"] == str(5): + raise jexc.JDSSRESTException( + 'Failed to create volume. 
{}.'.format( + resp['error']['message'])) + + raise jexc.JDSSRESTException('Failed to create volume.') + + def extend_lun(self, volume_name, volume_size): + """create_volume. + + PUT /volumes/ + """ + req = '/volumes/' + volume_name + volume_size_str = str(volume_size) + jbody = { + 'size': volume_size_str + } + + LOG.debug("jdss extend volume %(volume)s to %(size)s", + {"volume": volume_name, + "size": volume_size_str}) + resp = self.rproxy.pool_request('PUT', req, json_data=jbody) + + if not resp["error"] and resp["code"] == 201: + return + + if resp["error"]: + raise jexc.JDSSRESTException( + 'Failed to extend volume {}'.format(resp['error']['message'])) + + raise jexc.JDSSRESTException('Failed to extend volume.') + + def is_lun(self, volume_name): + """is_lun. + + GET /volumes/ + Returns True if volume exists. Uses GET request. + :param pool_name: + :param volume_name: + :return: + """ + req = '/volumes/' + volume_name + + LOG.debug("check volume %s", volume_name) + ret = self.rproxy.pool_request('GET', req) + + if not ret["error"] and ret["code"] == 200: + return True + return False + + def get_lun(self, volume_name): + """get_lun. + + GET /volumes/ + :param volume_name: + :return: + { + "data": + { + "origin": null, + "referenced": "65536", + "primarycache": "all", + "logbias": "latency", + "creation": "1432730973", + "sync": "always", + "is_clone": false, + "dedup": "off", + "used": "1076101120", + "full_name": "Pool-0/v1", + "type": "volume", + "written": "65536", + "usedbyrefreservation": "1076035584", + "compression": "lz4", + "usedbysnapshots": "0", + "copies": "1", + "compressratio": "1.00x", + "readonly": "off", + "mlslabel": "none", + "secondarycache": "all", + "available": "976123452576", + "resource_name": "Pool-0/v1", + "volblocksize": "131072", + "refcompressratio": "1.00x", + "snapdev": "hidden", + "volsize": "1073741824", + "reservation": "0", + "usedbychildren": "0", + "usedbydataset": "65536", + "name": "v1", + "checksum": "on", + "refreservation": "1076101120" + }, + "error": null + } + """ + req = '/volumes/' + volume_name + + LOG.debug("get volume %s info", volume_name) + resp = self.rproxy.pool_request('GET', req) + + if not resp['error'] and resp['code'] == 200: + return resp['data'] + + if resp['error']: + if 'message' in resp['error']: + if self.resource_dne_msg.match(resp['error']['message']): + raise jexc.JDSSResourceNotFoundException(res=volume_name) + + self._general_error(req, resp) + + def modify_lun(self, volume_name, prop=None): + """Update volume properties + + :prop volume_name: volume name + :prop prop: dictionary + { + : + } + """ + + req = '/volumes/' + volume_name + + resp = self.rproxy.pool_request('PUT', req, json_data=prop) + + if resp["code"] in (200, 201, 204): + LOG.debug("volume %s updated", volume_name) + return + + if resp["code"] == 500: + if resp["error"] is not None: + if resp["error"]["errno"] == 1: + raise jexc.JDSSResourceNotFoundException( + res=volume_name) + + self._general_error(req, resp) + + def make_readonly_lun(self, volume_name): + """Set volume into read only mode + + :param: volume_name: volume name + """ + prop = {"property_name": "readonly", "property_value": "on"} + + self.modify_property_lun(volume_name, prop) + + def modify_property_lun(self, volume_name, prop=None): + """Change volume properties + + :prop: volume_name: volume name + :prop: prop: dictionary of volume properties in format + { "property_name": "", + "property_value":""} + """ + + req = '/volumes/{}/properties'.format(volume_name) + + resp = 
self.rproxy.pool_request('PUT', req, json_data=prop) + + if resp["code"] in (200, 201, 204): + LOG.debug( + "volume %s properties updated", volume_name) + return + + if resp["code"] == 500: + if resp["error"] is not None: + if resp["error"]["errno"] == 1: + raise jexc.JDSSResourceNotFoundException( + res=volume_name) + raise jexc.JDSSRESTException(request=req, + reason=resp['error']['message']) + raise jexc.JDSSRESTException(request=req, reason="unknown") + + def delete_lun(self, volume_name, + recursively_children=False, + recursively_dependents=False, + force_umount=False): + """delete_volume. + + DELETE /volumes/ + :param volume_name: + :return: + """ + jbody = {} + if recursively_children: + jbody['recursively_children'] = True + + if recursively_dependents: + jbody['recursively_dependents'] = True + + if force_umount: + jbody['force_umount'] = True + + req = '/volumes/' + volume_name + LOG.debug(("delete volume:%(vol)s " + "recursively children:%(args)s"), + {'vol': volume_name, + 'args': jbody}) + + if len(jbody) > 0: + resp = self.rproxy.pool_request('DELETE', req, json_data=jbody) + else: + resp = self.rproxy.pool_request('DELETE', req) + + if resp["code"] == 204: + LOG.debug( + "volume %s deleted", volume_name) + return + + # Handle DNE case + if resp["code"] == 500: + if 'message' in resp['error']: + if self.resource_dne_msg.match(resp['error']['message']): + LOG.debug("volume %s do not exists, delition success", + volume_name) + return + + # Handle volume busy + if resp["code"] == 500 and resp["error"]: + if resp["error"]["errno"] == 1000: + LOG.warning( + "volume %s is busy", volume_name) + raise exception.VolumeIsBusy(volume_name=volume_name) + + raise jexc.JDSSRESTException('Failed to delete volume.') + + def is_target(self, target_name): + """is_target. + + GET /san/iscsi/targets/ target_name + :param target_name: + :return: Bool + """ + req = '/san/iscsi/targets/' + target_name + + LOG.debug("check if targe %s exists", target_name) + resp = self.rproxy.pool_request('GET', req) + + if resp["error"] or resp["code"] not in (200, 201): + return False + + if "name" in resp["data"]: + if resp["data"]["name"] == target_name: + LOG.debug( + "target %s exists", target_name) + return True + + return False + + def create_target(self, + target_name, + use_chap=True, + allow_ip=None, + deny_ip=None): + """create_target. + + POST /san/iscsi/targets + :param target_name: + :param chap_cred: + :param allow_ip: + "allow_ip": [ + "192.168.2.30/0", + "192.168.3.45" + ], + + :return: + """ + req = '/san/iscsi/targets' + + LOG.debug("create target %s", target_name) + + jdata = {"name": target_name, "active": True} + + jdata["incoming_users_active"] = use_chap + + if allow_ip: + jdata["allow_ip"] = allow_ip + + if deny_ip: + jdata["deny_ip"] = deny_ip + + resp = self.rproxy.pool_request('POST', req, json_data=jdata) + + if not resp["error"] and resp["code"] == 201: + return + + if resp["code"] == 409: + raise jexc.JDSSResourceExistsException(res=target_name) + + self._general_error(req, resp) + + def delete_target(self, target_name): + """delete_target. 
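+
+        Behaviour sketch (as exercised by test_delete_target): a
+        200/201/204 reply returns None, a 404 reply or an
+        ItemNotFoundError class in the error body raises
+        jexc.JDSSResourceNotFoundException, anything else goes through
+        _general_error.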
+ + DELETE /san/iscsi/targets/ + :param pool_name: + :param target_name: + :return: + """ + req = '/san/iscsi/targets/' + target_name + + LOG.debug("delete target %s", target_name) + + resp = self.rproxy.pool_request('DELETE', req) + + if resp["code"] in (200, 201, 204): + LOG.debug( + "target %s deleted", target_name) + return + + not_found_err = "opene.exceptions.ItemNotFoundError" + if (resp["code"] == 404) or \ + (resp["error"]["class"] == not_found_err): + raise jexc.JDSSResourceNotFoundException(res=target_name) + + self._general_error(req, resp) + + def create_target_user(self, target_name, chap_cred): + """Set CHAP credentials for accees specific target. + + POST + /san/iscsi/targets//incoming-users + + :param target_name: + :param chap_cred: + { + "name": "target_user", + "password": "3e21ewqdsacxz" --- 12 chars min + } + :return: + """ + req = '/san/iscsi/targets/' + target_name + "/incoming-users" + + LOG.debug("add credentails to target %s", target_name) + + resp = self.rproxy.pool_request('POST', req, json_data=chap_cred) + + if not resp["error"] and resp["code"] in (200, 201, 204): + return + + if resp['code'] == 404: + raise jexc.JDSSResourceNotFoundException(res=target_name) + + self._general_error(req, resp) + + def get_target_user(self, target_name): + """Get name of CHAP user for accessing target + + GET + /san/iscsi/targets//incoming-users + + :param target_name: + """ + req = '/san/iscsi/targets/' + target_name + "/incoming-users" + + LOG.debug("get chap cred for target %s", target_name) + + resp = self.rproxy.pool_request('GET', req) + + if not resp["error"] and resp["code"] == 200: + return resp['data'] + + if resp['code'] == 404: + raise jexc.JDSSResourceNotFoundException(res=target_name) + + self._general_error(req, resp) + + def delete_target_user(self, target_name, user_name): + """Delete CHAP user for target + + DELETE + /san/iscsi/targets//incoming-users/ + + :param target_name: target name + :param user_name: user name + """ + req = '/san/iscsi/targets/{0}/incoming-users/{1}'.format( + target_name, user_name) + + LOG.debug("remove credentails from target %s", target_name) + + resp = self.rproxy.pool_request('DELETE', req) + + if resp["error"] is None and resp["code"] == 204: + return + + if resp['code'] == 404: + raise jexc.JDSSResourceNotFoundException(res=target_name) + + self._general_error(req, resp) + + def is_target_lun(self, target_name, lun_name): + """is_target_lun. + + GET /san/iscsi/targets//luns/ + :param pool_name: + :param target_name: + :param lun_name: + :return: Bool + """ + req = '/san/iscsi/targets/' + target_name + "/luns/" + lun_name + + LOG.debug("check if volume %(vol)s is associated with %(tar)s", + {'vol': lun_name, + 'tar': target_name}) + resp = self.rproxy.pool_request('GET', req) + + if not resp["error"] and resp["code"] == 200: + LOG.debug("volume %(vol)s is associated with %(tar)s", + {'vol': lun_name, + 'tar': target_name}) + return True + + if resp['code'] == 404: + LOG.debug("volume %(vol)s is not associated with %(tar)s", + {'vol': lun_name, + 'tar': target_name}) + return False + + self._general_error(req, resp) + + def attach_target_vol(self, target_name, lun_name, lun_id=0): + """attach_target_vol. 
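+
+        Example request body (names are illustrative):
+        ``{"name": "v_<uuid>", "lun": 0}``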
+    def attach_target_vol(self, target_name, lun_name, lun_id=0):
+        """attach_target_vol.
+
+        POST /san/iscsi/targets/<target_name>/luns
+
+        :param target_name: target name
+        :param lun_name: LUN name
+        :param lun_id: LUN id to assign, defaults to 0
+        :return:
+        """
+        req = '/san/iscsi/targets/{}/luns'.format(target_name)
+
+        jbody = {"name": lun_name, "lun": lun_id}
+        LOG.debug("attach volume %(vol)s to target %(tar)s",
+                  {'vol': lun_name,
+                   'tar': target_name})
+
+        resp = self.rproxy.pool_request('POST', req, json_data=jbody)
+
+        if not resp["error"] and resp["code"] == 201:
+            return
+
+        if resp['code'] == 409:
+            raise jexc.JDSSResourceExistsException(res=lun_name)
+
+        if resp['code'] == 404:
+            raise jexc.JDSSResourceNotFoundException(res=target_name)
+
+        self._general_error(req, resp)
+
+    def detach_target_vol(self, target_name, lun_name):
+        """detach_target_vol.
+
+        DELETE /san/iscsi/targets/<target_name>/luns/<lun_name>
+
+        :param target_name: target name
+        :param lun_name: LUN name
+        :return:
+        """
+        req = '/san/iscsi/targets/' + target_name + "/luns/" + lun_name
+
+        LOG.debug("detach volume %(vol)s from target %(tar)s",
+                  {'vol': lun_name,
+                   'tar': target_name})
+
+        resp = self.rproxy.pool_request('DELETE', req)
+
+        if resp["code"] in (200, 201, 204):
+            return
+
+        if resp['code'] == 404:
+            raise jexc.JDSSResourceNotFoundException(res=lun_name)
+
+        self._general_error(req, resp)
+
+    def create_snapshot(self, volume_name, snapshot_name):
+        """create_snapshot.
+
+        POST /pools/<pool_name>/volumes/<volume_name>/snapshots
+
+        :param volume_name: source volume
+        :param snapshot_name: snapshot name
+        :return:
+        """
+        req = '/volumes/' + volume_name + '/snapshots'
+
+        jbody = {
+            'snapshot_name': snapshot_name
+        }
+
+        LOG.debug("create snapshot %s", snapshot_name)
+
+        resp = self.rproxy.pool_request('POST', req, json_data=jbody)
+
+        if not resp["error"] and resp["code"] in (200, 201, 204):
+            return
+
+        if resp["code"] == 500:
+            if resp["error"]:
+                if resp["error"]["errno"] == 1:
+                    raise jexc.JDSSVolumeNotFoundException(
+                        volume=volume_name)
+                if resp["error"]["errno"] == 5:
+                    raise jexc.JDSSSnapshotExistsException(
+                        snapshot=snapshot_name)
+
+        self._general_error(req, resp)
+
+    def create_volume_from_snapshot(self, volume_name, snapshot_name,
+                                    original_vol_name, **options):
+        """create_volume_from_snapshot.
+
+        POST /volumes/<original_vol_name>/clone
+
+        :param volume_name: volume that is going to be created
+        :param snapshot_name: snapshot of the original volume to clone from
+        :param original_vol_name: volume the snapshot belongs to
+        :return:
+        """
+        req = '/volumes/' + original_vol_name + '/clone'
+
+        jbody = {
+            'name': volume_name,
+            'snapshot': snapshot_name,
+            'sparse': False
+        }
+
+        if 'sparse' in options:
+            jbody['sparse'] = options['sparse']
+
+        LOG.debug("create volume %(vol)s from snapshot %(snap)s",
+                  {'vol': volume_name,
+                   'snap': snapshot_name})
+
+        resp = self.rproxy.pool_request('POST', req, json_data=jbody)
+
+        if not resp["error"] and resp["code"] in (200, 201, 204):
+            return
+
+        if resp["code"] == 500:
+            if resp["error"]:
+                if resp["error"]["errno"] == 100:
+                    raise jexc.JDSSVolumeExistsException(
+                        volume=volume_name)
+                args = {"vol": volume_name, "e": resp['error']['message']}
+                msg = _('Failed to create volume %(vol)s, err: %(e)s') % args
+                raise jexc.JDSSRESTException(msg)
+
+        raise jexc.JDSSRESTException('unable to create volume')
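
Cloning on JovianDSS is snapshot-based: a new volume is always cloned from a snapshot of the source volume, which is why ``create_volume_from_snapshot`` takes all three names. A sketch of the two-step flow, under the same ``rest`` assumption as the earlier examples:

    # Sketch only: snapshot-then-clone flow built on the two calls above.
    from cinder.volume.drivers.open_e.jovian_common import exception as jexc

    def clone_volume(rest, src_vol, snap_name, clone_name, sparse=True):
        """Snapshot the source volume, then clone from that snapshot."""
        try:
            rest.create_snapshot(src_vol, snap_name)
        except jexc.JDSSSnapshotExistsException:
            pass  # intermediate snapshot already exists, reuse it
        rest.create_volume_from_snapshot(clone_name, snap_name, src_vol,
                                         sparse=sparse)
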
+    def is_snapshot(self, volume_name, snapshot_name):
+        """is_snapshot.
+
+        GET /volumes/<volume_name>/snapshots/<snapshot_name>/clones
+
+        :param volume_name: volume that snapshot belongs to
+        :return: bool
+        """
+        req = '/volumes/' + volume_name + '/snapshots/' + snapshot_name + \
+            '/clones'
+
+        LOG.debug("check if snapshot %(snap)s of volume %(vol)s exists",
+                  {'snap': snapshot_name,
+                   'vol': volume_name})
+
+        resp = self.rproxy.pool_request('GET', req)
+
+        if not resp["error"] and resp["code"] == 200:
+            return True
+
+        return False
+
+    def delete_snapshot(self,
+                        volume_name,
+                        snapshot_name,
+                        recursively_children=False,
+                        recursively_dependents=False,
+                        force_umount=False):
+        """delete_snapshot.
+
+        DELETE /volumes/<volume_name>/snapshots/<snapshot_name>
+
+        :param volume_name: volume that snapshot belongs to
+        :param snapshot_name: snapshot name
+        :param recursively_children: boolean indicating if zfs should
+            recursively destroy all children of resource, in case of snapshot
+            remove all snapshots in descendant file system (default false).
+        :param recursively_dependents: boolean indicating if zfs should
+            recursively destroy all dependents, including cloned file systems
+            outside the target hierarchy (default false).
+        :param force_umount: boolean indicating if volume should be forced to
+            umount (default false).
+        :return:
+        """
+        if not self.is_snapshot(volume_name, snapshot_name):
+            return
+
+        req = '/volumes/' + volume_name + '/snapshots/' + snapshot_name
+
+        LOG.debug("delete snapshot %(snap)s of volume %(vol)s",
+                  {'snap': snapshot_name,
+                   'vol': volume_name})
+
+        jbody = {}
+        if recursively_children:
+            jbody['recursively_children'] = True
+
+        if recursively_dependents:
+            jbody['recursively_dependents'] = True
+
+        if force_umount:
+            jbody['force_umount'] = True
+
+        if len(jbody) > 0:
+            resp = self.rproxy.pool_request('DELETE', req, json_data=jbody)
+        else:
+            resp = self.rproxy.pool_request('DELETE', req)
+
+        if resp["code"] in (200, 201, 204):
+            LOG.debug("snapshot %s deleted", snapshot_name)
+            return
+
+        if resp["code"] == 500:
+            if resp["error"]:
+                if resp["error"]["errno"] == 1000:
+                    raise jexc.JDSSSnapshotIsBusyException(
+                        snapshot=snapshot_name)
+                msg = 'Failed to delete snapshot {}, err: {}'.format(
+                    snapshot_name, resp['error']['message'])
+                raise jexc.JDSSRESTException(msg)
+
+        msg = 'Failed to delete snapshot {}'.format(snapshot_name)
+        raise jexc.JDSSRESTException(msg)
+
+    def get_snapshots(self, volume_name):
+        """get_snapshots.
+
+        GET /volumes/<volume_name>/snapshots
+
+        :param volume_name: volume that snapshots belong to
+        :return:
+        {
+            "data":
+            [
+                {
+                    "referenced": "65536",
+                    "name": "MySnapshot",
+                    "defer_destroy": "off",
+                    "userrefs": "0",
+                    "primarycache": "all",
+                    "type": "snapshot",
+                    "creation": "2015-5-27 16:8:35",
+                    "refcompressratio": "1.00x",
+                    "compressratio": "1.00x",
+                    "written": "65536",
+                    "used": "0",
+                    "clones": "",
+                    "mlslabel": "none",
+                    "secondarycache": "all"
+                }
+            ],
+            "error": null
+        }
+        """
+        req = '/volumes/' + volume_name + '/snapshots'
+
+        LOG.debug("get snapshots for volume %s", volume_name)
+
+        resp = self.rproxy.pool_request('GET', req)
+
+        if not resp["error"] and resp["code"] == 200:
+            return resp["data"]["entries"]
+
+        if resp['code'] == 500:
+            if resp['error'] and 'message' in resp['error']:
+                if self.resource_dne_msg.match(resp['error']['message']):
+                    raise jexc.JDSSResourceNotFoundException(
+                        res=volume_name)
+        raise jexc.JDSSRESTException('unable to get snapshots')
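
``get_snapshots`` returns every snapshot under a volume, including intermediate ones created for clones, so callers usually filter by the driver's naming helpers (``jcom.vname``/``jcom.sname`` from ``jovian_common.jdss_common``). A sketch, again assuming the illustrative ``rest`` instance:

    # Sketch only: look up a driver-managed snapshot by its cinder UUID.
    from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom

    def snapshot_exists(rest, volume_id, snapshot_id):
        """Check whether a snapshot with this cinder UUID is present."""
        snapshots = rest.get_snapshots(jcom.vname(volume_id))
        return jcom.sname(snapshot_id) in [s['name'] for s in snapshots]
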
+    def get_pool_stats(self):
+        """get_pool_stats.
+
+        GET /pools/<pool_name>
+
+        :return:
+        {
+            "data": {
+                "available": "24433164288",
+                "status": 24,
+                "name": "Pool-0",
+                "scan": {
+                    "errors": 0,
+                    "repaired": "0",
+                    "start_time": 1463476815,
+                    "state": "finished",
+                    "end_time": 1463476820,
+                    "type": "scrub"
+                },
+                "iostats": {
+                    "read": "0",
+                    "write": "0",
+                    "chksum": "0"
+                },
+                "vdevs": [
+                    {
+                        "name": "scsi-SSCST_BIOoWKF6TM0qafySQBUd1bb392e",
+                        "iostats": {
+                            "read": "0",
+                            "write": "0",
+                            "chksum": "0"
+                        },
+                        "disks": [
+                            {
+                                "led": "off",
+                                "name": "sdb",
+                                "iostats": {
+                                    "read": "0",
+                                    "write": "0",
+                                    "chksum": "0"
+                                },
+                                "health": "ONLINE",
+                                "sn": "d1bb392e",
+                                "path": "pci-0000:04:00.0-scsi-0:0:0:0",
+                                "model": "oWKF6TM0qafySQBU",
+                                "id": "scsi-SSCST_BIOoWKF6TM0qafySQBUd1bb392e",
+                                "size": 30064771072
+                            }
+                        ],
+                        "health": "ONLINE",
+                        "vdev_replacings": [],
+                        "vdev_spares": [],
+                        "type": ""
+                    }
+                ],
+                "health": "ONLINE",
+                "operation": "none",
+                "id": "11612982948930769833",
+                "size": "29796335616"
+            },
+            "error": null
+        }
+        """
+        req = ""  # pool stats live at the pool URL itself
+        LOG.debug("Get pool %s stats", self.pool)
+
+        resp = self.rproxy.pool_request('GET', req)
+        if not resp["error"] and resp["code"] == 200:
+            return resp["data"]
+
+        raise jexc.JDSSRESTException('Unable to get pool info')
diff --git a/cinder/volume/drivers/open_e/jovian_common/rest_proxy.py b/cinder/volume/drivers/open_e/jovian_common/rest_proxy.py
new file mode 100644
index 0000000..f730535
--- /dev/null
+++ b/cinder/volume/drivers/open_e/jovian_common/rest_proxy.py
@@ -0,0 +1,226 @@
+# Copyright (c) 2020 Open-E, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Network connection handling class for JovianDSS driver."""
+
+import json
+import time
+
+from oslo_log import log as logging
+from oslo_utils import netutils as o_netutils
+import requests
+import urllib3
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume.drivers.open_e.jovian_common import exception as jexc
+
+
+LOG = logging.getLogger(__name__)
+
+
+class JovianRESTProxy(object):
+    """Jovian REST API proxy."""
+
+    def __init__(self, config):
+        """:param config: dict-like driver configuration object."""
+
+        self.proto = 'http'
+        if config.get('driver_use_ssl', True):
+            self.proto = 'https'
+
+        self.hosts = config.safe_get('san_hosts')
+        self.port = str(config.get('san_api_port', 82))
+
+        self.active_host = 0
+
+        for host in self.hosts:
+            if o_netutils.is_valid_ip(host) is False:
+                err_msg = ('Invalid value of san_hosts property: '
+                           '%(addr)s, IP address expected.'
+                           % {'addr': host})
+
+                LOG.debug(err_msg)
+                raise exception.InvalidConfigurationValue(err_msg)
+
+        self.api_path = "/api/v3"
+        self.delay = config.get('jovian_recovery_delay', 40)
+
+        self.pool = config.safe_get('jovian_pool')
+
+        self.user = config.get('san_login', 'admin')
+        self.password = config.get('san_password', 'admin')
+        self.auth = requests.auth.HTTPBasicAuth(self.user, self.password)
+        self.verify = False
+        self.retry_n = config.get('jovian_rest_send_repeats', 3)
+        self.header = {'connection': 'keep-alive',
+                       'Content-Type': 'application/json',
+                       'authorization': 'Basic '}
+        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+    def _get_pool_url(self, host):
+        url = ('%(proto)s://%(host)s:%(port)s/api/v3/pools/%(pool)s' % {
+            'proto': self.proto,
+            'host': host,
+            'port': self.port,
+            'pool': self.pool})
+        return url
+
+    def _get_url(self, host):
+        url = ('%(proto)s://%(host)s:%(port)s/api/v3' % {
+            'proto': self.proto,
+            'host': host,
+            'port': self.port})
+        return url
+
+    def request(self, request_method, req, json_data=None):
+        """Send request to the given resource path.
+
+        :param request_method: GET, POST, DELETE
+        :param req: resource path to send the request to
+        :param json_data: data
+        """
+        for j in range(self.retry_n):
+            for i in range(len(self.hosts)):
+                host = self.hosts[self.active_host]
+                url = self._get_url(host) + req
+
+                LOG.debug(
+                    "sending request of type %(type)s to %(url)s "
+                    "attempt: %(num)s.",
+                    {'type': request_method,
+                     'url': url,
+                     'num': j})
+
+                if json_data is not None:
+                    LOG.debug("sending data: %s.", json_data)
+                try:
+                    ret = self._request_routine(url, request_method,
+                                                json_data)
+                    if len(ret) == 0:
+                        self.active_host = ((self.active_host + 1)
+                                            % len(self.hosts))
+                        continue
+                    return ret
+
+                except requests.ConnectionError as err:
+                    LOG.debug("Connection error %s", err)
+                    self.active_host = ((self.active_host + 1)
+                                        % len(self.hosts))
+                    continue
+            time.sleep(self.delay)
+
+        msg = (_('%(times)s attempts failed in a row')
+               % {'times': self.retry_n})
+
+        raise jexc.JDSSRESTProxyException(host=url, reason=msg)
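
``request`` (and ``pool_request`` below) implements round-robin failover: every configured host is tried once per round, the active host index rotates on connection errors, and the proxy sleeps ``jovian_recovery_delay`` seconds between rounds. The pattern in isolation, as a standalone illustrative sketch (all names here are hypothetical, not part of the patch):

    # Illustrative stand-alone sketch of the retry/failover loop above.
    import time

    def failover_call(hosts, send, retries=3, delay=40):
        """Try each host once per round; sleep between rounds."""
        active = 0
        for _ in range(retries):
            for _ in range(len(hosts)):
                try:
                    return send(hosts[active])
                except ConnectionError:
                    active = (active + 1) % len(hosts)  # rotate host
            time.sleep(delay)
        raise RuntimeError('%s attempts failed in a row' % retries)
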
+    def pool_request(self, request_method, req, json_data=None):
+        """Send request to the given resource path under the pool url.
+
+        :param request_method: GET, POST, DELETE
+        :param req: resource path to send the request to
+        :param json_data: data
+        """
+        url = ""
+        for j in range(self.retry_n):
+            for i in range(len(self.hosts)):
+                host = self.hosts[self.active_host]
+                url = self._get_pool_url(host) + req
+
+                LOG.debug(
+                    "sending pool request of type %(type)s to %(url)s "
+                    "attempt: %(num)s.",
+                    {'type': request_method,
+                     'url': url,
+                     'num': j})
+
+                if json_data is not None:
+                    LOG.debug("JovianDSS: Sending data: %s.", str(json_data))
+                try:
+                    ret = self._request_routine(url, request_method,
+                                                json_data)
+                    if len(ret) == 0:
+                        self.active_host = ((self.active_host + 1)
+                                            % len(self.hosts))
+                        continue
+                    return ret
+
+                except requests.ConnectionError as err:
+                    LOG.debug("Connection error %s", err)
+                    self.active_host = ((self.active_host + 1)
+                                        % len(self.hosts))
+                    continue
+            time.sleep(int(self.delay))
+
+        msg = (_('%(times)s attempts failed in a row')
+               % {'times': self.retry_n})
+
+        raise jexc.JDSSRESTProxyException(host=url, reason=msg)
+
+    def _request_routine(self, url, request_method, json_data=None):
+        """Make an HTTPS request and return the results."""
+
+        ret = None
+        for i in range(3):
+            # Defaults guarantee callers can always index code/error/data.
+            ret = {'code': None, 'error': None, 'data': None}
+            try:
+                response_obj = requests.request(request_method,
+                                                auth=self.auth,
+                                                url=url,
+                                                headers=self.header,
+                                                data=json.dumps(json_data),
+                                                verify=self.verify)
+
+                LOG.debug('response code: %s', response_obj.status_code)
+                LOG.debug('response data: %s', response_obj.text)
+
+                ret['code'] = response_obj.status_code
+
+                if '{' in response_obj.text and '}' in response_obj.text:
+                    if "error" in response_obj.text:
+                        ret["error"] = json.loads(response_obj.text)["error"]
+                    if "data" in response_obj.text:
+                        ret["data"] = json.loads(response_obj.text)["data"]
+
+                if ret["code"] == 500:
+                    if ret["error"] is not None:
+                        if (("errno" in ret["error"]) and
+                                ("class" in ret["error"])):
+                            if (ret["error"]["class"] ==
+                                    "opene.tools.scstadmin.ScstAdminError"):
+                                LOG.debug("ScstAdminError %(code)d %(msg)s", {
+                                    "code": ret["error"]["errno"],
+                                    "msg": ret["error"]["message"]})
+                                continue
+                            if (ret["error"]["class"] ==
+                                    "exceptions.OSError"):
+                                LOG.debug("OSError %(code)d %(msg)s", {
+                                    "code": ret["error"]["errno"],
+                                    "msg": ret["error"]["message"]})
+                                continue
+                break
+
+            except requests.HTTPError as err:
+                LOG.debug("HTTP parsing error %s", err)
+                self.active_host = (self.active_host + 1) % len(self.hosts)
+
+        return ret
+
+    def get_active_host(self):
+        """Return address of currently used host."""
+        return self.hosts[self.active_host]
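
To make the proxy contract concrete, a hedged usage sketch. ``FakeConfig`` is a stand-in for the oslo.config-backed object the driver normally passes in; the proxy only needs ``get`` and ``safe_get``. None of these names are part of the patch, and the last call performs a real HTTP request against the appliance:

    # Hypothetical harness: exercise JovianRESTProxy with a dict-backed
    # configuration stand-in.
    from cinder.volume.drivers.open_e.jovian_common import rest_proxy

    class FakeConfig(dict):
        """dict with the safe_get accessor the proxy expects."""
        def safe_get(self, key):
            return self.get(key)

    conf = FakeConfig(driver_use_ssl=True,
                      san_hosts=['192.168.0.40'],
                      san_api_port=82,
                      jovian_pool='Pool-0',
                      san_login='admin',
                      san_password='admin')

    proxy = rest_proxy.JovianRESTProxy(conf)
    # GET .../api/v3/pools/Pool-0 - the same call get_pool_stats() makes.
    print(proxy.pool_request('GET', ''))
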
diff --git a/cinder/volume/drivers/open_e/options.py b/cinder/volume/drivers/open_e/options.py
new file mode 100644
index 0000000..5da8734
--- /dev/null
+++ b/cinder/volume/drivers/open_e/options.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2020 Open-E, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+jdss_connection_opts = [
+    cfg.ListOpt('san_hosts',
+                default='',
+                help='IP addresses of Open-E JovianDSS SA'),
+    cfg.IntOpt('jovian_rest_send_repeats',
+               default=3,
+               help='Number of retries to send REST request.'),
+    cfg.IntOpt('jovian_recovery_delay',
+               default=60,
+               help='Time before HA cluster failure.'),
+    cfg.ListOpt('jovian_ignore_tpath',
+                default=[],
+                help='List of multipath ip addresses to ignore.'),
+]
+
+jdss_iscsi_opts = [
+    cfg.IntOpt('chap_password_len',
+               default=12,
+               help='Length of the random string for CHAP password.'),
+    cfg.StrOpt('jovian_pool',
+               default='Pool-0',
+               help='JovianDSS pool that holds all cinder volumes'),
+]
+
+jdss_volume_opts = [
+    cfg.StrOpt('jovian_block_size',
+               default='128K',
+               help='Block size for volumes (512 - 128K)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(jdss_connection_opts)
+CONF.register_opts(jdss_iscsi_opts)
+CONF.register_opts(jdss_volume_opts)
diff --git a/doc/source/configuration/block-storage/drivers/open-e-joviandss-driver.rst b/doc/source/configuration/block-storage/drivers/open-e-joviandss-driver.rst
new file mode 100644
index 0000000..2dc69e0
--- /dev/null
+++ b/doc/source/configuration/block-storage/drivers/open-e-joviandss-driver.rst
@@ -0,0 +1,194 @@
+=============================
+Open-E JovianDSS iSCSI driver
+=============================
+
+The ``JovianISCSIDriver`` allows the Open-E Jovian Data Storage Solution
+to be used as a Block Storage back end in OpenStack deployments.
+
+Supported operations
+~~~~~~~~~~~~~~~~~~~~
+
+- Create, delete, attach, and detach volumes.
+- Create, list, and delete volume snapshots.
+- Create a volume from a snapshot.
+- Copy an image to a volume.
+- Copy a volume to an image.
+- Clone a volume.
+- Extend a volume.
+- Migrate a volume with back-end assistance.
+
+
+Configuring
+~~~~~~~~~~~
+
+Edit the Cinder configuration file with your favourite editor. It can
+usually be found at ``/etc/cinder/cinder.conf``.
+
+Add the ``enabled_backends`` option with the value ``jdss-0``:
+
+::
+
+   enabled_backends = jdss-0
+
+Provide the JovianDSS driver settings by adding a ``[jdss-0]`` section:
+
+::
+
+   [jdss-0]
+   backend_name = jdss-0
+   chap_password_len = 14
+   driver_use_ssl = True
+   iscsi_target_prefix = iqn.2016-04.com.open-e.cinder:
+   jovian_pool = Pool-0
+   jovian_block_size = 128K
+   jovian_rest_send_repeats = 4
+   san_api_port = 82
+   target_port = 3260
+   volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver
+   san_hosts = 192.168.0.40
+   san_login = admin
+   san_password = admin
+   san_thin_provision = True
+
+.. list-table:: **Open-E JovianDSS configuration options**
+   :header-rows: 1
+
+   * - Option
+     - Default value
+     - Description
+   * - ``backend_name``
+     - JovianDSS-iSCSI
+     - Name of the back end
+   * - ``chap_password_len``
+     - 12
+     - Length of the unique generated CHAP password
+   * - ``driver_use_ssl``
+     - True
+     - Use SSL to send requests to JovianDSS [1]
+   * - ``iscsi_target_prefix``
+     - iqn.2016-04.com.open-e:01:cinder-
+     - Prefix used to form the target name for a volume
+   * - ``jovian_pool``
+     - Pool-0
+     - Pool name that is going to be used.
+       Must be created in [2]
+   * - ``jovian_block_size``
+     - 128K
+     - Block size for newly created volumes
+   * - ``jovian_rest_send_repeats``
+     - 3
+     - Number of times the driver will retry a REST request
+   * - ``san_api_port``
+     - 82
+     - REST port according to the settings in [1]
+   * - ``target_port``
+     - 3260
+     - Port for iSCSI connections
+   * - ``volume_driver``
+     -
+     - Location of the driver source code
+   * - ``san_hosts``
+     -
+     - Comma-separated list of IP addresses of the JovianDSS hosts
+   * - ``san_login``
+     - admin
+     - Must be set according to the settings in [1]
+   * - ``san_password``
+     - admin
+     - JovianDSS password [1], **should be changed** for security purposes
+   * - ``san_thin_provision``
+     - False
+     - Use thin provisioning for new volumes
+
+
+1. JovianDSS Web interface/System Settings/REST Access
+
+2. Pool can be created by going to JovianDSS Web interface/Storage
+
+.. _interface/Storage:
+
+`More info about Open-E JovianDSS `__
+
+
+Multiple Pools
+~~~~~~~~~~~~~~
+
+To add another JovianDSS Pool, create a copy of the JovianDSS back-end
+section in the ``cinder.conf`` file.
+
+For instance, to add ``Pool-1`` alongside ``Pool-0``, extend
+``cinder.conf`` as follows:
+
+::
+
+   enabled_backends = jdss-0, jdss-1
+
+   [jdss-0]
+   backend_name = jdss-0
+   chap_password_len = 14
+   driver_use_ssl = True
+   iscsi_target_prefix = iqn.2016-04.com.open-e.cinder:
+   jovian_pool = Pool-0
+   jovian_block_size = 128K
+   jovian_rest_send_repeats = 4
+   san_api_port = 82
+   target_port = 3260
+   volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver
+   san_hosts = 192.168.0.40
+   san_login = admin
+   san_password = admin
+   san_thin_provision = True
+
+   [jdss-1]
+   backend_name = jdss-1
+   chap_password_len = 14
+   driver_use_ssl = True
+   iscsi_target_prefix = iqn.2016-04.com.open-e.cinder:
+   jovian_pool = Pool-1
+   jovian_block_size = 128K
+   jovian_rest_send_repeats = 4
+   san_api_port = 82
+   target_port = 3260
+   volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver
+   san_hosts = 192.168.0.50
+   san_login = admin
+   san_password = admin
+   san_thin_provision = True
+
+
+HA Cluster
+~~~~~~~~~~
+
+To utilize the High Availability feature of JovianDSS:
+
+1. Configure the Pool as a high availability cluster, see this `Guide`_.
+
+.. _Guide: https://www.youtube.com/watch?v=juWIQT_bAfM
+
+2. Set ``san_hosts`` to the list of virtual IPs associated with this Pool.
+
+For instance, if you have ``Pool-2`` with the two virtual IPs
+192.168.21.100 and 192.168.31.100, the configuration file will look like:
+
+::
+
+   [jdss-2]
+   backend_name = jdss-2
+   chap_password_len = 14
+   driver_use_ssl = True
+   iscsi_target_prefix = iqn.2016-04.com.open-e.cinder:
+   jovian_pool = Pool-2
+   jovian_block_size = 128K
+   jovian_rest_send_repeats = 4
+   san_api_port = 82
+   target_port = 3260
+   volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver
+   san_hosts = 192.168.21.100, 192.168.31.100
+   san_login = admin
+   san_password = admin
+   san_thin_provision = True
+
+
+Feedback
+--------
+
+Please address problems and proposals to andrei.perepiolkin@open-e.com
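
Before restarting ``cinder-volume``, a back-end section can be sanity-checked by loading it with oslo.config directly. A hedged sketch; the file path and section name are examples only:

    # Hypothetical check: parse one backend section of cinder.conf and
    # print the JovianDSS options registered by this driver.
    from oslo_config import cfg

    from cinder.volume.drivers.open_e import options

    conf = cfg.ConfigOpts()
    conf(['--config-file', '/etc/cinder/cinder.conf'])
    for opts in (options.jdss_connection_opts,
                 options.jdss_iscsi_opts,
                 options.jdss_volume_opts):
        conf.register_opts(opts, group='jdss-0')

    print(conf['jdss-0'].jovian_pool, conf['jdss-0'].san_hosts)
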
diff --git a/releasenotes/notes/joviandss-iscsi-driver-0becc6ee6a0b3c0a.yaml b/releasenotes/notes/joviandss-iscsi-driver-0becc6ee6a0b3c0a.yaml
new file mode 100644
index 0000000..dd4da9a
--- /dev/null
+++ b/releasenotes/notes/joviandss-iscsi-driver-0becc6ee6a0b3c0a.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added support for Open-E JovianDSS data storage.
+    The driver supports the Open-E disaster recovery feature and cascade
+    volume deletion in addition to the minimum required functions.