diff --git a/compute_hyperv/nova/cluster/volumeops.py b/compute_hyperv/nova/cluster/volumeops.py
index d6dd450b..31d532b2 100644
--- a/compute_hyperv/nova/cluster/volumeops.py
+++ b/compute_hyperv/nova/cluster/volumeops.py
@@ -32,10 +32,11 @@ class ClusterVolumeOps(volumeops.VolumeOps):
     def _get_volume_driver(self, connection_info):
         driver_type = connection_info.get('driver_volume_type')
         if driver_type in [constants.STORAGE_PROTOCOL_ISCSI,
-                           constants.STORAGE_PROTOCOL_FC]:
+                           constants.STORAGE_PROTOCOL_FC,
+                           constants.STORAGE_PROTOCOL_RBD]:
             err_msg = (
                 "The Hyper-V Cluster driver does not currently support "
-                "passthrough disks (e.g. iSCSI/FC disks). The reason is "
+                "passthrough disks (e.g. iSCSI/FC/RBD disks). The reason is "
                 "that the volumes need to be available on the destination "
                 "host side during an unexpected instance failover. In order "
                 "to leverage your storage backend, you may either use the "
diff --git a/compute_hyperv/nova/constants.py b/compute_hyperv/nova/constants.py
index a9c7fe6f..7673121b 100644
--- a/compute_hyperv/nova/constants.py
+++ b/compute_hyperv/nova/constants.py
@@ -100,6 +100,7 @@ IOPS_BASE_SIZE = 8 * units.Ki
 STORAGE_PROTOCOL_ISCSI = 'iscsi'
 STORAGE_PROTOCOL_FC = 'fibre_channel'
 STORAGE_PROTOCOL_SMBFS = 'smbfs'
+STORAGE_PROTOCOL_RBD = 'rbd'
 
 MAX_CONSOLE_LOG_FILE_SIZE = units.Mi // 2
 
diff --git a/compute_hyperv/nova/volumeops.py b/compute_hyperv/nova/volumeops.py
index c1eff6f9..78e08f7b 100644
--- a/compute_hyperv/nova/volumeops.py
+++ b/compute_hyperv/nova/volumeops.py
@@ -84,7 +84,8 @@ class VolumeOps(object):
         self.volume_drivers = {
             constants.STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(),
             constants.STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(),
-            constants.STORAGE_PROTOCOL_FC: FCVolumeDriver()}
+            constants.STORAGE_PROTOCOL_FC: FCVolumeDriver(),
+            constants.STORAGE_PROTOCOL_RBD: RBDVolumeDriver()}
 
     @property
     def _vmops(self):
@@ -771,3 +772,9 @@ class SMBFSVolumeDriver(BaseVolumeDriver):
 class FCVolumeDriver(BaseVolumeDriver):
     _is_block_dev = True
     _protocol = constants.STORAGE_PROTOCOL_FC
+
+
+class RBDVolumeDriver(BaseVolumeDriver):
+    _is_block_dev = True
+    _protocol = constants.STORAGE_PROTOCOL_RBD
+    _extra_connector_args = dict(do_local_attach=True)
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
index 3a1c0e1a..80332ef4 100644
--- a/doc/source/configuration/index.rst
+++ b/doc/source/configuration/index.rst
@@ -103,8 +103,8 @@ make sure to enable the following config option:
 
 This will ensure that the available paths are actually leveraged. Also, before
 attempting any volume connection, it will ensure that the MPIO service is
-enabled and that passthrough block devices (iSCSI / FC) are claimed by MPIO.
-SMB backed volumes are not affected by this option.
+enabled and that iSCSI and Fibre Channel block devices are claimed by MPIO.
+SMB and RBD volumes are not affected by this option.
 
 In some cases, Nova may fail to attach volumes due to transient connectivity
 issues. The following options specify how many and how often retries should be
diff --git a/doc/source/install/prerequisites.rst b/doc/source/install/prerequisites.rst
index a0d914fa..a62f9a44 100644
--- a/doc/source/install/prerequisites.rst
+++ b/doc/source/install/prerequisites.rst
@@ -99,6 +99,7 @@ following protocols:
 
 * iSCSI
 * Fibre Channel
 * SMB - the volumes are stored as virtual disk images (e.g. VHD / VHDX)
+* RBD - starting with Ceph 16 (Pacific)
 
 .. note::
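
The functional core of the change is in the first three files: a new `rbd` protocol constant, an `RBDVolumeDriver` registered in the standard driver's dispatch table, and an explicit rejection of that same protocol in the cluster driver. The sketch below is a minimal, self-contained illustration of that dispatch; `get_volume_driver` and `UnsupportedProtocol` are illustrative stand-ins, since the body of `VolumeOps._get_volume_driver` is not part of this diff.

```python
# Simplified sketch of the protocol dispatch extended by this change.
# 'get_volume_driver' and 'UnsupportedProtocol' are illustrative names,
# not the actual compute-hyperv implementation.

STORAGE_PROTOCOL_ISCSI = 'iscsi'
STORAGE_PROTOCOL_FC = 'fibre_channel'
STORAGE_PROTOCOL_SMBFS = 'smbfs'
STORAGE_PROTOCOL_RBD = 'rbd'


class UnsupportedProtocol(Exception):
    pass


class BaseVolumeDriver(object):
    _is_block_dev = False
    _protocol = None
    _extra_connector_args = {}


class SMBFSVolumeDriver(BaseVolumeDriver):
    _protocol = STORAGE_PROTOCOL_SMBFS


class ISCSIVolumeDriver(BaseVolumeDriver):
    _is_block_dev = True
    _protocol = STORAGE_PROTOCOL_ISCSI


class FCVolumeDriver(BaseVolumeDriver):
    _is_block_dev = True
    _protocol = STORAGE_PROTOCOL_FC


class RBDVolumeDriver(BaseVolumeDriver):
    # New in this change: RBD volumes are exposed to the guest as
    # passthrough block devices, attached locally on the Hyper-V host.
    _is_block_dev = True
    _protocol = STORAGE_PROTOCOL_RBD
    _extra_connector_args = dict(do_local_attach=True)


VOLUME_DRIVERS = {
    STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(),
    STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(),
    STORAGE_PROTOCOL_FC: FCVolumeDriver(),
    STORAGE_PROTOCOL_RBD: RBDVolumeDriver(),
}


def get_volume_driver(connection_info):
    """Pick a volume driver based on the Cinder connection info."""
    driver_type = connection_info.get('driver_volume_type')
    try:
        return VOLUME_DRIVERS[driver_type]
    except KeyError:
        raise UnsupportedProtocol(driver_type)


if __name__ == '__main__':
    conn_info = {'driver_volume_type': 'rbd',
                 'data': {'name': 'volumes/volume-uuid'}}
    driver = get_volume_driver(conn_info)
    print(type(driver).__name__, driver._extra_connector_args)
```

With this table in place, a Cinder connection info carrying `driver_volume_type: 'rbd'` resolves to a passthrough block-device driver in the standard driver, while the cluster driver's override above rejects it the same way it rejects iSCSI and Fibre Channel.
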
diff --git a/releasenotes/notes/rbd-support-9bb0037f69249785.yaml b/releasenotes/notes/rbd-support-9bb0037f69249785.yaml
new file mode 100644
index 00000000..63881ea7
--- /dev/null
+++ b/releasenotes/notes/rbd-support-9bb0037f69249785.yaml
@@ -0,0 +1,5 @@
+features:
+  - |
+    The standard Hyper-V Nova driver can now attach RBD volumes. Note that the
+    cluster driver doesn't support RBD volumes yet. The minimum required
+    Ceph version is Ceph 16 (Pacific).
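
The behavioural knob behind this release note is `_extra_connector_args = dict(do_local_attach=True)`. How `BaseVolumeDriver` consumes that attribute is not shown in this diff; assuming it is forwarded as keyword arguments to os-brick's connector factory, the attach path would look roughly like the sketch below (the `connect_rbd_volume` helper and the bare `root_helper=None` are illustrative, not compute-hyperv code).

```python
# Hedged sketch: assumes _extra_connector_args ends up as factory kwargs.
from os_brick.initiator import connector


def connect_rbd_volume(connection_info):
    # do_local_attach=True asks the RBD connector to map the image as a
    # local block device on the host rather than handing back a librbd
    # reference; on Windows this relies on the WNBD driver shipped with
    # Ceph 16 (Pacific), which is why that is the minimum version.
    conn = connector.InitiatorConnector.factory(
        'rbd', root_helper=None, do_local_attach=True)
    # connect_volume() returns device info; for a local attach the 'path'
    # key points at the mapped disk that Hyper-V can pass through.
    return conn.connect_volume(connection_info['data'])
```

This local mapping is also why the cluster driver still refuses RBD: as the error message in the cluster `volumeops.py` hunk explains, the disk would not be available on the destination host during an unexpected failover.
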