cinder 9.0.0 release

meta:version: 9.0.0
 meta:diff-start: 8.0.0.0rc1
 meta:series: newton
 meta:release-type: release
 meta:announce: openstack-announce@lists.openstack.org
 meta:pypi: no
 meta:first: yes
 meta:release:Author: Doug Hellmann <doug@doughellmann.com>
 meta:release:Commit: Doug Hellmann <doug@doughellmann.com>
 meta:release:Change-Id: I74b66ffd484f2f3a2b84c39e62bcb718cef7b906
 meta:release:Code-Review+1: Ian Cordasco <sigmavirus24@gmail.com>
 meta:release:Code-Review+1: Sean McGinnis <sean.mcginnis@gmail.com>
 meta:release:Code-Review+1: Vitaly Gridnev <vgridnev@mirantis.com>
 meta:release:Code-Review+1: Rob Cresswell <robert.cresswell@outlook.com>
 meta:release:Code-Review+1: Steve Martinelli <s.martinelli@gmail.com>
 meta:release:Code-Review+1: Steve McLellan <steven.j.mclellan@gmail.com>
 meta:release:Code-Review+1: Richard Jones <r1chardj0n3s@gmail.com>
 meta:release:Code-Review+2: Davanum Srinivas (dims) <davanum@gmail.com>
 meta:release:Code-Review+2: Thierry Carrez <thierry@openstack.org>
 meta:release:Code-Review+1: Emilien Macchi <emilien@redhat.com>
 meta:release:Code-Review+1: Julien Danjou <julien@danjou.info>
 meta:release:Code-Review+1: amrith <amrith@tesora.com>
 meta:release:Code-Review+1: Graham Hayes <graham.hayes@hpe.com>
 meta:release:Code-Review+1: Jim Rollenhagen <jim@jimrollenhagen.com>
 meta:release:Code-Review+2: Doug Hellmann <doug@doughellmann.com>
 meta:release:Workflow+1: Doug Hellmann <doug@doughellmann.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJX9k2KAAoJENljH+rwzGInOkUH/A+HZv0aERaIlIh8Ed1sN7P7
 O289m1D52I8Uz3VSfevz1ZGKyoMda7jgeP3pAAz2gp6bXQch4uayQE0DOlkIJ0KU
 tu966I3SVkQrbHo/Kx2JP4dAtwwt1iwBMH+eoi5DrOY3BzS05Xkj5DwfwUqEtZAL
 0LPYXvchUeWuf/Wk1+OMrVC4je8m2P4652YRvP4iC8+a020IVZq3Vh3OqUpswybc
 /LdrdTrtIF4fYDKozBj/3Xx6MQ061rfoeDtr3KGcOnVXGMw6cMem013XR7JuMn3P
 jciP3YFR/JaB8+utZaRC8re2Dx/CWJZMtjls8eBImd2QMz3DxW36K4mF2pdiAMw=
 =4l7h
 -----END PGP SIGNATURE-----

Merge tag '9.0.0' into debian/newton

cinder 9.0.0 release

  * New upstream release.

Change-Id: Ie4ec55fe65e3450505eb6e9b6ab9808ac0423953
This commit is contained in:
Thomas Goirand 2016-10-06 17:32:34 +02:00
commit 3c86af99c8
29 changed files with 1228 additions and 360 deletions

View File

@ -41,7 +41,8 @@ class LVM(executor.Executor):
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None):
executor=putils.execute, lvm_conf=None,
suppress_fd_warn=False):
"""Initialize the LVM object.
@ -55,6 +56,7 @@ class LVM(executor.Executor):
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
:param suppress_fd_warn: Add suppress FD Warn to LVM env
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
@ -74,11 +76,19 @@ class LVM(executor.Executor):
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
# NOTE(jdg): We use the temp var here becuase LVM_CMD_PREFIX is a
# class global and if you use append here, you'll literally just keep
# appending values to the global.
_lvm_cmd_prefix = ['env', 'LC_ALL=C']
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
LVM.LVM_CMD_PREFIX = ['env',
'LC_ALL=C',
'LVM_SYSTEM_DIR=' + lvm_sys_dir]
_lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir)
if suppress_fd_warn:
_lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1')
LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes

View File

@ -54,6 +54,9 @@ image_helper_opts = [cfg.StrOpt('image_conversion_dir',
CONF = cfg.CONF
CONF.register_opts(image_helper_opts)
QEMU_IMG_LIMITS = processutils.ProcessLimits(
cpu_time=2,
address_space=1 * units.Gi)
# NOTE(abhishekk): qemu-img convert command supports raw, qcow2, qed,
# vdi, vmdk, vhd and vhdx disk-formats but glance doesn't support qed
@ -71,7 +74,8 @@ def qemu_img_info(path, run_as_root=True):
cmd = ('env', 'LC_ALL=C', 'qemu-img', 'info', path)
if os.name == 'nt':
cmd = cmd[2:]
out, _err = utils.execute(*cmd, run_as_root=run_as_root)
out, _err = utils.execute(*cmd, run_as_root=run_as_root,
prlimit=QEMU_IMG_LIMITS)
return imageutils.QemuImgInfo(out)

View File

@ -10,9 +10,9 @@
# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: cinder 9.0.0.0b4.dev139\n"
"Project-Id-Version: cinder 9.0.0.0rc2.dev18\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-09-14 04:25+0000\n"
"POT-Creation-Date: 2016-09-27 22:54+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -7986,9 +7986,6 @@ msgstr "Datenträger %(existing)s konnte nicht in %(newname)s umbenannt werden."
msgid "Unable to retrieve snapshot group with id of %s."
msgstr "Schattenkopiegruppe mit der ID %s kann nicht abgerufen werden. "
msgid "Unable to retrieve volume stats."
msgstr "Fehler beim Abrufen von Datenträgerstatistikdaten."
#, python-format
msgid ""
"Unable to retype %(specname)s, expected to receive current and requested "

View File

@ -10,9 +10,9 @@
# Jose Porrua <jose.porrua@netapp.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: cinder 9.0.0.0b4.dev139\n"
"Project-Id-Version: cinder 9.0.0.0rc2.dev18\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-09-14 04:25+0000\n"
"POT-Creation-Date: 2016-09-27 22:54+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -8013,9 +8013,6 @@ msgstr "No se puede renombrar el volumen %(existing)s a %(newname)s"
msgid "Unable to retrieve snapshot group with id of %s."
msgstr "No se ha podido recuperar el grupo de instantáneas con el id %s."
msgid "Unable to retrieve volume stats."
msgstr "No e ha podido recuperar las estadísticas del volumen. "
#, python-format
msgid ""
"Unable to retype %(specname)s, expected to receive current and requested "

View File

@ -11,13 +11,13 @@
# Yoshiki Eguchi <yoshiki.eguchi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: cinder 9.0.0.0b4.dev152\n"
"Project-Id-Version: cinder 9.0.0.0rc2.dev18\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-09-15 02:36+0000\n"
"POT-Creation-Date: 2016-09-27 22:54+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-09-14 02:08+0000\n"
"PO-Revision-Date: 2016-09-15 02:09+0000\n"
"Last-Translator: Yoshiki Eguchi <yoshiki.eguchi@gmail.com>\n"
"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
@ -1653,6 +1653,16 @@ msgstr ""
"クローンタイプ '%(clone_type)s' は無効です。有効な値は '%(full_clone)s' およ"
"び '%(linked_clone)s' です。"
#, python-format
msgid "Clone volume %(new_volume_name)s failed, volume status is: %(status)s."
msgstr ""
"ボリューム %(new_volume_name)s のクローンが失敗しました。ボリュームのステータ"
"ス: %(status)s"
#, python-format
msgid "Clone volume %s failed while waiting for success."
msgstr "ボリューム %s のクローンが、成功を待っている間に失敗しました。"
msgid "Cluster"
msgstr "クラスター"
@ -2362,6 +2372,18 @@ msgstr ""
"ドライバーがインポートされたバックアップデータを正常に復号化しましたが、欠け"
"ているフィールド (%s) があります。"
msgid "Dsware clone volume failed: volume can not be found from Dsware."
msgstr ""
"Dsware でのボリュームのクローンが失敗しました: ボリュームが Dsware 内に見つか"
"りません。"
#, python-format
msgid ""
"Dsware clone volume time out. Volume: %(new_volume_name)s, status: %(status)s"
msgstr ""
"Dsware での ボリューム %(new_volume_name)s のクローンがタイムアウトしました。"
"ステータス: %(status)s"
msgid "Dsware config file not exists!"
msgstr "Dsware 設定ファイルが見つかりません。"
@ -5131,6 +5153,9 @@ msgstr ""
msgid "Host %s has no FC initiators"
msgstr "ホスト %s に FC イニシエーターがありません"
msgid "Host attach volume failed!"
msgstr "ホストへのボリュームの接続に失敗しました。"
#, python-format
msgid "Host group with name %s not found"
msgstr "名前が %s のホストグループが見つかりません"
@ -9132,9 +9157,6 @@ msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できま
msgid "Unable to retrieve snapshot group with id of %s."
msgstr "%s の ID を持つスナップショットグループを取得できません。"
msgid "Unable to retrieve volume stats."
msgstr "ボリュームの統計を取得できません。"
#, python-format
msgid ""
"Unable to retype %(specname)s, expected to receive current and requested "

View File

@ -13,17 +13,18 @@
# Yu Zhang, 2014
# 颜海峰 <yanheven@gmail.com>, 2014
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Eric Lei <1165970798@qq.com>, 2016. #zanata
# howard lee <howard@mail.ustc.edu.cn>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: cinder 9.0.0.0b4.dev152\n"
"Project-Id-Version: cinder 9.0.0.0rc2.dev18\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2016-09-15 02:36+0000\n"
"POT-Creation-Date: 2016-09-27 22:54+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-09-14 08:24+0000\n"
"Last-Translator: howard lee <howard@mail.ustc.edu.cn>\n"
"PO-Revision-Date: 2016-09-28 06:21+0000\n"
"Last-Translator: Eric Lei <1165970798@qq.com>\n"
"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@ -34,6 +35,22 @@ msgstr ""
msgid "\t%s"
msgstr "\t%s"
#, python-format
msgid ""
"\n"
"CoprHD Exception: %(msg)s\n"
msgstr ""
"\n"
"CoprHD 异常: %(msg)s\n"
#, python-format
msgid ""
"\n"
"General Exception: %(exec_info)s\n"
msgstr ""
"\n"
"一般异常: %(exec_info)s\n"
#, python-format
msgid ""
"\n"
@ -81,6 +98,14 @@ msgstr ""
msgid "%(error_message)s"
msgstr "%(error_message)s"
#, python-format
msgid "%(error_msg)s Error description: %(error_description)s"
msgstr "%(error_msg)s 错误描述: %(error_description)s"
#, python-format
msgid "%(error_msg)s Error details: %(error_details)s"
msgstr "%(error_msg)s 错误详细信息: %(error_details)s"
#, python-format
msgid "%(exception)s: %(explanation)s"
msgstr "发生异常 %(exception)s原因 %(explanation)s"
@ -139,6 +164,14 @@ msgstr ""
"%(msg_type)s创建 NetworkPortal请确保 IP %(ip)s 上的端口 %(port)d未被另一"
"项服务使用。"
#, python-format
msgid "%(name)s cannot be all spaces."
msgstr "%(name)s不能是所有空间"
#, python-format
msgid "%(new_size)s < current size %(size)s"
msgstr "%(new_size)s < 当前大小 %(size)s"
#, python-format
msgid ""
"%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. Backup object has "
@ -402,6 +435,14 @@ msgid ""
"performed"
msgstr "将删除具有非零配额的子项目。不应执行此操作"
msgid "Access forbidden: Authentication required"
msgstr "禁止访问:需要认证"
msgid ""
"Access forbidden: You don't have sufficient privileges to perform this "
"operation"
msgstr "禁止访问:你没有足够的权限来执行该操作"
msgid "Access list not available for public volume types."
msgstr "对于公用卷类型,未提供访问列表。"
@ -544,6 +585,16 @@ msgstr "使端口组与映射视图关联时发生错误。"
msgid "At least one valid iSCSI IP address must be set."
msgstr "必须至少设置一个有效 iSCSI IP 地址。"
#, python-format
msgid ""
"Attach volume (%(name)s) to host (%(hostname)s) initiator "
"(%(initiatorport)s) failed:\n"
"%(err)s"
msgstr ""
"将卷 (%(name)s) 连接到主机 (%(hostname)s) 的初始化器 (%(initiatorport)s) 失"
"败:\n"
"%(err)s"
#, python-format
msgid "Attempt to transfer %s with invalid auth key."
msgstr "请尝试使用有效的认证密钥传输 %s。"
@ -722,6 +773,9 @@ msgstr "未配置 Blockbridge 令牌(对于认证方案“令牌”,这是
msgid "Blockbridge user not configured (required for auth scheme 'password')"
msgstr "未配置 Blockbridge 用户(对于认证方案“密码”,这是必需的)"
msgid "Bourne internal server error"
msgstr "Bourne 内部服务器错误"
#, python-format
msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s"
msgstr "Brocade 光纤通道分区 CLI 错误:%(reason)s"
@ -1097,6 +1151,13 @@ msgstr ""
"无法删除高速缓存卷:%(cachevol_name)s。在 %(updated_at)s 对其进行了更新,它当"
"前具有 %(numclones)s 卷实例。"
#, python-format
msgid ""
"Cannot delete consistency group %(id)s. %(reason)s, and it cannot be the "
"source for an ongoing CG or CG Snapshot creation."
msgstr ""
"无法删除一致性组%(id)s%(reason)s它不能作为创建运行CG或者CG快照的源。"
msgid "Cannot delete encryption specs. Volume type in use."
msgstr "无法删除加密规范。卷类型在使用中。"
@ -1297,6 +1358,13 @@ msgstr ""
"无法更新一致性组 %(group_id)s因为未提供任何有效名称、描述、add_volumes 或 "
"remove_volumes。"
#, python-format
msgid ""
"Cannot update consistency group %s, status must be available, and it cannot "
"be the source for an ongoing CG or CG Snapshot creation."
msgstr ""
"不能更新一致性组%s,状态必须可获得它不能作为创建运行CG或者CG快照的源。"
msgid "Cannot update encryption specs. Volume type in use."
msgstr "无法更新加密规范。卷类型在使用中。"
@ -1324,6 +1392,12 @@ msgstr "无法验证对象 %(instanceName)s 的存在。"
msgid "CgSnapshot %(cgsnapshot_id)s could not be found."
msgstr "找不到 Cg 快照 %(cgsnapshot_id)s。"
msgid ""
"CgSnapshot status must be available or error, and no CG can be currently "
"using it as source for its creation."
msgstr ""
"一致性组快照状态必须是可获得的或错误,目前没有一致性组可以被用作创建它的源。"
msgid "Cgsnahost is empty. No consistency group will be created."
msgstr "Cg 快照为空。将不创建任何一致性组。"
@ -1373,6 +1447,15 @@ msgstr "区块大小不是用于创建散列的块大小的倍数。"
msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s"
msgstr "Cisco 光纤通道分区 CLI 错误:%(reason)s"
#, python-format
msgid "Client with ip %s wasn't found "
msgstr "为找到ip为 %s 的客户端"
msgid ""
"Clone can't be taken individually on a volume that is part of a Consistency "
"Group"
msgstr "不能对卷进行单独克隆,因为它是一致性组的一部分"
#, python-format
msgid "Clone feature is not licensed on %(storageSystem)s."
msgstr "克隆功能在 %(storageSystem)s 上未获许可。"
@ -1475,6 +1558,38 @@ msgstr "连接器未提供:%s"
msgid "Connector doesn't have required information: %(missing)s"
msgstr "连接器没有必需信息:%(missing)s"
#, python-format
msgid ""
"Consistency Group %(cg_uri)s: update failed\n"
"%(err)s"
msgstr ""
"一致性组 %(cg_uri)s: 更新失败\n"
"%(err)s"
#, python-format
msgid ""
"Consistency Group %(name)s: create failed\n"
"%(err)s"
msgstr ""
"一致性组 %(name)s: 创建失败\n"
"%(err)s"
#, python-format
msgid ""
"Consistency Group %(name)s: delete failed\n"
"%(err)s"
msgstr ""
"一致性组 %(name)s: 删除失败\n"
"%(err)s"
#, python-format
msgid "Consistency Group %s not found"
msgstr "找不到一致性组 %s"
#, python-format
msgid "Consistency Group %s: not found"
msgstr "找不到一致性组 %s"
msgid "Consistency group is empty. No cgsnapshot will be created."
msgstr "一致性组为空组。将不创建任何 cg 快照。"
@ -1530,6 +1645,10 @@ msgstr "已转换为原始文件,但现在格式为 %s。"
msgid "Coordinator uninitialized."
msgstr "协调程序未初始化。"
#, python-format
msgid "CoprHD internal server error. Error details: %s"
msgstr "CoprHD内部服务器错误. 错误详细信息: %s"
#, python-format
msgid ""
"Copy volume task failed: convert_to_base_volume: id=%(id)s, status="
@ -1633,6 +1752,10 @@ msgstr "未能将配置保存到 %(file_path)s%(exc)s"
msgid "Could not start consistency group snapshot %s."
msgstr "无法启动一致性组快照 %s。"
#, python-format
msgid "Couldn't find ORM model for Persistent Versioned Object %s."
msgstr "无法为持久版本对象%s找到ORM模型。"
#, python-format
msgid "Couldn't remove cluster %s because it doesn't exist."
msgstr "无法删除集群 %s因为它不存在。"
@ -1938,6 +2061,10 @@ msgstr "拆离卷失败:存在多个连接,但是未提供 attachment_id。"
msgid "Detach volume from instance and then try again."
msgstr "请断开卷与实例的连接,然后再次进行尝试。"
#, python-format
msgid "Detaching volume %(volumename)s from host %(hostname)s failed: %(err)s"
msgstr "将卷 %(volumename)s 从主机 %(hostname)s 上分离失败: %(err)s"
#, python-format
msgid "Detected more than one volume with name %(vol_name)s"
msgstr "检测到多个具有名称 %(vol_name)s 的卷"
@ -1957,6 +2084,9 @@ msgstr "禁用的原因包含无效字符或太长"
msgid "Domain with name %s wasn't found."
msgstr "找不到名称为 %s 的域。"
msgid "Down Hosts"
msgstr "关闭主机"
#, python-format
msgid ""
"Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster "
@ -2553,6 +2683,10 @@ msgid ""
msgstr ""
"获取复制目标详细信息时出错。返回码:%(ret.status)d 消息:%(ret.data)s。"
#, python-format
msgid "Error getting sdc id from ip %(sdc_ip)s: %(sdc_id_message)s"
msgstr "从地址 %(sdc_ip)s 获取sdc标识时出错: %(sdc_id_message)s"
#, python-format
msgid ""
"Error getting version: svc: %(svc)s.Return code: %(ret.status)d Message: "
@ -2790,6 +2924,10 @@ msgstr "更新区域字符串中的新 zones 和 cgfs 时出错。错误:%(des
msgid "Error writing field to database"
msgstr "将字段写至数据库时出错。"
#, python-format
msgid "Error: Failed to %(operation_type)s %(component)s"
msgstr "错误: %(operation_type)s %(component)s 失败"
#, python-format
msgid "Error[%(stat)s - %(res)s] while getting volume id."
msgstr "获取卷标识时发生错误 [%(stat)s - %(res)s]。"
@ -4017,6 +4155,10 @@ msgstr "Flexvisor 卷 %(id)s 未能加入组 %(vgid)s。"
msgid "Folder %s does not exist in Nexenta Store appliance"
msgstr "文件夹 %s 在 Nexenta 存储设备中不存在"
#, python-format
msgid "GET method is not supported by resource: %s"
msgstr "资源: %s 不支持GET方法"
#, python-format
msgid "GPFS is not running, state: %s."
msgstr "GPFS 没有在运行,状态:%s。"
@ -4282,6 +4424,14 @@ msgstr "发生 HBSD 错误。"
msgid "HPELeftHand url not found"
msgstr "找不到 HPELeftHand URL"
#, python-format
msgid "HTTP code: %(status_code)s, %(reason)s [%(error_msg)s]"
msgstr "HTTP状态码: %(status_code)s, %(reason)s [%(error_msg)s]"
#, python-format
msgid "HTTP code: %(status_code)s, response: %(reason)s [%(error_msg)s]"
msgstr "HTTP状态码: %(status_code)s, 响应: %(reason)s [%(error_msg)s]"
#, python-format
msgid "HTTP exit code: [%(code)s]"
msgstr "HTTP退出码:[%(code)s]"
@ -4351,6 +4501,10 @@ msgstr "主机 replication_status 必须为 %s 才能进行故障转移。"
msgid "Host type %s not supported."
msgstr "不支持主机类型 %s。"
#, python-format
msgid "Host with name: %s not found"
msgstr "找不到名为 %s 的主机"
#, python-format
msgid "Host with ports %(ports)s not found."
msgstr "找不到具有端口 %(ports)s 的主机。"
@ -4440,6 +4594,14 @@ msgid ""
"Request service %(service)s"
msgstr "记录导入失败,找不到要执行导入的备份服务。请求服务 %(service)s"
#, python-format
msgid ""
"Incorrect port number. Load balanced port is: %(lb_api_port)s, api service "
"port is: %(apisvc_port)s"
msgstr ""
"不正确的端口号. 负载均衡端口号为: %(lb_api_port)s, api服务端口号为: "
"%(apisvc_port)s"
msgid "Incorrect request body format"
msgstr "不正确的请求主体格式"
@ -4491,6 +4653,15 @@ msgstr "Cg 快照无效:%(reason)s"
msgid "Invalid ConsistencyGroup: %(reason)s"
msgstr "一致性组无效:%(reason)s"
#, python-format
msgid ""
"Invalid ConsistencyGroup: Cannot delete consistency group %(id)s. "
"%(reason)s, and it cannot be the source for an ongoing CG or CG Snapshot "
"creation."
msgstr ""
"无效的一致性组:不能删除一致性组%(id)s.%(reason)s它不能作为创建运行CG或者CG"
"快照的源。"
msgid "Invalid ConsistencyGroup: No host to create consistency group"
msgstr "一致性组无效:没有任何主机用于创建一致性组"
@ -4988,6 +5159,10 @@ msgstr "不支持处于以下状态的源卷的已链接克隆:%s。"
msgid "Lock acquisition failed."
msgstr "锁定获取失败。"
#, python-format
msgid "Login failure code: %(statuscode)s Error: %(responsetext)s"
msgstr "登陆失败状态码: %(statuscode)s 错误: %(responsetext)s"
msgid "Logout session error."
msgstr "注销会话错误。"
@ -5801,6 +5976,10 @@ msgstr "请提前创建 %(pool_list)s 池!"
msgid "Please create %(tier_levels)s tier in pool %(pool)s in advance!"
msgstr "请提前在池 %(pool)s 中创建 %(tier_levels)s 层!"
#, python-format
msgid "Please provide at least one volume for parameter %s"
msgstr "请为参数 %s 提供至少一个卷"
msgid "Please re-run cinder-manage as root."
msgstr "请以 root 用户身份重新运行 cinder-manage。"
@ -5869,10 +6048,17 @@ msgstr "Cinder程序错误:%(reason)s"
msgid "Project ID"
msgstr "项目ID"
msgid "Project name not specified"
msgstr "未指定项目名称"
#, python-format
msgid "Project quotas are not properly setup for nested quotas: %(reason)s."
msgstr "未正确设置要用作嵌套配额的项目配额:%(reason)s。"
#, python-format
msgid "Project: %s not found"
msgstr "找不到项目:%s"
msgid "Protection Group not ready."
msgstr "保护组未就绪。"
@ -6166,6 +6352,9 @@ msgstr ""
"所请求备份超过允许的备份千兆字节配额。已请求 %(requested)sG配额为 "
"%(quota)sG并且已耗用 %(consumed)sG。"
msgid "Requested resource is currently unavailable"
msgstr "请求资源目前不可用"
#, python-format
msgid ""
"Requested volume or snapshot exceeds allowed %(name)s quota. Requested "
@ -6341,6 +6530,11 @@ msgstr "调度器主机过滤器 %(filter_name)s 没有找到。"
msgid "Scheduler Host Weigher %(weigher_name)s could not be found."
msgstr "找不到调度程序主机衡量器 %(weigher_name)s。"
#, python-format
msgid ""
"Search URI %s is not in the expected format, it should end with ?tag={0}"
msgstr "查询URI %s 的格式不是预期的,它应该以 ?tag={0} 结尾"
#, python-format
msgid ""
"Secondary copy status: %(status)s and synchronized: %(sync)s, sync progress "
@ -6376,6 +6570,11 @@ msgstr "服务太旧,无法实现此请求。"
msgid "Service is unavailable at this time."
msgstr "该时刻服务无法使用。"
msgid ""
"Service temporarily unavailable: The server is temporarily unable to service "
"your request"
msgstr "服务暂时不可用:服务器暂时无法提供请求服务"
msgid "Set pair secondary access error."
msgstr "设置对辅助访问时出错。"
@ -6434,6 +6633,15 @@ msgid ""
"%(volume_size)sGB."
msgstr "所指定映像的大小 %(image_size)sGB 大于卷大小 %(volume_size)sGB。"
#, python-format
msgid ""
"Snapshot %(cgsnapshot_id)s: for Consistency Group %(cg_name)s: delete "
"failed\n"
"%(err)s"
msgstr ""
"一致性组 %(cg_name)s 的快照 %(cgsnapshot_id)s: 删除失败\n"
"%(err)s"
#, python-format
msgid ""
"Snapshot %(id)s has been asked to be deleted while waiting for it to become "
@ -6455,6 +6663,18 @@ msgstr "快照 %(snapshot_id)s 没有找到。"
msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s."
msgstr "快照 %(snapshot_id)s 没有任何具有键 %(metadata_key)s 的元数据。"
#, python-format
msgid ""
"Snapshot %(src_snapshot_name)s: clone failed\n"
"%(err)s"
msgstr ""
"快照 %(src_snapshot_name)s: 克隆失败\n"
"%(err)s"
#, python-format
msgid "Snapshot %s : Delete Failed\n"
msgstr "快照 %s : 删除失败\n"
#, python-format
msgid "Snapshot %s must not be part of a group."
msgstr "快照 %s 不能属于某个组。"
@ -6466,6 +6686,11 @@ msgstr "快照“%s”在阵列上不存在。"
msgid "Snapshot already managed."
msgstr "快照已管理。"
msgid ""
"Snapshot can't be taken individually on a volume that is part of a "
"Consistency Group"
msgstr "不能对卷单独地进行快照操作,因为它是一致性组的一部分"
#, python-format
msgid ""
"Snapshot cannot be created because volume %(vol_id)s is not available, "
@ -6475,6 +6700,19 @@ msgstr "无法创建快照,因为卷 %(vol_id)s 不可用,当前卷状态为
msgid "Snapshot cannot be created while volume is migrating."
msgstr "无法在迁移卷时创建快照。"
msgid ""
"Snapshot delete can't be done individually on a volume that is part of a "
"Consistency Group"
msgstr "无法完成对卷单独进行快照删除的操作,因为它是一致性组的一部分"
#, python-format
msgid ""
"Snapshot for Consistency Group %(cg_name)s: create failed\n"
"%(err)s"
msgstr ""
"一致性组快照 %(cg_name)s: 创建失败\n"
"%(err)s"
msgid "Snapshot of secondary replica is not allowed."
msgstr "不允许获取辅助副本的快照。"
@ -6502,6 +6740,14 @@ msgstr "要备份的快照必须可用,但当前状态为“%s”。"
msgid "Snapshot with id of %s could not be found."
msgstr "找不到标识为 %s 的快照。"
#, python-format
msgid ""
"Snapshot: %(snapshotname)s, create failed\n"
"%(err)s"
msgstr ""
"快照: %(snapshotname)s, 创建失败\n"
"%(err)s"
#, python-format
msgid ""
"Snapshot='%(snap)s' does not exist in base image='%(base)s' - aborting "
@ -6657,6 +6903,13 @@ msgstr "找不到池 %(poolNameInStr)s 的存储系统。"
msgid "StorageSystem %(array)s is not found."
msgstr "找不到存储系统 %(array)s。"
#, python-format
msgid ""
"Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups "
"from cluster %(current)s to %(new)s"
msgstr ""
"成功重命名%(num_vols)s卷和 %(num_cgs)s一致性组从集群%(current)s到%(new)s"
#, python-format
msgid ""
"Sum of child usage '%(sum)s' is greater than free quota of '%(free)s' for "
@ -6704,6 +6957,20 @@ msgstr "目标组类型仍在使用中。"
msgid "Target volume type is still in use."
msgstr "目标卷类型仍在使用中。"
#, python-format
msgid ""
"Task did not complete in %d secs. Operation timed out. Task in CoprHD will "
"continue"
msgstr "任务在 %d 秒内没有完成操作超时CoprHD 中的任务将继续"
#, python-format
msgid "Task: %(task_id)s is failed with error: %(error_message)s"
msgstr "任务 %(task_id)s 失败,错误信息为: %(error_message)s"
#, python-format
msgid "Tenant %s: not found"
msgstr "找不到租户:%s"
msgid "Terminate connection failed"
msgstr "终止连接发生故障"
@ -6762,6 +7029,9 @@ msgstr ""
"该阵列不支持 SLO %(slo)s 和工作负载 %(workload)s 的存储池设置。请检查该阵列以"
"获取有效 SLO 和工作负载。"
msgid "The authentication service failed to reply with 401"
msgstr "认证服务失败返回401"
msgid ""
"The back-end where the volume is created does not have replication enabled."
msgstr "创建该卷的后端未启用复制。"
@ -6867,6 +7137,9 @@ msgstr "父备份必须可用于增量备份。"
msgid "The provided snapshot '%s' is not a snapshot of the provided volume."
msgstr "所提供快照“%s”并非所提供卷的快照。"
msgid "The redirect location of the authentication service is not provided"
msgstr "未提供认证服务的重定向地址"
msgid ""
"The reference to the volume in the backend should have the format "
"file_system/volume_name (volume_name cannot contain '/')"
@ -7001,6 +7274,14 @@ msgid ""
msgstr ""
"分割元计数 %(memberCount)s 对于卷%(volumeName)s 太小,大小为 %(volumeSize)s。"
#, python-format
msgid "The token is not generated by authentication service. %s"
msgstr "认证服务未生成令牌. %s"
#, python-format
msgid "The token is not generated by authentication service.%s"
msgstr "认证服务未生成令牌.%s"
#, python-format
msgid ""
"The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is "
@ -7233,6 +7514,9 @@ msgstr "此类型不能转换为 NaElement。"
msgid "TypeError: %s"
msgstr "TypeError%s"
msgid "URI should end with /tag"
msgstr "URI 应该以 /tag 结尾"
#, python-format
msgid "UUIDs %s are in both add and remove volume list."
msgstr "UUID %s 同时位于“添加卷”和“移除卷”列表中。"
@ -7724,6 +8008,10 @@ msgstr "排序方向未知,必须为“降序”或“升序”"
msgid "Unknown sort direction, must be 'desc' or 'asc'."
msgstr "排序方向未知,必须为“降序”或“升序”。"
#, python-format
msgid "Unknown/Unsupported HTTP method: %s"
msgstr "未知/不支持的HTTP方法: %s"
msgid "Unmanage and cascade delete options are mutually exclusive."
msgstr "非管理选项与级联删除选项互斥。"
@ -7748,6 +8036,12 @@ msgstr "无法识别支持格式:%s"
msgid "Unrecognized read_deleted value '%s'"
msgstr "无法识别的 read_deleted 取值”%s“"
#, python-format
msgid ""
"Unrecoverable Error: Versioned Objects in DB are capped to unknown version "
"%(version)s."
msgstr "不可恢复错误:数据库中的版本对象被未知版本%(version)s覆盖。"
#, python-format
msgid "Unset gcs options: %s"
msgstr "取消设置 gcs 选项:%s"
@ -7845,6 +8139,10 @@ msgstr "V3 回滚"
msgid "VF is not enabled."
msgstr "未启用 VF。"
#, python-format
msgid "VPool %(name)s ( %(vpooltype)s ) : not found"
msgstr "找不到VPool%(name)s ( %(vpooltype)s ) "
#, python-format
msgid "VV Set %s does not exist."
msgstr "VV 集 %s 不存在。"
@ -7909,6 +8207,14 @@ msgstr "针对目标 %s 的卷复制作业失败。"
msgid "Volume %(deviceID)s not found."
msgstr "找不到卷 %(deviceID)s。"
#, python-format
msgid "Volume %(name)s could not be found. It might be already deleted"
msgstr "找不到卷 %(name)s。该卷可能已被删除"
#, python-format
msgid "Volume %(name)s not found"
msgstr "找不到卷 %(name)s"
#, python-format
msgid ""
"Volume %(name)s not found on the array. Cannot determine if there are "
@ -7923,6 +8229,30 @@ msgstr "在 VNX 中创建了卷 %(name)s但此卷处于 %(state)s 状态。"
msgid "Volume %(name)s was not deactivated in time."
msgstr "卷 %(name)s 没有被及时释放。"
#, python-format
msgid ""
"Volume %(name)s: clone failed\n"
"%(err)s"
msgstr ""
"卷 %(name)s: 克隆失败\n"
"%(err)s"
#, python-format
msgid ""
"Volume %(name)s: create failed\n"
"%(err)s"
msgstr ""
"卷 %(name)s: 创建失败\n"
"%(err)s"
#, python-format
msgid ""
"Volume %(name)s: delete failed\n"
"%(err)s"
msgstr ""
"卷 %(name)s: 删除失败\n"
"%(err)s"
#, python-format
msgid "Volume %(vol)s could not be created in pool %(pool)s."
msgstr "未能在池 %(pool)s 中创建卷 %(vol)s。"
@ -7992,6 +8322,26 @@ msgstr "卷 %(volume_id)s 复制错误:%(reason)s"
msgid "Volume %(volume_name)s is busy."
msgstr "卷 %(volume_name)s 处于繁忙状态。"
#, python-format
msgid ""
"Volume %(volume_name)s: expand failed\n"
"%(err)s"
msgstr ""
"卷 %(volume_name)s: 扩展失败\n"
"%(err)s"
#, python-format
msgid ""
"Volume %(volume_name)s: update failed\n"
"%(err)s"
msgstr ""
"卷 %(volume_name)s: 更新失败\n"
"%(err)s"
#, python-format
msgid "Volume %s : not found"
msgstr "找不到卷:%s"
#, python-format
msgid "Volume %s could not be created from source volume."
msgstr "未能从源卷创建卷 %s。"
@ -8056,6 +8406,10 @@ msgstr "卷 %s 不能正在迁移、已挂载、属于某个组或具有快照
msgid "Volume %s must not be part of a consistency group."
msgstr "卷 %s 不得是一致性组的一部分。"
#, python-format
msgid "Volume %s not found"
msgstr "找不到卷 %s"
#, python-format
msgid "Volume %s not found."
msgstr "找不到卷 %s。"
@ -8357,6 +8711,10 @@ msgstr "卷类型名称不能为 空."
msgid "Volume type with name %(volume_type_name)s could not be found."
msgstr "名为 %(volume_type_name)s 的卷类型没有找到。"
#, python-format
msgid "Volume%s: not found"
msgstr "找不到卷%s"
#, python-format
msgid ""
"Volume: %(volumeName)s is not a concatenated volume. You can only perform "
@ -8594,6 +8952,9 @@ msgstr ""
"_find_pooleternus_pool%(eternus_pool)sEnumerateInstances无法连接至 "
"ETERNUS。"
msgid "_get_async_url: Invalid URL."
msgstr "_get_async_url: 无效的 URL."
#, python-format
msgid ""
"_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! "
@ -8946,6 +9307,10 @@ msgstr "已更改集群"
msgid "config option key_manager.fixed_key is not defined"
msgstr "配置选项 key_manager.fixed_key 为定义。"
#, python-format
msgid "consistency group with name: %s already exists"
msgstr "名称为: %s 的一致性组已存在"
msgid "consistencygroup assigned"
msgstr "已分配 consistencygroup"
@ -8955,6 +9320,27 @@ msgstr "已更改 consistencygroup"
msgid "control_location must be defined"
msgstr "必须定义 control_location"
msgid "coprhd_hostname is not set in cinder configuration"
msgstr "在cinder配置中coprhd_hostname未被设置"
msgid "coprhd_password is not set in cinder configuration"
msgstr "在cinder配置中coprhd_password未被设置"
msgid "coprhd_port is not set in cinder configuration"
msgstr "在cinder配置中coprhd_port未被设置"
msgid "coprhd_project is not set in cinder configuration"
msgstr "在cinder配置中coprhd_project未被设置"
msgid "coprhd_tenant is not set in cinder configuration"
msgstr "在cinder配置中coprhd_tenant未被设置"
msgid "coprhd_username is not set in cinder configuration"
msgstr "在cinder配置中coprhd_username未被设置"
msgid "coprhd_varray is not set in cinder configuration"
msgstr "在cinder配置中coprhd_varray未被设置"
msgid "create_cloned_volume, Source Volume does not exist in ETERNUS."
msgstr "create_cloned_volume源卷在 ETERNUS 中不存在。"
@ -9141,6 +9527,17 @@ msgstr ""
"将对象写入 swift 时出错swift %(etag)s 中对象的 MD5 与发送至 swift %(md5)s "
"的对象的 MD5 不同"
#, python-format
msgid ""
"error: Incorrect value of new size: %(new_size_in_gb)s GB\n"
"New size must be greater than current size: %(current_size)s GB"
msgstr ""
"错误: 新大小的值不正确: %(new_size_in_gb)s GB\n"
"新大小必须大于当前大小: %(current_size)s GB"
msgid "error: task list is empty, no task response found"
msgstr "错误:任务列表为空,找不到任何任务响应"
msgid ""
"existing_ref argument must be of this format:app_inst_name:storage_inst_name:"
"vol_name"
@ -9406,6 +9803,9 @@ msgstr "找到多个快照标识为 %s 的资源"
msgid "name cannot be None"
msgstr "name不能是None"
msgid "no \"access-key\" field"
msgstr "没有“access-key”域"
#, python-format
msgid "no REPLY but %r"
msgstr "无回复,但收到 %r"
@ -9529,9 +9929,20 @@ msgstr ""
"没有在 cinder.conf 中为 Datera 驱动程序设置 san_login 和/或 san_password。请"
"设置此信息并再次启动 cinder-volume服务。"
msgid ""
"scaleio_verify_server_certificate is True but "
"scaleio_server_certificate_path is not provided in cinder configuration"
msgstr ""
"在cinder配置中scaleio_verify_server_certificate的值为True但是未提供"
"scaleio_server_certificate_path"
msgid "serve() can only be called once"
msgstr "serve() 只能调用一次"
#, python-format
msgid "snapshot with the name: %s Not Found"
msgstr "找不到名称为 %s 的快照"
#, python-format
msgid "snapshot-%s"
msgstr "快照 - %s"
@ -9574,6 +9985,10 @@ msgstr "sync_hypermetro 错误。"
msgid "sync_replica not implemented."
msgstr "未实现 sync_replica。"
#, python-format
msgid "target=%(target)s, lun=%(lun)s"
msgstr "目标=%(target)s, lun=%(lun)s"
#, python-format
msgid ""
"targetcli not installed and could not create default directory "
@ -9622,6 +10037,10 @@ msgstr "以下压缩算法不受支持:%s"
msgid "valid iqn needed for show_target"
msgstr "show_target 需要有效 iqn"
#, python-format
msgid "varray %s: not found"
msgstr "找不到varray%s"
#, python-format
msgid "vdisk %s is not defined."
msgstr "未定义 vdisk %s。"

View File

@ -223,6 +223,8 @@ class CinderPersistentObject(object):
This adds the fields that we use in common for all persistent objects.
"""
OPTIONAL_FIELDS = []
Not = db.Not
Case = db.Case

View File

@ -23,9 +23,6 @@ from cinder.objects import base
from cinder.volume import volume_types
OPTIONAL_FIELDS = ['extra_specs', 'projects', 'qos_specs']
@base.CinderObjectRegistry.register
class VolumeType(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat, base.CinderComparableObject):
@ -34,6 +31,8 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject,
# Version 1.2: Added qos_specs
VERSION = '1.2'
OPTIONAL_FIELDS = ('extra_specs', 'projects', 'qos_specs')
fields = {
'id': fields.UUIDField(),
'name': fields.StringField(nullable=True),
@ -62,12 +61,12 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject,
def _get_expected_attrs(cls, context, *args, **kwargs):
return 'extra_specs', 'projects'
@staticmethod
def _from_db_object(context, type, db_type, expected_attrs=None):
@classmethod
def _from_db_object(cls, context, type, db_type, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['extra_specs', 'projects']
for name, field in type.fields.items():
if name in OPTIONAL_FIELDS:
if name in cls.OPTIONAL_FIELDS:
continue
value = db_type[name]
if isinstance(field, fields.IntegerField):

View File

@ -411,7 +411,7 @@ class _SchedulerV3Proxy(object):
# optional keyword argument to positional argument).
return self.manager.migrate_volume_to_host(
context, None, volume.id, host, force_host_copy, request_spec,
filter_propterties=filter_properties, volume=volume)
filter_properties=filter_properties, volume=volume)
def retype(self, context, volume, request_spec, filter_properties=None):
# NOTE(dulek): Second argument here is `topic` which is unused. We're

View File

@ -112,9 +112,8 @@ class SchedulerAPI(rpc.RPCAPI):
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None, volume=None):
request_spec_p = jsonutils.to_primitive(request_spec)
msg_args = {'snapshot_id': snapshot_id, 'image_id': image_id,
'request_spec': request_spec_p,
'request_spec': request_spec,
'filter_properties': filter_properties, 'volume': volume}
version = self._compat_ver('3.0', '2.2', '2.0')
if version in ('2.2', '2.0'):
@ -123,6 +122,10 @@ class SchedulerAPI(rpc.RPCAPI):
if version == '2.0':
# Send request_spec as dict
msg_args['request_spec'] = jsonutils.to_primitive(request_spec)
# NOTE(dulek): This is to keep supporting Mitaka's scheduler which
# expects a dictionary when creating a typeless volume.
if msg_args['request_spec'].get('volume_type') is None:
msg_args['request_spec']['volume_type'] = {}
cctxt = self.client.prepare(version=version)
return cctxt.cast(ctxt, 'create_volume', **msg_args)

View File

@ -23,16 +23,20 @@ from cinder.volume import configuration as conf
class BrickLvmTestCase(test.TestCase):
def setUp(self):
self.configuration = mock.Mock(conf.Configuration)
if not hasattr(self, 'configuration'):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.lvm_suppress_fd_warnings = False
self.configuration.volume_group_name = 'fake-vg'
super(BrickLvmTestCase, self).setUp()
self.mock_object(processutils, 'execute', self.fake_execute)
self.vg = brick.LVM(self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute)
self.vg = brick.LVM(
self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute,
suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
@ -48,24 +52,28 @@ class BrickLvmTestCase(test.TestCase):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_execute(obj, *cmd, **kwargs): # noqa
if obj.configuration.lvm_suppress_fd_warnings:
_lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, '
else:
_lvm_prefix = 'env, LC_ALL=C, '
cmd_string = ', '.join(cmd)
data = "\n"
if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' ==
if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' ==
elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif 'env, LC_ALL=C, vgs, --version' in cmd_string:
elif _lvm_prefix + 'vgs, --version' in cmd_string:
data = " LVM version: 2.02.95(2) (2012-03-06)\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, uuid, fake-vg' in
cmd_string):
elif(_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
elif(_lvm_prefix + 'vgs, --noheadings, --unit=g, '
'-o, name,size,free,lv_count,uuid, '
'--separator, :, --nosuffix' in cmd_string):
data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-unit' in cmd_string:
@ -82,17 +90,17 @@ class BrickLvmTestCase(test.TestCase):
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif ('env, LC_ALL=C, lvs, --noheadings, '
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-nothere' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="One or more specified logical volume(s) not found.")
elif ('env, LC_ALL=C, lvs, --noheadings, '
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-newerror' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
elif ('env, LC_ALL=C, lvs, --noheadings, '
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
if 'fake-unknown' in cmd_string:
raise processutils.ProcessExecutionError(
@ -111,7 +119,7 @@ class BrickLvmTestCase(test.TestCase):
else:
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
@ -121,19 +129,19 @@ class BrickLvmTestCase(test.TestCase):
data = ' -wi-ao---'
else:
data = ' owi-a-'
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Origin' in
elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Origin' in
cmd_string):
if 'snapshot' in cmd_string:
data = ' fake-volume-1'
else:
data = ' '
elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string:
elif _lvm_prefix + 'pvs, --noheadings' in cmd_string:
data = " fake-vg|/dev/sda|10.00|1.00\n"
data += " fake-vg|/dev/sdb|10.00|1.00\n"
data += " fake-vg|/dev/sdc|10.00|8.99\n"
data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
if 'test-prov-cap-pool' in cmd_string:
data = " 9.5:20\n"
else:
@ -389,3 +397,10 @@ class BrickLvmTestCase(test.TestCase):
self.vg.create_volume('test', '1G')
self.assertRaises(exception.VolumeNotDeactivated,
self.vg.deactivate_lv, 'test')
class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase):
def setUp(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.lvm_suppress_fd_warnings = True
super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp()

View File

@ -116,7 +116,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
volume_id=self.volume_id,
snapshot_id='snapshot_id',
image_id='image_id',
request_spec='fake_request_spec',
request_spec={'volume_type': {}},
filter_properties='filter_properties',
volume=fake_volume.fake_volume_obj(
self.context),

View File

@ -39,7 +39,8 @@ class TestQemuImgInfo(test.TestCase):
output = image_utils.qemu_img_info(test_path)
mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
'info', test_path, run_as_root=True)
'info', test_path, run_as_root=True,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('oslo_utils.imageutils.QemuImgInfo')
@ -52,7 +53,8 @@ class TestQemuImgInfo(test.TestCase):
output = image_utils.qemu_img_info(test_path, run_as_root=False)
mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
'info', test_path, run_as_root=False)
'info', test_path, run_as_root=False,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('cinder.image.image_utils.os')
@ -67,7 +69,8 @@ class TestQemuImgInfo(test.TestCase):
output = image_utils.qemu_img_info(test_path)
mock_exec.assert_called_once_with('qemu-img', 'info', test_path,
run_as_root=True)
run_as_root=True,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('cinder.utils.execute')

View File

@ -262,7 +262,8 @@ class DellSCSanFCDriverTestCase(test.TestCase):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
@ -272,7 +273,7 @@ class DellSCSanFCDriverTestCase(test.TestCase):
1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
{u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']})
mock_get_live_volume.return_value = (sclivevol, False)
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
@ -292,6 +293,74 @@ class DellSCSanFCDriverTestCase(test.TestCase):
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'_is_live_vol')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns')
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'initialize_secondary')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_live_volume')
def test_initialize_connection_live_vol_afo(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'}
scvol = {'instanceId': '102.101'}
mock_find_volume.return_value = scvol
mock_get_volume.return_value = scvol
connector = self.connector
sclivevol = {'instanceId': '101.10001',
'primaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'primaryScSerialNumber': 102,
'secondaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 101,
'secondaryRole': 'Activated'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
# verify find_volume has been called and that is has been called twice
self.assertFalse(mock_initialize_secondary.called)
mock_find_volume.assert_called_once_with(
fake.VOLUME_ID, '101.101', True)
mock_get_volume.assert_called_once_with('102.101')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@ -581,11 +650,7 @@ class DellSCSanFCDriverTestCase(test.TestCase):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
mock_terminate_secondary.return_value = (None, [], {})
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_is_live_vol.return_value = sclivevol
mock_is_live_vol.return_value = True
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'driver_volume_type': 'fibre_channel',

View File

@ -518,9 +518,9 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api.get_live_volume = mock.MagicMock(return_value=(sclivevol,
False))
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_api.get_live_volume = mock.MagicMock(return_value=sclivevol)
# No replication driver data.
ret = self.driver._delete_live_volume(mock_api, vol)
self.assertFalse(ret)
@ -538,7 +538,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
ret = self.driver._delete_live_volume(mock_api, vol)
self.assertFalse(ret)
# No live volume found.
mock_api.get_live_volume.return_value = (None, False)
mock_api.get_live_volume.return_value = None
ret = self.driver._delete_live_volume(mock_api, vol)
self.assertFalse(ret)
@ -786,7 +786,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
data = self.driver.initialize_connection(volume, connector)
self.assertEqual('iscsi', data['driver_volume_type'])
# verify find_volume has been called and that is has been called twice
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, provider_id)
mock_find_volume.assert_called_once_with(
fake.VOLUME_ID, provider_id, False)
mock_get_volume.assert_called_once_with(provider_id)
expected = {'data': self.ISCSI_PROPERTIES,
'driver_volume_type': 'iscsi'}
@ -990,11 +991,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_is_live_vol.return_value = sclivevol
mock_is_live_vol.return_value = True
lvol_properties = {'access_mode': 'rw',
'target_discovered': False,
'target_iqn':
@ -1018,6 +1015,75 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
'driver_volume_type': 'iscsi'}
self.assertEqual(expected, ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS[0])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_iscsi_properties')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_live_volume')
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
'_is_live_vol')
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
'initialize_secondary')
def test_initialize_connection_live_volume_afo(self,
mock_initialize_secondary,
mock_is_live_vol,
mock_get_live_vol,
mock_find_iscsi_props,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'}
scvol = {'instanceId': '102.101'}
mock_find_volume.return_value = scvol
mock_get_volume.return_value = scvol
connector = self.connector
sclivevol = {'instanceId': '101.10001',
'primaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME_ID},
'primaryScSerialNumber': 101,
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102,
'secondaryRole': 'Activated'}
mock_is_live_vol.return_value = True
mock_get_live_vol.return_value = sclivevol
props = {
'access_mode': 'rw',
'target_discovered': False,
'target_iqn': u'iqn:1',
'target_iqns': [u'iqn:1',
u'iqn:2'],
'target_lun': 1,
'target_luns': [1, 1],
'target_portal': u'192.168.1.21:3260',
'target_portals': [u'192.168.1.21:3260',
u'192.168.1.22:3260']
}
mock_find_iscsi_props.return_value = props
ret = self.driver.initialize_connection(volume, connector)
expected = {'data': props,
'driver_volume_type': 'iscsi'}
expected['data']['discard'] = True
self.assertEqual(expected, ret)
self.assertFalse(mock_initialize_secondary.called)
@mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
'_get_replication_specs',
return_value={'enabled': True, 'live': True})
@ -1230,9 +1296,10 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_is_live_vol.return_value = True
mock_get_live_vol.return_value = (sclivevol, False)
mock_get_live_vol.return_value = sclivevol
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
@ -2661,36 +2728,36 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
'instanceName': fake.VOLUME2_ID},
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
postfail = {'instanceId': '101.100',
'primaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME2_ID},
'secondaryScSerialNumber': 102}
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_api.get_live_volume = mock.MagicMock()
mock_api.get_live_volume.side_effect = [(sclivevol, False),
(postfail, True),
(sclivevol, False),
(sclivevol, False)
]
mock_api.get_live_volume.side_effect = [sclivevol, postfail,
sclivevol, sclivevol]
# Good run.
mock_api.is_swapped = mock.MagicMock(return_value=False)
mock_api.swap_roles_live_volume = mock.MagicMock(return_value=True)
model_update = {'provider_id': '102.101',
'replication_status': 'failed-over'}
ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
'101.100')
'101.101')
self.assertEqual(model_update, ret)
# Swap fail
mock_api.swap_roles_live_volume.return_value = False
model_update = {'status': 'error'}
ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
'101.100')
'101.101')
self.assertEqual(model_update, ret)
# Can't find live volume.
mock_api.get_live_volume.return_value = (None, False)
mock_api.get_live_volume.return_value = None
ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID,
'101.100')
'101.101')
self.assertEqual(model_update, ret)
def test__failover_replication(self,
@ -3206,16 +3273,16 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
{'id': fake.VOLUME2_ID,
'replication_driver_data': '12345',
'provider_id': '12345.2'}]
mock_get_live_volume.side_effect = [(
mock_get_live_volume.side_effect = [
{'instanceId': '11111.101',
'secondaryVolume': {'instanceId': '11111.1001',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 11111}, True), (
'secondaryScSerialNumber': 11111},
{'instanceId': '11111.102',
'secondaryVolume': {'instanceId': '11111.1002',
'instanceName': fake.VOLUME2_ID},
'secondaryScSerialNumber': 11111}, True
)]
'secondaryScSerialNumber': 11111}
]
mock_get_replication_specs.return_value = {'enabled': True,
'live': True}
mock_swap_roles_live_volume.side_effect = [True, True]

View File

@ -21,6 +21,7 @@ import uuid
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume.drivers.dell import dell_storagecenter_api
@ -3543,8 +3544,7 @@ class DellSCSanAPITestCase(test.TestCase):
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
'_find_controller_port')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS)
@ -3564,6 +3564,59 @@ class DellSCSanAPITestCase(test.TestCase):
mock_open_connection,
mock_init):
# Test case where there are multiple portals
mock_find_ctrl_port.side_effect = [
{'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'},
{'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}]
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_is_virtualport_mode.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe44',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe44',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe44'],
'target_lun': 1,
'target_luns': [1, 1, 1, 1],
'target_portal': u'192.168.0.25:3260',
'target_portals': [u'192.168.0.25:3260',
u'192.168.0.21:3260',
u'192.168.0.25:3260',
u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_multi_portals_duplicates(
self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are multiple portals and
mock_find_ctrl_port.return_value = {
'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
@ -3575,16 +3628,12 @@ class DellSCSanAPITestCase(test.TestCase):
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1, 1, 1, 1],
'target_luns': [1, 1],
'target_portal': u'192.168.0.25:3260',
'target_portals': [u'192.168.0.21:3260',
u'192.168.0.25:3260',
u'192.168.0.21:3260',
u'192.168.0.25:3260']}
'target_portals': [u'192.168.0.25:3260',
u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
@ -3709,8 +3758,7 @@ class DellSCSanAPITestCase(test.TestCase):
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
'_find_controller_port')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
@ -3730,6 +3778,9 @@ class DellSCSanAPITestCase(test.TestCase):
mock_close_connection,
mock_open_connection,
mock_init):
mock_find_ctrl_port.side_effect = [
{'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'},
{'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}]
# Test case where there are multiple portals
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_find_mappings.called)
@ -3737,13 +3788,13 @@ class DellSCSanAPITestCase(test.TestCase):
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
# Since we're feeding the same info back multiple times the information
# will be duped.
# We're feeding the same info back multiple times the information
# will be scrubbed to a single item.
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe44',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
[u'iqn.2002-03.com.compellent:5000d31000fcbe44',
u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1, 1],
@ -5977,7 +6028,7 @@ class DellSCSanAPITestCase(test.TestCase):
expected = 'StorageCenter/ScReplication/%s' % (
self.SCREPL[0]['instanceId'])
expected_payload = {'DeleteDestinationVolume': True,
'RecycleDestinationVolume': False,
'RecycleDestinationVolume': True,
'DeleteRestorePoint': True}
ret = self.scapi.delete_replication(self.VOLUME, destssn)
mock_delete.assert_any_call(expected, payload=expected_payload,
@ -6014,7 +6065,7 @@ class DellSCSanAPITestCase(test.TestCase):
expected = 'StorageCenter/ScReplication/%s' % (
self.SCREPL[0]['instanceId'])
expected_payload = {'DeleteDestinationVolume': True,
'RecycleDestinationVolume': False,
'RecycleDestinationVolume': True,
'DeleteRestorePoint': True}
ret = self.scapi.delete_replication(self.VOLUME, destssn)
mock_delete.assert_any_call(expected, payload=expected_payload,
@ -6399,37 +6450,60 @@ class DellSCSanAPITestCase(test.TestCase):
scvol,
'a,b')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume(self,
mock_get_json,
mock_get,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
# Basic check
retlv, retswapped = self.scapi.get_live_volume(None)
retlv = self.scapi.get_live_volume(None)
self.assertIsNone(retlv)
self.assertFalse(retswapped)
lv1 = {'primaryVolume': {'instanceId': '12345.1'},
'secondaryVolume': {'instanceId': '67890.1'}}
lv2 = {'primaryVolume': {'instanceId': '12345.2'}}
mock_get_json.return_value = [lv1, lv2]
mock_get.return_value = self.RESPONSE_200
mock_sc_live_volumes.return_value = [lv1, lv2]
# Good Run
retlv, retswapped = self.scapi.get_live_volume('12345.2')
retlv = self.scapi.get_live_volume('12345.2')
self.assertEqual(lv2, retlv)
self.assertFalse(retswapped)
mock_sc_live_volumes.assert_called_once_with('12345')
self.assertFalse(mock_get_live_volumes.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume_on_secondary(self,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
# Basic check
retlv = self.scapi.get_live_volume(None)
self.assertIsNone(retlv)
lv1 = {'primaryVolume': {'instanceId': '12345.1'},
'secondaryVolume': {'instanceId': '67890.1'}}
lv2 = {'primaryVolume': {'instanceId': '12345.2'}}
mock_sc_live_volumes.return_value = []
mock_get_live_volumes.return_value = [lv1, lv2]
# Good Run
retlv = self.scapi.get_live_volume('12345.2')
self.assertEqual(lv2, retlv)
mock_sc_live_volumes.assert_called_once_with('12345')
mock_get_live_volumes.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume_not_found(self,
mock_get_json,
mock_get,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
@ -6437,19 +6511,20 @@ class DellSCSanAPITestCase(test.TestCase):
'secondaryVolume': {'instanceId': '67890.1'}}
lv2 = {'primaryVolume': {'instanceId': '12345.2'},
'secondaryVolume': {'instanceId': '67890.2'}}
mock_get_json.return_value = [lv1, lv2]
mock_get.return_value = self.RESPONSE_200
retlv, retswapped = self.scapi.get_live_volume('12345.3')
mock_get_live_volumes.return_value = [lv1, lv2]
mock_sc_live_volumes.return_value = []
retlv = self.scapi.get_live_volume('12345.3')
self.assertIsNone(retlv)
self.assertFalse(retswapped)
mock_sc_live_volumes.assert_called_once_with('12345')
mock_get_live_volumes.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume_swapped(self,
mock_get_json,
mock_get,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
@ -6457,23 +6532,50 @@ class DellSCSanAPITestCase(test.TestCase):
'secondaryVolume': {'instanceId': '67890.1'}}
lv2 = {'primaryVolume': {'instanceId': '67890.2'},
'secondaryVolume': {'instanceId': '12345.2'}}
mock_get_json.return_value = [lv1, lv2]
mock_get.return_value = self.RESPONSE_200
retlv, retswapped = self.scapi.get_live_volume('12345.2')
mock_get_live_volumes.return_value = [lv1, lv2]
mock_sc_live_volumes.return_value = []
retlv = self.scapi.get_live_volume('12345.2')
self.assertEqual(lv2, retlv)
self.assertTrue(retswapped)
mock_sc_live_volumes.assert_called_once_with('12345')
mock_get_live_volumes.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume_error(self,
mock_get,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
mock_get.return_value = self.RESPONSE_400
retlv, retswapped = self.scapi.get_live_volume('12345.2')
mock_get_live_volumes.return_value = []
mock_sc_live_volumes.return_value = []
retlv = self.scapi.get_live_volume('12345.2')
self.assertIsNone(retlv)
self.assertFalse(retswapped)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_sc_live_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_live_volumes')
def test_get_live_volume_by_name(self,
mock_get_live_volumes,
mock_sc_live_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
lv1 = {'primaryVolume': {'instanceId': '12345.1'},
'secondaryVolume': {'instanceId': '67890.1'},
'instanceName': 'Live volume of ' + fake.VOLUME2_ID}
lv2 = {'primaryVolume': {'instanceId': '67890.2'},
'secondaryVolume': {'instanceId': '12345.2'},
'instanceName': 'Live volume of ' + fake.VOLUME_ID}
mock_get_live_volumes.return_value = [lv1, lv2]
mock_sc_live_volumes.return_value = []
retlv = self.scapi.get_live_volume('12345.2', fake.VOLUME_ID)
self.assertEqual(lv2, retlv)
mock_sc_live_volumes.assert_called_once_with('12345')
mock_get_live_volumes.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')

View File

@ -24,7 +24,7 @@ from cinder import test
from cinder.tests.unit.consistencygroup import fake_consistencygroup as fake_cg
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.fake_volume import fake_volume_obj
from cinder.volume.drivers.emc import xtremio
@ -240,6 +240,7 @@ class D(dict):
class CommonData(object):
context = {'user': 'admin', }
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["123456789012345", "123456789054321"],
@ -247,18 +248,19 @@ class CommonData(object):
'host': 'fakehost',
}
test_volume = {'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '192eb39b-6c2f-420c-bae3-3cfd117f0001',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id':
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
}
test_volume = fake_volume_obj(context,
name = 'vol1',
size = 1,
volume_name = 'vol1',
id = '192eb39b-6c2f-420c-bae3-3cfd117f0001',
provider_auth = None,
project_id = 'project',
display_name = 'vol1',
display_description = 'test volume',
volume_type_id = None,
consistencygroup_id =
'192eb39b-6c2f-420c-bae3-3cfd117f0345',
)
test_snapshot = D()
test_snapshot.update({'name': 'snapshot1',
'size': 1,
@ -298,7 +300,6 @@ class CommonData(object):
'name': 'unmanaged1',
'size': 3,
}
context = {'user': 'admin', }
group = {'id': '192eb39b-6c2f-420c-bae3-3cfd117f0345',
'name': 'cg1',
'status': 'OK',
@ -881,7 +882,7 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase):
'index': 1}
xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1}
cg_obj = fake_cg.fake_consistencyobject_obj(d.context)
new_vol1 = fake_volume.fake_volume_obj(d.context)
new_vol1 = fake_volume_obj(d.context)
snapshot1 = (fake_snapshot
.fake_snapshot_obj
(d.context, volume_id=d.test_volume['id']))
@ -913,12 +914,12 @@ class EMCXIODriverISCSITestCase(BaseEMCXIODriverTestCase):
'index': 1}
xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1}
cg_obj = fake_cg.fake_consistencyobject_obj(d.context)
new_vol1 = fake_volume.fake_volume_obj(d.context)
new_vol1 = fake_volume_obj(d.context)
new_cg_obj = fake_cg.fake_consistencyobject_obj(
d.context, id=fake.CONSISTENCY_GROUP2_ID)
snapset2_name = new_cg_obj.id
new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001'
new_vol2 = fake_volume.fake_volume_obj(d.context)
new_vol2 = fake_volume_obj(d.context)
snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']],
'name': snapset2_name,
'index': 1}

View File

@ -1091,20 +1091,26 @@ class StorageCenterApi(object):
def _autofailback(self, lv):
# if we have a working replication state.
ret = False
if (lv['ReplicationState'] == 'Up' and
lv['failoverState'] == 'AutoFailedOver'):
LOG.debug('Attempting autofailback of %s', lv)
if (lv and lv['status'] == 'Up' and lv['replicationState'] == 'Up' and
lv['failoverState'] == 'Protected' and lv['secondaryStatus'] == 'Up'
and lv['primarySwapRoleState'] == 'NotSwapping'):
ret = self.swap_roles_live_volume(lv)
return ret
def _find_volume_primary(self, provider_id):
def _find_volume_primary(self, provider_id, name):
# if there is no live volume then we return our provider_id.
primary_id = provider_id
lv, swapped = self.get_live_volume(provider_id)
# if we swapped see if we can autofailback. Unless the admin
# failed us over, that is.
if swapped and not self.failed_over:
lv = self.get_live_volume(provider_id, name)
LOG.info(_LI('Volume %(provider)s at primary %(primary)s.'),
{'provider': provider_id, 'primary': primary_id})
# If we have a live volume and are swapped and are not failed over
# at least give failback a shot.
if lv and self.is_swapped(provider_id, lv) and not self.failed_over:
if self._autofailback(lv):
ls, swapped = self.get_live_volume(provider_id)
lv = self.get_live_volume(provider_id)
LOG.info(_LI('After failback %s'), lv)
if lv:
primary_id = lv['primaryVolume']['instanceId']
return primary_id
@ -1127,7 +1133,7 @@ class StorageCenterApi(object):
scvolume = None
if islivevol:
# Just get the primary from the sc live vol.
primary_id = self._find_volume_primary(provider_id)
primary_id = self._find_volume_primary(provider_id, name)
scvolume = self.get_volume(primary_id)
elif self._use_provider_id(provider_id):
# just get our volume
@ -1686,7 +1692,15 @@ class StorageCenterApi(object):
:return: Nothing
"""
if self.excluded_domain_ips.count(address) == 0:
portals.append(address + ':' + six.text_type(port))
# Make sure this isn't a duplicate.
newportal = address + ':' + six.text_type(port)
for idx, portal in enumerate(portals):
if portal == newportal and iqns[idx] == iqn:
LOG.debug('Skipping duplicate portal %(ptrl)s and'
'iqn %(iqn)s.', {'ptrl': portal, 'iqn': iqn})
return
# It isn't in the list so process it.
portals.append(newportal)
iqns.append(iqn)
luns.append(lun)
@ -1778,13 +1792,17 @@ class StorageCenterApi(object):
'Volume is not yet active on any controller.')
pdata['active'] = 0
# Make sure we have a good item at the top of the list.
iqns.insert(0, iqns.pop(pdata['active']))
portals.insert(0, portals.pop(pdata['active']))
luns.insert(0, luns.pop(pdata['active']))
data = {'target_discovered': False,
'target_iqn': iqns[pdata['active']],
'target_iqn': iqns[0],
'target_iqns': iqns,
'target_portal': portals[pdata['active']],
'target_portal': portals[0],
'target_portals': portals,
'target_lun': luns[pdata['active']],
'target_luns': luns,
'target_lun': luns[0],
'target_luns': luns
}
LOG.debug('find_iscsi_properties: %s', data)
return data
@ -2735,7 +2753,7 @@ class StorageCenterApi(object):
if replication:
payload = {}
payload['DeleteDestinationVolume'] = deletedestvolume
payload['RecycleDestinationVolume'] = False
payload['RecycleDestinationVolume'] = deletedestvolume
payload['DeleteRestorePoint'] = True
r = self.client.delete('StorageCenter/ScReplication/%s' %
self._get_id(replication), payload=payload,
@ -3055,24 +3073,73 @@ class StorageCenterApi(object):
progress)
return None, None
def get_live_volume(self, primaryid):
def is_swapped(self, provider_id, sclivevolume):
if (sclivevolume.get('primaryVolume') and
sclivevolume['primaryVolume']['instanceId'] != provider_id):
return True
return False
def is_failed_over(self, provider_id, sclivevolume):
# either the secondary is active or the secondary is now our primary.
if (sclivevolume.get('secondaryRole') == 'Activated' or
self.is_swapped(provider_id, sclivevolume)):
return True
return False
def _sc_live_volumes(self, ssn):
if ssn:
r = self.client.get('StorageCenter/StorageCenter/%s/LiveVolumeList'
% ssn)
if self._check_result(r):
return self._get_json(r)
return []
def _get_live_volumes(self):
# Work around for a FW bug. Instead of grabbing the entire list at
# once we have to Trundle through each SC's list.
lvs = []
pf = self._get_payload_filter()
pf.append('connected', True)
r = self.client.post('StorageCenter/StorageCenter/GetList',
pf.payload)
if self._check_result(r):
# Should return [] if nothing there.
# Just in case do the or.
scs = self._get_json(r) or []
for sc in scs:
lvs += self._sc_live_volumes(self._get_id(sc))
return lvs
def get_live_volume(self, primaryid, name=None):
"""Get's the live ScLiveVolume object for the vol with primaryid.
:param primaryid: InstanceId of the primary volume.
:return: ScLiveVolume object or None, swapped True/False.
:parma name: Volume name associated with this live volume.
:return: ScLiveVolume object or None
"""
sclivevol = None
if primaryid:
r = self.client.get('StorageCenter/ScLiveVolume')
if self._check_result(r):
lvs = self._get_json(r)
# Try from our primary SSN. This would be the authoritay on the
# Live Volume in question.
lvs = self._sc_live_volumes(primaryid.split('.')[0])
# No, grab them all and see if we are on the secondary.
if not lvs:
lvs = self._get_live_volumes()
if lvs:
# Look for our primaryid.
for lv in lvs:
if (lv.get('primaryVolume') and
lv['primaryVolume']['instanceId'] == primaryid):
return lv, False
if (lv.get('secondaryVolume') and
lv['secondaryVolume']['instanceId'] == primaryid):
return lv, True
return None, False
if ((lv.get('primaryVolume') and
lv['primaryVolume']['instanceId'] == primaryid) or
(lv.get('secondaryVolume') and
lv['secondaryVolume']['instanceId'] == primaryid)):
sclivevol = lv
break
# Sometimes the lv object returns without a secondary
# volume. Make sure we find this by name if we have to.
if (name and sclivevol is None and
lv['instanceName'].endswith(name)):
sclivevol = lv
return sclivevol
def _get_hbas(self, serverid):
# Helper to get the hba's of a given server.
@ -3172,6 +3239,7 @@ class StorageCenterApi(object):
payload['ConvertToReplication'] = False
payload['DeleteSecondaryVolume'] = deletesecondaryvolume
payload['RecycleSecondaryVolume'] = deletesecondaryvolume
payload['DeleteRestorePoint'] = deletesecondaryvolume
r = self.client.delete('StorageCenter/ScLiveVolume/%s' %
self._get_id(sclivevolume), payload, True)
if self._check_result(r):

View File

@ -362,8 +362,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
ssnstrings = self._split_driver_data(replication_driver_data)
if ssnstrings:
ssn = int(ssnstrings[0])
sclivevolume, swapped = api.get_live_volume(
volume.get('provider_id'))
sclivevolume = api.get_live_volume(volume.get('provider_id'))
# Have we found the live volume?
if (sclivevolume and
sclivevolume.get('secondaryScSerialNumber') == ssn and
@ -682,12 +681,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
storageusage = api.get_storage_usage()
if not storageusage:
msg = _('Unable to retrieve volume stats.')
raise exception.VolumeBackendAPIException(message=msg)
# all of this is basically static for now
# Static stats.
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
@ -696,12 +690,6 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
data['reserved_percentage'] = 0
data['consistencygroup_support'] = True
data['thin_provisioning_support'] = True
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
@ -715,6 +703,22 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
replication_targets.append(target_device_id)
data['replication_targets'] = replication_targets
# Get our capacity.
storageusage = api.get_storage_usage()
if storageusage:
# Get actual stats.
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
else:
# Soldier on. Just return 0 for this iteration.
LOG.error(_LE('Unable to retrieve volume stats.'))
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
@ -1390,7 +1394,10 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
:return: model_update dict
"""
model_update = {}
sclivevolume, swapped = api.get_live_volume(provider_id)
# We do not search by name. Only failback if we have a complete
# LV object.
sclivevolume = api.get_live_volume(provider_id)
# TODO(tswanson): Check swapped state first.
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
model_update = {
@ -1489,8 +1496,10 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
def _failover_live_volume(self, api, id, provider_id):
model_update = {}
sclivevolume, swapped = api.get_live_volume(provider_id)
# Search for volume by id if we have to.
sclivevolume = api.get_live_volume(provider_id, id)
if sclivevolume:
swapped = api.is_swapped(provider_id, sclivevolume)
# If we aren't swapped try it. If fail error out.
if not swapped and not api.swap_roles_live_volume(sclivevolume):
LOG.info(_LI('Failure swapping roles %s'), id)
@ -1498,7 +1507,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
return model_update
LOG.info(_LI('Success swapping sclivevolume roles %s'), id)
sclivevolume, swapped = api.get_live_volume(provider_id)
sclivevolume = api.get_live_volume(provider_id)
model_update = {
'replication_status':
fields.ReplicationStatus.FAILED_OVER,

View File

@ -90,52 +90,59 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
with self._client.open_connection() as api:
try:
wwpns = connector.get('wwpns')
# Find our server.
scserver = self._find_server(api, wwpns)
# No? Create it.
if scserver is None:
scserver = api.create_server(
wwpns, self.configuration.dell_server_os)
# Find the volume on the storage center.
# Find the volume on the storage center. Note that if this
# is live volume and we are swapped this will be the back
# half of the live volume.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scserver is not None and scvolume is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had best update
# our sc volume object.
scvolume = api.get_volume(scvolume['instanceId'])
lun, targets, init_targ_map = api.find_wwns(scvolume,
scserver)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Find our server.
scserver = self._find_server(api, wwpns, ssn)
# Do we have extra live volume work?
if islivevol:
# Get our volume and our swap state.
sclivevolume, swapped = api.get_live_volume(
provider_id)
# Do not map to a failed over volume.
if sclivevolume and not swapped:
# Now map our secondary.
lvlun, lvtargets, lvinit_targ_map = (
self.initialize_secondary(api,
sclivevolume,
wwpns))
# Unmapped. Add info to our list.
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
# No? Create it.
if scserver is None:
scserver = api.create_server(
wwpns, self.configuration.dell_server_os, ssn)
# We have a volume and a server. Map them.
if scserver is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had
# best update our sc volume object.
scvolume = api.get_volume(scvolume['instanceId'])
lun, targets, init_targ_map = api.find_wwns(
scvolume, scserver)
# Roll up our return data.
if lun is not None and len(targets) > 0:
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': lun,
'target_discovered': True,
'target_wwn': targets,
'initiator_target_map':
init_targ_map,
'discard': True}}
LOG.debug('Return FC data: %s', data)
return data
LOG.error(_LE('Lun mapping returned null!'))
# Do we have extra live volume work?
if islivevol:
# Get our live volume.
sclivevolume = api.get_live_volume(provider_id)
# Do not map to a failed over volume.
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
# Now map our secondary.
lvlun, lvtargets, lvinit_targ_map = (
self.initialize_secondary(api,
sclivevolume,
wwpns))
# Unmapped. Add info to our list.
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
# Roll up our return data.
if lun is not None and len(targets) > 0:
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': lun,
'target_discovered': True,
'target_wwn': targets,
'initiator_target_map':
init_targ_map,
'discard': True}}
LOG.debug('Return FC data: %s', data)
return data
LOG.error(_LE('Lun mapping returned null!'))
except Exception:
with excutils.save_and_reraise_exception():
@ -191,46 +198,53 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
with self._client.open_connection() as api:
try:
wwpns = connector.get('wwpns')
scserver = self._find_server(api, wwpns)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
# Get our target map so we can return it to free up a zone.
lun, targets, init_targ_map = api.find_wwns(scvolume, scserver)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Do we have extra live volume work?
if islivevol:
# Get our volume and our swap state.
sclivevolume, swapped = api.get_live_volume(
provider_id)
# Do not map to a failed over volume.
if sclivevolume and not swapped:
lvlun, lvtargets, lvinit_targ_map = (
self.terminate_secondary(api, sclivevolume, wwpns))
# Add to our return.
if lvlun:
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
scserver = self._find_server(api, wwpns, ssn)
# If we have a server and a volume lets unmap them.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
else:
raise exception.VolumeBackendAPIException(
_('Terminate connection failed'))
# Get our target map so we can return it to free up a zone.
lun, targets, init_targ_map = api.find_wwns(scvolume,
scserver)
# basic return info...
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
# Do we have extra live volume work?
if islivevol:
# Get our live volume.
sclivevolume = api.get_live_volume(provider_id)
# Do not map to a failed over volume.
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
lvlun, lvtargets, lvinit_targ_map = (
self.terminate_secondary(
api, sclivevolume, wwpns))
# Add to our return.
if lvlun:
targets += lvtargets
init_targ_map.update(lvinit_targ_map)
# if not then we return the target map so that
# the zone can be freed up.
if api.get_volume_count(scserver) == 0:
info['data'] = {'target_wwn': targets,
'initiator_target_map': init_targ_map}
return info
# If we have a server and a volume lets unmap them.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
else:
raise exception.VolumeBackendAPIException(
_('Terminate connection failed'))
# basic return info...
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
# if not then we return the target map so that
# the zone can be freed up.
if api.get_volume_count(scserver) == 0:
info['data'] = {'target_wwn': targets,
'initiator_target_map': init_targ_map}
return info
except Exception:
with excutils.save_and_reraise_exception():

View File

@ -96,59 +96,68 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
with self._client.open_connection() as api:
try:
# Find our server.
scserver = api.find_server(initiator_name)
# No? Create it.
if scserver is None:
scserver = api.create_server(
[initiator_name], self.configuration.dell_server_os)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name, provider_id)
# Find the volume on the storage center. Note that if this
# is live volume and we are swapped this will be the back
# half of the live volume.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Find our server.
scserver = api.find_server(initiator_name, ssn)
# No? Create it.
if scserver is None:
scserver = api.create_server(
[initiator_name],
self.configuration.dell_server_os, ssn)
# if we have a server and a volume lets bring them together.
if scserver is not None and scvolume is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had best update
# our sc volume object.
scvolume = api.get_volume(provider_id)
# Our return.
iscsiprops = {}
# if we have a server and a volume lets bring them
# together.
if scserver is not None:
mapping = api.map_volume(scvolume, scserver)
if mapping is not None:
# Since we just mapped our volume we had best
# update our sc volume object.
scvolume = api.get_volume(scvolume['instanceId'])
# Our return.
iscsiprops = {}
# Three cases that should all be satisfied with the
# same return of Target_Portal and Target_Portals.
# 1. Nova is calling us so we need to return the
# Target_Portal stuff. It should ignore the
# Target_Portals stuff.
# 2. OS brick is calling us in multipath mode so we
# want to return Target_Portals. It will ignore
# the Target_Portal stuff.
# 3. OS brick is calling us in single path mode so
# we want to return Target_Portal and
# Target_Portals as alternates.
iscsiprops = api.find_iscsi_properties(scvolume)
# Three cases that should all be satisfied with the
# same return of Target_Portal and Target_Portals.
# 1. Nova is calling us so we need to return the
# Target_Portal stuff. It should ignore the
# Target_Portals stuff.
# 2. OS brick is calling us in multipath mode so we
# want to return Target_Portals. It will ignore
# the Target_Portal stuff.
# 3. OS brick is calling us in single path mode so
# we want to return Target_Portal and
# Target_Portals as alternates.
iscsiprops = api.find_iscsi_properties(scvolume)
# If this is a live volume we need to map up our
# secondary volume.
if islivevol:
sclivevolume, swapped = api.get_live_volume(
provider_id)
# Only map if we are not swapped.
if sclivevolume and not swapped:
secondaryprops = self.initialize_secondary(
api, sclivevolume, initiator_name)
# Combine with iscsiprops
iscsiprops['target_iqns'] += (
secondaryprops['target_iqns'])
iscsiprops['target_portals'] += (
secondaryprops['target_portals'])
iscsiprops['target_luns'] += (
secondaryprops['target_luns'])
# If this is a live volume we need to map up our
# secondary volume. Note that if we have failed
# over we do not wish to do this.
if islivevol:
sclivevolume = api.get_live_volume(provider_id)
# Only map if we are not failed over.
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
secondaryprops = self.initialize_secondary(
api, sclivevolume, initiator_name)
# Combine with iscsiprops
iscsiprops['target_iqns'] += (
secondaryprops['target_iqns'])
iscsiprops['target_portals'] += (
secondaryprops['target_portals'])
iscsiprops['target_luns'] += (
secondaryprops['target_luns'])
# Return our iscsi properties.
iscsiprops['discard'] = True
return {'driver_volume_type': 'iscsi',
'data': iscsiprops}
# Return our iscsi properties.
iscsiprops['discard'] = True
return {'driver_volume_type': 'iscsi',
'data': iscsiprops}
# Re-raise any backend exception.
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
@ -214,23 +223,31 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
'initiator': initiator_name})
with self._client.open_connection() as api:
try:
scserver = api.find_server(initiator_name)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name, provider_id)
# Find the volume on the storage center. Note that if this
# is live volume and we are swapped this will be the back
# half of the live volume.
scvolume = api.find_volume(volume_name, provider_id, islivevol)
if scvolume:
# Get the SSN it is on.
ssn = scvolume['instanceId'].split('.')[0]
# Find our server.
scserver = api.find_server(initiator_name, ssn)
# Unmap our secondary if it isn't swapped.
if islivevol:
sclivevolume, swapped = api.get_live_volume(provider_id)
if sclivevolume and not swapped:
self.terminate_secondary(api, sclivevolume,
initiator_name)
# Unmap our secondary if not failed over..
if islivevol:
sclivevolume = api.get_live_volume(provider_id)
if (sclivevolume and not
api.is_failed_over(provider_id,
sclivevolume)):
self.terminate_secondary(api, sclivevolume,
initiator_name)
# If we have a server and a volume lets pull them apart.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
return
# If we have a server and a volume lets pull them apart.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
return
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection '

View File

@ -457,9 +457,9 @@ class XtremIOVolumeDriver(san.SanDriver):
def delete_volume(self, volume):
"""Deletes a volume."""
try:
self.client.req('volumes', 'DELETE', name=volume['id'])
self.client.req('volumes', 'DELETE', name=volume.name_id)
except exception.NotFound:
LOG.info(_LI("volume %s doesn't exist"), volume['id'])
LOG.info(_LI("volume %s doesn't exist"), volume.name_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""

View File

@ -69,7 +69,11 @@ volume_opts = [
help='max_over_subscription_ratio setting for the LVM '
'driver. If set, this takes precedence over the '
'general max_over_subscription_ratio option. If '
'None, the general option is used.')
'None, the general option is used.'),
cfg.BoolOpt('lvm_suppress_fd_warnings',
default=False,
help='Suppress leaked file descriptor warnings in LVM '
'commands.')
]
CONF = cfg.CONF
@ -287,11 +291,14 @@ class LVMVolumeDriver(driver.VolumeDriver):
lvm_conf_file = None
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file)
self.vg = lvm.LVM(
self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute,
lvm_conf=lvm_conf_file,
suppress_fd_warn=(
self.configuration.lvm_suppress_fd_warnings))
except exception.VolumeGroupNotFound:
message = (_("Volume Group %s does not exist") %

View File

@ -4524,7 +4524,7 @@ class _VolumeV3Proxy(object):
def get_manageable_snapshots(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs):
return self.manager.get_manageable_snapshots(
self, ctxt, marker, limit, offset, sort_keys, sort_dirs)
ctxt, marker, limit, offset, sort_keys, sort_dirs)
def get_capabilities(self, context, discover):
return self.manager.get_capabilities(context, discover)

6
debian/changelog vendored
View File

@ -1,3 +1,9 @@
cinder (2:9.0.0-1) unstable; urgency=medium
* New upstream release.
-- Thomas Goirand <zigo@debian.org> Thu, 06 Oct 2016 17:32:42 +0200
cinder (2:9.0.0~rc1-2) unstable; urgency=medium
[ Ondřej Nový ]

View File

@ -16,12 +16,25 @@ vgs: EnvFilter, env, root, LC_ALL=C, vgs
lvs: EnvFilter, env, root, LC_ALL=C, lvs
lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay
# LVM conf var
# -LVM related show commands with suppress fd warnings
pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay
# -LVM related show commands conf var
pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs
vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs
lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs
lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay
# -LVM conf var with suppress fd_warnings
pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs
vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs
lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs
lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay
# os-brick library commands
# os_brick.privileged.run_as_root oslo.privsep context
# This line ties the superuser privs with the config files, context name,
@ -40,6 +53,8 @@ vgcreate: CommandFilter, vgcreate, root
# cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ...
lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate
lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate
lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate
lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate
# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
dd: CommandFilter, dd, root
@ -54,6 +69,8 @@ lvrename: CommandFilter, lvrename, root
# cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ...
lvextend: EnvFilter, env, root, LC_ALL=C, lvextend
lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend
lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend
# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
lvchange: CommandFilter, lvchange, root

View File

@ -0,0 +1,17 @@
---
upgrade:
- |
In certain environments (Kubernetes, for example) indirect calls to the LVM
commands result in file descriptor leak warning messages, which in turn cause
the process execution method to raise an exception.
To accommodate these environments, and to maintain backward compatibility
in Newton, we add an ``lvm_suppress_fd_warnings`` bool config to the LVM driver.
Setting this to True will append the LVM env vars to include the variable
``LVM_SUPPRESS_FD_WARNINGS=1``.
This is made an optional configuration because it only applies to very specific
environments. If we were to make this global that would require a rootwrap/privsep
update that could break compatibility when trying to do rolling upgrades of the
volume service.

View File

@ -0,0 +1,7 @@
---
security:
- The qemu-img tool now has resource limits applied
which prevent it from using more than 1GB of address
space or more than 2 seconds of CPU time. This provides
protection against denial of service attacks from
maliciously crafted or corrupted disk images.

View File

@ -9,7 +9,7 @@ envlist = py34,py27,compliance,pep8
setenv = VIRTUAL_ENV={envdir}
PYTHONHASHSEED=0
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/newton} {opts} {packages}
# TODO(mriedem): Move oslo.versionedobjects[fixtures] to test-requirements.txt
# after I937823ffeb95725f0b55e298ebee1857d6482883 lands.