Support VDI streaming

This adds support for streaming VHD tgz files to/from XAPI. In
this way it avoids the restriction that SRs must be
file-system based (e.g. ext and nfs) for XenServer
OpenStack, so that we can also support other types of SRs,
e.g. lvm and iscsi.

Change-Id: I2cfe7340d48071bf96bc7ba0ad19f5a0b12b5378
jianghua wang 2017-07-23 19:39:40 +01:00
parent 0ce6489379
commit 2b97fb50c1
12 changed files with 1347 additions and 0 deletions
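
As a rough usage sketch of the new public image API added by this change (the caller-side names context, session, instance, glance_resp and upload are illustrative, not part of the commit):

    from os_xenapi.client import image

    # Import: push a gzipped VHD tarball into new VDIs on the target host.
    vdis = image.stream_to_vdis(context, session, instance,
                                'http://xenserver-host', glance_resp)
    root_vdi_uuid = vdis['root']['uuid']

    # Export: turn existing VDIs back into an iterable gzipped VHD tarball
    # stream, chunk by chunk.
    for chunk in image.stream_from_vdis(context, session, instance,
                                        'http://xenserver-host',
                                        [root_vdi_uuid]):
        upload(chunk)  # illustrative sink, e.g. a Glance upload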

@@ -57,3 +57,23 @@ class PluginImageNotFound(OsXenApiException):
class SessionLoginTimeout(OsXenApiException):
msg_fmt = _("Unable to log in to XenAPI (is the Dom0 disk full?)")
class InvalidImage(OsXenApiException):
msg_fmt = _("Image is invalid: details is - (%(details)s)")
class HostConnectionFailure(OsXenApiException):
msg_fmt = _("Failed connecting to host %(host_netloc)s")
class NotFound(OsXenApiException):
msg_fmt = _("Not found error: %s")
class VdiImportFailure(OsXenApiException):
msg_fmt = _("Failed importing VDI from VHD stream: vdi_ref=(%(vdi_ref)s)")
class VhdDiskTypeNotSupported(OsXenApiException):
msg_fmt = _("Not supported VHD disk type: type=(%(disk_type)s)")

@@ -0,0 +1,28 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_xenapi.client.image import vdi_handler
def stream_to_vdis(context, session, instance, host_url, data):
handler = vdi_handler.ImageStreamToVDIs(context, session, instance,
host_url, data)
handler.start()
return handler.vdis
def stream_from_vdis(context, session, instance, host_url, vdi_uuids):
handler = vdi_handler.GenerateImageStream(context, session, instance,
host_url, vdi_uuids)
return handler.get_image_data()

@@ -0,0 +1,282 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import logging
from six.moves import http_client as httplib
import six.moves.urllib.parse as urlparse
import tarfile
from os_xenapi.client import exception
from os_xenapi.client.image import vhd_utils
from os_xenapi.client import utils
LOG = logging.getLogger(__name__)
CHUNK_SIZE = 4 * 1024 * 1024
class ImageStreamToVDIs(object):
def __init__(self, context, session, instance, host_url, image_stream_in):
self.context = context
self.session = session
self.instance = instance
self.host_url = urlparse.urlparse(host_url)
self.image_stream = image_stream_in
self.task_ref = None
self.vdis = {}
def _clean(self):
if self.task_ref:
self.session.task.destroy(self.task_ref)
def start(self):
label = 'VDI_IMPORT_for_' + self.instance['name']
desc = 'Importing VDI for instance: %s' % self.instance['name']
self.task_ref = self.session.task.create(label, desc)
try:
with tarfile.open(mode="r|gz", fileobj=self.image_stream) as tar:
for vhd in tar:
file_size = vhd.size
LOG.debug("file_name:file_size is %(n)s:%(s)d",
{'n': vhd.name, 's': vhd.size})
vhd_file = tar.extractfile(vhd)
vhd_file_parser = vhd_utils.VHDFileParser(vhd_file)
vhd_footer = vhd_file_parser.parse_vhd_footer()
virtual_size = vhd_footer.current_size
sr_ref, vdi_ref = self._createVDI(self.session,
self.instance,
virtual_size)
self._vhd_stream_to_vdi(vhd_file_parser, vdi_ref,
file_size)
vdi_uuid = self.session.VDI.get_uuid(vdi_ref)
if 'root' in self.vdis.keys():
# Only a single VDI is supported. If 'root' already exists
# in the dict, raise an exception.
msg = "Only a single VDI is supported, but there are " + \
"multiple VDIs in the image."
raise exception.InvalidImage(details=msg)
self.vdis['root'] = dict(uuid=vdi_uuid)
finally:
self._clean()
def _createVDI(self, session, instance, virtual_size):
sr_ref = utils.get_default_sr(session)
vdi_ref = utils.create_vdi(session, sr_ref, instance,
instance['name'], 'root', virtual_size)
vdi_uuid = session.VDI.get_uuid(vdi_ref)
LOG.debug("Created a new VDI: uuid=%s" % vdi_uuid)
return sr_ref, vdi_ref
def _vhd_stream_to_vdi(self, vhd_file_parser, vdi_ref, file_size):
headers = {'Content-Type': 'application/octet-stream',
'Content-Length': '%s' % file_size}
if self.host_url.scheme == 'http':
conn = httplib.HTTPConnection(self.host_url.netloc)
elif self.host_url.scheme == 'https':
conn = httplib.HTTPSConnection(self.host_url.netloc)
vdi_import_path = utils.get_vdi_import_path(
self.session, self.task_ref, vdi_ref)
try:
conn.connect()
except Exception:
LOG.error('Failed connecting to host: %s', self.host_url.netloc)
raise exception.HostConnectionFailure(
host_netloc=self.host_url.netloc)
try:
conn.request('PUT', vdi_import_path, headers=headers)
# Send the data already processed by the VHD file parser first;
# then send the remaining data from the stream.
conn.send(vhd_file_parser.cached_buff)
remain_size = file_size - len(vhd_file_parser.cached_buff)
file_obj = vhd_file_parser.src_file
while remain_size >= CHUNK_SIZE:
chunk = file_obj.read(CHUNK_SIZE)
remain_size -= CHUNK_SIZE
conn.send(chunk)
if remain_size != 0:
chunk = file_obj.read(remain_size)
conn.send(chunk)
except Exception:
LOG.error('Failed importing VDI from VHD stream - vdi_ref:%s',
vdi_ref)
raise exception.VdiImportFailure(vdi_ref=vdi_ref)
finally:
resp = conn.getresponse()
LOG.debug("Connection response status/reason is "
"%(status)s:%(reason)s",
{'status': resp.status, 'reason': resp.reason})
conn.close()
class GenerateImageStream(object):
def __init__(self, context, session, instance, host_url, vdi_uuids):
self.context = context
self.session = session
self.instance = instance
self.host_url = host_url
self.vdi_uuids = vdi_uuids
def get_image_data(self):
"""This function will:
1). export VDI as VHD stream;
2). make gzipped tarball from the VHD stream;
3). read from the tarball stream.and return the iterable data.
"""
tarpipe_out, tarpipe_in = utils.create_pipe()
pool = eventlet.GreenPool()
pool.spawn(self.start_image_stream_generator, tarpipe_in)
try:
while True:
data = tarpipe_out.read(CHUNK_SIZE)
if not data:
break
yield data
except Exception:
LOG.debug("Failed to read chunks from the tarfile "
"stream.")
raise
finally:
tarpipe_out.close()
pool.waitall()
def start_image_stream_generator(self, tarpipe_in):
tar_generator = VdisToTarStream(
self.context, self.session, self.instance, self.host_url,
self.vdi_uuids, tarpipe_in)
try:
tar_generator.start()
finally:
tarpipe_in.close()
class VdisToTarStream(object):
def __init__(self, context, session, instance, host_url, vdi_uuids,
tarpipe_in):
self.context = context
self.session = session
self.instance = instance
self.host_url = host_url
self.vdi_uuids = vdi_uuids
self.tarpipe_in = tarpipe_in
self.conn = None
self.task_ref = None
def start(self):
# Start thread to generate tgz and write tgz data into tarpipe_in.
with tarfile.open(fileobj=self.tarpipe_in, mode='w|gz') as tar_file:
# Only the leaf VDI needs to be exported.
vdi_uuid = self.vdi_uuids[0]
vdi_ref = self.session.VDI.get_by_uuid(vdi_uuid)
vhd_stream = self._connect_request(vdi_ref)
tar_info = tarfile.TarInfo('0.vhd')
try:
# The VHD must be a dynamic hard disk; otherwise parsing the VHD
# file will raise a VhdDiskTypeNotSupported exception.
vhd_DynDisk = vhd_utils.VHDDynDiskParser(vhd_stream)
tar_info.size = vhd_DynDisk.get_vhd_file_size()
LOG.debug("VHD size for tarfile is %d" % tar_info.size)
vhdpipe_out, vhdpipe_in = utils.create_pipe()
pool = eventlet.GreenPool()
pool.spawn(self.convert_vhd_to_tar, vhdpipe_out,
tar_file, tar_info)
try:
self._vhd_to_pipe(vhd_DynDisk, vhdpipe_in)
finally:
vhdpipe_in.close()
pool.waitall()
finally:
self._clean()
def convert_vhd_to_tar(self, vhdpipe_out, tar_file, tar_info):
tarGenerator = AddVhdToTar(tar_file, tar_info, vhdpipe_out)
try:
tarGenerator.start()
finally:
vhdpipe_out.close()
def _connect_request(self, vdi_ref):
# request connection to xapi url service for VDI export
try:
# create task for VDI export
label = 'VDI_EXPORT_for_' + self.instance['name']
desc = 'Exporting VDI for instance: %s' % self.instance['name']
self.task_ref = self.session.task.create(label, desc)
LOG.debug("task_ref is %s" % self.task_ref)
# connect to XS
xs_url = urlparse.urlparse(self.host_url)
if xs_url.scheme == 'http':
conn = httplib.HTTPConnection(xs_url.netloc)
LOG.debug("using http")
elif xs_url.scheme == 'https':
conn = httplib.HTTPSConnection(xs_url.netloc)
LOG.debug("using https")
vdi_export_path = utils.get_vdi_export_path(
self.session, self.task_ref, vdi_ref)
conn.request('GET', vdi_export_path)
conn_resp = conn.getresponse()
except Exception:
LOG.debug('request connect for vdi export failed')
raise
return conn_resp
def _vhd_to_pipe(self, vhd_dynDisk, vhdpipe_in):
# First write the data already parsed by the vhd_dynDisk object;
# then write all of the remaining data to the pipe as well.
vhdpipe_in.write(vhd_dynDisk.cached_buff)
remain_data = vhd_dynDisk.src_file
while True:
data = remain_data.read(CHUNK_SIZE)
if not data:
break
try:
vhdpipe_in.write(data)
except Exception:
LOG.debug("Failed when writing data to VHD stream.")
raise
def _clean(self):
if self.conn:
self.conn.close()
if self.task_ref:
self.session.task.destroy(self.task_ref)
class AddVhdToTar(object):
def __init__(self, tar_file, tar_info, vhdpipe_out):
self.tar_file = tar_file
self.tar_info = tar_info
self.stream = vhdpipe_out
def start(self):
self._add_stream_to_tar()
def _add_stream_to_tar(self):
try:
LOG.debug('self.tar_info.size=%d' % self.tar_info.size)
self.tar_file.addfile(self.tar_info, fileobj=self.stream)
LOG.debug('added file %s' % self.tar_info.name)
except IOError:
LOG.debug('IOError when streaming vhd to tarball')
raise
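GenerateImageStream above pairs a green-thread producer with a pipe reader. A minimal standalone sketch of that pattern, reusing the create_pipe() helper added in this change (the producer function and its payload are illustrative):

    import eventlet

    from os_xenapi.client import utils

    def producer(pipe_in):
        try:
            for _ in range(3):
                pipe_in.write(b'some-block-of-data')
        finally:
            pipe_in.close()  # EOF: the reader's read() will return b''

    pipe_out, pipe_in = utils.create_pipe()
    pool = eventlet.GreenPool()
    pool.spawn(producer, pipe_in)
    try:
        while True:
            data = pipe_out.read(4096)
            if not data:
                break
            print(len(data))
    finally:
        pipe_out.close()
        pool.waitall()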

@@ -0,0 +1,267 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import struct
from os_xenapi.client import exception as xenapi_except
LOG = logging.getLogger(__name__)
FMT_TO_LEN = {
'!B': 1,
'!H': 2,
'!I': 4,
'!Q': 8,
}
DISK_TYPE = {'None': 0,
'Reserved_1': 1,
'Fixed hard disk': 2,
'Dynamic hard disk': 3,
'Differencing hard disk': 4,
'Reserved_5': 5,
'Reserved_6': 6,
}
class VHDFileParser(object):
# This class supplies utils to parse different parts of a VHD file.
# It follows the following VHD spec:
# https://www.microsoft.com/en-us/download/confirmation.aspx?id=23850
def __init__(self, file_obj):
self.src_file = file_obj
self.cached_buff = b''
def get_disk_type_name(self, type_val):
for type_name in DISK_TYPE:
if (DISK_TYPE[type_name] == type_val):
return type_name
def cached_read(self, read_size):
# the data will be cached in the buffer.
data = self.src_file.read(read_size)
if data:
self.cached_buff += data
return data
def parse_vhd_footer(self):
footer_raw_data = self.cached_read(VHDFooter.VHD_HDF_SIZE)
return VHDFooter(footer_raw_data)
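A quick illustration of the cached_read() behaviour above: everything read through the parser is also accumulated in cached_buff, which is what lets _vhd_stream_to_vdi() and _vhd_to_pipe() resend the already-parsed header bytes before forwarding the rest of the stream. io.BytesIO stands in for the real VHD stream:

    import io

    from os_xenapi.client.image import vhd_utils

    parser = vhd_utils.VHDFileParser(io.BytesIO(b'abcdef'))
    assert parser.cached_read(4) == b'abcd'
    assert parser.cached_buff == b'abcd'    # the parsed bytes are kept
    assert parser.src_file.read() == b'ef'  # the rest stays in the source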
class VHDDynDiskParser(VHDFileParser):
"""The class presents the Dynamical Disk file:
The Dynamic Hard Disk Image format is as below:
+-----------------------------------------------+
|Mirror Image of Hard drive footer (512 bytes) |
+-----------------------------------------------+
|Dynamic Disk Header (1024 bytes) |
+-----------------------------------------------+
| padding bytes |
|(Table Offset in Dynamic Disk Header determines|
| where the BAT starts from) |
+-----------------------------------------------+
|BAT (Block Allocation Table) |
+-----------------------------------------------+
|Padding bytes to ensure the bitmap+Data blocks |
|start from 512-byte sector boundary. |
+-----------------------------------------------+
| bitmap 1 (512 bytes) |
| Data Block 1 |
+-----------------------------------------------+
| bitmap 2 (512 bytes) |
| Data Block 2 |
+-----------------------------------------------+
| ... |
+-----------------------------------------------+
| bitmap n (512 bytes) |
| Data Block n |
+-----------------------------------------------+
| Hard drive footer (512 bytes) |
+-----------------------------------------------+
"""
SIZE_OF_BITMAP = 512
def __init__(self, file_obj):
self.src_file = file_obj
self.cached_buff = b''
self.footer = self.parse_vhd_footer()
dyn_disk_type = DISK_TYPE['Dynamic hard disk']
if self.footer.disk_type != dyn_disk_type:
disk_type_name = self.get_disk_type_name(
self.footer.disk_type)
raise xenapi_except.VhdDiskTypeNotSupported(
disk_type=disk_type_name)
self.DynDiskHdr = self._get_dynamic_disk_header()
self.BatPaddingData = self._get_bat_padding()
self.Bat = self._get_block_allocation_table()
def _get_dynamic_disk_header(self):
ddh_raw_data = self.cached_read(VHDDynDiskHdr.VHD_DDH_SIZE)
return VHDDynDiskHdr(ddh_raw_data)
def _get_bat_padding(self):
PaddingData = None
len_padding = (self.DynDiskHdr.bat_offset - VHDFooter.VHD_HDF_SIZE -
VHDDynDiskHdr.VHD_DDH_SIZE)
if len_padding > 0:
PaddingData = self.cached_read(len_padding)
return PaddingData
def _get_block_allocation_table(self):
bat_ent_size = FMT_TO_LEN[VHDBlockAllocTable.FMT_BAT_ENT]
bat_size = bat_ent_size * self.DynDiskHdr.bat_max_entries
raw_data = self.cached_read(bat_size)
return VHDBlockAllocTable(raw_data)
def get_vhd_file_size(self):
# Calculate the VHD file's size based on the sections that precede
# the data blocks. This is useful when the VHD file's data is passed
# via streaming: we can calculate the file size before we have
# received all of the data. Note that this only works when the data
# blocks are placed contiguously in the VHD file (no holes). The VHD
# files exported by invoking XenAPI should meet this prerequisite.
# The "bitmap+Data blocks" section starts at the first 512-byte
# boundary after the Block Allocation Table.
bat_offset = self.DynDiskHdr.bat_offset
bat_size = len(self.Bat.raw_data)
data_offset = bat_offset + bat_size
if data_offset % 512 != 0:
data_offset = (data_offset // 512 + 1) * 512
bitmap_size = VHDDynDiskParser.SIZE_OF_BITMAP
block_size = self.DynDiskHdr.block_size
valid_blocks = self.Bat.num_valid_bat_entries
data_size = (bitmap_size + block_size) * valid_blocks
file_size = data_offset + data_size + VHDFooter.VHD_HDF_SIZE
LOG.debug("Calcuated file_size = {}: bat_offset = {}; "
"bat_size = {}; data_offset = {}; data_size = {}; "
"footer_size = {}".format(file_size, bat_offset, bat_size,
data_offset, data_size,
VHDFooter.VHD_HDF_SIZE))
return file_size
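To make the calculation concrete, here is the arithmetic for the fake VHD geometry used by the unit tests later in this change (BAT at offset 2048, 512 four-byte BAT entries of which 14 are valid, 2 MiB data blocks, 512-byte bitmaps and footer):

    bat_offset = 2048
    bat_size = 512 * 4                         # full BAT: 2048 bytes
    data_offset = bat_offset + bat_size        # 4096, already 512-aligned
    data_size = (512 + 2 * 1024 * 1024) * 14   # (bitmap + block) per valid entry
    file_size = data_offset + data_size + 512  # plus the trailing footer copy
    assert file_size == 29371904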
class VHDFooter(object):
# VHD Hard Disk Footer
VHD_HDF_SIZE = 512
HDF_LAYOUT = {
'current_size': {
'offset': 48,
'format': '!Q'},
'disk_type': {
'offset': 60,
'format': '!I'},
}
def __init__(self, raw_data):
self.raw_data = raw_data
self._parse_data()
def _parse_data(self):
hdf_layout = VHDFooter.HDF_LAYOUT
for field in hdf_layout:
format = hdf_layout[field]['format']
pos_start = hdf_layout[field]['offset']
pos_end = pos_start + FMT_TO_LEN[format]
(value, ) = struct.unpack(format,
self.raw_data[pos_start: pos_end])
setattr(self, field, value)
class VHDDynDiskHdr(object):
"""VHD Dynamic Disk Header:
The Dynamic Disk Header(DDH) layout is as below:
|**fields** | **size**|
|Cookie | 8 |
|Data Offset | 8 |
|*Table Offset* | 8 |
|Header Version | 4 |
|*Max Table Entries* | 4 |
|*Block Size* | 4 |
|Checksum | 4 |
|Parent Unique ID | 16 |
|Parent Time Stamp | 4 |
|Reserved | 4 |
|Parent Unicode Name | 512 |
|Parent Locator Entry 1 | 24 |
|Parent Locator Entry 2 | 24 |
|Parent Locator Entry 3 | 24 |
|Parent Locator Entry 4 | 24 |
|Parent Locator Entry 5 | 24 |
|Parent Locator Entry 6 | 24 |
|Parent Locator Entry 7 | 24 |
|Parent Locator Entry 8 | 24 |
|Reserved | 256 |
"""
VHD_DDH_SIZE = 1024
DDH_LAYOUT = {
'bat_offset': {
'offset': 16,
'format': '!Q'},
'bat_max_entries':
{'offset': 28,
'format': '!I'},
'block_size': {
'offset': 32,
'format': '!I'},
}
def __init__(self, raw_data):
self.raw_data = raw_data
self._parse_data()
def _parse_data(self):
ddh_layout = VHDDynDiskHdr.DDH_LAYOUT
for field in ddh_layout:
format = ddh_layout[field]['format']
pos_start = ddh_layout[field]['offset']
pos_end = pos_start + FMT_TO_LEN[format]
(value,) = struct.unpack(format,
self.raw_data[pos_start: pos_end])
setattr(self, field, value)
class VHDBlockAllocTable(object):
# VHD Block Allocation Table
FMT_BAT_ENT = '!I'
def __init__(self, raw_data):
self.raw_data = raw_data
self._parse_data()
def _parse_data(self):
self.num_valid_bat_entries = self.get_valid_bat_entries()
def get_valid_bat_entries(self):
# Calculate the number of valid BAT entries.
# It will go through all BAT entries. Those entries whose value is not
# the default value - 0xFFFFFFFF will be treated as valid.
num_of_valid_bat_ent = 0
size_of_bat_entry = FMT_TO_LEN[VHDBlockAllocTable.FMT_BAT_ENT]
for i in range(0, len(self.raw_data), size_of_bat_entry):
(value, ) = struct.unpack(VHDBlockAllocTable.FMT_BAT_ENT,
self.raw_data[i: i + size_of_bat_entry])
if value != 0xFFFFFFFF:
num_of_valid_bat_ent += 1
return num_of_valid_bat_ent

@@ -148,3 +148,9 @@ class Pool(XenAPISessionObject):
"""Pool of hosts."""
def __init__(self, session):
super(Pool, self).__init__(session, "pool")
class Task(XenAPISessionObject):
"""XAPI task."""
def __init__(self, session):
super(Task, self).__init__(session, "task")

@@ -57,6 +57,7 @@ def apply_session_helpers(session):
session.host = cli_objects.Host(session)
session.network = cli_objects.Network(session)
session.pool = cli_objects.Pool(session)
session.task = cli_objects.Task(session)
class XenAPISession(object):
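
With the Task helper applied to the session, the task lifecycle used by the VDI import/export code above looks like this (the label and description strings are illustrative):

    task_ref = session.task.create('VDI_EXPORT_for_instance-001',
                                   'Exporting VDI for instance: instance-001')
    try:
        pass  # drive the import/export HTTP request using task_ref
    finally:
        session.task.destroy(task_ref)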

os_xenapi/client/utils.py (new file)
@@ -0,0 +1,92 @@
# Copyright 2017 Citrix Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenio
import os
from oslo_log import log as logging
from os_xenapi.client import exception
LOG = logging.getLogger(__name__)
def get_default_sr(session):
pool_ref = session.pool.get_all()[0]
sr_ref = session.pool.get_default_SR(pool_ref)
if sr_ref:
return sr_ref
else:
raise exception.NotFound('Cannot find default SR')
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.VDI.create(
{'name_label': name_label,
'name_description': '',
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []}
)
LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
def _get_vdi_other_config(disk_type, instance=None):
"""Return metadata to store in VDI's other_config attribute.
`nova_instance_uuid` is used to associate a VDI with a particular instance
so that, if it becomes orphaned from an unclean shutdown of a
compute-worker, we can safely detach it.
"""
other_config = {'nova_disk_type': disk_type}
# create_vdi may be called simply while creating a volume,
# hence information about the instance may or may not be present.
if instance:
other_config['nova_instance_uuid'] = instance['uuid']
return other_config
def create_pipe():
rpipe, wpipe = os.pipe()
rfile = greenio.GreenPipe(rpipe, 'rb', 0)
wfile = greenio.GreenPipe(wpipe, 'wb', 0)
return rfile, wfile
def get_vdi_import_path(session, task_ref, vdi_ref):
session_id = session.get_session_id()
str_fmt = '/import_raw_vdi?session_id={}&task_id={}&vdi={}&format=vhd'
return str_fmt.format(session_id, task_ref, vdi_ref)
def get_vdi_export_path(session, task_ref, vdi_ref):
session_id = session.get_session_id()
str_fmt = '/export_raw_vdi?session_id={}&task_id={}&vdi={}&format=vhd'
return str_fmt.format(session_id, task_ref, vdi_ref)
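For illustration, the paths built above are consumed by opening a plain HTTP(S) connection to the host and streaming raw VHD bytes, as vdi_handler.py does; a trimmed-down sketch of the import side (host name, refs and payload are fake):

    from six.moves import http_client as httplib

    path = ('/import_raw_vdi?session_id=OpaqueRef:s&task_id=OpaqueRef:t'
            '&vdi=OpaqueRef:v&format=vhd')
    conn = httplib.HTTPConnection('xenserver-host')
    conn.connect()
    conn.request('PUT', path,
                 headers={'Content-Type': 'application/octet-stream',
                          'Content-Length': '512'})
    conn.send(b'\x00' * 512)  # the VHD bytes are streamed in chunks
    resp = conn.getresponse()
    print(resp.status, resp.reason)
    conn.close()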

@@ -0,0 +1,43 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_xenapi.client import image
from os_xenapi.client.image import vdi_handler
from os_xenapi.tests import base
class ImageTestCase(base.TestCase):
def setUp(self):
super(ImageTestCase, self).setUp()
self.context = mock.Mock()
self.session = mock.Mock()
self.instance = {'name': 'instance-001'}
self.host_url = "http://fake-host.com"
self.stream = mock.Mock()
@mock.patch.object(vdi_handler.ImageStreamToVDIs, 'start')
def test_stream_to_vdis(self, mock_start):
image.stream_to_vdis(self.context, self.session, self.instance,
self.host_url, self.stream)
mock_start.assert_called_once_with()
@mock.patch.object(vdi_handler.GenerateImageStream, 'get_image_data')
def test_vdis_to_stream(self, mock_get):
image.stream_from_vdis(self.context, self.session, self.instance,
self.host_url, ['fake-uuid'])
mock_get.assert_called_once_with()

@@ -0,0 +1,310 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import eventlet
from six.moves import http_client as httplib
import tarfile
from os_xenapi.client import exception
from os_xenapi.client.image import vdi_handler
from os_xenapi.client.image import vhd_utils
from os_xenapi.client import utils
from os_xenapi.tests import base
class ImageStreamToVDIsTestCase(base.TestCase):
def setUp(self):
super(ImageStreamToVDIsTestCase, self).setUp()
self.context = mock.Mock()
self.session = mock.Mock()
self.instance = {'name': 'instance-001'}
self.host_url = "http://fake-host.com"
self.stream = mock.Mock()
@mock.patch.object(tarfile, 'open')
@mock.patch.object(vhd_utils, 'VHDFileParser')
@mock.patch.object(vdi_handler.ImageStreamToVDIs, '_createVDI',
return_value=('fake_sr_ref', 'fake_vdi_ref'))
@mock.patch.object(vdi_handler.ImageStreamToVDIs, '_vhd_stream_to_vdi')
def test_start(self, mock_to_vdi, mock_createVDI,
mock_get_parser, mock_open):
self.session.task.create.return_value = 'fake-task-ref'
mock_footer = mock.Mock(current_size=1073741824)
mock_parser = mock.Mock()
mock_get_parser.return_value = mock_parser
mock_parser.parse_vhd_footer.return_value = mock_footer
fake_vhd_info = mock.Mock()
fake_vhd_info.size = 29371904
fake_vhd_info.name = '0.vhd'
mock_tarfile = mock.MagicMock()
mock_tarfile.__enter__.return_value = mock_tarfile
mock_tarfile.__iter__.return_value = [fake_vhd_info]
mock_open.return_value = mock_tarfile
mock_tarfile.extractfile.return_value = 'fake-file-obj'
image_cmd = vdi_handler.ImageStreamToVDIs(self.context, self.session,
self.instance, self.host_url,
self.stream)
image_cmd.start()
self.session.task.create.assert_called_once_with(
'VDI_IMPORT_for_instance-001',
'Importing VDI for instance: instance-001')
mock_open.assert_called_once_with(mode="r|gz", fileobj=self.stream)
mock_tarfile.extractfile.assert_called_once_with(fake_vhd_info)
mock_createVDI.assert_called_once_with(self.session, self.instance,
1073741824)
mock_to_vdi.assert_called_once_with(mock_parser, 'fake_vdi_ref',
29371904)
self.session.VDI.get_uuid.assert_called_once_with('fake_vdi_ref')
@mock.patch.object(utils, 'get_default_sr',
return_value='fake-sr-ref')
@mock.patch.object(utils, 'create_vdi',
return_value='fake-vdi-ref')
def test_createVDI(self, mock_create_vdi, mock_get_sr):
virtual_size = 1073741824
image_cmd = vdi_handler.ImageStreamToVDIs(self.context, self.session,
self.instance, self.host_url,
self.stream)
expect_result = ('fake-sr-ref', 'fake-vdi-ref')
result = image_cmd._createVDI(self.session, self.instance,
virtual_size)
mock_get_sr.assert_called_once_with(self.session)
mock_create_vdi.assert_called_once_with(self.session, 'fake-sr-ref',
self.instance, 'instance-001',
'root', virtual_size)
self.session.VDI.get_uuid.assert_called_once_with('fake-vdi-ref')
self.assertEqual(expect_result, result)
@mock.patch.object(utils, 'get_vdi_import_path',
return_value='fake-path')
@mock.patch.object(httplib.HTTPConnection, 'connect')
@mock.patch.object(httplib.HTTPConnection, 'request')
@mock.patch.object(httplib.HTTPConnection, 'send')
@mock.patch.object(httplib.HTTPConnection, 'getresponse')
@mock.patch.object(httplib.HTTPConnection, 'close')
def test_vhd_stream_to_vdi(self, conn_close, conn_getRes, conn_send,
conn_req, conn_connect, get_path):
vdh_stream = mock.Mock()
cache_size = 4 * 1024
remain_size = vdi_handler.CHUNK_SIZE / 2
file_size = cache_size + vdi_handler.CHUNK_SIZE * 2 + remain_size
headers = {'Content-Type': 'application/octet-stream',
'Content-Length': '%s' % file_size}
image_cmd = vdi_handler.ImageStreamToVDIs(self.context, self.session,
self.instance, self.host_url,
self.stream)
mock_parser = mock.Mock()
mock_parser.cached_buff = b'\x00' * cache_size
mock_parser.src_file = vdh_stream
image_cmd.task_ref = 'fake-task-ref'
vdh_stream.read.side_effect = ['chunk1', 'chunk2', 'chunk3']
image_cmd._vhd_stream_to_vdi(mock_parser, 'fake_vdi_ref', file_size)
conn_connect.assert_called_once_with()
get_path.assert_called_once_with(self.session, 'fake-task-ref',
'fake_vdi_ref')
conn_connect.assert_called_once_with()
conn_req.assert_called_once_with('PUT', 'fake-path', headers=headers)
expect_send_calls = [mock.call(mock_parser.cached_buff),
mock.call('chunk1'),
mock.call('chunk2'),
mock.call('chunk3'),
]
conn_send.assert_has_calls(expect_send_calls)
conn_getRes.assert_called_once_with()
conn_close.assert_called_once_with()
@mock.patch.object(utils, 'get_vdi_import_path',
return_value='fake-path')
@mock.patch.object(httplib.HTTPConnection, 'connect')
@mock.patch.object(httplib.HTTPConnection, 'request',
side_effect=Exception)
@mock.patch.object(httplib.HTTPConnection, 'send')
@mock.patch.object(httplib.HTTPConnection, 'getresponse')
@mock.patch.object(httplib.HTTPConnection, 'close')
def test_vhd_stream_to_vdi_put_except(self, conn_close, conn_getRes,
conn_send, conn_req, conn_connect,
get_path):
vdh_stream = mock.Mock()
cache_size = 4 * 1024
remain_size = vdi_handler.CHUNK_SIZE / 2
file_size = cache_size + vdi_handler.CHUNK_SIZE * 2 + remain_size
image_cmd = vdi_handler.ImageStreamToVDIs(self.context, self.session,
self.instance, self.host_url,
self.stream)
mock_parser = mock.Mock()
mock_parser.cached_buff = b'\x00' * cache_size
mock_parser.src_file = vdh_stream
image_cmd.task_ref = 'fake-task-ref'
vdh_stream.read.return_value = ['chunk1', 'chunk2', 'chunk3']
self.assertRaises(exception.VdiImportFailure,
image_cmd._vhd_stream_to_vdi, mock_parser,
'fake_vdi_ref', file_size)
@mock.patch.object(utils, 'get_vdi_import_path',
return_value='fake-path')
@mock.patch.object(httplib.HTTPConnection, 'connect',
side_effect=Exception)
@mock.patch.object(httplib.HTTPConnection, 'request')
@mock.patch.object(httplib.HTTPConnection, 'send')
@mock.patch.object(httplib.HTTPConnection, 'getresponse')
@mock.patch.object(httplib.HTTPConnection, 'close')
def test_vhd_stream_to_vdi_conn_except(self, conn_close, conn_getRes,
conn_send, conn_req, conn_connect,
get_path):
vdh_stream = mock.Mock()
cache_size = 4 * 1024
remain_size = vdi_handler.CHUNK_SIZE / 2
file_size = cache_size + vdi_handler.CHUNK_SIZE * 2 + remain_size
image_cmd = vdi_handler.ImageStreamToVDIs(self.context, self.session,
self.instance, self.host_url,
self.stream)
mock_parser = mock.Mock()
mock_parser.cached_buff = b'\x00' * cache_size
mock_parser.src_file = vdh_stream
image_cmd.task_ref = 'fake-task-ref'
vdh_stream.read.return_value = ['chunk1', 'chunk2', 'chunk3']
self.assertRaises(exception.HostConnectionFailure,
image_cmd._vhd_stream_to_vdi, mock_parser,
'fake_vdi_ref', file_size)
class GenerateImageStreamTestCase(base.TestCase):
def setUp(self):
super(GenerateImageStreamTestCase, self).setUp()
self.context = mock.Mock()
self.session = mock.Mock()
self.instance = {'name': 'instance-001'}
self.host_url = "http://fake-host.com"
self.stream = mock.Mock()
@mock.patch.object(utils, 'create_pipe')
@mock.patch.object(eventlet.GreenPool, 'spawn')
@mock.patch.object(vdi_handler.GenerateImageStream,
'start_image_stream_generator')
@mock.patch.object(eventlet.GreenPool, 'waitall')
def test_get_image_data(self, mock_waitall, mock_start, mock_spawn,
create_pipe):
mock_tarpipe_out = mock.Mock()
mock_tarpipe_in = mock.Mock()
create_pipe.return_value = (mock_tarpipe_out, mock_tarpipe_in)
image_cmd = vdi_handler.GenerateImageStream(
self.context, self.session, self.instance,
self.host_url, ['vdi_uuid'])
mock_tarpipe_out.read.side_effect = ['chunk1', 'chunk2', '']
image_chunks = []
for chunk in image_cmd.get_image_data():
image_chunks.append(chunk)
create_pipe.assert_called_once_with()
mock_spawn.assert_called_once_with(mock_start, mock_tarpipe_in)
self.assertEqual(image_chunks, ['chunk1', 'chunk2'])
class VdisToTarStreamTestCase(base.TestCase):
def setUp(self):
super(VdisToTarStreamTestCase, self).setUp()
self.context = mock.Mock()
self.session = mock.Mock()
self.instance = {'name': 'instance-001'}
self.host_url = "http://fake-host.com"
self.stream = mock.Mock()
@mock.patch.object(tarfile, 'open')
@mock.patch.object(tarfile, 'TarInfo')
@mock.patch.object(vdi_handler.VdisToTarStream, '_connect_request',
return_value='fake-conn-resp')
@mock.patch.object(vhd_utils, 'VHDDynDiskParser')
@mock.patch.object(utils, 'create_pipe')
@mock.patch.object(vdi_handler.VdisToTarStream, 'convert_vhd_to_tar')
@mock.patch.object(eventlet.GreenPool, 'spawn')
@mock.patch.object(vdi_handler.VdisToTarStream, '_vhd_to_pipe')
@mock.patch.object(eventlet.GreenPool, 'waitall')
def test_start(self, mock_waitall, mock_to_pipe, mock_spawn,
mock_convert, mock_pipe, mock_parser,
mock_conn_req, mock_tarinfo, mock_open):
mock_tarfile = mock.MagicMock()
mock_tarfile.__enter__.return_value = mock_tarfile
mock_open.return_value = mock_tarfile
mock_tarinfo.return_value = mock.sentinel.tar_info
self.session.VDI.get_by_uuid.return_value = 'fake-vdi-ref'
mock_dynDisk = mock.Mock()
mock_parser.return_value = mock_dynDisk
mock_dynDisk.get_vhd_file_size.return_value = 29371904
vdi_uuids = ['vdi-uuid']
vhdpipe_in = mock.Mock()
mock_pipe.return_value = ('vhdpipe_out', vhdpipe_in)
image_cmd = vdi_handler.VdisToTarStream(
self.context, self.session, self.instance,
self.host_url, vdi_uuids, self.stream)
image_cmd.start()
mock_open.assert_called_once_with(fileobj=self.stream,
mode='w|gz')
self.session.VDI.get_by_uuid.assert_called_once_with('vdi-uuid')
mock_conn_req.assert_called_once_with('fake-vdi-ref')
mock_dynDisk.get_vhd_file_size.assert_called_once_with()
mock_pipe.assert_called_once_with()
mock_spawn.assert_called_once_with(mock_convert, 'vhdpipe_out',
mock_tarfile,
mock.sentinel.tar_info)
mock_to_pipe.assert_called_once_with(mock_dynDisk, vhdpipe_in)
vhdpipe_in.close.assert_called_once_with()
mock_waitall.assert_called_once_with()
class AddVhdToTarTestCase(base.TestCase):
def setUp(self):
super(AddVhdToTarTestCase, self).setUp()
self.context = mock.Mock()
self.session = mock.Mock()
self.instance = {'name': 'instance-001'}
self.host_url = "http://fake-host.com"
self.stream = mock.Mock()
def test_add_stream_to_tar(self):
mock_tar_file = mock.Mock()
mock_tar_info = mock.Mock()
mock_tar_info.size = 8196
mock_tar_info.name = '0.vhd'
image_cmd = vdi_handler.AddVhdToTar(mock_tar_file, mock_tar_info,
'fake-vhdpipe-out')
image_cmd.start()
mock_tar_file.addfile.assert_called_once_with(
mock_tar_info, fileobj='fake-vhdpipe-out')
def test_add_stream_to_tar_IOError(self):
mock_tar_file = mock.Mock()
mock_tar_info = mock.Mock()
mock_tar_info.size = 1024
mock_tar_info.name = '0.vhd'
image_cmd = vdi_handler.AddVhdToTar(mock_tar_file, mock_tar_info,
'fake-vhdpipe-out')
mock_tar_file.addfile.side_effect = IOError
self.assertRaises(IOError, image_cmd.start)

@@ -0,0 +1,174 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This file defines the tests used to cover unit tests for VHD utils.
To ensure it's close to the real VHD file parser, strongly suggest to use
the data from a real VHD file for the fake bytes to feed the unit tests.
Initially the fake data for the tests is from the VHD file exported from
the VM which booted from the default devstack image: cirros-0.3.5-x86_64-disk.
"""
import mock
import struct
from os_xenapi.client import exception as xenapi_except
from os_xenapi.client.image import vhd_utils
from os_xenapi.tests import base
class VhdUtilsTestCase(base.TestCase):
def test_VHDFooter(self):
ONE_GB = 1 * 1024 * 1024 * 1024
TYPE_DYNAMIC = 3
footer_data = b'\x00' * 48 + struct.pack('!Q', ONE_GB) + \
b'\x00' * 4 + \
b'\x00\x00\x00\x03'
vhd_footer = vhd_utils.VHDFooter(footer_data)
self.assertEqual(vhd_footer.raw_data, footer_data)
self.assertEqual(vhd_footer.current_size, ONE_GB)
self.assertEqual(vhd_footer.disk_type, TYPE_DYNAMIC)
def test_VHDDynDiskHdr(self):
BAT_OFFSET = 2048
MAX_BAT_ENTRIES = 512
SIZE_OF_DATA_BLOCK = 2 * 1024 * 1024
# Construct the DDH (Dynamic Disk Header) fields.
DDH_BAT_OFFSET = struct.pack('!Q', BAT_OFFSET)
DDH_MAX_BAT_ENTRIES = struct.pack('!I', MAX_BAT_ENTRIES)
DDH_BLOCK_SIZE = struct.pack('!I', SIZE_OF_DATA_BLOCK)
ddh_data = b'\x00' * 16 + DDH_BAT_OFFSET + \
b'\x00' * 4 + DDH_MAX_BAT_ENTRIES + \
DDH_BLOCK_SIZE
vhd_dynDiskHdr = vhd_utils.VHDDynDiskHdr(ddh_data)
self.assertEqual(vhd_dynDiskHdr.raw_data, ddh_data)
self.assertEqual(vhd_dynDiskHdr.bat_offset, BAT_OFFSET)
self.assertEqual(vhd_dynDiskHdr.bat_max_entries, MAX_BAT_ENTRIES)
self.assertEqual(vhd_dynDiskHdr.block_size, SIZE_OF_DATA_BLOCK)
def test_VHDBlockAllocTable(self):
MAX_BAT_ENTRIES = 512
# Construct BAT(Block Allocation Table)
# Any value other than 0xffffffff marks a valid BAT entry. Let's leave
# some holes: here DATA_BAT contains 14 valid entries in the first 16
# 4-byte units; the 2 holes (0xffffffff) should be ignored.
DATA_BAT = b'\x00\x00\x00\x08\x00\x00\x50\x0d\xff\xff\xff\xff' + \
b'\x00\x00\x10\x09\x00\x00\x20\x0a\x00\x00\x30\x0b' + \
b'\x00\x00\x40\x0c\xff\xff\xff\xff\x00\x00\x60\x0e' + \
b'\x00\x00\x70\x0f\x00\x00\x80\x10\x00\x00\x90\x11' + \
b'\x00\x00\xa0\x12\x00\x00\xb0\x13\x00\x00\xc0\x14' + \
b'\x00\x00\xd0\x15' + \
b'\xff\xff\xff\xff' * (MAX_BAT_ENTRIES - 16)
vhd_blockAllocTable = vhd_utils.VHDBlockAllocTable(DATA_BAT)
self.assertEqual(vhd_blockAllocTable.raw_data, DATA_BAT)
self.assertEqual(vhd_blockAllocTable.num_valid_bat_entries, 14)
class VhdFileParserTestCase(base.TestCase):
def test_get_disk_type_name(self):
disk_type_val = 3
expect_disk_type_name = 'Dynamic hard disk'
fake_file = 'fake_file'
vhdParser = vhd_utils.VHDFileParser(fake_file)
disk_type_name = vhdParser.get_disk_type_name(disk_type_val)
self.assertEqual(disk_type_name, expect_disk_type_name)
def test_get_vhd_file_size(self):
vhd_file = mock.Mock()
SIZE_OF_FOOTER = 512
SIZE_OF_DDH = 1024
SIZE_PADDING = 512
MAX_BAT_ENTRIES = 512
SIZE_OF_BAT_ENTRY = 4
SIZE_OF_BITMAP = 512
SIZE_OF_DATA_BLOCK = 2 * 1024 * 1024
VIRTUAL_SIZE = 40 * 1024 * 1024 * 1024
# Make fake data for VHD footer.
DATA_FOOTER = b'\x00' * 48 + struct.pack('!Q', VIRTUAL_SIZE)
# disk type is 3: dynamic disk.
DATA_FOOTER += b'\x00' * 4 + b'\x00\x00\x00\x03'
# padding bytes
padding_len = SIZE_OF_FOOTER - len(DATA_FOOTER)
DATA_FOOTER += b'\x00' * padding_len
# Construct the DDH (Dynamic Disk Header) fields.
DDH_BAT_OFFSET = struct.pack('!Q', 2048)
DDH_MAX_BAT_ENTRIES = struct.pack('!I', MAX_BAT_ENTRIES)
DDH_BLOCK_SIZE = struct.pack('!I', SIZE_OF_DATA_BLOCK)
DATA_DDH = b'\x00' * 16 + DDH_BAT_OFFSET
DATA_DDH += b'\x00' * 4 + DDH_MAX_BAT_ENTRIES
DATA_DDH += DDH_BLOCK_SIZE
# padding bytes for DDH
padding_len = SIZE_OF_DDH - len(DATA_DDH)
DATA_DDH += b'\x00' * padding_len
# Construct the padding bytes before the Block Allocation Table.
DATA_PADDING = b'\x00' * SIZE_PADDING
# Construct BAT(Block Allocation Table)
# Any value other than 0xffffffff marks a valid BAT entry. Let's leave
# some holes: here DATA_BAT contains 14 valid entries in the first 16
# 4-byte units; the 2 holes (0xffffffff) should be ignored.
DATA_BAT = b'\x00\x00\x00\x08\x00\x00\x50\x0d\xff\xff\xff\xff' + \
b'\x00\x00\x10\x09\x00\x00\x20\x0a\x00\x00\x30\x0b' + \
b'\x00\x00\x40\x0c\xff\xff\xff\xff\x00\x00\x60\x0e' + \
b'\x00\x00\x70\x0f\x00\x00\x80\x10\x00\x00\x90\x11' + \
b'\x00\x00\xa0\x12\x00\x00\xb0\x13\x00\x00\xc0\x14' + \
b'\x00\x00\xd0\x15' + \
b'\xff\xff\xff\xff' * (MAX_BAT_ENTRIES - 16)
expected_size = SIZE_OF_FOOTER * 2 + SIZE_OF_DDH
expected_size += SIZE_PADDING + SIZE_OF_BAT_ENTRY * MAX_BAT_ENTRIES
expected_size += (SIZE_OF_BITMAP + SIZE_OF_DATA_BLOCK) * 14
vhd_file.read.side_effect = [DATA_FOOTER,
DATA_DDH,
DATA_PADDING,
DATA_BAT]
vhd_parser = vhd_utils.VHDDynDiskParser(vhd_file)
vhd_size = vhd_parser.get_vhd_file_size()
read_call_list = vhd_file.read.call_args_list
expected = [mock.call(SIZE_OF_FOOTER),
mock.call(SIZE_OF_DDH),
mock.call(SIZE_PADDING),
mock.call(SIZE_OF_BAT_ENTRY * MAX_BAT_ENTRIES),
]
self.assertEqual(expected, read_call_list)
self.assertEqual(expected_size, vhd_size)
def test_not_dyn_disk_exception(self):
# If the VHD's disk type is not a dynamic disk, it should raise an
# exception.
SIZE_OF_FOOTER = 512
vhd_file = mock.Mock()
# disk type is 2: fixed disk.
DATA_FOOTER = b'\x00' * 60 + b'\x00\x00\x00\x02'
# padding bytes
padding_len = SIZE_OF_FOOTER - len(DATA_FOOTER)
DATA_FOOTER += b'\x00' * padding_len
vhd_file.read.return_value = DATA_FOOTER
self.assertRaises(xenapi_except.VhdDiskTypeNotSupported,
vhd_utils.VHDDynDiskParser, vhd_file)

@@ -0,0 +1,124 @@
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from eventlet import greenio
import os
from os_xenapi.client import exception
from os_xenapi.client import utils
from os_xenapi.tests import base
class UtilsTestCase(base.TestCase):
def setUp(self):
super(UtilsTestCase, self).setUp()
self.session = mock.Mock()
def test_get_default_sr(self):
FAKE_POOL_REF = 'fake-pool-ref'
FAKE_SR_REF = 'fake-sr-ref'
pool = self.session.pool
pool.get_all.return_value = [FAKE_POOL_REF]
pool.get_default_SR.return_value = FAKE_SR_REF
default_sr_ref = utils.get_default_sr(self.session)
pool.get_all.assert_called_once_with()
pool.get_default_SR.assert_called_once_with(FAKE_POOL_REF)
self.assertEqual(default_sr_ref, FAKE_SR_REF)
def test_get_default_sr_except(self):
FAKE_POOL_REF = 'fake-pool-ref'
FAKE_SR_REF = None
mock_pool = self.session.pool
mock_pool.get_all.return_value = [FAKE_POOL_REF]
mock_pool.get_default_SR.return_value = FAKE_SR_REF
self.assertRaises(exception.NotFound,
utils.get_default_sr,
self.session)
def test_create_vdi(self):
mock_create = self.session.VDI.create
mock_create.return_value = 'fake-vdi-ref'
fake_instance = {'uuid': 'fake-uuid'}
expect_other_conf = {'nova_disk_type': 'fake-disk-type',
'nova_instance_uuid': 'fake-uuid'}
fake_virtual_size = 1
create_param = {
'name_label': 'fake-name-label',
'name_description': '',
'SR': 'fake-sr-ref',
'virtual_size': str(fake_virtual_size),
'type': 'User',
'sharable': False,
'read_only': False,
'xenstore_data': {},
'other_config': expect_other_conf,
'sm_config': {},
'tags': [],
}
vdi_ref = utils.create_vdi(self.session, 'fake-sr-ref', fake_instance,
'fake-name-label', 'fake-disk-type',
fake_virtual_size)
self.session.VDI.create.assert_called_once_with(create_param)
self.assertEqual(vdi_ref, 'fake-vdi-ref')
@mock.patch.object(os, 'pipe')
@mock.patch.object(greenio, 'GreenPipe')
def test_create_pipe(self, mock_green_pipe, mock_pipe):
mock_pipe.return_value = ('fake-rpipe', 'fake-wpipe')
mock_green_pipe.side_effect = ['fake-rfile', 'fake-wfile']
rfile, wfile = utils.create_pipe()
mock_pipe.assert_called_once_with()
real_calls = mock_green_pipe.call_args_list
expect_calls = [mock.call('fake-rpipe', 'rb', 0),
mock.call('fake-wpipe', 'wb', 0)]
self.assertEqual(expect_calls, real_calls)
self.assertEqual('fake-rfile', rfile)
self.assertEqual('fake-wfile', wfile)
def test_get_vdi_import_path(self):
self.session.get_session_id.return_value = 'fake-id'
task_ref = 'fake-task-ref'
vdi_ref = 'fake-vdi-ref'
expected_path = '/import_raw_vdi?session_id=fake-id&'
expected_path += 'task_id=fake-task-ref&vdi=fake-vdi-ref&format=vhd'
export_path = utils.get_vdi_import_path(self.session,
task_ref,
vdi_ref)
self.session.get_session_id.assert_called_once_with()
self.assertEqual(expected_path, export_path)
def test_get_vdi_export_path(self):
self.session.get_session_id.return_value = 'fake-id'
task_ref = 'fake-task-ref'
vdi_ref = 'fake-vdi-ref'
expected_path = '/export_raw_vdi?session_id=fake-id&'
expected_path += 'task_id=fake-task-ref&vdi=fake-vdi-ref&format=vhd'
export_path = utils.get_vdi_export_path(self.session,
task_ref,
vdi_ref)
self.session.get_session_id.assert_called_once_with()
self.assertEqual(expected_path, export_path)