summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoralop <alopgeek@gmail.com>2013-03-12 12:48:24 -0700
committeralop <alopgeek@gmail.com>2013-03-12 12:48:24 -0700
commitf2c7219d60fcd53e7ac5bbca638a2e093c1353b3 (patch)
treea7213fe2036ba0a9c4f5ba841d07742862899ded
parent5a10b120b5c97bda30e51692ef89229ed299dc32 (diff)
added the patched netapp.py driver from https://bugs.launchpad.net/cinder/+bug/1091480
-rw-r--r--attributes/default.rb1
-rw-r--r--files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py1294
-rw-r--r--recipes/volume.rb8
3 files changed, 1303 insertions, 0 deletions
diff --git a/attributes/default.rb b/attributes/default.rb
index b25fd41..4e496e9 100644
--- a/attributes/default.rb
+++ b/attributes/default.rb
@@ -90,6 +90,7 @@ default["cinder"]["netapp"]["dfm_password"] = nil
90default["cinder"]["netapp"]["dfm_port"] = "8088" 90default["cinder"]["netapp"]["dfm_port"] = "8088"
91default["cinder"]["netapp"]["dfm_web_port"] = "8080" 91default["cinder"]["netapp"]["dfm_web_port"] = "8080"
92default["cinder"]["netapp"]["storage_service"] = "storage_service" 92default["cinder"]["netapp"]["storage_service"] = "storage_service"
93default["cinder"]["netapp"]["driver"] = "/usr/share/pyshared/cinder/volume/netapp.py"
93 94
94# logging attribute 95# logging attribute
95default["cinder"]["syslog"]["use"] = false 96default["cinder"]["syslog"]["use"] = false
diff --git a/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py b/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py
new file mode 100644
index 0000000..3d4ca20
--- /dev/null
+++ b/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py
@@ -0,0 +1,1294 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2
3# Copyright (c) 2012 NetApp, Inc.
4# Copyright (c) 2012 OpenStack LLC.
5# All Rights Reserved.
6#
7# Licensed under the Apache License, Version 2.0 (the "License"); you may
8# not use this file except in compliance with the License. You may obtain
9# a copy of the License at
10#
11# http://www.apache.org/licenses/LICENSE-2.0
12#
13# Unless required by applicable law or agreed to in writing, software
14# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
15# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
16# License for the specific language governing permissions and limitations
17# under the License.
18"""
19Volume driver for NetApp storage systems.
20
21This driver requires NetApp OnCommand 5.0 and one or more Data
22ONTAP 7-mode storage systems with installed iSCSI licenses.
23
24"""
25
26import time
27
28import suds
29from suds import client
30from suds.sax import text
31
32from cinder import exception
33from cinder import flags
34from cinder.openstack.common import lockutils
35from cinder.openstack.common import log as logging
36from cinder.openstack.common import cfg
37from cinder.volume import driver
38from cinder.volume import volume_types
39
LOG = logging.getLogger("cinder.volume.driver")

# Configuration options understood by this driver; registered with the
# global FLAGS object below and read in _check_flags()/do_setup().
netapp_opts = [
    cfg.StrOpt('netapp_wsdl_url',
               default=None,
               help='URL of the WSDL file for the DFM server'),
    cfg.StrOpt('netapp_login',
               default=None,
               help='User name for the DFM server'),
    cfg.StrOpt('netapp_password',
               default=None,
               help='Password for the DFM server'),
    cfg.StrOpt('netapp_server_hostname',
               default=None,
               help='Hostname for the DFM server'),
    cfg.IntOpt('netapp_server_port',
               default=8088,
               help='Port number for the DFM server'),
    cfg.StrOpt('netapp_storage_service',
               default=None,
               help=('Storage service to use for provisioning '
                     '(when volume_type=None)')),
    cfg.StrOpt('netapp_storage_service_prefix',
               default=None,
               help=('Prefix of storage service name to use for '
                     'provisioning (volume_type name will be appended)')),
    cfg.StrOpt('netapp_vfiler',
               default=None,
               help='Vfiler to use for provisioning'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(netapp_opts)
73
74
class DfmDataset(object):
    """Lightweight record describing a dataset discovered in DFM."""

    def __init__(self, id, name, project, type):
        # Keep the DFM id, dataset name, owning project and volume type.
        self.id, self.name = id, name
        self.project, self.type = project, type
82
class DfmLun(object):
    """Lightweight record describing a LUN discovered in DFM."""

    def __init__(self, dataset, lunpath, id):
        # The owning DfmDataset, the full LUN path, and the DFM LUN id.
        self.dataset, self.lunpath, self.id = dataset, lunpath, id
89
90class NetAppISCSIDriver(driver.ISCSIDriver):
91 """NetApp iSCSI volume driver."""
92
93 IGROUP_PREFIX = 'openstack-'
94 DATASET_PREFIX = 'OpenStack_'
95 DATASET_METADATA_PROJECT_KEY = 'OpenStackProject'
96 DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType'
97
    def __init__(self, *args, **kwargs):
        """Initialize the driver and its (initially empty) discovery caches."""
        super(NetAppISCSIDriver, self).__init__(*args, **kwargs)
        # LUNs and datasets found in DFM; populated by _discover_luns().
        self.discovered_luns = []
        self.discovered_datasets = []
        # Lazy cache mapping volume name -> DfmLun, filled on first lookup.
        self.lun_table = {}
104 def _check_fail(self, request, response):
105 """Utility routine to handle checking ZAPI failures."""
106 if 'failed' == response.Status:
107 name = request.Name
108 reason = response.Reason
109 msg = _('API %(name)s failed: %(reason)s')
110 raise exception.VolumeBackendAPIException(data=msg % locals())
111
112 def _create_client(self, **kwargs):
113 """Instantiate a web services client.
114
115 This method creates a "suds" client to make web services calls to the
116 DFM server. Note that the WSDL file is quite large and may take
117 a few seconds to parse.
118 """
119 wsdl_url = kwargs['wsdl_url']
120 LOG.debug(_('Using WSDL: %s') % wsdl_url)
121 if kwargs['cache']:
122 self.client = client.Client(wsdl_url, username=kwargs['login'],
123 password=kwargs['password'])
124 else:
125 self.client = client.Client(wsdl_url, username=kwargs['login'],
126 password=kwargs['password'],
127 cache=None)
128 soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'],
129 kwargs['port'])
130 LOG.debug(_('Using DFM server: %s') % soap_url)
131 self.client.set_options(location=soap_url)
132
133 def _set_storage_service(self, storage_service):
134 """Set the storage service to use for provisioning."""
135 LOG.debug(_('Using storage service: %s') % storage_service)
136 self.storage_service = storage_service
137
138 def _set_storage_service_prefix(self, storage_service_prefix):
139 """Set the storage service prefix to use for provisioning."""
140 LOG.debug(_('Using storage service prefix: %s') %
141 storage_service_prefix)
142 self.storage_service_prefix = storage_service_prefix
143
144 def _set_vfiler(self, vfiler):
145 """Set the vfiler to use for provisioning."""
146 LOG.debug(_('Using vfiler: %s') % vfiler)
147 self.vfiler = vfiler
148
149 def _check_flags(self):
150 """Ensure that the flags we care about are set."""
151 required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
152 'netapp_server_hostname', 'netapp_server_port']
153 for flag in required_flags:
154 if not getattr(FLAGS, flag, None):
155 raise exception.InvalidInput(reason=_('%s is not set') % flag)
156 if not (FLAGS.netapp_storage_service or
157 FLAGS.netapp_storage_service_prefix):
158 raise exception.InvalidInput(reason=_('Either '
159 'netapp_storage_service or netapp_storage_service_prefix must '
160 'be set'))
161
    def do_setup(self, context):
        """Setup the NetApp Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about and setup the suds (web services)
        client.
        """
        self._check_flags()
        # WSDL parsing inside _create_client may take a few seconds.
        self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
            login=FLAGS.netapp_login, password=FLAGS.netapp_password,
            hostname=FLAGS.netapp_server_hostname,
            port=FLAGS.netapp_server_port, cache=True)
        self._set_storage_service(FLAGS.netapp_storage_service)
        self._set_storage_service_prefix(FLAGS.netapp_storage_service_prefix)
        self._set_vfiler(FLAGS.netapp_vfiler)
177
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Invoke a web services API to make sure we can talk to the server.
        Also perform the discovery of datasets and LUNs from DFM.
        """
        # DfmAbout fails fast if connectivity or credentials are wrong.
        self.client.service.DfmAbout()
        LOG.debug(_("Connected to DFM server"))
        self._discover_luns()
187
188 def _get_datasets(self):
189 """Get the list of datasets from DFM."""
190 server = self.client.service
191 res = server.DatasetListInfoIterStart(IncludeMetadata=True)
192 tag = res.Tag
193 datasets = []
194 try:
195 while True:
196 res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100)
197 if not res.Datasets:
198 break
199 datasets.extend(res.Datasets.DatasetInfo)
200 finally:
201 server.DatasetListInfoIterEnd(Tag=tag)
202 return datasets
203
    def _discover_dataset_luns(self, dataset, volume):
        """Discover all of the LUNs in a dataset.

        Appends a DfmLun to self.discovered_luns for each LUN member of
        *dataset*. When *volume* is given, only LUNs whose path ends with
        '/<volume>' are recorded.
        """
        server = self.client.service
        res = server.DatasetMemberListInfoIterStart(
                DatasetNameOrId=dataset.id,
                IncludeExportsInfo=True,
                IncludeIndirect=True,
                MemberType='lun_path')
        tag = res.Tag
        suffix = None
        if volume:
            suffix = '/' + volume
        try:
            while True:
                res = server.DatasetMemberListInfoIterNext(Tag=tag,
                                                           Maximum=100)
                if (not hasattr(res, 'DatasetMembers') or
                        not res.DatasetMembers):
                    break
                for member in res.DatasetMembers.DatasetMemberInfo:
                    if suffix and not member.MemberName.endswith(suffix):
                        continue
                    # MemberName is the full LUN path in this format:
                    # host:/volume/qtree/lun
                    lun = DfmLun(dataset, member.MemberName, member.MemberId)
                    self.discovered_luns.append(lun)
        finally:
            # Always release the server-side iterator.
            server.DatasetMemberListInfoIterEnd(Tag=tag)
232
233 def _discover_luns(self):
234 """Discover the LUNs from DFM.
235
236 Discover all of the OpenStack-created datasets and LUNs in the DFM
237 database.
238 """
239 datasets = self._get_datasets()
240 self.discovered_datasets = []
241 self.discovered_luns = []
242 for dataset in datasets:
243 if not dataset.DatasetName.startswith(self.DATASET_PREFIX):
244 continue
245 if (not hasattr(dataset, 'DatasetMetadata') or
246 not dataset.DatasetMetadata):
247 continue
248 project = None
249 type = None
250 for field in dataset.DatasetMetadata.DfmMetadataField:
251 if field.FieldName == self.DATASET_METADATA_PROJECT_KEY:
252 project = field.FieldValue
253 elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY:
254 type = field.FieldValue
255 if not project:
256 continue
257 ds = DfmDataset(dataset.DatasetId, dataset.DatasetName,
258 project, type)
259 self.discovered_datasets.append(ds)
260 self._discover_dataset_luns(ds, None)
261 dataset_count = len(self.discovered_datasets)
262 lun_count = len(self.discovered_luns)
263 msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs")
264 LOG.debug(msg % locals())
265 self.lun_table = {}
266
267 def _get_job_progress(self, job_id):
268 """Get progress of one running DFM job.
269
270 Obtain the latest progress report for the job and return the
271 list of progress events.
272 """
273 server = self.client.service
274 res = server.DpJobProgressEventListIterStart(JobId=job_id)
275 tag = res.Tag
276 event_list = []
277 try:
278 while True:
279 res = server.DpJobProgressEventListIterNext(Tag=tag,
280 Maximum=100)
281 if not hasattr(res, 'ProgressEvents'):
282 break
283 event_list += res.ProgressEvents.DpJobProgressEventInfo
284 finally:
285 server.DpJobProgressEventListIterEnd(Tag=tag)
286 return event_list
287
288 def _wait_for_job(self, job_id):
289 """Wait until a job terminates.
290
291 Poll the job until it completes or an error is detected. Return the
292 final list of progress events if it completes successfully.
293 """
294 while True:
295 events = self._get_job_progress(job_id)
296 for event in events:
297 if event.EventStatus == 'error':
298 msg = _('Job failed: %s') % (event.ErrorMessage)
299 raise exception.VolumeBackendAPIException(data=msg)
300 if event.EventType == 'job-end':
301 return events
302 time.sleep(5)
303
304 def _dataset_name(self, project, ss_type):
305 """Return the dataset name for a given project and volume type."""
306 _project = project.replace(' ', '_').replace('-', '_')
307 dataset_name = self.DATASET_PREFIX + _project
308 if not ss_type:
309 return dataset_name
310 _type = ss_type.replace(' ', '_').replace('-', '_')
311 return dataset_name + '_' + _type
312
313 def _get_dataset(self, dataset_name):
314 """Lookup a dataset by name in the list of discovered datasets."""
315 for dataset in self.discovered_datasets:
316 if dataset.name == dataset_name:
317 return dataset
318 return None
319
    def _create_dataset(self, dataset_name, project, ss_type):
        """Create a new dataset using the storage service.

        The export settings are set to create iSCSI LUNs aligned for Linux.
        Returns the new DfmDataset (also appended to discovered_datasets).
        """
        # Creating a typed dataset requires the prefix flag; an untyped one
        # requires the plain storage service flag.
        if ss_type and not self.storage_service_prefix:
            msg = _('Attempt to use volume_type without specifying '
                'netapp_storage_service_prefix flag.')
            raise exception.VolumeBackendAPIException(data=msg)
        if not (ss_type or self.storage_service):
            msg = _('You must set the netapp_storage_service flag in order to '
                'create volumes with no volume_type.')
            raise exception.VolumeBackendAPIException(data=msg)
        storage_service = self.storage_service
        if ss_type:
            storage_service = self.storage_service_prefix + ss_type

        factory = self.client.factory

        # Build the nested export description: iSCSI LUNs with a Linux
        # igroup OS type.
        lunmap = factory.create('DatasetLunMappingInfo')
        lunmap.IgroupOsType = 'linux'
        export = factory.create('DatasetExportInfo')
        export.DatasetExportProtocol = 'iscsi'
        export.DatasetLunMappingInfo = lunmap
        detail = factory.create('StorageSetInfo')
        detail.DpNodeName = 'Primary data'
        detail.DatasetExportInfo = export
        if hasattr(self, 'vfiler') and self.vfiler:
            detail.ServerNameOrId = self.vfiler
        details = factory.create('ArrayOfStorageSetInfo')
        details.StorageSetInfo = [detail]
        # Tag the dataset with project and volume type so _discover_luns()
        # can recognize it later.
        field1 = factory.create('DfmMetadataField')
        field1.FieldName = self.DATASET_METADATA_PROJECT_KEY
        field1.FieldValue = project
        field2 = factory.create('DfmMetadataField')
        field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY
        field2.FieldValue = ss_type
        metadata = factory.create('ArrayOfDfmMetadataField')
        metadata.DfmMetadataField = [field1, field2]

        res = self.client.service.StorageServiceDatasetProvision(
                StorageServiceNameOrId=storage_service,
                DatasetName=dataset_name,
                AssumeConfirmation=True,
                StorageSetDetails=details,
                DatasetMetadata=metadata)

        ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type)
        self.discovered_datasets.append(ds)
        return ds
371
372 @lockutils.synchronized('netapp_dfm', 'cinder-', True)
373 def _provision(self, name, description, project, ss_type, size):
374 """Provision a LUN through provisioning manager.
375
376 The LUN will be created inside a dataset associated with the project.
377 If the dataset doesn't already exist, we create it using the storage
378 service specified in the cinder conf.
379 """
380 dataset_name = self._dataset_name(project, ss_type)
381 dataset = self._get_dataset(dataset_name)
382 if not dataset:
383 dataset = self._create_dataset(dataset_name, project, ss_type)
384
385 info = self.client.factory.create('ProvisionMemberRequestInfo')
386 info.Name = name
387 if description:
388 info.Description = description
389 info.Size = size
390 info.MaximumSnapshotSpace = 2 * long(size)
391
392 server = self.client.service
393 lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id)
394 try:
395 server.DatasetProvisionMember(EditLockId=lock_id,
396 ProvisionMemberRequestInfo=info)
397 res = server.DatasetEditCommit(EditLockId=lock_id,
398 AssumeConfirmation=True)
399 except (suds.WebFault, Exception):
400 server.DatasetEditRollback(EditLockId=lock_id)
401 msg = _('Failed to provision dataset member')
402 raise exception.VolumeBackendAPIException(data=msg)
403
404 lun_id = None
405 lunpath = None
406
407 for info in res.JobIds.JobInfo:
408 events = self._wait_for_job(info.JobId)
409 for event in events:
410 if event.EventType != 'lun-create':
411 continue
412 lunpath = event.ProgressLunInfo.LunName
413 lun_id = event.ProgressLunInfo.LunPathId
414
415 if not lun_id:
416 msg = _('No LUN was created by the provision job')
417 raise exception.VolumeBackendAPIException(data=msg)
418
419 lun = DfmLun(dataset, lunpath, lun_id)
420 self.discovered_luns.append(lun)
421 self.lun_table[name] = lun
422
423 def _get_ss_type(self, volume):
424 """Get the storage service type for a volume."""
425 id = volume['volume_type_id']
426 if not id:
427 return None
428 volume_type = volume_types.get_volume_type(None, id)
429 if not volume_type:
430 return None
431 return volume_type['name']
432
433 @lockutils.synchronized('netapp_dfm', 'cinder-', True)
434 def _remove_destroy(self, name, project):
435 """Remove the LUN from the dataset, also destroying it.
436
437 Remove the LUN from the dataset and destroy the actual LUN on the
438 storage system.
439 """
440 lun = self._lookup_lun_for_volume(name, project)
441 member = self.client.factory.create('DatasetMemberParameter')
442 member.ObjectNameOrId = lun.id
443 members = self.client.factory.create('ArrayOfDatasetMemberParameter')
444 members.DatasetMemberParameter = [member]
445
446 server = self.client.service
447 lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id)
448 try:
449 server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True,
450 DatasetMemberParameters=members)
451 server.DatasetEditCommit(EditLockId=lock_id,
452 AssumeConfirmation=True)
453 except (suds.WebFault, Exception):
454 server.DatasetEditRollback(EditLockId=lock_id)
455 msg = _('Failed to remove and delete dataset member')
456 raise exception.VolumeBackendAPIException(data=msg)
457
458 def create_volume(self, volume):
459 """Driver entry point for creating a new volume."""
460 default_size = '104857600' # 100 MB
461 gigabytes = 1073741824L # 2^30
462 name = volume['name']
463 project = volume['project_id']
464 display_name = volume['display_name']
465 display_description = volume['display_description']
466 description = None
467 if display_name:
468 if display_description:
469 description = display_name + "\n" + display_description
470 else:
471 description = display_name
472 elif display_description:
473 description = display_description
474 if int(volume['size']) == 0:
475 size = default_size
476 else:
477 size = str(int(volume['size']) * gigabytes)
478 ss_type = self._get_ss_type(volume)
479 self._provision(name, description, project, ss_type, size)
480
481 def _lookup_lun_for_volume(self, name, project):
482 """Lookup the LUN that corresponds to the give volume.
483
484 Initial lookups involve a table scan of all of the discovered LUNs,
485 but later lookups are done instantly from the hashtable.
486 """
487 if name in self.lun_table:
488 return self.lun_table[name]
489 lunpath_suffix = '/' + name
490 for lun in self.discovered_luns:
491 if lun.dataset.project != project:
492 continue
493 if lun.lunpath.endswith(lunpath_suffix):
494 self.lun_table[name] = lun
495 return lun
496 msg = _("No entry in LUN table for volume %s") % (name)
497 raise exception.VolumeBackendAPIException(data=msg)
498
499 def delete_volume(self, volume):
500 """Driver entry point for destroying existing volumes."""
501 name = volume['name']
502 project = volume['project_id']
503 self._remove_destroy(name, project)
504
505 def _get_lun_details(self, lun_id):
506 """Given the ID of a LUN, get the details about that LUN."""
507 server = self.client.service
508 res = server.LunListInfoIterStart(ObjectNameOrId=lun_id)
509 tag = res.Tag
510 try:
511 res = server.LunListInfoIterNext(Tag=tag, Maximum=1)
512 if hasattr(res, 'Luns') and res.Luns.LunInfo:
513 return res.Luns.LunInfo[0]
514 finally:
515 server.LunListInfoIterEnd(Tag=tag)
516 msg = _('Failed to get LUN details for LUN ID %s')
517 raise exception.VolumeBackendAPIException(data=msg % lun_id)
518
519 def _get_host_details(self, host_id):
520 """Given the ID of a host, get the details about it.
521
522 A "host" is a storage system here.
523 """
524 server = self.client.service
525 res = server.HostListInfoIterStart(ObjectNameOrId=host_id)
526 tag = res.Tag
527 try:
528 res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
529 if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
530 return res.Hosts.HostInfo[0]
531 finally:
532 server.HostListInfoIterEnd(Tag=tag)
533 msg = _('Failed to get host details for host ID %s')
534 raise exception.VolumeBackendAPIException(data=msg % host_id)
535
536 def _get_iqn_for_host(self, host_id):
537 """Get the iSCSI Target Name for a storage system."""
538 request = self.client.factory.create('Request')
539 request.Name = 'iscsi-node-get-name'
540 response = self.client.service.ApiProxy(Target=host_id,
541 Request=request)
542 self._check_fail(request, response)
543 return response.Results['node-name'][0]
544
545 def _api_elem_is_empty(self, elem):
546 """Return true if the API element should be considered empty.
547
548 Helper routine to figure out if a list returned from a proxy API
549 is empty. This is necessary because the API proxy produces nasty
550 looking XML.
551 """
552 if not type(elem) is list:
553 return True
554 if 0 == len(elem):
555 return True
556 child = elem[0]
557 if isinstance(child, text.Text):
558 return True
559 if type(child) is str:
560 return True
561 return False
562
    def _get_target_portal_for_host(self, host_id, host_address):
        """Get iSCSI target portal for a storage system.

        Get the iSCSI Target Portal details for a particular IP address
        on a storage system. Returns an empty dict when the filer reports
        no portals at all.
        """
        request = self.client.factory.create('Request')
        request.Name = 'iscsi-portal-list-info'
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        portal = {}
        portals = response.Results['iscsi-portal-list-entries']
        if self._api_elem_is_empty(portals):
            return portal
        portal_infos = portals[0]['iscsi-portal-list-entry-info']
        for portal_info in portal_infos:
            portal['address'] = portal_info['ip-address'][0]
            portal['port'] = portal_info['ip-port'][0]
            portal['portal'] = portal_info['tpgroup-tag'][0]
            # Stop at the entry matching the requested address.
            # NOTE(review): if no entry matches host_address, the dict keeps
            # the values of the LAST portal seen instead of coming back
            # empty -- confirm this fallback is intentional.
            if host_address == portal['address']:
                break
        return portal
586
587 def _get_export(self, volume):
588 """Get the iSCSI export details for a volume.
589
590 Looks up the LUN in DFM based on the volume and project name, then get
591 the LUN's ID. We store that value in the database instead of the iSCSI
592 details because we will not have the true iSCSI details until masking
593 time (when initialize_connection() is called).
594 """
595 name = volume['name']
596 project = volume['project_id']
597 lun = self._lookup_lun_for_volume(name, project)
598 return {'provider_location': lun.id}
599
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        return self._get_export(volume)
603
    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        return self._get_export(volume)
607
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """
        pass
615
    def _find_igroup_for_initiator(self, host_id, initiator_name):
        """Get the igroup for an initiator.

        Look for an existing igroup (initiator group) on the storage system
        containing a given iSCSI initiator and return the name of the igroup.
        Returns None when no matching igroup exists.
        """
        request = self.client.factory.create('Request')
        request.Name = 'igroup-list-info'
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        igroups = response.Results['initiator-groups']
        if self._api_elem_is_empty(igroups):
            return None
        igroup_infos = igroups[0]['initiator-group-info']
        for igroup_info in igroup_infos:
            # Only consider iSCSI igroups with the Linux OS type.
            if ('iscsi' != igroup_info['initiator-group-type'][0] or
                    'linux' != igroup_info['initiator-group-os-type'][0]):
                continue
            # Only igroups this driver created (IGROUP_PREFIX) are reused.
            igroup_name = igroup_info['initiator-group-name'][0]
            if not igroup_name.startswith(self.IGROUP_PREFIX):
                continue
            initiators = igroup_info['initiators'][0]['initiator-info']
            for initiator in initiators:
                if initiator_name == initiator['initiator-name'][0]:
                    return igroup_name
        return None
643
    def _create_igroup(self, host_id, initiator_name):
        """Create a new igroup.

        Create a new igroup (initiator group) on the storage system to hold
        the given iSCSI initiator. The group will only have 1 member and will
        be named "openstack-${initiator_name}". Returns the igroup name.
        """
        igroup_name = self.IGROUP_PREFIX + initiator_name
        request = self.client.factory.create('Request')
        request.Name = 'igroup-create'
        # NOTE(review): both <os-type> and <ostype> are sent -- presumably
        # because different ONTAP versions accept different element names;
        # confirm against the ZAPI documentation.
        igroup_create_xml = (
            '<initiator-group-name>%s</initiator-group-name>'
            '<initiator-group-type>iscsi</initiator-group-type>'
            '<os-type>linux</os-type><ostype>linux</ostype>')
        request.Args = text.Raw(igroup_create_xml % igroup_name)
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        # Second call adds the initiator to the newly created group.
        request = self.client.factory.create('Request')
        request.Name = 'igroup-add'
        igroup_add_xml = (
            '<initiator-group-name>%s</initiator-group-name>'
            '<initiator>%s</initiator>')
        request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        return igroup_name
672
    def _get_lun_mappping(self, host_id, lunpath, igroup_name):
        """Get the mapping between a LUN and an igroup.

        Check if a given LUN is already mapped to the given igroup (initiator
        group). If the LUN is mapped, also return the LUN number for the
        mapping, as {'mapped': True, 'lun_num': ...}; otherwise
        {'mapped': False}.

        NOTE(review): the method name misspells "mapping"; it is kept as-is
        because callers in this file use this exact name.
        """
        request = self.client.factory.create('Request')
        request.Name = 'lun-map-list-info'
        request.Args = text.Raw('<path>%s</path>' % (lunpath))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        igroups = response.Results['initiator-groups']
        if self._api_elem_is_empty(igroups):
            return {'mapped': False}
        igroup_infos = igroups[0]['initiator-group-info']
        for igroup_info in igroup_infos:
            if igroup_name == igroup_info['initiator-group-name'][0]:
                return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]}
        return {'mapped': False}
694
695 def _map_initiator(self, host_id, lunpath, igroup_name):
696 """Map a LUN to an igroup.
697
698 Map the given LUN to the given igroup (initiator group). Return the LUN
699 number that the LUN was mapped to (the filer will choose the lowest
700 available number).
701 """
702 request = self.client.factory.create('Request')
703 request.Name = 'lun-map'
704 lun_map_xml = ('<initiator-group>%s</initiator-group>'
705 '<path>%s</path>')
706 request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath))
707 response = self.client.service.ApiProxy(Target=host_id,
708 Request=request)
709 self._check_fail(request, response)
710 return response.Results['lun-id-assigned'][0]
711
712 def _unmap_initiator(self, host_id, lunpath, igroup_name):
713 """Unmap the given LUN from the given igroup (initiator group)."""
714 request = self.client.factory.create('Request')
715 request.Name = 'lun-unmap'
716 lun_unmap_xml = ('<initiator-group>%s</initiator-group>'
717 '<path>%s</path>')
718 request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath))
719 response = self.client.service.ApiProxy(Target=host_id,
720 Request=request)
721 self._check_fail(request, response)
722
723 def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name):
724 """Ensure that a LUN is mapped to a particular initiator.
725
726 Check if a LUN is mapped to a given initiator already and create
727 the mapping if it is not. A new igroup will be created if needed.
728 Returns the LUN number for the mapping between the LUN and initiator
729 in both cases.
730 """
731 lunpath = '/vol/' + lunpath
732 igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
733 if not igroup_name:
734 igroup_name = self._create_igroup(host_id, initiator_name)
735
736 mapping = self._get_lun_mappping(host_id, lunpath, igroup_name)
737 if mapping['mapped']:
738 return mapping['lun_num']
739 return self._map_initiator(host_id, lunpath, igroup_name)
740
741 def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name):
742 """Ensure that a LUN is not mapped to a particular initiator.
743
744 Check if a LUN is mapped to a given initiator and remove the
745 mapping if it is. This does not destroy the igroup.
746 """
747 lunpath = '/vol/' + lunpath
748 igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
749 if not igroup_name:
750 return
751
752 mapping = self._get_lun_mappping(host_id, lunpath, igroup_name)
753 if mapping['mapped']:
754 self._unmap_initiator(host_id, lunpath, igroup_name)
755
    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        Do the LUN masking on the storage system so the initiator can access
        the LUN on the target. Also return the iSCSI properties so the
        initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because cannot store the
        LUN number in the database. We only find out what the LUN number will
        be during this method call so we construct the properties dictionary
        ourselves.
        """
        initiator_name = connector['initiator']
        # provider_location was set to the DFM LUN ID by _get_export().
        lun_id = volume['provider_location']
        if not lun_id:
            msg = _("No LUN ID for volume %s") % volume['name']
            raise exception.VolumeBackendAPIException(data=msg)
        lun = self._get_lun_details(lun_id)
        # Mask the LUN to this initiator; returns the mapped LUN number.
        lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath,
                                                initiator_name)
        host = self._get_host_details(lun.HostId)
        portal = self._get_target_portal_for_host(host.HostId,
                                                  host.HostAddress)
        if not portal:
            msg = _('Failed to get target portal for filer: %s')
            raise exception.VolumeBackendAPIException(data=msg % host.HostName)

        iqn = self._get_iqn_for_host(host.HostId)
        if not iqn:
            msg = _('Failed to get target IQN for filer: %s')
            raise exception.VolumeBackendAPIException(data=msg % host.HostName)

        properties = {}
        properties['target_discovered'] = False
        (address, port) = (portal['address'], portal['port'])
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun_num
        properties['volume_id'] = volume['id']

        # provider_auth, when set, holds '<method> <username> <secret>'.
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
807
    def terminate_connection(self, volume, connector):
        """Driver entry point to unattach a volume from an instance.

        Unmask the LUN on the storage system so the given initiator can no
        longer access it.
        """
        initiator_name = connector['initiator']
        lun_id = volume['provider_location']
        if not lun_id:
            msg = _('No LUN ID for volume %s') % volume['name']
            raise exception.VolumeBackendAPIException(data=msg)
        lun = self._get_lun_details(lun_id)
        # Only the mapping is removed; the igroup itself is kept.
        self._ensure_initiator_unmapped(lun.HostId, lun.LunPath,
                                        initiator_name)
822
    def _is_clone_done(self, host_id, clone_op_id, volume_uuid):
        """Check the status of a clone operation.

        Return True if done, False otherwise. The clone is identified by
        the (clone-op-id, volume-uuid) pair returned by clone-start.
        """
        request = self.client.factory.create('Request')
        request.Name = 'clone-list-status'
        clone_list_status_xml = (
            '<clone-id><clone-id-info>'
            '<clone-op-id>%s</clone-op-id>'
            '<volume-uuid>%s</volume-uuid>'
            '</clone-id-info></clone-id>')
        request.Args = text.Raw(clone_list_status_xml % (clone_op_id,
                                                         volume_uuid))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        status = response.Results['status']
        # No status entries yet: the operation is treated as not done.
        if self._api_elem_is_empty(status):
            return False
        ops_info = status[0]['ops-info'][0]
        state = ops_info['clone-state'][0]
        return 'completed' == state
846
847 def _clone_lun(self, host_id, src_path, dest_path, snap):
848 """Create a clone of a NetApp LUN.
849
850 The clone initially consumes no space and is not space reserved.
851 """
852 request = self.client.factory.create('Request')
853 request.Name = 'clone-start'
854 clone_start_xml = (
855 '<source-path>%s</source-path><no-snap>%s</no-snap>'
856 '<destination-path>%s</destination-path>')
857 if snap:
858 no_snap = 'false'
859 else:
860 no_snap = 'true'
861 request.Args = text.Raw(clone_start_xml % (src_path, no_snap,
862 dest_path))
863 response = self.client.service.ApiProxy(Target=host_id,
864 Request=request)
865 self._check_fail(request, response)
866 clone_id = response.Results['clone-id'][0]
867 clone_id_info = clone_id['clone-id-info'][0]
868 clone_op_id = clone_id_info['clone-op-id'][0]
869 volume_uuid = clone_id_info['volume-uuid'][0]
870 while not self._is_clone_done(host_id, clone_op_id, volume_uuid):
871 time.sleep(5)
872
873 def _refresh_dfm_luns(self, host_id):
874 """Refresh the LUN list for one filer in DFM."""
875 server = self.client.service
876 server.DfmObjectRefresh(ObjectNameOrId=host_id, ChildType='lun_path')
877 while True:
878 time.sleep(15)
879 res = server.DfmMonitorTimestampList(HostNameOrId=host_id)
880 for timestamp in res.DfmMonitoringTimestamp:
881 if 'lun' != timestamp.MonitorName:
882 continue
883 if timestamp.LastMonitoringTimestamp:
884 return
885
886 def _destroy_lun(self, host_id, lun_path):
887 """Destroy a LUN on the filer."""
888 request = self.client.factory.create('Request')
889 request.Name = 'lun-offline'
890 path_xml = '<path>%s</path>'
891 request.Args = text.Raw(path_xml % lun_path)
892 response = self.client.service.ApiProxy(Target=host_id,
893 Request=request)
894 self._check_fail(request, response)
895 request = self.client.factory.create('Request')
896 request.Name = 'lun-destroy'
897 request.Args = text.Raw(path_xml % lun_path)
898 response = self.client.service.ApiProxy(Target=host_id,
899 Request=request)
900 self._check_fail(request, response)
901
902 def _resize_volume(self, host_id, vol_name, new_size):
903 """Resize the volume by the amount requested."""
904 request = self.client.factory.create('Request')
905 request.Name = 'volume-size'
906 volume_size_xml = (
907 '<volume>%s</volume><new-size>%s</new-size>')
908 request.Args = text.Raw(volume_size_xml % (vol_name, new_size))
909 response = self.client.service.ApiProxy(Target=host_id,
910 Request=request)
911 self._check_fail(request, response)
912
913 def _create_qtree(self, host_id, vol_name, qtree_name):
914 """Create a qtree the filer."""
915 request = self.client.factory.create('Request')
916 request.Name = 'qtree-create'
917 qtree_create_xml = (
918 '<mode>0755</mode><volume>%s</volume><qtree>%s</qtree>')
919 request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name))
920 response = self.client.service.ApiProxy(Target=host_id,
921 Request=request)
922 self._check_fail(request, response)
923
924 def create_snapshot(self, snapshot):
925 """Driver entry point for creating a snapshot.
926
927 This driver implements snapshots by using efficient single-file
928 (LUN) cloning.
929 """
930 vol_name = snapshot['volume_name']
931 snapshot_name = snapshot['name']
932 project = snapshot['project_id']
933 lun = self._lookup_lun_for_volume(vol_name, project)
934 lun_id = lun.id
935 lun = self._get_lun_details(lun_id)
936 extra_gb = snapshot['volume_size']
937 new_size = '+%dg' % extra_gb
938 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
939 # LunPath is the partial LUN path in this format: volume/qtree/lun
940 lun_path = str(lun.LunPath)
941 lun_name = lun_path[lun_path.rfind('/') + 1:]
942 qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName)
943 src_path = '%s/%s' % (qtree_path, lun_name)
944 dest_path = '%s/%s' % (qtree_path, snapshot_name)
945 self._clone_lun(lun.HostId, src_path, dest_path, True)
946
947 def delete_snapshot(self, snapshot):
948 """Driver entry point for deleting a snapshot."""
949 vol_name = snapshot['volume_name']
950 snapshot_name = snapshot['name']
951 project = snapshot['project_id']
952 lun = self._lookup_lun_for_volume(vol_name, project)
953 lun_id = lun.id
954 lun = self._get_lun_details(lun_id)
955 lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
956 snapshot_name)
957 self._destroy_lun(lun.HostId, lun_path)
958 extra_gb = snapshot['volume_size']
959 new_size = '-%dg' % extra_gb
960 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
961
962 def create_volume_from_snapshot(self, volume, snapshot):
963 """Driver entry point for creating a new volume from a snapshot.
964
965 Many would call this "cloning" and in fact we use cloning to implement
966 this feature.
967 """
968 vol_size = volume['size']
969 snap_size = snapshot['volume_size']
970 if vol_size != snap_size:
971 msg = _('Cannot create volume of size %(vol_size)s from '
972 'snapshot of size %(snap_size)s')
973 raise exception.VolumeBackendAPIException(data=msg % locals())
974 vol_name = snapshot['volume_name']
975 snapshot_name = snapshot['name']
976 project = snapshot['project_id']
977 lun = self._lookup_lun_for_volume(vol_name, project)
978 lun_id = lun.id
979 dataset = lun.dataset
980 old_type = dataset.type
981 new_type = self._get_ss_type(volume)
982 if new_type != old_type:
983 msg = _('Cannot create volume of type %(new_type)s from '
984 'snapshot of type %(old_type)s')
985 raise exception.VolumeBackendAPIException(data=msg % locals())
986 lun = self._get_lun_details(lun_id)
987 extra_gb = vol_size
988 new_size = '+%dg' % extra_gb
989 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
990 clone_name = volume['name']
991 self._create_qtree(lun.HostId, lun.VolumeName, clone_name)
992 src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
993 snapshot_name)
994 dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name)
995 self._clone_lun(lun.HostId, src_path, dest_path, False)
996 self._refresh_dfm_luns(lun.HostId)
997 self._discover_dataset_luns(dataset, clone_name)
998
999 def check_for_export(self, context, volume_id):
1000 raise NotImplementedError()
1001
1002
class NetAppLun(object):
    """Lightweight record describing a single LUN on NetApp storage."""

    def __init__(self, handle, name, size, metadata_dict):
        # Opaque handle the web-service API uses to address this LUN.
        self.handle = handle
        self.name = name
        self.size = size
        # Arbitrary key/value metadata attached to the LUN.
        self.metadata = metadata_dict

    def get_metadata_property(self, prop):
        """Return the value of metadata property ``prop``.

        A missing property is logged at debug level and yields None.
        """
        try:
            return self.metadata[prop]
        except KeyError:
            name = self.name
            msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
            LOG.debug(msg % locals())
1019
1020
class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
    """NetApp C-mode iSCSI volume driver.

    Drives LUN provisioning, cloning and iSCSI masking on a clustered
    Data ONTAP backend through a SOAP web-services proxy (suds client).
    A local ``lun_table`` caches LUNs discovered on the backend, keyed
    by LUN name.
    """

    def __init__(self, *args, **kwargs):
        super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
        # name -> NetAppLun cache, primed by check_for_setup_error() and
        # kept up to date by the create/clone/delete operations.
        self.lun_table = {}

    def _create_client(self, **kwargs):
        """Instantiate a web services client.

        This method creates a "suds" client to make web services calls to
        the DFM server. Note that the WSDL file is quite large and may take
        a few seconds to parse.
        """
        wsdl_url = kwargs['wsdl_url']
        LOG.debug(_('Using WSDL: %s') % wsdl_url)
        if kwargs['cache']:
            self.client = client.Client(wsdl_url, username=kwargs['login'],
                                        password=kwargs['password'])
        else:
            # cache=None disables suds' on-disk WSDL cache.
            self.client = client.Client(wsdl_url, username=kwargs['login'],
                                        password=kwargs['password'],
                                        cache=None)

    def _check_flags(self):
        """Raise InvalidInput unless every flag we depend on is set."""
        required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
                          'netapp_server_hostname', 'netapp_server_port']
        for flag in required_flags:
            if not getattr(FLAGS, flag, None):
                msg = _('%s is not set') % flag
                raise exception.InvalidInput(data=msg)

    def do_setup(self, context):
        """Setup the NetApp Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about and setup the suds (web services)
        client.
        """
        self._check_flags()
        self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
            login=FLAGS.netapp_login, password=FLAGS.netapp_password,
            hostname=FLAGS.netapp_server_hostname,
            port=FLAGS.netapp_server_port, cache=True)

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Discovers the LUNs on the NetApp server and primes the cache.
        """
        self.lun_table = {}
        luns = self.client.service.ListLuns()
        for lun in luns:
            meta_dict = {}
            if hasattr(lun, 'Metadata'):
                meta_dict = self._create_dict_from_meta(lun.Metadata)
            discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
                                       meta_dict)
            self._add_lun_to_table(discovered_lun)
        LOG.debug(_("Success getting LUN list from server"))

    def create_volume(self, volume):
        """Driver entry point for creating a new volume."""
        default_size = '104857600'  # 100 MB
        # FIX: was the Python-2-only literal 1073741824L; the plain int
        # has identical value and remains valid syntax everywhere.
        gigabytes = 1073741824  # 2^30
        name = volume['name']
        if int(volume['size']) == 0:
            # Zero-sized requests get a minimal 100 MB LUN.
            size = default_size
        else:
            size = str(int(volume['size']) * gigabytes)
        extra_args = {}
        extra_args['OsType'] = 'linux'
        extra_args['QosType'] = self._get_qos_type(volume)
        extra_args['Container'] = volume['project_id']
        extra_args['Display'] = volume['display_name']
        extra_args['Description'] = volume['display_description']
        extra_args['SpaceReserved'] = True
        server = self.client.service
        metadata = self._create_metadata_list(extra_args)
        lun = server.ProvisionLun(Name=name, Size=size,
                                  Metadata=metadata)
        LOG.debug(_("Created LUN with name %s") % name)
        self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
            lun.Size, self._create_dict_from_meta(lun.Metadata)))

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        name = volume['name']
        handle = self._get_lun_handle(name)
        self.client.service.DestroyLun(Handle=handle)
        LOG.debug(_("Destroyed LUN %s") % handle)
        # FIX: tolerate a cache miss; a stale table must not raise
        # KeyError after the backend delete already succeeded.
        self.lun_table.pop(name, None)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        handle = self._get_lun_handle(volume['name'])
        return {'provider_location': handle}

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        handle = self._get_lun_handle(volume['name'])
        return {'provider_location': handle}

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """
        pass

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        Do the LUN masking on the storage system so the initiator can access
        the LUN on the target. Also return the iSCSI properties so the
        initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because cannot store the
        LUN number in the database. We only find out what the LUN number will
        be during this method call so we construct the properties dictionary
        ourselves.
        """
        initiator_name = connector['initiator']
        handle = volume['provider_location']
        server = self.client.service
        server.MapLun(Handle=handle, InitiatorType="iscsi",
                      InitiatorName=initiator_name)
        msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
        LOG.debug(msg % locals())

        target_details_list = server.GetLunTargetDetails(Handle=handle,
            InitiatorType="iscsi", InitiatorName=initiator_name)
        msg = _("Successfully fetched target details for LUN %(handle)s and "
                "initiator %(initiator_name)s")
        LOG.debug(msg % locals())

        if not target_details_list:
            msg = _('Failed to get LUN target details for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % handle)
        target_details = target_details_list[0]
        # FIX: both an address and a port are required to reach the
        # target. The previous "not X and Y" test only fired when the
        # address was missing while the port was present.
        if not (target_details.Address and target_details.Port):
            msg = _('Failed to get target portal for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % handle)
        iqn = target_details.Iqn
        if not iqn:
            msg = _('Failed to get target IQN for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % handle)

        properties = {}
        properties['target_discovered'] = False
        (address, port) = (target_details.Address, target_details.Port)
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = target_details.LunNumber
        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

    def terminate_connection(self, volume, connector):
        """Driver entry point to unattach a volume from an instance.

        Unmask the LUN on the storage system so the given initiator can no
        longer access it.
        """
        initiator_name = connector['initiator']
        handle = volume['provider_location']
        self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi",
                                     InitiatorName=initiator_name)
        msg = _("Unmapped LUN %(handle)s from the initiator "
                "%(initiator_name)s")
        LOG.debug(msg % locals())

    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot.

        This driver implements snapshots by using efficient single-file
        (LUN) cloning.
        """
        vol_name = snapshot['volume_name']
        snapshot_name = snapshot['name']
        lun = self.lun_table[vol_name]
        # Snapshots are thin: do not space-reserve the clone.
        extra_args = {'SpaceReserved': False}
        self._clone_lun(lun.handle, snapshot_name, extra_args)

    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        handle = self._get_lun_handle(snapshot['name'])
        self.client.service.DestroyLun(Handle=handle)
        LOG.debug(_("Destroyed LUN %s") % handle)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Driver entry point for creating a new volume from a snapshot.

        Many would call this "cloning" and in fact we use cloning to
        implement this feature.
        """
        snapshot_name = snapshot['name']
        lun = self.lun_table[snapshot_name]
        new_name = volume['name']
        extra_args = {}
        extra_args['OsType'] = 'linux'
        extra_args['QosType'] = self._get_qos_type(volume)
        extra_args['Container'] = volume['project_id']
        extra_args['Display'] = volume['display_name']
        extra_args['Description'] = volume['display_description']
        extra_args['SpaceReserved'] = True
        self._clone_lun(lun.handle, new_name, extra_args)

    def check_for_export(self, context, volume_id):
        """Export checking is not supported by this driver."""
        raise NotImplementedError()

    def _get_qos_type(self, volume):
        """Get the storage service type for a volume.

        Returns the volume type name, or None when the volume has no
        type or the type cannot be resolved.
        """
        type_id = volume['volume_type_id']
        if not type_id:
            return None
        volume_type = volume_types.get_volume_type(None, type_id)
        if not volume_type:
            return None
        return volume_type['name']

    def _add_lun_to_table(self, lun):
        """Add a NetAppLun to the cache table, validating its type."""
        if not isinstance(lun, NetAppLun):
            msg = _("Object is not a NetApp LUN.")
            raise exception.VolumeBackendAPIException(data=msg)
        self.lun_table[lun.name] = lun

    def _clone_lun(self, handle, new_name, extra_args):
        """Clone LUN with the given handle to the new name.

        The resulting LUN is added to the local cache.
        """
        server = self.client.service
        metadata = self._create_metadata_list(extra_args)
        lun = server.CloneLun(Handle=handle, NewName=new_name,
                              Metadata=metadata)
        LOG.debug(_("Cloned LUN with new name %s") % new_name)
        self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
            lun.Size, self._create_dict_from_meta(lun.Metadata)))

    def _create_metadata_list(self, extra_args):
        """Convert a kwargs dict into a list of web-service Metadata."""
        metadata = []
        for key in extra_args.keys():
            meta = self.client.factory.create("Metadata")
            meta.Key = key
            meta.Value = extra_args[key]
            metadata.append(meta)
        return metadata

    def _get_lun_handle(self, name):
        """Get the handle for a named LUN from our cache table.

        Returns None (after logging a warning) for an unknown LUN.
        """
        if name not in self.lun_table:
            LOG.warn(_("Could not find handle for LUN named %s") % name)
            return None
        return self.lun_table[name].handle

    def _create_dict_from_meta(self, metadata):
        """Creates dictionary from metadata array."""
        meta_dict = {}
        if not metadata:
            return meta_dict
        for meta in metadata:
            meta_dict[meta.Key] = meta.Value
        return meta_dict
diff --git a/recipes/volume.rb b/recipes/volume.rb
index a1bfd9c..15022d0 100644
--- a/recipes/volume.rb
+++ b/recipes/volume.rb
@@ -96,3 +96,11 @@ template "/etc/tgt/targets.conf" do
96 96
97 notifies :restart, "service[iscsitarget]", :immediately 97 notifies :restart, "service[iscsitarget]", :immediately
98end 98end
99
100cookbook_file node["cinder"]["netapp"]["driver"] do
101 source "netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py"
102 mode 00644
103 owner "root"
104 group "root"
105 notifies :restart, "service[cinder-volume]"
106end