author      John Dewey <john@dewey.ws>    2013-05-14 20:53:04 -0700
committer   John Dewey <john@dewey.ws>    2013-05-16 14:10:13 -0700
commit      36e51d89637eb195fe4bb0d718f737635ec279aa (patch)
tree        6d3238fdf7f54ccafbefa1105cabf6a955d32d2a
parent      4a68fb3b781f06aab8f76bd5aedda834fb4c7e40 (diff)
Removed local patches

These patches make assumptions that are a bit too liberal. As these
cookbooks become more widely used, these patches should be moved into a
wrapper cookbook.

Change-Id: I5348876712418a520659aab606065e5a5b288d99
Notes (review):
    Verified+2: Jenkins
    Approved+1: Jay Pipes <jaypipes@gmail.com>
    Code-Review+2: Jay Pipes <jaypipes@gmail.com>
    Submitted-by: Jenkins
    Submitted-at: Thu, 16 May 2013 21:16:26 +0000
    Reviewed-on: https://review.openstack.org/29284
    Project: stackforge/cookbook-openstack-block-storage
    Branch: refs/heads/master
-rwxr-xr-x  files/default/cinder-volume-usage-audit                                    79
-rw-r--r--  files/default/fileutils_new-b322585613c21067571442aaf9e4e6feb167832b.py    35
-rw-r--r--  files/default/gettextutils_new-8e450aaa6ba1a2a88f6326c2e8d285d00fd28691.py 33
-rw-r--r--  files/default/lockutils_new-6dda4af1dd50582a0271fd6c96044ae61af9df7e.py   233
-rw-r--r--  files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py     1294
-rw-r--r--  recipes/scheduler.rb    24
-rw-r--r--  recipes/volume.rb       37
-rw-r--r--  spec/scheduler_spec.rb  23
-rw-r--r--  spec/spec_helper.rb      1
-rw-r--r--  spec/volume_spec.rb     89
10 files changed, 2 insertions, 1846 deletions
diff --git a/files/default/cinder-volume-usage-audit b/files/default/cinder-volume-usage-audit
deleted file mode 100755
index 403e901..0000000
--- a/files/default/cinder-volume-usage-audit
+++ /dev/null
@@ -1,79 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Cron script to generate usage notifications for volumes existing during
   the audit period.

   Together with the notifications generated by volumes
   create/delete/resize, over that time period, this allows an external
   system consuming usage notification feeds to calculate volume usage
   for each tenant.

   Time periods are specified as 'hour', 'month', 'day' or 'year'

   hour = previous hour. If run at 9:07am, will generate usage for 8-9am.
   month = previous month. If the script is run April 1, it will generate
           usages for March 1 through March 31.
   day = previous day. If run on July 4th, it generates usages for July 3rd.
   year = previous year. If run on Jan 1, it generates usages for
          Jan 1 through Dec 31 of the previous year.
"""

import gettext
import os
import sys
import traceback

# If ../cinder/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)

gettext.install('cinder', unicode=1)
from cinder import context
from cinder import db
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder import utils
import cinder.volume.utils


FLAGS = flags.FLAGS

if __name__ == '__main__':
    admin_context = context.get_admin_context()
    flags.parse_args(sys.argv)
    logging.setup("cinder")
    begin, end = utils.last_completed_audit_period('day')
    print "Starting volume usage audit"
    print "Creating usages for %s until %s" % (str(begin), str(end))
    volumes = db.volume_get_active_by_window(admin_context,
                                             begin,
                                             end)
    print "Found %d volumes" % len(volumes)
    for volume_ref in volumes:
        try:
            cinder.volume.utils.notify_usage_exists(
                admin_context, volume_ref)
        except Exception, e:
            print traceback.format_exc(e)
    print "Volume usage audit completed"
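
The script above delegates the window arithmetic to cinder's
utils.last_completed_audit_period(). For illustration, a minimal sketch of
what a 'day' period means in practice; this is not cinder's actual
implementation, just a simplified equivalent of its documented behavior:

# Illustrative only: a simplified 'previous day' audit window, not
# cinder's utils.last_completed_audit_period().
import datetime


def last_completed_day_period(now=None):
    """Return (begin, end) for the most recently completed UTC day."""
    now = now or datetime.datetime.utcnow()
    end = now.replace(hour=0, minute=0, second=0, microsecond=0)
    begin = end - datetime.timedelta(days=1)
    return begin, end

# Run on July 4th, this yields midnight July 3rd -> midnight July 4th,
# matching the docstring's "day = previous day" example.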
diff --git a/files/default/fileutils_new-b322585613c21067571442aaf9e4e6feb167832b.py b/files/default/fileutils_new-b322585613c21067571442aaf9e4e6feb167832b.py
deleted file mode 100644
index 4746ad4..0000000
--- a/files/default/fileutils_new-b322585613c21067571442aaf9e4e6feb167832b.py
+++ /dev/null
@@ -1,35 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import errno
import os


def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise
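
The point of ensure_tree() over a bare os.makedirs() is idempotence:
repeated calls succeed as long as the path already is (or can become) a
directory. A usage sketch, assuming the module is importable at the path in
the diff header; the lock directory is a made-up example:

# Usage sketch; the directory path here is hypothetical.
from cinder.openstack.common import fileutils

fileutils.ensure_tree('/tmp/cinder-locks')  # creates the tree
fileutils.ensure_tree('/tmp/cinder-locks')  # no-op; a bare os.makedirs()
                                            # would raise OSError(EEXIST)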
diff --git a/files/default/gettextutils_new-8e450aaa6ba1a2a88f6326c2e8d285d00fd28691.py b/files/default/gettextutils_new-8e450aaa6ba1a2a88f6326c2e8d285d00fd28691.py
deleted file mode 100644
index 87e3520..0000000
--- a/files/default/gettextutils_new-8e450aaa6ba1a2a88f6326c2e8d285d00fd28691.py
+++ /dev/null
@@ -1,33 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
gettext for openstack-common modules.

Usual usage in an openstack.common module:

    from cinder.openstack.common.gettextutils import _
"""

import gettext


t = gettext.translation('openstack-common', 'locale', fallback=True)


def _(msg):
    return t.ugettext(msg)
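
For illustration, `_` routes a message through the 'openstack-common'
gettext catalog; because the translation is created with fallback=True, it
returns the message unchanged when no catalog is installed. A usage sketch
mirroring the module docstring above (the message is one used elsewhere in
this diff):

# Usage sketch; falls back to the untranslated string when no
# 'openstack-common' message catalog is installed.
from cinder.openstack.common.gettextutils import _

msg = _('Could not release the acquired lock `%s`')
print msg % '/tmp/example.lock'  # the lock path here is illustrative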
diff --git a/files/default/lockutils_new-6dda4af1dd50582a0271fd6c96044ae61af9df7e.py b/files/default/lockutils_new-6dda4af1dd50582a0271fd6c96044ae61af9df7e.py
deleted file mode 100644
index 418bc3a..0000000
--- a/files/default/lockutils_new-6dda4af1dd50582a0271fd6c96044ae61af9df7e.py
+++ /dev/null
@@ -1,233 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import errno
import functools
import os
import shutil
import tempfile
import time
import weakref

from eventlet import semaphore

from cinder.openstack.common import cfg
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory to use for lock files')
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError, e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_InterProcessLock):
    def trylock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile, msvcrt.LK_UNLCK, 1)


class _PosixLock(_InterProcessLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

_semaphores = weakref.WeakValueDictionary()


def synchronized(name, lock_file_prefix, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
            ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
            ...

        @synchronized('mylock')
        def bar(self, *args):
            ...

    This way only one of either foo or bar can be executing at a time.

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix. The prefix should end with a hyphen ('-') if specified.

    The external keyword argument denotes whether this lock should work across
    multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
    of them will execute at a time.

    The lock_path keyword argument is used to specify a special location for
    external lock files to live. If nothing is set, then CONF.lock_path is
    used as a default.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            # See http://stackoverflow.com/questions/5390569/dyn
            # amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path

                    if not local_lock_path:
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)

            return retval
        return inner
    return wrap
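
As a usage sketch: the netapp driver below decorates its DFM operations with
@lockutils.synchronized('netapp_dfm', 'cinder-', True). The first argument
names the lock, the second prefixes the on-disk lock file, and external=True
layers the inter-process file lock on top of the per-process semaphore. The
decorated function here is a hypothetical example, not code from this repo:

from cinder.openstack.common import lockutils


@lockutils.synchronized('netapp_dfm', 'cinder-', external=True)
def provision_example():
    # Only one green thread in this process - and, with external=True,
    # only one process on the host - executes this body at a time.
    pass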
diff --git a/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py b/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py
deleted file mode 100644
index 3d4ca20..0000000
--- a/files/default/netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py
+++ /dev/null
@@ -1,1294 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp storage systems.

This driver requires NetApp OnCommand 5.0 and one or more Data
ONTAP 7-mode storage systems with installed iSCSI licenses.

"""

import time

import suds
from suds import client
from suds.sax import text

from cinder import exception
from cinder import flags
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.volume import driver
from cinder.volume import volume_types

LOG = logging.getLogger("cinder.volume.driver")

netapp_opts = [
    cfg.StrOpt('netapp_wsdl_url',
               default=None,
               help='URL of the WSDL file for the DFM server'),
    cfg.StrOpt('netapp_login',
               default=None,
               help='User name for the DFM server'),
    cfg.StrOpt('netapp_password',
               default=None,
               help='Password for the DFM server'),
    cfg.StrOpt('netapp_server_hostname',
               default=None,
               help='Hostname for the DFM server'),
    cfg.IntOpt('netapp_server_port',
               default=8088,
               help='Port number for the DFM server'),
    cfg.StrOpt('netapp_storage_service',
               default=None,
               help=('Storage service to use for provisioning '
                     '(when volume_type=None)')),
    cfg.StrOpt('netapp_storage_service_prefix',
               default=None,
               help=('Prefix of storage service name to use for '
                     'provisioning (volume_type name will be appended)')),
    cfg.StrOpt('netapp_vfiler',
               default=None,
               help='Vfiler to use for provisioning'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(netapp_opts)


class DfmDataset(object):
    def __init__(self, id, name, project, type):
        self.id = id
        self.name = name
        self.project = project
        self.type = type


class DfmLun(object):
    def __init__(self, dataset, lunpath, id):
        self.dataset = dataset
        self.lunpath = lunpath
        self.id = id


class NetAppISCSIDriver(driver.ISCSIDriver):
    """NetApp iSCSI volume driver."""

    IGROUP_PREFIX = 'openstack-'
    DATASET_PREFIX = 'OpenStack_'
    DATASET_METADATA_PROJECT_KEY = 'OpenStackProject'
    DATASET_METADATA_VOL_TYPE_KEY = 'OpenStackVolType'

    def __init__(self, *args, **kwargs):
        super(NetAppISCSIDriver, self).__init__(*args, **kwargs)
        self.discovered_luns = []
        self.discovered_datasets = []
        self.lun_table = {}

    def _check_fail(self, request, response):
        """Utility routine to handle checking ZAPI failures."""
        if 'failed' == response.Status:
            name = request.Name
            reason = response.Reason
            msg = _('API %(name)s failed: %(reason)s')
            raise exception.VolumeBackendAPIException(data=msg % locals())

    def _create_client(self, **kwargs):
        """Instantiate a web services client.

        This method creates a "suds" client to make web services calls to the
        DFM server. Note that the WSDL file is quite large and may take
        a few seconds to parse.
        """
        wsdl_url = kwargs['wsdl_url']
        LOG.debug(_('Using WSDL: %s') % wsdl_url)
        if kwargs['cache']:
            self.client = client.Client(wsdl_url, username=kwargs['login'],
                                        password=kwargs['password'])
        else:
            self.client = client.Client(wsdl_url, username=kwargs['login'],
                                        password=kwargs['password'],
                                        cache=None)
        soap_url = 'http://%s:%s/apis/soap/v1' % (kwargs['hostname'],
                                                  kwargs['port'])
        LOG.debug(_('Using DFM server: %s') % soap_url)
        self.client.set_options(location=soap_url)

    def _set_storage_service(self, storage_service):
        """Set the storage service to use for provisioning."""
        LOG.debug(_('Using storage service: %s') % storage_service)
        self.storage_service = storage_service

    def _set_storage_service_prefix(self, storage_service_prefix):
        """Set the storage service prefix to use for provisioning."""
        LOG.debug(_('Using storage service prefix: %s') %
                  storage_service_prefix)
        self.storage_service_prefix = storage_service_prefix

    def _set_vfiler(self, vfiler):
        """Set the vfiler to use for provisioning."""
        LOG.debug(_('Using vfiler: %s') % vfiler)
        self.vfiler = vfiler

    def _check_flags(self):
        """Ensure that the flags we care about are set."""
        required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
                          'netapp_server_hostname', 'netapp_server_port']
        for flag in required_flags:
            if not getattr(FLAGS, flag, None):
                raise exception.InvalidInput(reason=_('%s is not set') % flag)
        if not (FLAGS.netapp_storage_service or
                FLAGS.netapp_storage_service_prefix):
            raise exception.InvalidInput(reason=_('Either '
                'netapp_storage_service or netapp_storage_service_prefix must '
                'be set'))

    def do_setup(self, context):
        """Setup the NetApp Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about and setup the suds (web services)
        client.
        """
        self._check_flags()
        self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
            login=FLAGS.netapp_login, password=FLAGS.netapp_password,
            hostname=FLAGS.netapp_server_hostname,
            port=FLAGS.netapp_server_port, cache=True)
        self._set_storage_service(FLAGS.netapp_storage_service)
        self._set_storage_service_prefix(FLAGS.netapp_storage_service_prefix)
        self._set_vfiler(FLAGS.netapp_vfiler)

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate.

        Invoke a web services API to make sure we can talk to the server.
        Also perform the discovery of datasets and LUNs from DFM.
        """
        self.client.service.DfmAbout()
        LOG.debug(_("Connected to DFM server"))
        self._discover_luns()

    def _get_datasets(self):
        """Get the list of datasets from DFM."""
        server = self.client.service
        res = server.DatasetListInfoIterStart(IncludeMetadata=True)
        tag = res.Tag
        datasets = []
        try:
            while True:
                res = server.DatasetListInfoIterNext(Tag=tag, Maximum=100)
                if not res.Datasets:
                    break
                datasets.extend(res.Datasets.DatasetInfo)
        finally:
            server.DatasetListInfoIterEnd(Tag=tag)
        return datasets

    def _discover_dataset_luns(self, dataset, volume):
        """Discover all of the LUNs in a dataset."""
        server = self.client.service
        res = server.DatasetMemberListInfoIterStart(
            DatasetNameOrId=dataset.id,
            IncludeExportsInfo=True,
            IncludeIndirect=True,
            MemberType='lun_path')
        tag = res.Tag
        suffix = None
        if volume:
            suffix = '/' + volume
        try:
            while True:
                res = server.DatasetMemberListInfoIterNext(Tag=tag,
                                                           Maximum=100)
                if (not hasattr(res, 'DatasetMembers') or
                        not res.DatasetMembers):
                    break
                for member in res.DatasetMembers.DatasetMemberInfo:
                    if suffix and not member.MemberName.endswith(suffix):
                        continue
                    # MemberName is the full LUN path in this format:
                    # host:/volume/qtree/lun
                    lun = DfmLun(dataset, member.MemberName, member.MemberId)
                    self.discovered_luns.append(lun)
        finally:
            server.DatasetMemberListInfoIterEnd(Tag=tag)

    def _discover_luns(self):
        """Discover the LUNs from DFM.

        Discover all of the OpenStack-created datasets and LUNs in the DFM
        database.
        """
        datasets = self._get_datasets()
        self.discovered_datasets = []
        self.discovered_luns = []
        for dataset in datasets:
            if not dataset.DatasetName.startswith(self.DATASET_PREFIX):
                continue
            if (not hasattr(dataset, 'DatasetMetadata') or
                    not dataset.DatasetMetadata):
                continue
            project = None
            type = None
            for field in dataset.DatasetMetadata.DfmMetadataField:
                if field.FieldName == self.DATASET_METADATA_PROJECT_KEY:
                    project = field.FieldValue
                elif field.FieldName == self.DATASET_METADATA_VOL_TYPE_KEY:
                    type = field.FieldValue
            if not project:
                continue
            ds = DfmDataset(dataset.DatasetId, dataset.DatasetName,
                            project, type)
            self.discovered_datasets.append(ds)
            self._discover_dataset_luns(ds, None)
        dataset_count = len(self.discovered_datasets)
        lun_count = len(self.discovered_luns)
        msg = _("Discovered %(dataset_count)s datasets and %(lun_count)s LUNs")
        LOG.debug(msg % locals())
        self.lun_table = {}

    def _get_job_progress(self, job_id):
        """Get progress of one running DFM job.

        Obtain the latest progress report for the job and return the
        list of progress events.
        """
        server = self.client.service
        res = server.DpJobProgressEventListIterStart(JobId=job_id)
        tag = res.Tag
        event_list = []
        try:
            while True:
                res = server.DpJobProgressEventListIterNext(Tag=tag,
                                                            Maximum=100)
                if not hasattr(res, 'ProgressEvents'):
                    break
                event_list += res.ProgressEvents.DpJobProgressEventInfo
        finally:
            server.DpJobProgressEventListIterEnd(Tag=tag)
        return event_list

    def _wait_for_job(self, job_id):
        """Wait until a job terminates.

        Poll the job until it completes or an error is detected. Return the
        final list of progress events if it completes successfully.
        """
        while True:
            events = self._get_job_progress(job_id)
            for event in events:
                if event.EventStatus == 'error':
                    msg = _('Job failed: %s') % (event.ErrorMessage)
                    raise exception.VolumeBackendAPIException(data=msg)
                if event.EventType == 'job-end':
                    return events
            time.sleep(5)

    def _dataset_name(self, project, ss_type):
        """Return the dataset name for a given project and volume type."""
        _project = project.replace(' ', '_').replace('-', '_')
        dataset_name = self.DATASET_PREFIX + _project
        if not ss_type:
            return dataset_name
        _type = ss_type.replace(' ', '_').replace('-', '_')
        return dataset_name + '_' + _type

    def _get_dataset(self, dataset_name):
        """Lookup a dataset by name in the list of discovered datasets."""
        for dataset in self.discovered_datasets:
            if dataset.name == dataset_name:
                return dataset
        return None

    def _create_dataset(self, dataset_name, project, ss_type):
        """Create a new dataset using the storage service.

        The export settings are set to create iSCSI LUNs aligned for Linux.
        Returns the ID of the new dataset.
        """
        if ss_type and not self.storage_service_prefix:
            msg = _('Attempt to use volume_type without specifying '
                    'netapp_storage_service_prefix flag.')
            raise exception.VolumeBackendAPIException(data=msg)
        if not (ss_type or self.storage_service):
            msg = _('You must set the netapp_storage_service flag in order to '
                    'create volumes with no volume_type.')
            raise exception.VolumeBackendAPIException(data=msg)
        storage_service = self.storage_service
        if ss_type:
            storage_service = self.storage_service_prefix + ss_type

        factory = self.client.factory

        lunmap = factory.create('DatasetLunMappingInfo')
        lunmap.IgroupOsType = 'linux'
        export = factory.create('DatasetExportInfo')
        export.DatasetExportProtocol = 'iscsi'
        export.DatasetLunMappingInfo = lunmap
        detail = factory.create('StorageSetInfo')
        detail.DpNodeName = 'Primary data'
        detail.DatasetExportInfo = export
        if hasattr(self, 'vfiler') and self.vfiler:
            detail.ServerNameOrId = self.vfiler
        details = factory.create('ArrayOfStorageSetInfo')
        details.StorageSetInfo = [detail]
        field1 = factory.create('DfmMetadataField')
        field1.FieldName = self.DATASET_METADATA_PROJECT_KEY
        field1.FieldValue = project
        field2 = factory.create('DfmMetadataField')
        field2.FieldName = self.DATASET_METADATA_VOL_TYPE_KEY
        field2.FieldValue = ss_type
        metadata = factory.create('ArrayOfDfmMetadataField')
        metadata.DfmMetadataField = [field1, field2]

        res = self.client.service.StorageServiceDatasetProvision(
            StorageServiceNameOrId=storage_service,
            DatasetName=dataset_name,
            AssumeConfirmation=True,
            StorageSetDetails=details,
            DatasetMetadata=metadata)

        ds = DfmDataset(res.DatasetId, dataset_name, project, ss_type)
        self.discovered_datasets.append(ds)
        return ds

    @lockutils.synchronized('netapp_dfm', 'cinder-', True)
    def _provision(self, name, description, project, ss_type, size):
        """Provision a LUN through provisioning manager.

        The LUN will be created inside a dataset associated with the project.
        If the dataset doesn't already exist, we create it using the storage
        service specified in the cinder conf.
        """
        dataset_name = self._dataset_name(project, ss_type)
        dataset = self._get_dataset(dataset_name)
        if not dataset:
            dataset = self._create_dataset(dataset_name, project, ss_type)

        info = self.client.factory.create('ProvisionMemberRequestInfo')
        info.Name = name
        if description:
            info.Description = description
        info.Size = size
        info.MaximumSnapshotSpace = 2 * long(size)

        server = self.client.service
        lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset.id)
        try:
            server.DatasetProvisionMember(EditLockId=lock_id,
                                          ProvisionMemberRequestInfo=info)
            res = server.DatasetEditCommit(EditLockId=lock_id,
                                           AssumeConfirmation=True)
        except (suds.WebFault, Exception):
            server.DatasetEditRollback(EditLockId=lock_id)
            msg = _('Failed to provision dataset member')
            raise exception.VolumeBackendAPIException(data=msg)

        lun_id = None
        lunpath = None

        for info in res.JobIds.JobInfo:
            events = self._wait_for_job(info.JobId)
            for event in events:
                if event.EventType != 'lun-create':
                    continue
                lunpath = event.ProgressLunInfo.LunName
                lun_id = event.ProgressLunInfo.LunPathId

        if not lun_id:
            msg = _('No LUN was created by the provision job')
            raise exception.VolumeBackendAPIException(data=msg)

        lun = DfmLun(dataset, lunpath, lun_id)
        self.discovered_luns.append(lun)
        self.lun_table[name] = lun

    def _get_ss_type(self, volume):
        """Get the storage service type for a volume."""
        id = volume['volume_type_id']
        if not id:
            return None
        volume_type = volume_types.get_volume_type(None, id)
        if not volume_type:
            return None
        return volume_type['name']

    @lockutils.synchronized('netapp_dfm', 'cinder-', True)
    def _remove_destroy(self, name, project):
        """Remove the LUN from the dataset, also destroying it.

        Remove the LUN from the dataset and destroy the actual LUN on the
        storage system.
        """
        lun = self._lookup_lun_for_volume(name, project)
        member = self.client.factory.create('DatasetMemberParameter')
        member.ObjectNameOrId = lun.id
        members = self.client.factory.create('ArrayOfDatasetMemberParameter')
        members.DatasetMemberParameter = [member]

        server = self.client.service
        lock_id = server.DatasetEditBegin(DatasetNameOrId=lun.dataset.id)
        try:
            server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True,
                                       DatasetMemberParameters=members)
            server.DatasetEditCommit(EditLockId=lock_id,
                                     AssumeConfirmation=True)
        except (suds.WebFault, Exception):
            server.DatasetEditRollback(EditLockId=lock_id)
            msg = _('Failed to remove and delete dataset member')
            raise exception.VolumeBackendAPIException(data=msg)

    def create_volume(self, volume):
        """Driver entry point for creating a new volume."""
        default_size = '104857600'  # 100 MB
        gigabytes = 1073741824L  # 2^30
        name = volume['name']
        project = volume['project_id']
        display_name = volume['display_name']
        display_description = volume['display_description']
        description = None
        if display_name:
            if display_description:
                description = display_name + "\n" + display_description
            else:
                description = display_name
        elif display_description:
            description = display_description
        if int(volume['size']) == 0:
            size = default_size
        else:
            size = str(int(volume['size']) * gigabytes)
        ss_type = self._get_ss_type(volume)
        self._provision(name, description, project, ss_type, size)

    def _lookup_lun_for_volume(self, name, project):
        """Lookup the LUN that corresponds to the given volume.

        Initial lookups involve a table scan of all of the discovered LUNs,
        but later lookups are done instantly from the hashtable.
        """
        if name in self.lun_table:
            return self.lun_table[name]
        lunpath_suffix = '/' + name
        for lun in self.discovered_luns:
            if lun.dataset.project != project:
                continue
            if lun.lunpath.endswith(lunpath_suffix):
                self.lun_table[name] = lun
                return lun
        msg = _("No entry in LUN table for volume %s") % (name)
        raise exception.VolumeBackendAPIException(data=msg)

    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        name = volume['name']
        project = volume['project_id']
        self._remove_destroy(name, project)

    def _get_lun_details(self, lun_id):
        """Given the ID of a LUN, get the details about that LUN."""
        server = self.client.service
        res = server.LunListInfoIterStart(ObjectNameOrId=lun_id)
        tag = res.Tag
        try:
            res = server.LunListInfoIterNext(Tag=tag, Maximum=1)
            if hasattr(res, 'Luns') and res.Luns.LunInfo:
                return res.Luns.LunInfo[0]
        finally:
            server.LunListInfoIterEnd(Tag=tag)
        msg = _('Failed to get LUN details for LUN ID %s')
        raise exception.VolumeBackendAPIException(data=msg % lun_id)

    def _get_host_details(self, host_id):
        """Given the ID of a host, get the details about it.

        A "host" is a storage system here.
        """
        server = self.client.service
        res = server.HostListInfoIterStart(ObjectNameOrId=host_id)
        tag = res.Tag
        try:
            res = server.HostListInfoIterNext(Tag=tag, Maximum=1)
            if hasattr(res, 'Hosts') and res.Hosts.HostInfo:
                return res.Hosts.HostInfo[0]
        finally:
            server.HostListInfoIterEnd(Tag=tag)
        msg = _('Failed to get host details for host ID %s')
        raise exception.VolumeBackendAPIException(data=msg % host_id)

    def _get_iqn_for_host(self, host_id):
        """Get the iSCSI Target Name for a storage system."""
        request = self.client.factory.create('Request')
        request.Name = 'iscsi-node-get-name'
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        return response.Results['node-name'][0]

    def _api_elem_is_empty(self, elem):
        """Return true if the API element should be considered empty.

        Helper routine to figure out if a list returned from a proxy API
        is empty. This is necessary because the API proxy produces nasty
        looking XML.
        """
        if not type(elem) is list:
            return True
        if 0 == len(elem):
            return True
        child = elem[0]
        if isinstance(child, text.Text):
            return True
        if type(child) is str:
            return True
        return False

    def _get_target_portal_for_host(self, host_id, host_address):
        """Get iSCSI target portal for a storage system.

        Get the iSCSI Target Portal details for a particular IP address
        on a storage system.
        """
        request = self.client.factory.create('Request')
        request.Name = 'iscsi-portal-list-info'
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        portal = {}
        portals = response.Results['iscsi-portal-list-entries']
        if self._api_elem_is_empty(portals):
            return portal
        portal_infos = portals[0]['iscsi-portal-list-entry-info']
        for portal_info in portal_infos:
            portal['address'] = portal_info['ip-address'][0]
            portal['port'] = portal_info['ip-port'][0]
            portal['portal'] = portal_info['tpgroup-tag'][0]
            if host_address == portal['address']:
                break
        return portal

    def _get_export(self, volume):
        """Get the iSCSI export details for a volume.

        Looks up the LUN in DFM based on the volume and project name, then get
        the LUN's ID. We store that value in the database instead of the iSCSI
        details because we will not have the true iSCSI details until masking
        time (when initialize_connection() is called).
        """
        name = volume['name']
        project = volume['project_id']
        lun = self._lookup_lun_for_volume(name, project)
        return {'provider_location': lun.id}

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        return self._get_export(volume)

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        return self._get_export(volume)

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.

        Since exporting is idempotent in this driver, we have nothing
        to do for unexporting.
        """
        pass

    def _find_igroup_for_initiator(self, host_id, initiator_name):
        """Get the igroup for an initiator.

        Look for an existing igroup (initiator group) on the storage system
        containing a given iSCSI initiator and return the name of the igroup.
        """
        request = self.client.factory.create('Request')
        request.Name = 'igroup-list-info'
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        igroups = response.Results['initiator-groups']
        if self._api_elem_is_empty(igroups):
            return None
        igroup_infos = igroups[0]['initiator-group-info']
        for igroup_info in igroup_infos:
            if ('iscsi' != igroup_info['initiator-group-type'][0] or
                    'linux' != igroup_info['initiator-group-os-type'][0]):
                continue
            igroup_name = igroup_info['initiator-group-name'][0]
            if not igroup_name.startswith(self.IGROUP_PREFIX):
                continue
            initiators = igroup_info['initiators'][0]['initiator-info']
            for initiator in initiators:
                if initiator_name == initiator['initiator-name'][0]:
                    return igroup_name
        return None

    def _create_igroup(self, host_id, initiator_name):
        """Create a new igroup.

        Create a new igroup (initiator group) on the storage system to hold
        the given iSCSI initiator. The group will only have 1 member and will
        be named "openstack-${initiator_name}".
        """
        igroup_name = self.IGROUP_PREFIX + initiator_name
        request = self.client.factory.create('Request')
        request.Name = 'igroup-create'
        igroup_create_xml = (
            '<initiator-group-name>%s</initiator-group-name>'
            '<initiator-group-type>iscsi</initiator-group-type>'
            '<os-type>linux</os-type><ostype>linux</ostype>')
        request.Args = text.Raw(igroup_create_xml % igroup_name)
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        request = self.client.factory.create('Request')
        request.Name = 'igroup-add'
        igroup_add_xml = (
            '<initiator-group-name>%s</initiator-group-name>'
            '<initiator>%s</initiator>')
        request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        return igroup_name

    def _get_lun_mapping(self, host_id, lunpath, igroup_name):
        """Get the mapping between a LUN and an igroup.

        Check if a given LUN is already mapped to the given igroup (initiator
        group). If the LUN is mapped, also return the LUN number for the
        mapping.
        """
        request = self.client.factory.create('Request')
        request.Name = 'lun-map-list-info'
        request.Args = text.Raw('<path>%s</path>' % (lunpath))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        igroups = response.Results['initiator-groups']
        if self._api_elem_is_empty(igroups):
            return {'mapped': False}
        igroup_infos = igroups[0]['initiator-group-info']
        for igroup_info in igroup_infos:
            if igroup_name == igroup_info['initiator-group-name'][0]:
                return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]}
        return {'mapped': False}

    def _map_initiator(self, host_id, lunpath, igroup_name):
        """Map a LUN to an igroup.

        Map the given LUN to the given igroup (initiator group). Return the LUN
        number that the LUN was mapped to (the filer will choose the lowest
        available number).
        """
        request = self.client.factory.create('Request')
        request.Name = 'lun-map'
        lun_map_xml = ('<initiator-group>%s</initiator-group>'
                       '<path>%s</path>')
        request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)
        return response.Results['lun-id-assigned'][0]

    def _unmap_initiator(self, host_id, lunpath, igroup_name):
        """Unmap the given LUN from the given igroup (initiator group)."""
        request = self.client.factory.create('Request')
        request.Name = 'lun-unmap'
        lun_unmap_xml = ('<initiator-group>%s</initiator-group>'
                         '<path>%s</path>')
        request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath))
        response = self.client.service.ApiProxy(Target=host_id,
                                                Request=request)
        self._check_fail(request, response)

    def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name):
        """Ensure that a LUN is mapped to a particular initiator.

        Check if a LUN is mapped to a given initiator already and create
        the mapping if it is not. A new igroup will be created if needed.
        Returns the LUN number for the mapping between the LUN and initiator
        in both cases.
        """
        lunpath = '/vol/' + lunpath
        igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
        if not igroup_name:
            igroup_name = self._create_igroup(host_id, initiator_name)

        mapping = self._get_lun_mapping(host_id, lunpath, igroup_name)
        if mapping['mapped']:
            return mapping['lun_num']
        return self._map_initiator(host_id, lunpath, igroup_name)

    def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name):
        """Ensure that a LUN is not mapped to a particular initiator.

        Check if a LUN is mapped to a given initiator and remove the
        mapping if it is. This does not destroy the igroup.
        """
        lunpath = '/vol/' + lunpath
        igroup_name = self._find_igroup_for_initiator(host_id, initiator_name)
        if not igroup_name:
            return

        mapping = self._get_lun_mapping(host_id, lunpath, igroup_name)
        if mapping['mapped']:
            self._unmap_initiator(host_id, lunpath, igroup_name)

    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        Do the LUN masking on the storage system so the initiator can access
        the LUN on the target. Also return the iSCSI properties so the
        initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because we cannot store
        the LUN number in the database. We only find out what the LUN number
        will be during this method call so we construct the properties
        dictionary ourselves.
        """
        initiator_name = connector['initiator']
        lun_id = volume['provider_location']
        if not lun_id:
            msg = _("No LUN ID for volume %s") % volume['name']
            raise exception.VolumeBackendAPIException(data=msg)
        lun = self._get_lun_details(lun_id)
        lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath,
                                                initiator_name)
        host = self._get_host_details(lun.HostId)
        portal = self._get_target_portal_for_host(host.HostId,
                                                  host.HostAddress)
        if not portal:
            msg = _('Failed to get target portal for filer: %s')
            raise exception.VolumeBackendAPIException(data=msg % host.HostName)

        iqn = self._get_iqn_for_host(host.HostId)
        if not iqn:
            msg = _('Failed to get target IQN for filer: %s')
            raise exception.VolumeBackendAPIException(data=msg % host.HostName)

        properties = {}
        properties['target_discovered'] = False
        (address, port) = (portal['address'], portal['port'])
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun_num
        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }

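    # Illustrative sketch, not part of the original file: with fabricated
    # example values, the dict returned by initialize_connection() above
    # has this shape, which is how cinder iSCSI drivers of this era handed
    # connection info to the compute node:
    #
    #     {'driver_volume_type': 'iscsi',
    #      'data': {'target_discovered': False,
    #               'target_portal': '10.0.0.5:3260',
    #               'target_iqn': 'iqn.1992-08.com.netapp:sn.0123456789',
    #               'target_lun': 0,
    #               'volume_id': 'volume-0001'}}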
808 def terminate_connection(self, volume, connector):
809 """Driver entry point to unattach a volume from an instance.
810
811 Unmask the LUN on the storage system so the given intiator can no
812 longer access it.
813 """
814 initiator_name = connector['initiator']
815 lun_id = volume['provider_location']
816 if not lun_id:
817 msg = _('No LUN ID for volume %s') % volume['name']
818 raise exception.VolumeBackendAPIException(data=msg)
819 lun = self._get_lun_details(lun_id)
820 self._ensure_initiator_unmapped(lun.HostId, lun.LunPath,
821 initiator_name)
822
823 def _is_clone_done(self, host_id, clone_op_id, volume_uuid):
824 """Check the status of a clone operation.
825
826 Return True if done, False otherwise.
827 """
828 request = self.client.factory.create('Request')
829 request.Name = 'clone-list-status'
830 clone_list_status_xml = (
831 '<clone-id><clone-id-info>'
832 '<clone-op-id>%s</clone-op-id>'
833 '<volume-uuid>%s</volume-uuid>'
834 '</clone-id-info></clone-id>')
835 request.Args = text.Raw(clone_list_status_xml % (clone_op_id,
836 volume_uuid))
837 response = self.client.service.ApiProxy(Target=host_id,
838 Request=request)
839 self._check_fail(request, response)
840 status = response.Results['status']
841 if self._api_elem_is_empty(status):
842 return False
843 ops_info = status[0]['ops-info'][0]
844 state = ops_info['clone-state'][0]
845 return 'completed' == state
846
847 def _clone_lun(self, host_id, src_path, dest_path, snap):
848 """Create a clone of a NetApp LUN.
849
850 The clone initially consumes no space and is not space reserved.
851 """
852 request = self.client.factory.create('Request')
853 request.Name = 'clone-start'
854 clone_start_xml = (
855 '<source-path>%s</source-path><no-snap>%s</no-snap>'
856 '<destination-path>%s</destination-path>')
857 if snap:
858 no_snap = 'false'
859 else:
860 no_snap = 'true'
861 request.Args = text.Raw(clone_start_xml % (src_path, no_snap,
862 dest_path))
863 response = self.client.service.ApiProxy(Target=host_id,
864 Request=request)
865 self._check_fail(request, response)
866 clone_id = response.Results['clone-id'][0]
867 clone_id_info = clone_id['clone-id-info'][0]
868 clone_op_id = clone_id_info['clone-op-id'][0]
869 volume_uuid = clone_id_info['volume-uuid'][0]
870 while not self._is_clone_done(host_id, clone_op_id, volume_uuid):
871 time.sleep(5)
872
873 def _refresh_dfm_luns(self, host_id):
874 """Refresh the LUN list for one filer in DFM."""
875 server = self.client.service
876 server.DfmObjectRefresh(ObjectNameOrId=host_id, ChildType='lun_path')
877 while True:
878 time.sleep(15)
879 res = server.DfmMonitorTimestampList(HostNameOrId=host_id)
880 for timestamp in res.DfmMonitoringTimestamp:
881 if 'lun' != timestamp.MonitorName:
882 continue
883 if timestamp.LastMonitoringTimestamp:
884 return
885
886 def _destroy_lun(self, host_id, lun_path):
887 """Destroy a LUN on the filer."""
888 request = self.client.factory.create('Request')
889 request.Name = 'lun-offline'
890 path_xml = '<path>%s</path>'
891 request.Args = text.Raw(path_xml % lun_path)
892 response = self.client.service.ApiProxy(Target=host_id,
893 Request=request)
894 self._check_fail(request, response)
895 request = self.client.factory.create('Request')
896 request.Name = 'lun-destroy'
897 request.Args = text.Raw(path_xml % lun_path)
898 response = self.client.service.ApiProxy(Target=host_id,
899 Request=request)
900 self._check_fail(request, response)
901
902 def _resize_volume(self, host_id, vol_name, new_size):
903 """Resize the volume by the amount requested."""
904 request = self.client.factory.create('Request')
905 request.Name = 'volume-size'
906 volume_size_xml = (
907 '<volume>%s</volume><new-size>%s</new-size>')
908 request.Args = text.Raw(volume_size_xml % (vol_name, new_size))
909 response = self.client.service.ApiProxy(Target=host_id,
910 Request=request)
911 self._check_fail(request, response)
912
913 def _create_qtree(self, host_id, vol_name, qtree_name):
914 """Create a qtree the filer."""
915 request = self.client.factory.create('Request')
916 request.Name = 'qtree-create'
917 qtree_create_xml = (
918 '<mode>0755</mode><volume>%s</volume><qtree>%s</qtree>')
919 request.Args = text.Raw(qtree_create_xml % (vol_name, qtree_name))
920 response = self.client.service.ApiProxy(Target=host_id,
921 Request=request)
922 self._check_fail(request, response)
923
924 def create_snapshot(self, snapshot):
925 """Driver entry point for creating a snapshot.
926
927 This driver implements snapshots by using efficient single-file
928 (LUN) cloning.
929 """
930 vol_name = snapshot['volume_name']
931 snapshot_name = snapshot['name']
932 project = snapshot['project_id']
933 lun = self._lookup_lun_for_volume(vol_name, project)
934 lun_id = lun.id
935 lun = self._get_lun_details(lun_id)
936 extra_gb = snapshot['volume_size']
937 new_size = '+%dg' % extra_gb
938 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
939 # LunPath is the partial LUN path in this format: volume/qtree/lun
940 lun_path = str(lun.LunPath)
941 lun_name = lun_path[lun_path.rfind('/') + 1:]
942 qtree_path = '/vol/%s/%s' % (lun.VolumeName, lun.QtreeName)
943 src_path = '%s/%s' % (qtree_path, lun_name)
944 dest_path = '%s/%s' % (qtree_path, snapshot_name)
945 self._clone_lun(lun.HostId, src_path, dest_path, True)
946
947 def delete_snapshot(self, snapshot):
948 """Driver entry point for deleting a snapshot."""
949 vol_name = snapshot['volume_name']
950 snapshot_name = snapshot['name']
951 project = snapshot['project_id']
952 lun = self._lookup_lun_for_volume(vol_name, project)
953 lun_id = lun.id
954 lun = self._get_lun_details(lun_id)
955 lun_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
956 snapshot_name)
957 self._destroy_lun(lun.HostId, lun_path)
958 extra_gb = snapshot['volume_size']
959 new_size = '-%dg' % extra_gb
960 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
961
962 def create_volume_from_snapshot(self, volume, snapshot):
963 """Driver entry point for creating a new volume from a snapshot.
964
965 Many would call this "cloning" and in fact we use cloning to implement
966 this feature.
967 """
968 vol_size = volume['size']
969 snap_size = snapshot['volume_size']
970 if vol_size != snap_size:
971 msg = _('Cannot create volume of size %(vol_size)s from '
972 'snapshot of size %(snap_size)s')
973 raise exception.VolumeBackendAPIException(data=msg % locals())
974 vol_name = snapshot['volume_name']
975 snapshot_name = snapshot['name']
976 project = snapshot['project_id']
977 lun = self._lookup_lun_for_volume(vol_name, project)
978 lun_id = lun.id
979 dataset = lun.dataset
980 old_type = dataset.type
981 new_type = self._get_ss_type(volume)
982 if new_type != old_type:
983 msg = _('Cannot create volume of type %(new_type)s from '
984 'snapshot of type %(old_type)s')
985 raise exception.VolumeBackendAPIException(data=msg % locals())
986 lun = self._get_lun_details(lun_id)
987 extra_gb = vol_size
988 new_size = '+%dg' % extra_gb
989 self._resize_volume(lun.HostId, lun.VolumeName, new_size)
990 clone_name = volume['name']
991 self._create_qtree(lun.HostId, lun.VolumeName, clone_name)
992 src_path = '/vol/%s/%s/%s' % (lun.VolumeName, lun.QtreeName,
993 snapshot_name)
994 dest_path = '/vol/%s/%s/%s' % (lun.VolumeName, clone_name, clone_name)
995 self._clone_lun(lun.HostId, src_path, dest_path, False)
996 self._refresh_dfm_luns(lun.HostId)
997 self._discover_dataset_luns(dataset, clone_name)
998
999 def check_for_export(self, context, volume_id):
1000 raise NotImplementedError()
1001
1002
1003class NetAppLun(object):
1004 """Represents a LUN on NetApp storage."""
1005
1006 def __init__(self, handle, name, size, metadata_dict):
1007 self.handle = handle
1008 self.name = name
1009 self.size = size
1010 self.metadata = metadata_dict
1011
1012 def get_metadata_property(self, prop):
1013 """Get the metadata property of a LUN."""
1014 if prop in self.metadata:
1015 return self.metadata[prop]
1016 name = self.name
1017 msg = _("No metadata property %(prop)s defined for the LUN %(name)s")
1018 LOG.debug(msg % locals())
1019
1020
1021class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
1022 """NetApp C-mode iSCSI volume driver."""
1023
1024 def __init__(self, *args, **kwargs):
1025 super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
1026 self.lun_table = {}
1027
1028 def _create_client(self, **kwargs):
1029 """Instantiate a web services client.
1030
1031 This method creates a "suds" client to make web services calls to the
1032 DFM server. Note that the WSDL file is quite large and may take
1033 a few seconds to parse.
1034 """
1035 wsdl_url = kwargs['wsdl_url']
1036 LOG.debug(_('Using WSDL: %s') % wsdl_url)
1037 if kwargs['cache']:
1038 self.client = client.Client(wsdl_url, username=kwargs['login'],
1039 password=kwargs['password'])
1040 else:
1041 self.client = client.Client(wsdl_url, username=kwargs['login'],
1042 password=kwargs['password'],
1043 cache=None)
1044
1045 def _check_flags(self):
1046 """Ensure that the flags we care about are set."""
1047 required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password',
1048 'netapp_server_hostname', 'netapp_server_port']
1049 for flag in required_flags:
1050 if not getattr(FLAGS, flag, None):
1051 msg = _('%s is not set') % flag
1052 raise exception.InvalidInput(data=msg)
1053
1054 def do_setup(self, context):
1055 """Setup the NetApp Volume driver.
1056
1057 Called one time by the manager after the driver is loaded.
1058 Validate the flags we care about and setup the suds (web services)
1059 client.
1060 """
1061 self._check_flags()
1062 self._create_client(wsdl_url=FLAGS.netapp_wsdl_url,
1063 login=FLAGS.netapp_login, password=FLAGS.netapp_password,
1064 hostname=FLAGS.netapp_server_hostname,
1065 port=FLAGS.netapp_server_port, cache=True)
1066
1067 def check_for_setup_error(self):
1068 """Check that the driver is working and can communicate.
1069
1070 Discovers the LUNs on the NetApp server.
1071 """
1072 self.lun_table = {}
1073 luns = self.client.service.ListLuns()
1074 for lun in luns:
1075 meta_dict = {}
1076 if hasattr(lun, 'Metadata'):
1077 meta_dict = self._create_dict_from_meta(lun.Metadata)
1078 discovered_lun = NetAppLun(lun.Handle, lun.Name, lun.Size,
1079 meta_dict)
1080 self._add_lun_to_table(discovered_lun)
1081 LOG.debug(_("Success getting LUN list from server"))
1082
1083 def create_volume(self, volume):
1084 """Driver entry point for creating a new volume."""
1085 default_size = '104857600' # 100 MB
1086 gigabytes = 1073741824L # 2^30
1087 name = volume['name']
1088 if int(volume['size']) == 0:
1089 size = default_size
1090 else:
1091 size = str(int(volume['size']) * gigabytes)
1092 extra_args = {}
1093 extra_args['OsType'] = 'linux'
1094 extra_args['QosType'] = self._get_qos_type(volume)
1095 extra_args['Container'] = volume['project_id']
1096 extra_args['Display'] = volume['display_name']
1097 extra_args['Description'] = volume['display_description']
1098 extra_args['SpaceReserved'] = True
1099 server = self.client.service
1100 metadata = self._create_metadata_list(extra_args)
1101 lun = server.ProvisionLun(Name=name, Size=size,
1102 Metadata=metadata)
1103 LOG.debug(_("Created LUN with name %s") % name)
1104 self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
1105 lun.Size, self._create_dict_from_meta(lun.Metadata)))
1106
1107 def delete_volume(self, volume):
1108 """Driver entry point for destroying existing volumes."""
1109 name = volume['name']
1110 handle = self._get_lun_handle(name)
1111 self.client.service.DestroyLun(Handle=handle)
1112 LOG.debug(_("Destroyed LUN %s") % handle)
1113 self.lun_table.pop(name)
1114
1115 def ensure_export(self, context, volume):
1116 """Driver entry point to get the export info for an existing volume."""
1117 handle = self._get_lun_handle(volume['name'])
1118 return {'provider_location': handle}
1119
1120 def create_export(self, context, volume):
1121 """Driver entry point to get the export info for a new volume."""
1122 handle = self._get_lun_handle(volume['name'])
1123 return {'provider_location': handle}
1124
1125 def remove_export(self, context, volume):
1126 """Driver exntry point to remove an export for a volume.
1127
1128 Since exporting is idempotent in this driver, we have nothing
1129 to do for unexporting.
1130 """
1131 pass
1132
1133 def initialize_connection(self, volume, connector):
1134 """Driver entry point to attach a volume to an instance.
1135
1136 Do the LUN masking on the storage system so the initiator can access
1137 the LUN on the target. Also return the iSCSI properties so the
1138 initiator can find the LUN. This implementation does not call
1139         _get_iscsi_properties() to get the properties because we cannot
1140         store the LUN number in the database. We only find out what the LUN
1141         number will be during this method call, so we construct the
1142         properties dictionary ourselves.
1143 """
1144 initiator_name = connector['initiator']
1145 handle = volume['provider_location']
1146 server = self.client.service
1147 server.MapLun(Handle=handle, InitiatorType="iscsi",
1148 InitiatorName=initiator_name)
1149 msg = _("Mapped LUN %(handle)s to the initiator %(initiator_name)s")
1150 LOG.debug(msg % locals())
1151
1152 target_details_list = server.GetLunTargetDetails(Handle=handle,
1153 InitiatorType="iscsi", InitiatorName=initiator_name)
1154         msg = _("Successfully fetched target details for LUN %(handle)s and "
1155 "initiator %(initiator_name)s")
1156 LOG.debug(msg % locals())
1157
1158 if not target_details_list:
1159 msg = _('Failed to get LUN target details for the LUN %s')
1160 raise exception.VolumeBackendAPIException(data=msg % handle)
1161 target_details = target_details_list[0]
1162         if not (target_details.Address and target_details.Port):
1163 msg = _('Failed to get target portal for the LUN %s')
1164 raise exception.VolumeBackendAPIException(data=msg % handle)
1165 iqn = target_details.Iqn
1166 if not iqn:
1167 msg = _('Failed to get target IQN for the LUN %s')
1168 raise exception.VolumeBackendAPIException(data=msg % handle)
1169
1170 properties = {}
1171 properties['target_discovered'] = False
1172 (address, port) = (target_details.Address, target_details.Port)
1173 properties['target_portal'] = '%s:%s' % (address, port)
1174 properties['target_iqn'] = iqn
1175 properties['target_lun'] = target_details.LunNumber
1176 properties['volume_id'] = volume['id']
1177
1178 auth = volume['provider_auth']
1179 if auth:
1180 (auth_method, auth_username, auth_secret) = auth.split()
1181 properties['auth_method'] = auth_method
1182 properties['auth_username'] = auth_username
1183 properties['auth_password'] = auth_secret
1184
1185 return {
1186 'driver_volume_type': 'iscsi',
1187 'data': properties,
1188 }
1189
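# Illustrative sketch, not part of the original file: for a target portal
# at 10.0.0.5:3260 exporting the LUN at number 0, the method above returns
# a structure shaped like this (all values hypothetical).
example_connection_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_discovered': False,
        'target_portal': '10.0.0.5:3260',
        'target_iqn': 'iqn.1992-08.com.netapp:sn.example',
        'target_lun': 0,
        'volume_id': 'volume-uuid',
    },
}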
1190 def terminate_connection(self, volume, connector):
1191         """Driver entry point to detach a volume from an instance.
1192
1193         Unmask the LUN on the storage system so the given initiator can no
1194         longer access it.
1195 """
1196 initiator_name = connector['initiator']
1197 handle = volume['provider_location']
1198 self.client.service.UnmapLun(Handle=handle, InitiatorType="iscsi",
1199 InitiatorName=initiator_name)
1200 msg = _("Unmapped LUN %(handle)s from the initiator "
1201 "%(initiator_name)s")
1202 LOG.debug(msg % locals())
1203
1204 def create_snapshot(self, snapshot):
1205 """Driver entry point for creating a snapshot.
1206
1207 This driver implements snapshots by using efficient single-file
1208 (LUN) cloning.
1209 """
1210 vol_name = snapshot['volume_name']
1211 snapshot_name = snapshot['name']
1212 lun = self.lun_table[vol_name]
1213 extra_args = {'SpaceReserved': False}
1214 self._clone_lun(lun.handle, snapshot_name, extra_args)
1215
1216 def delete_snapshot(self, snapshot):
1217 """Driver entry point for deleting a snapshot."""
1218 handle = self._get_lun_handle(snapshot['name'])
1219 self.client.service.DestroyLun(Handle=handle)
1220 LOG.debug(_("Destroyed LUN %s") % handle)
1221
1222 def create_volume_from_snapshot(self, volume, snapshot):
1223 """Driver entry point for creating a new volume from a snapshot.
1224
1225 Many would call this "cloning" and in fact we use cloning to implement
1226 this feature.
1227 """
1228 snapshot_name = snapshot['name']
1229 lun = self.lun_table[snapshot_name]
1230 new_name = volume['name']
1231 extra_args = {}
1232 extra_args['OsType'] = 'linux'
1233 extra_args['QosType'] = self._get_qos_type(volume)
1234 extra_args['Container'] = volume['project_id']
1235 extra_args['Display'] = volume['display_name']
1236 extra_args['Description'] = volume['display_description']
1237 extra_args['SpaceReserved'] = True
1238 self._clone_lun(lun.handle, new_name, extra_args)
1239
1240 def check_for_export(self, context, volume_id):
1241 raise NotImplementedError()
1242
1243 def _get_qos_type(self, volume):
1244 """Get the storage service type for a volume."""
1245 type_id = volume['volume_type_id']
1246 if not type_id:
1247 return None
1248 volume_type = volume_types.get_volume_type(None, type_id)
1249 if not volume_type:
1250 return None
1251 return volume_type['name']
1252
1253 def _add_lun_to_table(self, lun):
1254 """Adds LUN to cache table."""
1255 if not isinstance(lun, NetAppLun):
1256 msg = _("Object is not a NetApp LUN.")
1257 raise exception.VolumeBackendAPIException(data=msg)
1258 self.lun_table[lun.name] = lun
1259
1260 def _clone_lun(self, handle, new_name, extra_args):
1261 """Clone LUN with the given handle to the new name."""
1262 server = self.client.service
1263 metadata = self._create_metadata_list(extra_args)
1264 lun = server.CloneLun(Handle=handle, NewName=new_name,
1265 Metadata=metadata)
1266 LOG.debug(_("Cloned LUN with new name %s") % new_name)
1267 self._add_lun_to_table(NetAppLun(lun.Handle, lun.Name,
1268 lun.Size, self._create_dict_from_meta(lun.Metadata)))
1269
1270 def _create_metadata_list(self, extra_args):
1271 """Creates metadata from kwargs."""
1272 metadata = []
1273 for key in extra_args.keys():
1274 meta = self.client.factory.create("Metadata")
1275 meta.Key = key
1276 meta.Value = extra_args[key]
1277 metadata.append(meta)
1278 return metadata
1279
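# Illustrative round trip, not part of the original file: a pure-Python
# equivalent of _create_metadata_list and _create_dict_from_meta, using
# plain dicts in place of suds "Metadata" factory objects.
def to_pairs(extra_args):
    # Flatten {'OsType': 'linux'} into [{'Key': 'OsType', 'Value': 'linux'}].
    return [{'Key': k, 'Value': v} for k, v in extra_args.items()]

def from_pairs(pairs):
    # Invert to_pairs back into a flat dictionary.
    return dict((p['Key'], p['Value']) for p in pairs)

assert from_pairs(to_pairs({'OsType': 'linux'})) == {'OsType': 'linux'}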
1280 def _get_lun_handle(self, name):
1281 """Get the details for a LUN from our cache table."""
1282         if name not in self.lun_table:
1283 LOG.warn(_("Could not find handle for LUN named %s") % name)
1284 return None
1285 return self.lun_table[name].handle
1286
1287 def _create_dict_from_meta(self, metadata):
1288 """Creates dictionary from metadata array."""
1289 meta_dict = {}
1290 if not metadata:
1291 return meta_dict
1292 for meta in metadata:
1293 meta_dict[meta.Key] = meta.Value
1294 return meta_dict
diff --git a/recipes/scheduler.rb b/recipes/scheduler.rb
index f55a051..e63d4e9 100644
--- a/recipes/scheduler.rb
+++ b/recipes/scheduler.rb
@@ -54,30 +54,6 @@ service "cinder-scheduler" do
54  action [ :enable, :start ]
55end
56
57cookbook_file "/usr/local/bin/cinder-volume-usage-audit" do
58 source "cinder-volume-usage-audit"
59 mode 00755
60 owner "root"
61 group "root"
62end
63
64# run cronjob only on one node
65cron_cmd = "/usr/local/bin/cinder-volume-usage-audit > /var/log/cinder/audit.log 2>&1"
66node_search = search(:node, "roles:openstack-volume-scheduler AND chef_environment:#{node.chef_environment}")
67cron_node = node_search.collect{|a| a.name}.sort[0]
68if node.name == cron_node
69 cron "cinder-volume-usage-audit" do
70 action :create
71 minute node["cinder"]["cron"]["minute"]
72 command cron_cmd
73 end
74else
75 cron "cinder-volume-usage-audit" do
76 action :delete
77 command cron_cmd
78 end
79end
80
81template "/etc/cinder/cinder.conf" do
82  source "cinder.conf.erb"
83  group node["cinder"]["group"]
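The cron stanza removed above elected exactly one scheduler node to run the usage audit: every node searches for its peers, sorts the resulting names, and only the first node in the sorted list keeps the cron entry. A minimal Python sketch of that election logic (illustrative only; the recipe itself is Chef/Ruby):

def elected(local_name, candidate_names):
    # Every node computes the same sorted list, so exactly one node (the
    # lexicographically first) creates the cron job; the others delete it.
    return local_name == sorted(candidate_names)[0]

assert elected('node-a', ['node-b', 'node-a'])
assert not elected('node-b', ['node-b', 'node-a'])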
diff --git a/recipes/volume.rb b/recipes/volume.rb
index 1146992..638f652 100644
--- a/recipes/volume.rb
+++ b/recipes/volume.rb
@@ -100,40 +100,3 @@ template "/etc/tgt/targets.conf" do
100
101  notifies :restart, "service[iscsitarget]", :immediately
102end
103
104cookbook_file "/usr/share/pyshared/cinder/openstack/common/fileutils.py" do
105 source "fileutils_new-b322585613c21067571442aaf9e4e6feb167832b.py"
106 mode 00644
107 owner "root"
108 group "root"
109end
110
111link "/usr/lib/python2.7/dist-packages/cinder/openstack/common/fileutils.py" do
112 to "/usr/share/pyshared/cinder/openstack/common/fileutils.py"
113end
114
115cookbook_file "/usr/share/pyshared/cinder/openstack/common/gettextutils.py" do
116 source "gettextutils_new-8e450aaa6ba1a2a88f6326c2e8d285d00fd28691.py"
117 mode 00644
118 owner "root"
119 group "root"
120end
121
122cookbook_file "/usr/share/pyshared/cinder/openstack/common/lockutils.py" do
123 source "lockutils_new-6dda4af1dd50582a0271fd6c96044ae61af9df7e.py"
124 mode 00644
125 owner "root"
126 group "root"
127end
128
129link "/usr/lib/python2.7/dist-packages/cinder/openstack/common/lockutils.py" do
130 to "/usr/share/pyshared/cinder/openstack/common/lockutils.py"
131end
132
133cookbook_file node["cinder"]["netapp"]["driver"] do
134 source "netapp_new-42cdc4d947a73ae6a3dbbaab36634e425b57c18c.py"
135 mode 00644
136 owner "root"
137 group "root"
138 notifies :restart, "service[cinder-volume]"
139end
diff --git a/spec/scheduler_spec.rb b/spec/scheduler_spec.rb
index 99dfcb8..c438ea4 100644
--- a/spec/scheduler_spec.rb
+++ b/spec/scheduler_spec.rb
@@ -32,29 +32,6 @@ describe "cinder::scheduler" do
32    expect(@chef_run).to set_service_to_start_on_boot "cinder-scheduler"
33  end
34
35 describe "cinder-volume-usage-audit" do
36 before do
37 f = "/usr/local/bin/cinder-volume-usage-audit"
38 @file = @chef_run.cookbook_file f
39 end
40
41 it "has proper owner" do
42 expect(@file).to be_owned_by "root", "root"
43 end
44
45 it "has proper modes" do
46 expect(sprintf("%o", @file.mode)).to eq "755"
47 end
48 end
49
50 it "has cinder-volume-usage-audit cronjob" do
51 cron = @chef_run.cron "cinder-volume-usage-audit"
52 cmd = "/usr/local/bin/cinder-volume-usage-audit > " \
53 "/var/log/cinder/audit.log 2>&1"
54 expect(cron.command).to eq cmd
55 expect(cron.minute).to eq '00'
56 end
57
58  expect_creates_cinder_conf "service[cinder-scheduler]"
59  end
60end
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index a330b97..ded9c29 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -22,7 +22,6 @@ def cinder_stubs
22  ::Chef::Recipe.any_instance.stub(:db_password).and_return String.new
23  ::Chef::Recipe.any_instance.stub(:user_password).and_return String.new
24  ::Chef::Recipe.any_instance.stub(:service_password).and_return String.new
25 ::Chef::Recipe.any_instance.stub(:search).and_return([OpenStruct.new(:name => 'fauxhai.local')])
26end
27
28def expect_runs_openstack_common_logging_recipe
diff --git a/spec/volume_spec.rb b/spec/volume_spec.rb
index a5ce5a7..62c4adc 100644
--- a/spec/volume_spec.rb
+++ b/spec/volume_spec.rb
@@ -45,11 +45,11 @@ describe "cinder::volume" do
45  describe "targets.conf" do
46    before do
47      @file = @chef_run.template "/etc/tgt/targets.conf"
48    end
49
50    it "has proper modes" do
51      expect(sprintf("%o", @file.mode)).to eq "600"
52    end
53
54    it "notifies iscsi restart" do
55      expect(@file).to notify "service[iscsitarget]", :restart
@@ -61,91 +61,6 @@ describe "cinder::volume" do
61      expect(@chef_run).not_to create_file_with_content @file.name,
62        "include /var/lib/cinder/volumes/*"
63    end
64 end
65
66 describe "patches" do
67 before do
68 @os_dir = "/usr/share/pyshared/cinder/openstack/common"
69 @dist_dir = "/usr/lib/python2.7/dist-packages/cinder/openstack/common"
70 end
71
72 describe "fileutils.py" do
73 before do
74 @source = ::File.join @os_dir, "fileutils.py"
75 @file = @chef_run.cookbook_file @source
76 end
77
78 it "has proper owner" do
79 expect(@file).to be_owned_by "root", "root"
80 end
81
82 it "has proper modes" do
83 expect(sprintf("%o", @file.mode)).to eq "644"
84 end
85
86 it "symlinks fileutils.py" do
87 ln = ::File.join @dist_dir, "fileutils.py"
88 expect(@chef_run.link(ln)).to link_to @source
89 end
90 end
91
92 describe "gettextutils.py" do
93 before do
94 @source = ::File.join @os_dir, "gettextutils.py"
95 @file = @chef_run.cookbook_file @source
96 end
97
98 it "has proper owner" do
99 expect(@file).to be_owned_by "root", "root"
100 end
101
102 it "has proper modes" do
103 expect(sprintf("%o", @file.mode)).to eq "644"
104 end
105
106 it "symlinks gettextutils.py" do
107 pending "TODO: should there be a gettextutils symlink?"
108 end
109 end
110
111 describe "lockutils.py" do
112 before do
113 @source = ::File.join @os_dir, "lockutils.py"
114 @file = @chef_run.cookbook_file @source
115 end
116
117 it "has proper owner" do
118 expect(@file).to be_owned_by "root", "root"
119 end
120
121 it "has proper modes" do
122 expect(sprintf("%o", @file.mode)).to eq "644"
123 end
124
125    it "symlinks lockutils.py" do
126 ln = ::File.join @dist_dir, "lockutils.py"
127 expect(@chef_run.link(ln)).to link_to @source
128 end
129 end
130
131 describe "netapp.py" do
132 before do
133 f = "/usr/share/pyshared/cinder/volume/netapp.py"
134 @file = @chef_run.cookbook_file f
135 end
136
137 it "has proper owner" do
138 expect(@file).to be_owned_by "root", "root"
139 end
140
141 it "has proper modes" do
142 expect(sprintf("%o", @file.mode)).to eq "644"
143 end
144
145    it "notifies cinder-volume restart" do
146 expect(@file).to notify "service[cinder-volume]", :restart
147 end
148 end
149    end
150  end
151end