author     Pradip Rawat <Pradip.Rawat.CTR@kaminario.com>    2016-10-16 11:36:29 +0530
committer  Pradip Rawat <Pradip.Rawat.CTR@kaminario.com>    2016-10-16 11:36:29 +0530
commit     9550a4d0e62a676740b7ddede3df90aa7f0e5048 (patch)
tree       d60df664e9cabd1631d1d97919ee35da440c319f
parent     be18f78d2dac0ea4b0307f8f3d2c5c82903edae0 (diff)
Update fuel plugin repo for Kaminario with latest
Kaminario driver files from GitHub
Change-Id: Ib16114525eed6066c19dfc4b2b28f5e2128eb56f
Co-Authored-By: Chaithanya Kopparthi <chaithanyak@biarca.com>
Notes (review):
Code-Review+2: Pradip Rawat <Pradip.Rawat.CTR@kaminario.com>
Workflow+1: Pradip Rawat <Pradip.Rawat.CTR@kaminario.com>
Verified+2: Jenkins
Submitted-by: Jenkins
Submitted-at: Mon, 17 Oct 2016 10:02:57 +0000
Reviewed-on: https://review.openstack.org/387004
Project: openstack/fuel-plugin-cinder-kaminario
Branch: refs/heads/master
15 files changed, 88 insertions, 2691 deletions
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..d0d3710
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
1 | [gerrit] | ||
2 | host=review.openstack.org | ||
3 | port=29418 | ||
4 | project=openstack/fuel-plugin-cinder-kaminario.git | ||
diff --git a/deployment_scripts/puppet/manifests/cinder_kaminario.pp b/deployment_scripts/puppet/manifests/cinder_kaminario.pp
index 78727fa..481f928 100644
--- a/deployment_scripts/puppet/manifests/cinder_kaminario.pp
+++ b/deployment_scripts/puppet/manifests/cinder_kaminario.pp
@@ -1,8 +1,8 @@
 notice('MODULAR: cinder_kaminario')


-class { 'kaminario::driver': }->
 class { 'kaminario::krest': }->
+class { 'kaminario::driver': }->
 class { 'kaminario::config': }~> Exec[cinder_volume]

 exec {'cinder_volume':
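The hunk above swaps the ordering so that kaminario::krest is applied before kaminario::driver, with kaminario::config still notifying Exec[cinder_volume]. The commit message does not spell out the reason, but the driver code later in this diff resolves the krest REST client with importutils.try_import and aborts setup when the package is missing, so the client needs to be in place before the driver is configured and cinder-volume is restarted. A minimal, self-contained Python sketch of that import guard (require_krest is an illustrative helper, not part of the driver):

from oslo_utils import importutils

# Mirrors kaminario_common.py below: the krest REST client is resolved at
# import time; its absence only surfaces when the backend is set up.
krest = importutils.try_import("krest")


def require_krest():
    # Illustrative helper: fail fast if the krest package is not installed.
    if krest is None:
        raise RuntimeError("Unable to import 'krest' python module.")
    return krest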
diff --git a/deployment_scripts/puppet/modules/kaminario/files/__init__.py b/deployment_scripts/puppet/modules/kaminario/files/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/deployment_scripts/puppet/modules/kaminario/files/__init__.py
+++ /dev/null
diff --git a/deployment_scripts/puppet/modules/kaminario/files/exception.py b/deployment_scripts/puppet/modules/kaminario/files/exception.py
deleted file mode 100644
index 1927e8d..0000000
--- a/deployment_scripts/puppet/modules/kaminario/files/exception.py
+++ /dev/null
@@ -1,1128 +0,0 @@
1 | # Copyright 2010 United States Government as represented by the | ||
2 | # Administrator of the National Aeronautics and Space Administration. | ||
3 | # All Rights Reserved. | ||
4 | # | ||
5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may | ||
6 | # not use this file except in compliance with the License. You may obtain | ||
7 | # a copy of the License at | ||
8 | # | ||
9 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
10 | # | ||
11 | # Unless required by applicable law or agreed to in writing, software | ||
12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | ||
13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | ||
14 | # License for the specific language governing permissions and limitations | ||
15 | # under the License. | ||
16 | |||
17 | """Cinder base exception handling. | ||
18 | |||
19 | Includes decorator for re-raising Cinder-type exceptions. | ||
20 | |||
21 | SHOULD include dedicated exception logging. | ||
22 | |||
23 | """ | ||
24 | |||
25 | import sys | ||
26 | |||
27 | from oslo_config import cfg | ||
28 | from oslo_log import log as logging | ||
29 | from oslo_versionedobjects import exception as obj_exc | ||
30 | import six | ||
31 | import webob.exc | ||
32 | from webob.util import status_generic_reasons | ||
33 | from webob.util import status_reasons | ||
34 | |||
35 | from cinder.i18n import _, _LE | ||
36 | |||
37 | |||
38 | LOG = logging.getLogger(__name__) | ||
39 | |||
40 | exc_log_opts = [ | ||
41 | cfg.BoolOpt('fatal_exception_format_errors', | ||
42 | default=False, | ||
43 | help='Make exception message format errors fatal.'), | ||
44 | ] | ||
45 | |||
46 | CONF = cfg.CONF | ||
47 | CONF.register_opts(exc_log_opts) | ||
48 | |||
49 | |||
50 | class ConvertedException(webob.exc.WSGIHTTPException): | ||
51 | def __init__(self, code=500, title="", explanation=""): | ||
52 | self.code = code | ||
53 | # There is a strict rule about constructing status line for HTTP: | ||
54 | # '...Status-Line, consisting of the protocol version followed by a | ||
55 | # numeric status code and its associated textual phrase, with each | ||
56 | # element separated by SP characters' | ||
57 | # (http://www.faqs.org/rfcs/rfc2616.html) | ||
58 | # 'code' and 'title' can not be empty because they correspond | ||
59 | # to numeric status code and its associated text | ||
60 | if title: | ||
61 | self.title = title | ||
62 | else: | ||
63 | try: | ||
64 | self.title = status_reasons[self.code] | ||
65 | except KeyError: | ||
66 | generic_code = self.code // 100 | ||
67 | self.title = status_generic_reasons[generic_code] | ||
68 | self.explanation = explanation | ||
69 | super(ConvertedException, self).__init__() | ||
70 | |||
71 | |||
72 | class Error(Exception): | ||
73 | pass | ||
74 | |||
75 | |||
76 | class CinderException(Exception): | ||
77 | """Base Cinder Exception | ||
78 | |||
79 | To correctly use this class, inherit from it and define | ||
80 | a 'message' property. That message will get printf'd | ||
81 | with the keyword arguments provided to the constructor. | ||
82 | |||
83 | """ | ||
84 | message = _("An unknown exception occurred.") | ||
85 | code = 500 | ||
86 | headers = {} | ||
87 | safe = False | ||
88 | |||
89 | def __init__(self, message=None, **kwargs): | ||
90 | self.kwargs = kwargs | ||
91 | self.kwargs['message'] = message | ||
92 | |||
93 | if 'code' not in self.kwargs: | ||
94 | try: | ||
95 | self.kwargs['code'] = self.code | ||
96 | except AttributeError: | ||
97 | pass | ||
98 | |||
99 | for k, v in self.kwargs.items(): | ||
100 | if isinstance(v, Exception): | ||
101 | self.kwargs[k] = six.text_type(v) | ||
102 | |||
103 | if self._should_format(): | ||
104 | try: | ||
105 | message = self.message % kwargs | ||
106 | |||
107 | except Exception: | ||
108 | exc_info = sys.exc_info() | ||
109 | # kwargs doesn't match a variable in the message | ||
110 | # log the issue and the kwargs | ||
111 | LOG.exception(_LE('Exception in string format operation')) | ||
112 | for name, value in kwargs.items(): | ||
113 | LOG.error(_LE("%(name)s: %(value)s"), | ||
114 | {'name': name, 'value': value}) | ||
115 | if CONF.fatal_exception_format_errors: | ||
116 | six.reraise(*exc_info) | ||
117 | # at least get the core message out if something happened | ||
118 | message = self.message | ||
119 | elif isinstance(message, Exception): | ||
120 | message = six.text_type(message) | ||
121 | |||
122 | # NOTE(luisg): We put the actual message in 'msg' so that we can access | ||
123 | # it, because if we try to access the message via 'message' it will be | ||
124 | # overshadowed by the class' message attribute | ||
125 | self.msg = message | ||
126 | super(CinderException, self).__init__(message) | ||
127 | |||
128 | def _should_format(self): | ||
129 | return self.kwargs['message'] is None or '%(message)' in self.message | ||
130 | |||
131 | def __unicode__(self): | ||
132 | return six.text_type(self.msg) | ||
133 | |||
134 | |||
135 | class VolumeBackendAPIException(CinderException): | ||
136 | message = _("Bad or unexpected response from the storage volume " | ||
137 | "backend API: %(data)s") | ||
138 | |||
139 | |||
140 | class VolumeDriverException(CinderException): | ||
141 | message = _("Volume driver reported an error: %(message)s") | ||
142 | |||
143 | |||
144 | class BackupDriverException(CinderException): | ||
145 | message = _("Backup driver reported an error: %(message)s") | ||
146 | |||
147 | |||
148 | class GlanceConnectionFailed(CinderException): | ||
149 | message = _("Connection to glance failed: %(reason)s") | ||
150 | |||
151 | |||
152 | class NotAuthorized(CinderException): | ||
153 | message = _("Not authorized.") | ||
154 | code = 403 | ||
155 | |||
156 | |||
157 | class AdminRequired(NotAuthorized): | ||
158 | message = _("User does not have admin privileges") | ||
159 | |||
160 | |||
161 | class PolicyNotAuthorized(NotAuthorized): | ||
162 | message = _("Policy doesn't allow %(action)s to be performed.") | ||
163 | |||
164 | |||
165 | class ImageNotAuthorized(CinderException): | ||
166 | message = _("Not authorized for image %(image_id)s.") | ||
167 | |||
168 | |||
169 | class DriverNotInitialized(CinderException): | ||
170 | message = _("Volume driver not ready.") | ||
171 | |||
172 | |||
173 | class Invalid(CinderException): | ||
174 | message = _("Unacceptable parameters.") | ||
175 | code = 400 | ||
176 | |||
177 | |||
178 | class InvalidSnapshot(Invalid): | ||
179 | message = _("Invalid snapshot: %(reason)s") | ||
180 | |||
181 | |||
182 | class InvalidVolumeAttachMode(Invalid): | ||
183 | message = _("Invalid attaching mode '%(mode)s' for " | ||
184 | "volume %(volume_id)s.") | ||
185 | |||
186 | |||
187 | class VolumeAttached(Invalid): | ||
188 | message = _("Volume %(volume_id)s is still attached, detach volume first.") | ||
189 | |||
190 | |||
191 | class InvalidResults(Invalid): | ||
192 | message = _("The results are invalid.") | ||
193 | |||
194 | |||
195 | class InvalidInput(Invalid): | ||
196 | message = _("Invalid input received: %(reason)s") | ||
197 | |||
198 | |||
199 | class InvalidVolumeType(Invalid): | ||
200 | message = _("Invalid volume type: %(reason)s") | ||
201 | |||
202 | |||
203 | class InvalidVolume(Invalid): | ||
204 | message = _("Invalid volume: %(reason)s") | ||
205 | |||
206 | |||
207 | class InvalidContentType(Invalid): | ||
208 | message = _("Invalid content type %(content_type)s.") | ||
209 | |||
210 | |||
211 | class InvalidHost(Invalid): | ||
212 | message = _("Invalid host: %(reason)s") | ||
213 | |||
214 | |||
215 | # Cannot be templated as the error syntax varies. | ||
216 | # msg needs to be constructed when raised. | ||
217 | class InvalidParameterValue(Invalid): | ||
218 | message = _("%(err)s") | ||
219 | |||
220 | |||
221 | class InvalidAuthKey(Invalid): | ||
222 | message = _("Invalid auth key: %(reason)s") | ||
223 | |||
224 | |||
225 | class InvalidConfigurationValue(Invalid): | ||
226 | message = _('Value "%(value)s" is not valid for ' | ||
227 | 'configuration option "%(option)s"') | ||
228 | |||
229 | |||
230 | class ServiceUnavailable(Invalid): | ||
231 | message = _("Service is unavailable at this time.") | ||
232 | |||
233 | |||
234 | class ImageUnacceptable(Invalid): | ||
235 | message = _("Image %(image_id)s is unacceptable: %(reason)s") | ||
236 | |||
237 | |||
238 | class DeviceUnavailable(Invalid): | ||
239 | message = _("The device in the path %(path)s is unavailable: %(reason)s") | ||
240 | |||
241 | |||
242 | class InvalidUUID(Invalid): | ||
243 | message = _("Expected a uuid but received %(uuid)s.") | ||
244 | |||
245 | |||
246 | class InvalidAPIVersionString(Invalid): | ||
247 | message = _("API Version String %(version)s is of invalid format. Must " | ||
248 | "be of format MajorNum.MinorNum.") | ||
249 | |||
250 | |||
251 | class VersionNotFoundForAPIMethod(Invalid): | ||
252 | message = _("API version %(version)s is not supported on this method.") | ||
253 | |||
254 | |||
255 | class InvalidGlobalAPIVersion(Invalid): | ||
256 | message = _("Version %(req_ver)s is not supported by the API. Minimum " | ||
257 | "is %(min_ver)s and maximum is %(max_ver)s.") | ||
258 | |||
259 | |||
260 | class APIException(CinderException): | ||
261 | message = _("Error while requesting %(service)s API.") | ||
262 | |||
263 | def __init__(self, message=None, **kwargs): | ||
264 | if 'service' not in kwargs: | ||
265 | kwargs['service'] = 'unknown' | ||
266 | super(APIException, self).__init__(message, **kwargs) | ||
267 | |||
268 | |||
269 | class APITimeout(APIException): | ||
270 | message = _("Timeout while requesting %(service)s API.") | ||
271 | |||
272 | |||
273 | class RPCTimeout(CinderException): | ||
274 | message = _("Timeout while requesting capabilities from backend " | ||
275 | "%(service)s.") | ||
276 | code = 502 | ||
277 | |||
278 | |||
279 | class NotFound(CinderException): | ||
280 | message = _("Resource could not be found.") | ||
281 | code = 404 | ||
282 | safe = True | ||
283 | |||
284 | |||
285 | class VolumeNotFound(NotFound): | ||
286 | message = _("Volume %(volume_id)s could not be found.") | ||
287 | |||
288 | |||
289 | class VolumeAttachmentNotFound(NotFound): | ||
290 | message = _("Volume attachment could not be found with " | ||
291 | "filter: %(filter)s .") | ||
292 | |||
293 | |||
294 | class VolumeMetadataNotFound(NotFound): | ||
295 | message = _("Volume %(volume_id)s has no metadata with " | ||
296 | "key %(metadata_key)s.") | ||
297 | |||
298 | |||
299 | class VolumeAdminMetadataNotFound(NotFound): | ||
300 | message = _("Volume %(volume_id)s has no administration metadata with " | ||
301 | "key %(metadata_key)s.") | ||
302 | |||
303 | |||
304 | class InvalidVolumeMetadata(Invalid): | ||
305 | message = _("Invalid metadata: %(reason)s") | ||
306 | |||
307 | |||
308 | class InvalidVolumeMetadataSize(Invalid): | ||
309 | message = _("Invalid metadata size: %(reason)s") | ||
310 | |||
311 | |||
312 | class SnapshotMetadataNotFound(NotFound): | ||
313 | message = _("Snapshot %(snapshot_id)s has no metadata with " | ||
314 | "key %(metadata_key)s.") | ||
315 | |||
316 | |||
317 | class VolumeTypeNotFound(NotFound): | ||
318 | message = _("Volume type %(volume_type_id)s could not be found.") | ||
319 | |||
320 | |||
321 | class VolumeTypeNotFoundByName(VolumeTypeNotFound): | ||
322 | message = _("Volume type with name %(volume_type_name)s " | ||
323 | "could not be found.") | ||
324 | |||
325 | |||
326 | class VolumeTypeAccessNotFound(NotFound): | ||
327 | message = _("Volume type access not found for %(volume_type_id)s / " | ||
328 | "%(project_id)s combination.") | ||
329 | |||
330 | |||
331 | class VolumeTypeExtraSpecsNotFound(NotFound): | ||
332 | message = _("Volume Type %(volume_type_id)s has no extra specs with " | ||
333 | "key %(extra_specs_key)s.") | ||
334 | |||
335 | |||
336 | class VolumeTypeInUse(CinderException): | ||
337 | message = _("Volume Type %(volume_type_id)s deletion is not allowed with " | ||
338 | "volumes present with the type.") | ||
339 | |||
340 | |||
341 | class SnapshotNotFound(NotFound): | ||
342 | message = _("Snapshot %(snapshot_id)s could not be found.") | ||
343 | |||
344 | |||
345 | class ServerNotFound(NotFound): | ||
346 | message = _("Instance %(uuid)s could not be found.") | ||
347 | |||
348 | |||
349 | class VolumeIsBusy(CinderException): | ||
350 | message = _("deleting volume %(volume_name)s that has snapshot") | ||
351 | |||
352 | |||
353 | class SnapshotIsBusy(CinderException): | ||
354 | message = _("deleting snapshot %(snapshot_name)s that has " | ||
355 | "dependent volumes") | ||
356 | |||
357 | |||
358 | class ISCSITargetNotFoundForVolume(NotFound): | ||
359 | message = _("No target id found for volume %(volume_id)s.") | ||
360 | |||
361 | |||
362 | class InvalidImageRef(Invalid): | ||
363 | message = _("Invalid image href %(image_href)s.") | ||
364 | |||
365 | |||
366 | class ImageNotFound(NotFound): | ||
367 | message = _("Image %(image_id)s could not be found.") | ||
368 | |||
369 | |||
370 | class ServiceNotFound(NotFound): | ||
371 | |||
372 | def __init__(self, message=None, **kwargs): | ||
373 | if kwargs.get('host', None): | ||
374 | self.message = _("Service %(service_id)s could not be " | ||
375 | "found on host %(host)s.") | ||
376 | else: | ||
377 | self.message = _("Service %(service_id)s could not be found.") | ||
378 | super(ServiceNotFound, self).__init__(None, **kwargs) | ||
379 | |||
380 | |||
381 | class ServiceTooOld(Invalid): | ||
382 | message = _("Service is too old to fulfil this request.") | ||
383 | |||
384 | |||
385 | class HostNotFound(NotFound): | ||
386 | message = _("Host %(host)s could not be found.") | ||
387 | |||
388 | |||
389 | class SchedulerHostFilterNotFound(NotFound): | ||
390 | message = _("Scheduler Host Filter %(filter_name)s could not be found.") | ||
391 | |||
392 | |||
393 | class SchedulerHostWeigherNotFound(NotFound): | ||
394 | message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") | ||
395 | |||
396 | |||
397 | class InvalidReservationExpiration(Invalid): | ||
398 | message = _("Invalid reservation expiration %(expire)s.") | ||
399 | |||
400 | |||
401 | class InvalidQuotaValue(Invalid): | ||
402 | message = _("Change would make usage less than 0 for the following " | ||
403 | "resources: %(unders)s") | ||
404 | |||
405 | |||
406 | class InvalidNestedQuotaSetup(CinderException): | ||
407 | message = _("Project quotas are not properly setup for nested quotas: " | ||
408 | "%(reason)s.") | ||
409 | |||
410 | |||
411 | class QuotaNotFound(NotFound): | ||
412 | message = _("Quota could not be found") | ||
413 | |||
414 | |||
415 | class QuotaResourceUnknown(QuotaNotFound): | ||
416 | message = _("Unknown quota resources %(unknown)s.") | ||
417 | |||
418 | |||
419 | class ProjectQuotaNotFound(QuotaNotFound): | ||
420 | message = _("Quota for project %(project_id)s could not be found.") | ||
421 | |||
422 | |||
423 | class QuotaClassNotFound(QuotaNotFound): | ||
424 | message = _("Quota class %(class_name)s could not be found.") | ||
425 | |||
426 | |||
427 | class QuotaUsageNotFound(QuotaNotFound): | ||
428 | message = _("Quota usage for project %(project_id)s could not be found.") | ||
429 | |||
430 | |||
431 | class ReservationNotFound(QuotaNotFound): | ||
432 | message = _("Quota reservation %(uuid)s could not be found.") | ||
433 | |||
434 | |||
435 | class OverQuota(CinderException): | ||
436 | message = _("Quota exceeded for resources: %(overs)s") | ||
437 | |||
438 | |||
439 | class FileNotFound(NotFound): | ||
440 | message = _("File %(file_path)s could not be found.") | ||
441 | |||
442 | |||
443 | class Duplicate(CinderException): | ||
444 | pass | ||
445 | |||
446 | |||
447 | class VolumeTypeExists(Duplicate): | ||
448 | message = _("Volume Type %(id)s already exists.") | ||
449 | |||
450 | |||
451 | class VolumeTypeAccessExists(Duplicate): | ||
452 | message = _("Volume type access for %(volume_type_id)s / " | ||
453 | "%(project_id)s combination already exists.") | ||
454 | |||
455 | |||
456 | class VolumeTypeEncryptionExists(Invalid): | ||
457 | message = _("Volume type encryption for type %(type_id)s already exists.") | ||
458 | |||
459 | |||
460 | class VolumeTypeEncryptionNotFound(NotFound): | ||
461 | message = _("Volume type encryption for type %(type_id)s does not exist.") | ||
462 | |||
463 | |||
464 | class MalformedRequestBody(CinderException): | ||
465 | message = _("Malformed message body: %(reason)s") | ||
466 | |||
467 | |||
468 | class ConfigNotFound(NotFound): | ||
469 | message = _("Could not find config at %(path)s") | ||
470 | |||
471 | |||
472 | class ParameterNotFound(NotFound): | ||
473 | message = _("Could not find parameter %(param)s") | ||
474 | |||
475 | |||
476 | class PasteAppNotFound(NotFound): | ||
477 | message = _("Could not load paste app '%(name)s' from %(path)s") | ||
478 | |||
479 | |||
480 | class NoValidHost(CinderException): | ||
481 | message = _("No valid host was found. %(reason)s") | ||
482 | |||
483 | |||
484 | class NoMoreTargets(CinderException): | ||
485 | """No more available targets.""" | ||
486 | pass | ||
487 | |||
488 | |||
489 | class QuotaError(CinderException): | ||
490 | message = _("Quota exceeded: code=%(code)s") | ||
491 | code = 413 | ||
492 | headers = {'Retry-After': '0'} | ||
493 | safe = True | ||
494 | |||
495 | |||
496 | class VolumeSizeExceedsAvailableQuota(QuotaError): | ||
497 | message = _("Requested volume or snapshot exceeds allowed %(name)s " | ||
498 | "quota. Requested %(requested)sG, quota is %(quota)sG and " | ||
499 | "%(consumed)sG has been consumed.") | ||
500 | |||
501 | def __init__(self, message=None, **kwargs): | ||
502 | kwargs.setdefault('name', 'gigabytes') | ||
503 | super(VolumeSizeExceedsAvailableQuota, self).__init__( | ||
504 | message, **kwargs) | ||
505 | |||
506 | |||
507 | class VolumeSizeExceedsLimit(QuotaError): | ||
508 | message = _("Requested volume size %(size)d is larger than " | ||
509 | "maximum allowed limit %(limit)d.") | ||
510 | |||
511 | |||
512 | class VolumeBackupSizeExceedsAvailableQuota(QuotaError): | ||
513 | message = _("Requested backup exceeds allowed Backup gigabytes " | ||
514 | "quota. Requested %(requested)sG, quota is %(quota)sG and " | ||
515 | "%(consumed)sG has been consumed.") | ||
516 | |||
517 | |||
518 | class VolumeLimitExceeded(QuotaError): | ||
519 | message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for " | ||
520 | "quota '%(name)s'.") | ||
521 | |||
522 | def __init__(self, message=None, **kwargs): | ||
523 | kwargs.setdefault('name', 'volumes') | ||
524 | super(VolumeLimitExceeded, self).__init__(message, **kwargs) | ||
525 | |||
526 | |||
527 | class SnapshotLimitExceeded(QuotaError): | ||
528 | message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") | ||
529 | |||
530 | |||
531 | class BackupLimitExceeded(QuotaError): | ||
532 | message = _("Maximum number of backups allowed (%(allowed)d) exceeded") | ||
533 | |||
534 | |||
535 | class DuplicateSfVolumeNames(Duplicate): | ||
536 | message = _("Detected more than one volume with name %(vol_name)s") | ||
537 | |||
538 | |||
539 | class VolumeTypeCreateFailed(CinderException): | ||
540 | message = _("Cannot create volume_type with " | ||
541 | "name %(name)s and specs %(extra_specs)s") | ||
542 | |||
543 | |||
544 | class VolumeTypeUpdateFailed(CinderException): | ||
545 | message = _("Cannot update volume_type %(id)s") | ||
546 | |||
547 | |||
548 | class UnknownCmd(VolumeDriverException): | ||
549 | message = _("Unknown or unsupported command %(cmd)s") | ||
550 | |||
551 | |||
552 | class MalformedResponse(VolumeDriverException): | ||
553 | message = _("Malformed response to command %(cmd)s: %(reason)s") | ||
554 | |||
555 | |||
556 | class FailedCmdWithDump(VolumeDriverException): | ||
557 | message = _("Operation failed with status=%(status)s. Full dump: %(data)s") | ||
558 | |||
559 | |||
560 | class InvalidConnectorException(VolumeDriverException): | ||
561 | message = _("Connector doesn't have required information: %(missing)s") | ||
562 | |||
563 | |||
564 | class GlanceMetadataExists(Invalid): | ||
565 | message = _("Glance metadata cannot be updated, key %(key)s" | ||
566 | " exists for volume id %(volume_id)s") | ||
567 | |||
568 | |||
569 | class GlanceMetadataNotFound(NotFound): | ||
570 | message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") | ||
571 | |||
572 | |||
573 | class ExportFailure(Invalid): | ||
574 | message = _("Failed to export for volume: %(reason)s") | ||
575 | |||
576 | |||
577 | class RemoveExportException(VolumeDriverException): | ||
578 | message = _("Failed to remove export for volume %(volume)s: %(reason)s") | ||
579 | |||
580 | |||
581 | class MetadataCreateFailure(Invalid): | ||
582 | message = _("Failed to create metadata for volume: %(reason)s") | ||
583 | |||
584 | |||
585 | class MetadataUpdateFailure(Invalid): | ||
586 | message = _("Failed to update metadata for volume: %(reason)s") | ||
587 | |||
588 | |||
589 | class MetadataCopyFailure(Invalid): | ||
590 | message = _("Failed to copy metadata to volume: %(reason)s") | ||
591 | |||
592 | |||
593 | class InvalidMetadataType(Invalid): | ||
594 | message = _("The type of metadata: %(metadata_type)s for volume/snapshot " | ||
595 | "%(id)s is invalid.") | ||
596 | |||
597 | |||
598 | class ImageCopyFailure(Invalid): | ||
599 | message = _("Failed to copy image to volume: %(reason)s") | ||
600 | |||
601 | |||
602 | class BackupInvalidCephArgs(BackupDriverException): | ||
603 | message = _("Invalid Ceph args provided for backup rbd operation") | ||
604 | |||
605 | |||
606 | class BackupOperationError(Invalid): | ||
607 | message = _("An error has occurred during backup operation") | ||
608 | |||
609 | |||
610 | class BackupMetadataUnsupportedVersion(BackupDriverException): | ||
611 | message = _("Unsupported backup metadata version requested") | ||
612 | |||
613 | |||
614 | class BackupVerifyUnsupportedDriver(BackupDriverException): | ||
615 | message = _("Unsupported backup verify driver") | ||
616 | |||
617 | |||
618 | class VolumeMetadataBackupExists(BackupDriverException): | ||
619 | message = _("Metadata backup already exists for this volume") | ||
620 | |||
621 | |||
622 | class BackupRBDOperationFailed(BackupDriverException): | ||
623 | message = _("Backup RBD operation failed") | ||
624 | |||
625 | |||
626 | class EncryptedBackupOperationFailed(BackupDriverException): | ||
627 | message = _("Backup operation of an encrypted volume failed.") | ||
628 | |||
629 | |||
630 | class BackupNotFound(NotFound): | ||
631 | message = _("Backup %(backup_id)s could not be found.") | ||
632 | |||
633 | |||
634 | class BackupFailedToGetVolumeBackend(NotFound): | ||
635 | message = _("Failed to identify volume backend.") | ||
636 | |||
637 | |||
638 | class InvalidBackup(Invalid): | ||
639 | message = _("Invalid backup: %(reason)s") | ||
640 | |||
641 | |||
642 | class SwiftConnectionFailed(BackupDriverException): | ||
643 | message = _("Connection to swift failed: %(reason)s") | ||
644 | |||
645 | |||
646 | class TransferNotFound(NotFound): | ||
647 | message = _("Transfer %(transfer_id)s could not be found.") | ||
648 | |||
649 | |||
650 | class VolumeMigrationFailed(CinderException): | ||
651 | message = _("Volume migration failed: %(reason)s") | ||
652 | |||
653 | |||
654 | class SSHInjectionThreat(CinderException): | ||
655 | message = _("SSH command injection detected: %(command)s") | ||
656 | |||
657 | |||
658 | class QoSSpecsExists(Duplicate): | ||
659 | message = _("QoS Specs %(specs_id)s already exists.") | ||
660 | |||
661 | |||
662 | class QoSSpecsCreateFailed(CinderException): | ||
663 | message = _("Failed to create qos_specs: " | ||
664 | "%(name)s with specs %(qos_specs)s.") | ||
665 | |||
666 | |||
667 | class QoSSpecsUpdateFailed(CinderException): | ||
668 | message = _("Failed to update qos_specs: " | ||
669 | "%(specs_id)s with specs %(qos_specs)s.") | ||
670 | |||
671 | |||
672 | class QoSSpecsNotFound(NotFound): | ||
673 | message = _("No such QoS spec %(specs_id)s.") | ||
674 | |||
675 | |||
676 | class QoSSpecsAssociateFailed(CinderException): | ||
677 | message = _("Failed to associate qos_specs: " | ||
678 | "%(specs_id)s with type %(type_id)s.") | ||
679 | |||
680 | |||
681 | class QoSSpecsDisassociateFailed(CinderException): | ||
682 | message = _("Failed to disassociate qos_specs: " | ||
683 | "%(specs_id)s with type %(type_id)s.") | ||
684 | |||
685 | |||
686 | class QoSSpecsKeyNotFound(NotFound): | ||
687 | message = _("QoS spec %(specs_id)s has no spec with " | ||
688 | "key %(specs_key)s.") | ||
689 | |||
690 | |||
691 | class InvalidQoSSpecs(Invalid): | ||
692 | message = _("Invalid qos specs: %(reason)s") | ||
693 | |||
694 | |||
695 | class QoSSpecsInUse(CinderException): | ||
696 | message = _("QoS Specs %(specs_id)s is still associated with entities.") | ||
697 | |||
698 | |||
699 | class KeyManagerError(CinderException): | ||
700 | message = _("key manager error: %(reason)s") | ||
701 | |||
702 | |||
703 | class ManageExistingInvalidReference(CinderException): | ||
704 | message = _("Manage existing volume failed due to invalid backend " | ||
705 | "reference %(existing_ref)s: %(reason)s") | ||
706 | |||
707 | |||
708 | class ManageExistingAlreadyManaged(CinderException): | ||
709 | message = _("Unable to manage existing volume. " | ||
710 | "Volume %(volume_ref)s already managed.") | ||
711 | |||
712 | |||
713 | class InvalidReplicationTarget(Invalid): | ||
714 | message = _("Invalid Replication Target: %(reason)s") | ||
715 | |||
716 | |||
717 | class UnableToFailOver(CinderException): | ||
718 | message = _("Unable to failover to replication target:" | ||
719 | "%(reason)s).") | ||
720 | |||
721 | |||
722 | class ReplicationError(CinderException): | ||
723 | message = _("Volume %(volume_id)s replication " | ||
724 | "error: %(reason)s") | ||
725 | |||
726 | |||
727 | class ReplicationNotFound(NotFound): | ||
728 | message = _("Volume replication for %(volume_id)s " | ||
729 | "could not be found.") | ||
730 | |||
731 | |||
732 | class ManageExistingVolumeTypeMismatch(CinderException): | ||
733 | message = _("Manage existing volume failed due to volume type mismatch: " | ||
734 | "%(reason)s") | ||
735 | |||
736 | |||
737 | class ExtendVolumeError(CinderException): | ||
738 | message = _("Error extending volume: %(reason)s") | ||
739 | |||
740 | |||
741 | class EvaluatorParseException(Exception): | ||
742 | message = _("Error during evaluator parsing: %(reason)s") | ||
743 | |||
744 | |||
745 | class LockCreationFailed(CinderException): | ||
746 | message = _('Unable to create lock. Coordination backend not started.') | ||
747 | |||
748 | |||
749 | class LockingFailed(CinderException): | ||
750 | message = _('Lock acquisition failed.') | ||
751 | |||
752 | |||
753 | UnsupportedObjectError = obj_exc.UnsupportedObjectError | ||
754 | OrphanedObjectError = obj_exc.OrphanedObjectError | ||
755 | IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion | ||
756 | ReadOnlyFieldError = obj_exc.ReadOnlyFieldError | ||
757 | ObjectActionError = obj_exc.ObjectActionError | ||
758 | ObjectFieldInvalid = obj_exc.ObjectFieldInvalid | ||
759 | |||
760 | |||
761 | class CappedVersionUnknown(CinderException): | ||
762 | message = _('Unrecoverable Error: Versioned Objects in DB are capped to ' | ||
763 | 'unknown version %(version)s.') | ||
764 | |||
765 | |||
766 | class VolumeGroupNotFound(CinderException): | ||
767 | message = _('Unable to find Volume Group: %(vg_name)s') | ||
768 | |||
769 | |||
770 | class VolumeGroupCreationFailed(CinderException): | ||
771 | message = _('Failed to create Volume Group: %(vg_name)s') | ||
772 | |||
773 | |||
774 | class VolumeDeviceNotFound(CinderException): | ||
775 | message = _('Volume device not found at %(device)s.') | ||
776 | |||
777 | |||
778 | # Driver specific exceptions | ||
779 | # Pure Storage | ||
780 | class PureDriverException(VolumeDriverException): | ||
781 | message = _("Pure Storage Cinder driver failure: %(reason)s") | ||
782 | |||
783 | |||
784 | # SolidFire | ||
785 | class SolidFireAPIException(VolumeBackendAPIException): | ||
786 | message = _("Bad response from SolidFire API") | ||
787 | |||
788 | |||
789 | class SolidFireDriverException(VolumeDriverException): | ||
790 | message = _("SolidFire Cinder Driver exception") | ||
791 | |||
792 | |||
793 | class SolidFireAPIDataException(SolidFireAPIException): | ||
794 | message = _("Error in SolidFire API response: data=%(data)s") | ||
795 | |||
796 | |||
797 | class SolidFireAccountNotFound(SolidFireDriverException): | ||
798 | message = _("Unable to locate account %(account_name)s on " | ||
799 | "Solidfire device") | ||
800 | |||
801 | |||
802 | class SolidFireRetryableException(VolumeBackendAPIException): | ||
803 | message = _("Retryable SolidFire Exception encountered") | ||
804 | |||
805 | |||
806 | # HP 3Par | ||
807 | class Invalid3PARDomain(VolumeDriverException): | ||
808 | message = _("Invalid 3PAR Domain: %(err)s") | ||
809 | |||
810 | |||
811 | # RemoteFS drivers | ||
812 | class RemoteFSException(VolumeDriverException): | ||
813 | message = _("Unknown RemoteFS exception") | ||
814 | |||
815 | |||
816 | class RemoteFSConcurrentRequest(RemoteFSException): | ||
817 | message = _("A concurrent, possibly contradictory, request " | ||
818 | "has been made.") | ||
819 | |||
820 | |||
821 | class RemoteFSNoSharesMounted(RemoteFSException): | ||
822 | message = _("No mounted shares found") | ||
823 | |||
824 | |||
825 | class RemoteFSNoSuitableShareFound(RemoteFSException): | ||
826 | message = _("There is no share which can host %(volume_size)sG") | ||
827 | |||
828 | |||
829 | # NFS driver | ||
830 | class NfsException(RemoteFSException): | ||
831 | message = _("Unknown NFS exception") | ||
832 | |||
833 | |||
834 | class NfsNoSharesMounted(RemoteFSNoSharesMounted): | ||
835 | message = _("No mounted NFS shares found") | ||
836 | |||
837 | |||
838 | class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): | ||
839 | message = _("There is no share which can host %(volume_size)sG") | ||
840 | |||
841 | |||
842 | # Smbfs driver | ||
843 | class SmbfsException(RemoteFSException): | ||
844 | message = _("Unknown SMBFS exception.") | ||
845 | |||
846 | |||
847 | class SmbfsNoSharesMounted(RemoteFSNoSharesMounted): | ||
848 | message = _("No mounted SMBFS shares found.") | ||
849 | |||
850 | |||
851 | class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): | ||
852 | message = _("There is no share which can host %(volume_size)sG.") | ||
853 | |||
854 | |||
855 | # Gluster driver | ||
856 | class GlusterfsException(RemoteFSException): | ||
857 | message = _("Unknown Gluster exception") | ||
858 | |||
859 | |||
860 | class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted): | ||
861 | message = _("No mounted Gluster shares found") | ||
862 | |||
863 | |||
864 | class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): | ||
865 | message = _("There is no share which can host %(volume_size)sG") | ||
866 | |||
867 | |||
868 | # Virtuozzo Storage Driver | ||
869 | |||
870 | class VzStorageException(RemoteFSException): | ||
871 | message = _("Unknown Virtuozzo Storage exception") | ||
872 | |||
873 | |||
874 | class VzStorageNoSharesMounted(RemoteFSNoSharesMounted): | ||
875 | message = _("No mounted Virtuozzo Storage shares found") | ||
876 | |||
877 | |||
878 | class VzStorageNoSuitableShareFound(RemoteFSNoSuitableShareFound): | ||
879 | message = _("There is no share which can host %(volume_size)sG") | ||
880 | |||
881 | |||
882 | # Fibre Channel Zone Manager | ||
883 | class ZoneManagerException(CinderException): | ||
884 | message = _("Fibre Channel connection control failure: %(reason)s") | ||
885 | |||
886 | |||
887 | class FCZoneDriverException(CinderException): | ||
888 | message = _("Fibre Channel Zone operation failed: %(reason)s") | ||
889 | |||
890 | |||
891 | class FCSanLookupServiceException(CinderException): | ||
892 | message = _("Fibre Channel SAN Lookup failure: %(reason)s") | ||
893 | |||
894 | |||
895 | class BrocadeZoningCliException(CinderException): | ||
896 | message = _("Brocade Fibre Channel Zoning CLI error: %(reason)s") | ||
897 | |||
898 | |||
899 | class BrocadeZoningHttpException(CinderException): | ||
900 | message = _("Brocade Fibre Channel Zoning HTTP error: %(reason)s") | ||
901 | |||
902 | |||
903 | class CiscoZoningCliException(CinderException): | ||
904 | message = _("Cisco Fibre Channel Zoning CLI error: %(reason)s") | ||
905 | |||
906 | |||
907 | class NetAppDriverException(VolumeDriverException): | ||
908 | message = _("NetApp Cinder Driver exception.") | ||
909 | |||
910 | |||
911 | class EMCVnxCLICmdError(VolumeBackendAPIException): | ||
912 | message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s " | ||
913 | "(Return Code: %(rc)s) (Output: %(out)s).") | ||
914 | |||
915 | |||
916 | class EMCSPUnavailableException(EMCVnxCLICmdError): | ||
917 | message = _("EMC VNX Cinder Driver SPUnavailableException: %(cmd)s " | ||
918 | "(Return Code: %(rc)s) (Output: %(out)s).") | ||
919 | |||
920 | |||
921 | # ConsistencyGroup | ||
922 | class ConsistencyGroupNotFound(NotFound): | ||
923 | message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.") | ||
924 | |||
925 | |||
926 | class InvalidConsistencyGroup(Invalid): | ||
927 | message = _("Invalid ConsistencyGroup: %(reason)s") | ||
928 | |||
929 | |||
930 | # CgSnapshot | ||
931 | class CgSnapshotNotFound(NotFound): | ||
932 | message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") | ||
933 | |||
934 | |||
935 | class InvalidCgSnapshot(Invalid): | ||
936 | message = _("Invalid CgSnapshot: %(reason)s") | ||
937 | |||
938 | |||
939 | # Hitachi Block Storage Driver | ||
940 | class HBSDError(CinderException): | ||
941 | message = _("HBSD error occurs.") | ||
942 | |||
943 | |||
944 | class HBSDCmdError(HBSDError): | ||
945 | |||
946 | def __init__(self, message=None, ret=None, err=None): | ||
947 | self.ret = ret | ||
948 | self.stderr = err | ||
949 | |||
950 | super(HBSDCmdError, self).__init__(message=message) | ||
951 | |||
952 | |||
953 | class HBSDBusy(HBSDError): | ||
954 | message = "Device or resource is busy." | ||
955 | |||
956 | |||
957 | class HBSDNotFound(NotFound): | ||
958 | message = _("Storage resource could not be found.") | ||
959 | |||
960 | |||
961 | class HBSDVolumeIsBusy(VolumeIsBusy): | ||
962 | message = _("Volume %(volume_name)s is busy.") | ||
963 | |||
964 | |||
965 | # Datera driver | ||
966 | class DateraAPIException(VolumeBackendAPIException): | ||
967 | message = _("Bad response from Datera API") | ||
968 | |||
969 | |||
970 | # Target drivers | ||
971 | class ISCSITargetCreateFailed(CinderException): | ||
972 | message = _("Failed to create iscsi target for volume %(volume_id)s.") | ||
973 | |||
974 | |||
975 | class ISCSITargetRemoveFailed(CinderException): | ||
976 | message = _("Failed to remove iscsi target for volume %(volume_id)s.") | ||
977 | |||
978 | |||
979 | class ISCSITargetAttachFailed(CinderException): | ||
980 | message = _("Failed to attach iSCSI target for volume %(volume_id)s.") | ||
981 | |||
982 | |||
983 | class ISCSITargetDetachFailed(CinderException): | ||
984 | message = _("Failed to detach iSCSI target for volume %(volume_id)s.") | ||
985 | |||
986 | |||
987 | class ISCSITargetHelperCommandFailed(CinderException): | ||
988 | message = _("%(error_message)s") | ||
989 | |||
990 | |||
991 | # X-IO driver exception. | ||
992 | class XIODriverException(VolumeDriverException): | ||
993 | message = _("X-IO Volume Driver exception!") | ||
994 | |||
995 | |||
996 | # Violin Memory drivers | ||
997 | class ViolinInvalidBackendConfig(CinderException): | ||
998 | message = _("Volume backend config is invalid: %(reason)s") | ||
999 | |||
1000 | |||
1001 | class ViolinRequestRetryTimeout(CinderException): | ||
1002 | message = _("Backend service retry timeout hit: %(timeout)s sec") | ||
1003 | |||
1004 | |||
1005 | class ViolinBackendErr(CinderException): | ||
1006 | message = _("Backend reports: %(message)s") | ||
1007 | |||
1008 | |||
1009 | class ViolinBackendErrExists(CinderException): | ||
1010 | message = _("Backend reports: item already exists") | ||
1011 | |||
1012 | |||
1013 | class ViolinBackendErrNotFound(CinderException): | ||
1014 | message = _("Backend reports: item not found") | ||
1015 | |||
1016 | |||
1017 | # ZFSSA NFS driver exception. | ||
1018 | class WebDAVClientError(CinderException): | ||
1019 | message = _("The WebDAV request failed. Reason: %(msg)s, " | ||
1020 | "Return code/reason: %(code)s, Source Volume: %(src)s, " | ||
1021 | "Destination Volume: %(dst)s, Method: %(method)s.") | ||
1022 | |||
1023 | |||
1024 | # XtremIO Drivers | ||
1025 | class XtremIOAlreadyMappedError(CinderException): | ||
1026 | message = _("Volume to Initiator Group mapping already exists") | ||
1027 | |||
1028 | |||
1029 | class XtremIOArrayBusy(CinderException): | ||
1030 | message = _("System is busy, retry operation.") | ||
1031 | |||
1032 | |||
1033 | class XtremIOSnapshotsLimitExceeded(CinderException): | ||
1034 | message = _("Exceeded the limit of snapshots per volume") | ||
1035 | |||
1036 | |||
1037 | # Infortrend EonStor DS Driver | ||
1038 | class InfortrendCliException(CinderException): | ||
1039 | message = _("Infortrend CLI exception: %(err)s Param: %(param)s " | ||
1040 | "(Return Code: %(rc)s) (Output: %(out)s)") | ||
1041 | |||
1042 | |||
1043 | # DOTHILL drivers | ||
1044 | class DotHillInvalidBackend(CinderException): | ||
1045 | message = _("Backend doesn't exist (%(backend)s)") | ||
1046 | |||
1047 | |||
1048 | class DotHillConnectionError(CinderException): | ||
1049 | message = _("%(message)s") | ||
1050 | |||
1051 | |||
1052 | class DotHillAuthenticationError(CinderException): | ||
1053 | message = _("%(message)s") | ||
1054 | |||
1055 | |||
1056 | class DotHillNotEnoughSpace(CinderException): | ||
1057 | message = _("Not enough space on backend (%(backend)s)") | ||
1058 | |||
1059 | |||
1060 | class DotHillRequestError(CinderException): | ||
1061 | message = _("%(message)s") | ||
1062 | |||
1063 | |||
1064 | class DotHillNotTargetPortal(CinderException): | ||
1065 | message = _("No active iSCSI portals with supplied iSCSI IPs") | ||
1066 | |||
1067 | |||
1068 | # Sheepdog | ||
1069 | class SheepdogError(VolumeBackendAPIException): | ||
1070 | message = _("An error has occured in SheepdogDriver. (Reason: %(reason)s)") | ||
1071 | |||
1072 | |||
1073 | class SheepdogCmdError(SheepdogError): | ||
1074 | message = _("(Command: %(cmd)s) " | ||
1075 | "(Return Code: %(exit_code)s) " | ||
1076 | "(Stdout: %(stdout)s) " | ||
1077 | "(Stderr: %(stderr)s)") | ||
1078 | |||
1079 | |||
1080 | class MetadataAbsent(CinderException): | ||
1081 | message = _("There is no metadata in DB object.") | ||
1082 | |||
1083 | |||
1084 | class NotSupportedOperation(Invalid): | ||
1085 | message = _("Operation not supported: %(operation)s.") | ||
1086 | code = 405 | ||
1087 | |||
1088 | |||
1089 | # Hitachi HNAS drivers | ||
1090 | class HNASConnError(CinderException): | ||
1091 | message = _("%(message)s") | ||
1092 | |||
1093 | |||
1094 | # Coho drivers | ||
1095 | class CohoException(VolumeDriverException): | ||
1096 | message = _("Coho Data Cinder driver failure: %(message)s") | ||
1097 | |||
1098 | |||
1099 | # Tegile Storage drivers | ||
1100 | class TegileAPIException(VolumeBackendAPIException): | ||
1101 | message = _("Unexpected response from Tegile IntelliFlash API") | ||
1102 | |||
1103 | |||
1104 | # NexentaStor driver exception | ||
1105 | class NexentaException(VolumeDriverException): | ||
1106 | message = _("%(message)s") | ||
1107 | |||
1108 | |||
1109 | # Google Cloud Storage(GCS) backup driver | ||
1110 | class GCSConnectionFailure(BackupDriverException): | ||
1111 | message = _("Google Cloud Storage connection failure: %(reason)s") | ||
1112 | |||
1113 | |||
1114 | class GCSApiFailure(BackupDriverException): | ||
1115 | message = _("Google Cloud Storage api failure: %(reason)s") | ||
1116 | |||
1117 | |||
1118 | class GCSOAuth2Failure(BackupDriverException): | ||
1119 | message = _("Google Cloud Storage oauth2 failure: %(reason)s") | ||
1120 | |||
1121 | |||
1122 | # Kaminario K2 | ||
1123 | class KaminarioCinderDriverException(VolumeDriverException): | ||
1124 | message = _("KaminarioCinderDriver failure: %(reason)s") | ||
1125 | |||
1126 | |||
1127 | class KaminarioRetryableException(VolumeDriverException): | ||
1128 | message = _("Kaminario retryable exception: %(reason)s") | ||
diff --git a/deployment_scripts/puppet/modules/kaminario/files/exception.sh b/deployment_scripts/puppet/modules/kaminario/files/exception.sh
new file mode 100644
index 0000000..a5993e6
--- /dev/null
+++ b/deployment_scripts/puppet/modules/kaminario/files/exception.sh
@@ -0,0 +1,2 @@
1 | grep -q -F 'Kaminario' /usr/lib/python2.7/dist-packages/cinder/exception.py || sudo sed -i '$a \ \ \n\nclass KaminarioCinderDriverException(VolumeDriverException):\n\ \message = _("KaminarioCinderDriver failure: %(reason)s")\n\n\nclass KaminarioRetryableException(VolumeDriverException):\n\ \message = _("Kaminario retryable exception: %(reason)s")' /usr/lib/python2.7/dist-packages/cinder/exception.py | ||
2 | |||
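The new exception.sh is a single idempotent sed command: unless 'Kaminario' already appears in /usr/lib/python2.7/dist-packages/cinder/exception.py, it appends the two driver exception classes that the deleted exception.py carried. Reformatted for readability, the appended block is equivalent to the following (it relies on VolumeDriverException and _ already being defined earlier in cinder/exception.py):

class KaminarioCinderDriverException(VolumeDriverException):
    message = _("KaminarioCinderDriver failure: %(reason)s")


class KaminarioRetryableException(VolumeDriverException):
    message = _("Kaminario retryable exception: %(reason)s")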
diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py
deleted file mode 100644
index 9e79b80..0000000
--- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_common.py
+++ /dev/null
@@ -1,1155 +0,0 @@
1 | # Copyright (c) 2016 by Kaminario Technologies, Ltd. | ||
2 | # All Rights Reserved. | ||
3 | # | ||
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may | ||
5 | # not use this file except in compliance with the License. You may obtain | ||
6 | # a copy of the License at | ||
7 | # | ||
8 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | # | ||
10 | # Unless required by applicable law or agreed to in writing, software | ||
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | ||
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | ||
13 | # License for the specific language governing permissions and limitations | ||
14 | # under the License. | ||
15 | """Volume driver for Kaminario K2 all-flash arrays.""" | ||
16 | |||
17 | import math | ||
18 | import re | ||
19 | import threading | ||
20 | import time | ||
21 | |||
22 | import eventlet | ||
23 | from oslo_config import cfg | ||
24 | from oslo_log import log as logging | ||
25 | from oslo_utils import importutils | ||
26 | from oslo_utils import units | ||
27 | from oslo_utils import versionutils | ||
28 | import requests | ||
29 | import six | ||
30 | |||
31 | import cinder | ||
32 | from cinder import exception | ||
33 | from cinder.i18n import _, _LE, _LW, _LI | ||
34 | from cinder import objects | ||
35 | from cinder.objects import fields | ||
36 | from cinder import utils | ||
37 | from cinder.volume.drivers.san import san | ||
38 | from cinder.volume import utils as vol_utils | ||
39 | |||
40 | krest = importutils.try_import("krest") | ||
41 | |||
42 | K2_MIN_VERSION = '2.2.0' | ||
43 | K2_LOCK_NAME = 'Kaminario' | ||
44 | MAX_K2_RETRY = 5 | ||
45 | K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER | ||
46 | LOG = logging.getLogger(__name__) | ||
47 | |||
48 | kaminario1_opts = [ | ||
49 | cfg.StrOpt('kaminario_nodedup_substring', | ||
50 | default='K2-nodedup', | ||
51 | help="If volume-type name contains this substring " | ||
52 | "nodedup volume will be created, otherwise " | ||
53 | "dedup volume wil be created.", | ||
54 | deprecated_for_removal=True, | ||
55 | deprecated_reason="This option is deprecated in favour of " | ||
56 | "'kaminario:thin_prov_type' in extra-specs " | ||
57 | "and will be removed in the next release.")] | ||
58 | kaminario2_opts = [ | ||
59 | cfg.BoolOpt('auto_calc_max_oversubscription_ratio', | ||
60 | default=False, | ||
61 | help="K2 driver will calculate max_oversubscription_ratio " | ||
62 | "on setting this option as True.")] | ||
63 | |||
64 | CONF = cfg.CONF | ||
65 | CONF.register_opts(kaminario1_opts) | ||
66 | |||
67 | K2HTTPError = requests.exceptions.HTTPError | ||
68 | K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC", | ||
69 | "MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT") | ||
70 | |||
71 | if krest: | ||
72 | class KrestWrap(krest.EndPoint): | ||
73 | def __init__(self, *args, **kwargs): | ||
74 | self.krestlock = threading.Lock() | ||
75 | super(KrestWrap, self).__init__(*args, **kwargs) | ||
76 | |||
77 | def _should_retry(self, err_code, err_msg): | ||
78 | if err_code == 400: | ||
79 | for er in K2_RETRY_ERRORS: | ||
80 | if er in err_msg: | ||
81 | LOG.debug("Retry ERROR: %d with status %s", | ||
82 | err_code, err_msg) | ||
83 | return True | ||
84 | return False | ||
85 | |||
86 | @utils.retry(exception.KaminarioRetryableException, | ||
87 | retries=MAX_K2_RETRY) | ||
88 | def _request(self, method, *args, **kwargs): | ||
89 | try: | ||
90 | LOG.debug("running through the _request wrapper...") | ||
91 | self.krestlock.acquire() | ||
92 | return super(KrestWrap, self)._request(method, | ||
93 | *args, **kwargs) | ||
94 | except K2HTTPError as err: | ||
95 | err_code = err.response.status_code | ||
96 | err_msg = err.response.text | ||
97 | if self._should_retry(err_code, err_msg): | ||
98 | raise exception.KaminarioRetryableException( | ||
99 | reason=six.text_type(err_msg)) | ||
100 | raise | ||
101 | finally: | ||
102 | self.krestlock.release() | ||
103 | |||
104 | |||
105 | def kaminario_logger(func): | ||
106 | """Return a function wrapper. | ||
107 | |||
108 | The wrapper adds log for entry and exit to the function. | ||
109 | """ | ||
110 | def func_wrapper(*args, **kwargs): | ||
111 | LOG.debug('Entering %(function)s of %(class)s with arguments: ' | ||
112 | ' %(args)s, %(kwargs)s', | ||
113 | {'class': args[0].__class__.__name__, | ||
114 | 'function': func.__name__, | ||
115 | 'args': args[1:], | ||
116 | 'kwargs': kwargs}) | ||
117 | ret = func(*args, **kwargs) | ||
118 | LOG.debug('Exiting %(function)s of %(class)s ' | ||
119 | 'having return value: %(ret)s', | ||
120 | {'class': args[0].__class__.__name__, | ||
121 | 'function': func.__name__, | ||
122 | 'ret': ret}) | ||
123 | return ret | ||
124 | return func_wrapper | ||
125 | |||
126 | |||
127 | class Replication(object): | ||
128 | def __init__(self, config, *args, **kwargs): | ||
129 | self.backend_id = config.get('backend_id') | ||
130 | self.login = config.get('login') | ||
131 | self.password = config.get('password') | ||
132 | self.rpo = config.get('rpo') | ||
133 | |||
134 | |||
135 | class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): | ||
136 | VENDOR = "Kaminario" | ||
137 | stats = {} | ||
138 | |||
139 | def __init__(self, *args, **kwargs): | ||
140 | super(KaminarioCinderDriver, self).__init__(*args, **kwargs) | ||
141 | self.configuration.append_config_values(san.san_opts) | ||
142 | self.configuration.append_config_values(kaminario2_opts) | ||
143 | self.replica = None | ||
144 | self._protocol = None | ||
145 | |||
146 | def check_for_setup_error(self): | ||
147 | if krest is None: | ||
148 | msg = _("Unable to import 'krest' python module.") | ||
149 | LOG.error(msg) | ||
150 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
151 | else: | ||
152 | conf = self.configuration | ||
153 | self.client = KrestWrap(conf.san_ip, | ||
154 | conf.san_login, | ||
155 | conf.san_password, | ||
156 | ssl_validate=False) | ||
157 | if self.replica: | ||
158 | self.target = KrestWrap(self.replica.backend_id, | ||
159 | self.replica.login, | ||
160 | self.replica.password, | ||
161 | ssl_validate=False) | ||
162 | v_rs = self.client.search("system/state") | ||
163 | if hasattr(v_rs, 'hits') and v_rs.total != 0: | ||
164 | ver = v_rs.hits[0].rest_api_version | ||
165 | ver_exist = versionutils.convert_version_to_int(ver) | ||
166 | ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION) | ||
167 | if ver_exist < ver_min: | ||
168 | msg = _("K2 rest api version should be " | ||
169 | ">= %s.") % K2_MIN_VERSION | ||
170 | LOG.error(msg) | ||
171 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
172 | |||
173 | else: | ||
174 | msg = _("K2 rest api version search failed.") | ||
175 | LOG.error(msg) | ||
176 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
177 | |||
178 | @kaminario_logger | ||
179 | def _check_ops(self): | ||
180 | """Ensure that the options we care about are set.""" | ||
181 | required_ops = ['san_ip', 'san_login', 'san_password'] | ||
182 | for attr in required_ops: | ||
183 | if not getattr(self.configuration, attr, None): | ||
184 | raise exception.InvalidInput(reason=_('%s is not set.') % attr) | ||
185 | |||
186 | replica = self.configuration.safe_get('replication_device') | ||
187 | if replica and isinstance(replica, list): | ||
188 | replica_ops = ['backend_id', 'login', 'password', 'rpo'] | ||
189 | for attr in replica_ops: | ||
190 | if attr not in replica[0]: | ||
191 | msg = _('replication_device %s is not set.') % attr | ||
192 | raise exception.InvalidInput(reason=msg) | ||
193 | self.replica = Replication(replica[0]) | ||
194 | |||
195 | @kaminario_logger | ||
196 | def do_setup(self, context): | ||
197 | super(KaminarioCinderDriver, self).do_setup(context) | ||
198 | self._check_ops() | ||
199 | |||
200 | @kaminario_logger | ||
201 | def create_volume(self, volume): | ||
202 | """Volume creation in K2 needs a volume group. | ||
203 | |||
204 | - create a volume group | ||
205 | - create a volume in the volume group | ||
206 | """ | ||
207 | vg_name = self.get_volume_group_name(volume.id) | ||
208 | vol_name = self.get_volume_name(volume.id) | ||
209 | prov_type = self._get_is_dedup(volume.get('volume_type')) | ||
210 | try: | ||
211 | LOG.debug("Creating volume group with name: %(name)s, " | ||
212 | "quota: unlimited and dedup_support: %(dedup)s", | ||
213 | {'name': vg_name, 'dedup': prov_type}) | ||
214 | |||
215 | vg = self.client.new("volume_groups", name=vg_name, quota=0, | ||
216 | is_dedup=prov_type).save() | ||
217 | LOG.debug("Creating volume with name: %(name)s, size: %(size)s " | ||
218 | "GB, volume_group: %(vg)s", | ||
219 | {'name': vol_name, 'size': volume.size, 'vg': vg_name}) | ||
220 | vol = self.client.new("volumes", name=vol_name, | ||
221 | size=volume.size * units.Mi, | ||
222 | volume_group=vg).save() | ||
223 | except Exception as ex: | ||
224 | vg_rs = self.client.search("volume_groups", name=vg_name) | ||
225 | if vg_rs.total != 0: | ||
226 | LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name) | ||
227 | vg_rs.hits[0].delete() | ||
228 | LOG.exception(_LE("Creation of volume %s failed."), vol_name) | ||
229 | raise exception.KaminarioCinderDriverException( | ||
230 | reason=six.text_type(ex.message)) | ||
231 | |||
232 | if self._get_is_replica(volume.volume_type) and self.replica: | ||
233 | self._create_volume_replica(volume, vg, vol, self.replica.rpo) | ||
234 | |||
235 | @kaminario_logger | ||
236 | def _create_volume_replica(self, volume, vg, vol, rpo): | ||
237 | """Volume replica creation in K2 needs session and remote volume. | ||
238 | |||
239 | - create a session | ||
240 | - create a volume in the volume group | ||
241 | |||
242 | """ | ||
243 | session_name = self.get_session_name(volume.id) | ||
244 | rsession_name = self.get_rep_name(session_name) | ||
245 | |||
246 | rvg_name = self.get_rep_name(vg.name) | ||
247 | rvol_name = self.get_rep_name(vol.name) | ||
248 | |||
249 | k2peer_rs = self.client.search("replication/peer_k2arrays", | ||
250 | mgmt_host=self.replica.backend_id) | ||
251 | if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: | ||
252 | k2peer = k2peer_rs.hits[0] | ||
253 | else: | ||
254 | msg = _("Unable to find K2peer in source K2:") | ||
255 | LOG.error(msg) | ||
256 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
257 | try: | ||
258 | LOG.debug("Creating source session with name: %(sname)s and " | ||
259 | " target session name: %(tname)s", | ||
260 | {'sname': session_name, 'tname': rsession_name}) | ||
261 | src_ssn = self.client.new("replication/sessions") | ||
262 | src_ssn.replication_peer_k2array = k2peer | ||
263 | src_ssn.auto_configure_peer_volumes = "False" | ||
264 | src_ssn.local_volume_group = vg | ||
265 | src_ssn.replication_peer_volume_group_name = rvg_name | ||
266 | src_ssn.remote_replication_session_name = rsession_name | ||
267 | src_ssn.name = session_name | ||
268 | src_ssn.rpo = rpo | ||
269 | src_ssn.save() | ||
270 | LOG.debug("Creating remote volume with name: %s", | ||
271 | rvol_name) | ||
272 | self.client.new("replication/peer_volumes", | ||
273 | local_volume=vol, | ||
274 | name=rvol_name, | ||
275 | replication_session=src_ssn).save() | ||
276 | src_ssn.state = "in_sync" | ||
277 | src_ssn.save() | ||
278 | except Exception as ex: | ||
279 | LOG.exception(_LE("Replication for the volume %s has " | ||
280 | "failed."), vol.name) | ||
281 | self._delete_by_ref(self.client, "replication/sessions", | ||
282 | session_name, 'session') | ||
283 | self._delete_by_ref(self.target, "replication/sessions", | ||
284 | rsession_name, 'remote session') | ||
285 | self._delete_by_ref(self.target, "volumes", | ||
286 | rvol_name, 'remote volume') | ||
287 | self._delete_by_ref(self.client, "volumes", vol.name, "volume") | ||
288 | self._delete_by_ref(self.target, "volume_groups", | ||
289 | rvg_name, "remote vg") | ||
290 | self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") | ||
291 | raise exception.KaminarioCinderDriverException( | ||
292 | reason=six.text_type(ex.message)) | ||
293 | |||
294 | @kaminario_logger | ||
295 | def _create_failover_volume_replica(self, volume, vg_name, vol_name): | ||
296 | """Volume replica creation in K2 needs session and remote volume. | ||
297 | |||
298 | - create a session | ||
299 | - create a volume in the volume group | ||
300 | |||
301 | """ | ||
302 | session_name = self.get_session_name(volume.id) | ||
303 | rsession_name = self.get_rep_name(session_name) | ||
304 | |||
305 | rvg_name = self.get_rep_name(vg_name) | ||
306 | rvol_name = self.get_rep_name(vol_name) | ||
307 | rvg = self.target.search("volume_groups", name=rvg_name).hits[0] | ||
308 | rvol = self.target.search("volumes", name=rvol_name).hits[0] | ||
309 | k2peer_rs = self.target.search("replication/peer_k2arrays", | ||
310 | mgmt_host=self.configuration.san_ip) | ||
311 | if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: | ||
312 | k2peer = k2peer_rs.hits[0] | ||
313 | else: | ||
314 | msg = _("Unable to find K2peer in source K2:") | ||
315 | LOG.error(msg) | ||
316 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
317 | try: | ||
318 | LOG.debug("Creating source session with name: %(sname)s and " | ||
319 | " target session name: %(tname)s", | ||
320 | {'sname': rsession_name, 'tname': session_name}) | ||
321 | tgt_ssn = self.target.new("replication/sessions") | ||
322 | tgt_ssn.replication_peer_k2array = k2peer | ||
323 | tgt_ssn.auto_configure_peer_volumes = "False" | ||
324 | tgt_ssn.local_volume_group = rvg | ||
325 | tgt_ssn.replication_peer_volume_group_name = vg_name | ||
326 | tgt_ssn.remote_replication_session_name = session_name | ||
327 | tgt_ssn.name = rsession_name | ||
328 | tgt_ssn.rpo = self.replica.rpo | ||
329 | tgt_ssn.save() | ||
330 | LOG.debug("Creating remote volume with name: %s", | ||
331 | rvol_name) | ||
332 | self.target.new("replication/peer_volumes", | ||
333 | local_volume=rvol, | ||
334 | name=vol_name, | ||
335 | replication_session=tgt_ssn).save() | ||
336 | tgt_ssn.state = "in_sync" | ||
337 | tgt_ssn.save() | ||
338 | except Exception as ex: | ||
339 | LOG.exception(_LE("Replication for the volume %s has " | ||
340 | "failed."), rvol_name) | ||
341 | self._delete_by_ref(self.target, "replication/sessions", | ||
342 | rsession_name, 'session') | ||
343 | self._delete_by_ref(self.client, "replication/sessions", | ||
344 | session_name, 'remote session') | ||
345 | self._delete_by_ref(self.client, "volumes", vol_name, "volume") | ||
346 | self._delete_by_ref(self.client, "volume_groups", vg_name, "vg") | ||
347 | raise exception.KaminarioCinderDriverException( | ||
348 | reason=six.text_type(ex.message)) | ||
349 | |||
350 | def _delete_by_ref(self, device, url, name, msg): | ||
351 | rs = device.search(url, name=name) | ||
352 | for result in rs.hits: | ||
353 | result.delete() | ||
354 | LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) | ||
355 | |||
356 | @kaminario_logger | ||
357 | def _failover_volume(self, volume): | ||
358 | """Promoting a secondary volume to primary volume.""" | ||
359 | session_name = self.get_session_name(volume.id) | ||
360 | rsession_name = self.get_rep_name(session_name) | ||
361 | tgt_ssn = self.target.search("replication/sessions", | ||
362 | name=rsession_name).hits[0] | ||
363 | if tgt_ssn.state == 'in_sync': | ||
364 | tgt_ssn.state = 'failed_over' | ||
365 | tgt_ssn.save() | ||
366 | LOG.debug("The target session: %s state is " | ||
367 | "changed to failed_over ", rsession_name) | ||
368 | |||
369 | @kaminario_logger | ||
370 | def failover_host(self, context, volumes, secondary_id=None): | ||
371 | """Failover to replication target.""" | ||
372 | volume_updates = [] | ||
373 | back_end_ip = None | ||
374 | svc_host = vol_utils.extract_host(self.host, 'backend') | ||
375 | service = objects.Service.get_by_args(context, svc_host, | ||
376 | 'cinder-volume') | ||
377 | |||
378 | if secondary_id and secondary_id != self.replica.backend_id: | ||
379 | LOG.error(_LE("Kaminario driver received failover_host " | ||
380 | "request, but the backend is a non-replicated device")) | ||
381 | raise exception.UnableToFailOver(reason=_("Failover requested " | ||
382 | "on a non-replicated " | ||
383 | "backend.")) | ||
384 | |||
385 | if (service.active_backend_id and | ||
386 | service.active_backend_id != self.configuration.san_ip): | ||
387 | self.snap_updates = [] | ||
388 | rep_volumes = [] | ||
389 | # update status for non-replicated primary volumes | ||
390 | for v in volumes: | ||
391 | vol_name = self.get_volume_name(v['id']) | ||
392 | vol = self.client.search("volumes", name=vol_name) | ||
393 | if v.replication_status != K2_REP_FAILED_OVER and vol.total: | ||
394 | status = 'available' | ||
395 | if v.volume_attachment: | ||
396 | map_rs = self.client.search("mappings", | ||
397 | volume=vol.hits[0]) | ||
398 | status = 'in-use' | ||
399 | if map_rs.total: | ||
400 | map_rs.hits[0].delete() | ||
401 | volume_updates.append({'volume_id': v['id'], | ||
402 | 'updates': | ||
403 | {'status': status}}) | ||
404 | else: | ||
405 | rep_volumes.append(v) | ||
406 | |||
407 | # In-sync from secondary array to primary array | ||
408 | for v in rep_volumes: | ||
409 | vol_name = self.get_volume_name(v['id']) | ||
410 | vol = self.client.search("volumes", name=vol_name) | ||
411 | rvol_name = self.get_rep_name(vol_name) | ||
412 | rvol = self.target.search("volumes", name=rvol_name) | ||
413 | session_name = self.get_session_name(v['id']) | ||
414 | rsession_name = self.get_rep_name(session_name) | ||
415 | ssn = self.target.search("replication/sessions", | ||
416 | name=rsession_name) | ||
417 | if ssn.total: | ||
418 | tgt_ssn = ssn.hits[0] | ||
419 | ssn = self.client.search("replication/sessions", | ||
420 | name=session_name) | ||
421 | if ssn.total: | ||
422 | src_ssn = ssn.hits[0] | ||
423 | |||
424 | if (tgt_ssn.state == 'failed_over' and | ||
425 | tgt_ssn.current_role == 'target' and vol.total and src_ssn): | ||
426 | map_rs = self.client.search("mappings", volume=vol.hits[0]) | ||
427 | if map_rs.total: | ||
428 | map_rs.hits[0].delete() | ||
429 | tgt_ssn.state = 'in_sync' | ||
430 | tgt_ssn.save() | ||
431 | self._check_for_status(src_ssn, 'in_sync') | ||
432 | if (rvol.total and src_ssn.state == 'in_sync' and | ||
433 | src_ssn.current_role == 'target'): | ||
434 | gen_no = self._create_volume_replica_user_snap(self.target, | ||
435 | tgt_ssn) | ||
436 | self.snap_updates.append({'tgt_ssn': tgt_ssn, | ||
437 | 'gno': gen_no, | ||
438 | 'stime': time.time()}) | ||
439 | LOG.debug("The target session: %s state is " | ||
440 | "changed to in sync", rsession_name) | ||
441 | |||
442 | self._is_user_snap_sync_finished() | ||
443 | |||
444 | # Delete secondary volume mappings and create snapshot | ||
445 | for v in rep_volumes: | ||
446 | vol_name = self.get_volume_name(v['id']) | ||
447 | vol = self.client.search("volumes", name=vol_name) | ||
448 | rvol_name = self.get_rep_name(vol_name) | ||
449 | rvol = self.target.search("volumes", name=rvol_name) | ||
450 | session_name = self.get_session_name(v['id']) | ||
451 | rsession_name = self.get_rep_name(session_name) | ||
452 | ssn = self.target.search("replication/sessions", | ||
453 | name=rsession_name) | ||
454 | if ssn.total: | ||
455 | tgt_ssn = ssn.hits[0] | ||
456 | ssn = self.client.search("replication/sessions", | ||
457 | name=session_name) | ||
458 | if ssn.total: | ||
459 | src_ssn = ssn.hits[0] | ||
460 | if (rvol.total and src_ssn.state == 'in_sync' and | ||
461 | src_ssn.current_role == 'target'): | ||
462 | map_rs = self.target.search("mappings", | ||
463 | volume=rvol.hits[0]) | ||
464 | if map_rs.total: | ||
465 | map_rs.hits[0].delete() | ||
466 | gen_no = self._create_volume_replica_user_snap(self.target, | ||
467 | tgt_ssn) | ||
468 | self.snap_updates.append({'tgt_ssn': tgt_ssn, | ||
469 | 'gno': gen_no, | ||
470 | 'stime': time.time()}) | ||
471 | self._is_user_snap_sync_finished() | ||
472 | # changing source sessions to failed-over | ||
473 | for v in rep_volumes: | ||
474 | vol_name = self.get_volume_name(v['id']) | ||
475 | vol = self.client.search("volumes", name=vol_name) | ||
476 | rvol_name = self.get_rep_name(vol_name) | ||
477 | rvol = self.target.search("volumes", name=rvol_name) | ||
478 | session_name = self.get_session_name(v['id']) | ||
479 | rsession_name = self.get_rep_name(session_name) | ||
480 | ssn = self.target.search("replication/sessions", | ||
481 | name=rsession_name) | ||
482 | if ssn.total: | ||
483 | tgt_ssn = ssn.hits[0] | ||
484 | ssn = self.client.search("replication/sessions", | ||
485 | name=session_name) | ||
486 | if ssn.total: | ||
487 | src_ssn = ssn.hits[0] | ||
488 | if (rvol.total and src_ssn.state == 'in_sync' and | ||
489 | src_ssn.current_role == 'target'): | ||
490 | src_ssn.state = 'failed_over' | ||
491 | src_ssn.save() | ||
492 | self._check_for_status(tgt_ssn, 'suspended') | ||
493 | LOG.debug("The source session: %s state is " | ||
494 | "changed to failed over", session_name) | ||
495 | |||
496 | src_ssn.state = 'in_sync' | ||
497 | src_ssn.save() | ||
498 | LOG.debug("The source session: %s state is " | ||
499 | "changed to in sync", session_name) | ||
500 | rep_status = fields.ReplicationStatus.DISABLED | ||
501 | volume_updates.append({'volume_id': v['id'], | ||
502 | 'updates': | ||
503 | {'replication_status': rep_status}}) | ||
504 | |||
505 | back_end_ip = self.configuration.san_ip | ||
506 | else: | ||
507 | """Failover to replication target.""" | ||
508 | for v in volumes: | ||
509 | vol_name = self.get_volume_name(v['id']) | ||
510 | rv = self.get_rep_name(vol_name) | ||
511 | if self.target.search("volumes", name=rv).total: | ||
512 | self._failover_volume(v) | ||
513 | volume_updates.append( | ||
514 | {'volume_id': v['id'], | ||
515 | 'updates': | ||
516 | {'replication_status': K2_REP_FAILED_OVER}}) | ||
517 | else: | ||
518 | volume_updates.append({'volume_id': v['id'], | ||
519 | 'updates': {'status': 'error', }}) | ||
520 | back_end_ip = self.replica.backend_id | ||
521 | return back_end_ip, volume_updates | ||
522 | |||
523 | def _create_volume_replica_user_snap(self, k2, sess): | ||
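# NOTE (illustrative, added comment): creating a snapshot tied to a
# replication session acts as a manual sync point on K2; the generation
# number returned here is what _is_user_snap_sync_finished() later polls for.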
524 | snap = k2.new("snapshots") | ||
525 | snap.is_application_consistent = "False" | ||
526 | snap.replication_session = sess | ||
527 | snap.save() | ||
528 | return snap.generation_number | ||
529 | |||
530 | def _is_user_snap_sync_finished(self): | ||
531 | # waiting for user snapshot to be synced | ||
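# NOTE (illustrative, added comment): each entry tracks a target session, the
# generation number of the user snapshot issued for it, and its start time.
# An entry is removed once that snapshot shows as complete; if more than 300
# seconds have elapsed by then, a fresh snapshot is issued and tracked.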
532 | while len(self.snap_updates) > 0: | ||
533 | for l in self.snap_updates: | ||
534 | sess = l.get('tgt_ssn') | ||
535 | gno = l.get('gno') | ||
536 | stime = l.get('stime') | ||
537 | sess.refresh() | ||
538 | if (sess.generation_number == gno and | ||
539 | sess.current_snapshot_progress == 100 | ||
540 | and sess.current_snapshot_id is None): | ||
541 | if time.time() - stime > 300: | ||
542 | gen_no = self._create_volume_replica_user_snap( | ||
543 | self.target, | ||
544 | sess) | ||
545 | self.snap_updates.append({'tgt_ssn': sess, | ||
546 | 'gno': gen_no, | ||
547 | 'stime': time.time()}) | ||
548 | self.snap_updates.remove(l) | ||
549 | eventlet.sleep(1) | ||
550 | |||
551 | @kaminario_logger | ||
552 | def create_volume_from_snapshot(self, volume, snapshot): | ||
553 | """Create volume from snapshot. | ||
554 | |||
555 | - search for snapshot and retention_policy | ||
556 | - create a view from snapshot and attach view | ||
557 | - create a volume and attach volume | ||
558 | - copy data from attached view to attached volume | ||
559 | - detach volume and view and finally delete view | ||
560 | """ | ||
561 | snap_name = self.get_snap_name(snapshot.id) | ||
562 | view_name = self.get_view_name(volume.id) | ||
563 | vol_name = self.get_volume_name(volume.id) | ||
564 | cview = src_attach_info = dest_attach_info = None | ||
565 | rpolicy = self.get_policy() | ||
566 | properties = utils.brick_get_connector_properties() | ||
567 | LOG.debug("Searching for snapshot: %s in K2.", snap_name) | ||
568 | snap_rs = self.client.search("snapshots", short_name=snap_name) | ||
569 | if hasattr(snap_rs, 'hits') and snap_rs.total != 0: | ||
570 | snap = snap_rs.hits[0] | ||
571 | LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s", | ||
572 | {'view': view_name, 'snap': snap_name}) | ||
573 | try: | ||
574 | cview = self.client.new("snapshots", | ||
575 | short_name=view_name, | ||
576 | source=snap, retention_policy=rpolicy, | ||
577 | is_exposable=True).save() | ||
578 | except Exception as ex: | ||
579 | LOG.exception(_LE("Creating a view: %(view)s from snapshot: " | ||
580 | "%(snap)s failed"), {"view": view_name, | ||
581 | "snap": snap_name}) | ||
582 | raise exception.KaminarioCinderDriverException( | ||
583 | reason=six.text_type(ex.message)) | ||
584 | |||
585 | else: | ||
586 | msg = _("Snapshot: %s search failed in K2.") % snap_name | ||
587 | LOG.error(msg) | ||
588 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
589 | |||
590 | try: | ||
591 | conn = self.initialize_connection(cview, properties) | ||
592 | src_attach_info = self._connect_device(conn) | ||
593 | self.create_volume(volume) | ||
594 | conn = self.initialize_connection(volume, properties) | ||
595 | dest_attach_info = self._connect_device(conn) | ||
596 | vol_utils.copy_volume(src_attach_info['device']['path'], | ||
597 | dest_attach_info['device']['path'], | ||
598 | snapshot.volume.size * units.Ki, | ||
599 | self.configuration.volume_dd_blocksize, | ||
600 | sparse=True) | ||
601 | self._kaminario_disconnect_volume(src_attach_info, | ||
602 | dest_attach_info) | ||
603 | self.terminate_connection(volume, properties) | ||
604 | self.terminate_connection(cview, properties) | ||
605 | cview.delete() | ||
606 | except Exception as ex: | ||
607 | self._kaminario_disconnect_volume(src_attach_info, | ||
608 | dest_attach_info) | ||
609 | self.terminate_connection(cview, properties) | ||
610 | self.terminate_connection(volume, properties) | ||
611 | cview.delete() | ||
612 | self.delete_volume(volume) | ||
613 | LOG.exception(_LE("Copy to volume: %(vol)s from view: %(view)s " | ||
614 | "failed"), {"vol": vol_name, "view": view_name}) | ||
615 | raise exception.KaminarioCinderDriverException( | ||
616 | reason=six.text_type(ex.message)) | ||
617 | |||
618 | @kaminario_logger | ||
619 | def create_cloned_volume(self, volume, src_vref): | ||
620 | """Create a clone from source volume. | ||
621 | |||
622 | - attach source volume | ||
623 | - create and attach new volume | ||
624 | - copy data from attached source volume to attached new volume | ||
625 | - detach both volumes | ||
626 | """ | ||
627 | clone_name = self.get_volume_name(volume.id) | ||
628 | src_name = self.get_volume_name(src_vref.id) | ||
629 | src_vol = self.client.search("volumes", name=src_name) | ||
630 | src_map = self.client.search("mappings", volume=src_vol) | ||
631 | src_attach_info = dest_attach_info = None | ||
632 | if src_map.total != 0: | ||
633 | msg = _("K2 driver does not support clone of a attached volume. " | ||
634 | "To get this done, create a snapshot from the attached " | ||
635 | "volume and then create a volume from the snapshot.") | ||
636 | LOG.error(msg) | ||
637 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
638 | try: | ||
639 | properties = utils.brick_get_connector_properties() | ||
640 | conn = self.initialize_connection(src_vref, properties) | ||
641 | src_attach_info = self._connect_device(conn) | ||
642 | self.create_volume(volume) | ||
643 | conn = self.initialize_connection(volume, properties) | ||
644 | dest_attach_info = self._connect_device(conn) | ||
645 | vol_utils.copy_volume(src_attach_info['device']['path'], | ||
646 | dest_attach_info['device']['path'], | ||
647 | src_vref.size * units.Ki, | ||
648 | self.configuration.volume_dd_blocksize, | ||
649 | sparse=True) | ||
650 | self._kaminario_disconnect_volume(src_attach_info, | ||
651 | dest_attach_info) | ||
652 | self.terminate_connection(volume, properties) | ||
653 | self.terminate_connection(src_vref, properties) | ||
654 | except Exception as ex: | ||
655 | self._kaminario_disconnect_volume(src_attach_info, | ||
656 | dest_attach_info) | ||
657 | self.terminate_connection(src_vref, properties) | ||
658 | self.terminate_connection(volume, properties) | ||
659 | self.delete_volume(volume) | ||
660 | LOG.exception(_LE("Create a clone: %s failed."), clone_name) | ||
661 | raise exception.KaminarioCinderDriverException( | ||
662 | reason=six.text_type(ex.message)) | ||
663 | |||
664 | @kaminario_logger | ||
665 | def delete_volume(self, volume): | ||
666 | """Volume in K2 exists in a volume group. | ||
667 | |||
668 | - delete the volume | ||
669 | - delete the corresponding volume group | ||
670 | """ | ||
671 | vg_name = self.get_volume_group_name(volume.id) | ||
672 | vol_name = self.get_volume_name(volume.id) | ||
673 | try: | ||
674 | if self._get_is_replica(volume.volume_type) and self.replica: | ||
675 | self._delete_volume_replica(volume, vg_name, vol_name) | ||
676 | |||
677 | LOG.debug("Searching and deleting volume: %s in K2.", vol_name) | ||
678 | vol_rs = self.client.search("volumes", name=vol_name) | ||
679 | if vol_rs.total != 0: | ||
680 | vol_rs.hits[0].delete() | ||
681 | LOG.debug("Searching and deleting vg: %s in K2.", vg_name) | ||
682 | vg_rs = self.client.search("volume_groups", name=vg_name) | ||
683 | if vg_rs.total != 0: | ||
684 | vg_rs.hits[0].delete() | ||
685 | except Exception as ex: | ||
686 | LOG.exception(_LE("Deletion of volume %s failed."), vol_name) | ||
687 | raise exception.KaminarioCinderDriverException( | ||
688 | reason=six.text_type(ex.message)) | ||
689 | |||
690 | @kaminario_logger | ||
691 | def _delete_volume_replica(self, volume, vg_name, vol_name): | ||
692 | rvg_name = self.get_rep_name(vg_name) | ||
693 | rvol_name = self.get_rep_name(vol_name) | ||
694 | session_name = self.get_session_name(volume.id) | ||
695 | rsession_name = self.get_rep_name(session_name) | ||
696 | src_ssn = self.client.search('replication/sessions', | ||
697 | name=session_name).hits[0] | ||
698 | tgt_ssn = self.target.search('replication/sessions', | ||
699 | name=rsession_name).hits[0] | ||
700 | src_ssn.state = 'suspended' | ||
701 | src_ssn.save() | ||
702 | self._check_for_status(tgt_ssn, 'suspended') | ||
703 | src_ssn.state = 'idle' | ||
704 | src_ssn.save() | ||
705 | self._check_for_status(tgt_ssn, 'idle') | ||
706 | tgt_ssn.delete() | ||
707 | src_ssn.delete() | ||
708 | |||
709 | LOG.debug("Searching and deleting snapshots for volume groups: " | ||
710 | "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name}) | ||
711 | vg = self.client.search('volume_groups', name=vg_name).hits | ||
712 | rvg = self.target.search('volume_groups', name=rvg_name).hits | ||
713 | snaps = self.client.search('snapshots', volume_group=vg).hits | ||
714 | for s in snaps: | ||
715 | s.delete() | ||
716 | rsnaps = self.target.search('snapshots', volume_group=rvg).hits | ||
717 | for s in rsnaps: | ||
718 | s.delete() | ||
719 | |||
720 | self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') | ||
721 | self._delete_by_ref(self.target, "volume_groups", | ||
722 | rvg_name, "remote vg") | ||
723 | |||
724 | @kaminario_logger | ||
725 | def _delete_failover_volume_replica(self, volume, vg_name, vol_name): | ||
726 | rvg_name = self.get_rep_name(vg_name) | ||
727 | rvol_name = self.get_rep_name(vol_name) | ||
728 | session_name = self.get_session_name(volume.id) | ||
729 | rsession_name = self.get_rep_name(session_name) | ||
730 | tgt_ssn = self.target.search('replication/sessions', | ||
731 | name=rsession_name).hits[0] | ||
732 | tgt_ssn.state = 'idle' | ||
733 | tgt_ssn.save() | ||
734 | tgt_ssn.delete() | ||
735 | |||
736 | LOG.debug("Searching and deleting snapshots for target volume group " | ||
737 | "and target volume: %(vol)s, %(vg)s in K2.", | ||
738 | {'vol': rvol_name, 'vg': rvg_name}) | ||
739 | rvg = self.target.search('volume_groups', name=rvg_name).hits | ||
740 | rsnaps = self.target.search('snapshots', volume_group=rvg).hits | ||
741 | for s in rsnaps: | ||
742 | s.delete() | ||
743 | |||
744 | @kaminario_logger | ||
745 | def _check_for_status(self, obj, status): | ||
746 | while obj.state != status: | ||
747 | obj.refresh() | ||
748 | eventlet.sleep(1) | ||
749 | |||
750 | @kaminario_logger | ||
751 | def get_volume_stats(self, refresh=False): | ||
752 | if refresh: | ||
753 | self.update_volume_stats() | ||
754 | stats = self.stats | ||
755 | stats['storage_protocol'] = self._protocol | ||
756 | stats['driver_version'] = self.VERSION | ||
757 | stats['vendor_name'] = self.VENDOR | ||
758 | backend_name = self.configuration.safe_get('volume_backend_name') | ||
759 | stats['volume_backend_name'] = (backend_name or | ||
760 | self.__class__.__name__) | ||
761 | return stats | ||
762 | |||
763 | def create_export(self, context, volume, connector): | ||
764 | pass | ||
765 | |||
766 | def ensure_export(self, context, volume): | ||
767 | pass | ||
768 | |||
769 | def remove_export(self, context, volume): | ||
770 | pass | ||
771 | |||
772 | @kaminario_logger | ||
773 | def create_snapshot(self, snapshot): | ||
774 | """Create a snapshot from a volume_group.""" | ||
775 | vg_name = self.get_volume_group_name(snapshot.volume_id) | ||
776 | snap_name = self.get_snap_name(snapshot.id) | ||
777 | rpolicy = self.get_policy() | ||
778 | try: | ||
779 | LOG.debug("Searching volume_group: %s in K2.", vg_name) | ||
780 | vg = self.client.search("volume_groups", name=vg_name).hits[0] | ||
781 | LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s", | ||
782 | {'snap': snap_name, 'vg': vg_name}) | ||
783 | self.client.new("snapshots", short_name=snap_name, | ||
784 | source=vg, retention_policy=rpolicy, | ||
785 | is_auto_deleteable=False).save() | ||
786 | except Exception as ex: | ||
787 | LOG.exception(_LE("Creation of snapshot: %s failed."), snap_name) | ||
788 | raise exception.KaminarioCinderDriverException( | ||
789 | reason=six.text_type(ex.message)) | ||
790 | |||
791 | @kaminario_logger | ||
792 | def delete_snapshot(self, snapshot): | ||
793 | """Delete a snapshot.""" | ||
794 | snap_name = self.get_snap_name(snapshot.id) | ||
795 | try: | ||
796 | LOG.debug("Searching and deleting snapshot: %s in K2.", snap_name) | ||
797 | snap_rs = self.client.search("snapshots", short_name=snap_name) | ||
798 | if snap_rs.total != 0: | ||
799 | snap_rs.hits[0].delete() | ||
800 | except Exception as ex: | ||
801 | LOG.exception(_LE("Deletion of snapshot: %s failed."), snap_name) | ||
802 | raise exception.KaminarioCinderDriverException( | ||
803 | reason=six.text_type(ex.message)) | ||
804 | |||
805 | @kaminario_logger | ||
806 | def extend_volume(self, volume, new_size): | ||
807 | """Extend volume.""" | ||
808 | vol_name = self.get_volume_name(volume.id) | ||
809 | try: | ||
810 | LOG.debug("Searching volume: %s in K2.", vol_name) | ||
811 | vol = self.client.search("volumes", name=vol_name).hits[0] | ||
812 | vol.size = new_size * units.Mi | ||
813 | LOG.debug("Extending volume: %s in K2.", vol_name) | ||
814 | vol.save() | ||
815 | except Exception as ex: | ||
816 | LOG.exception(_LE("Extending volume: %s failed."), vol_name) | ||
817 | raise exception.KaminarioCinderDriverException( | ||
818 | reason=six.text_type(ex.message)) | ||
819 | |||
820 | @kaminario_logger | ||
821 | def update_volume_stats(self): | ||
822 | conf = self.configuration | ||
823 | LOG.debug("Searching system capacity in K2.") | ||
824 | cap = self.client.search("system/capacity").hits[0] | ||
825 | LOG.debug("Searching total volumes in K2 for updating stats.") | ||
826 | total_volumes = self.client.search("volumes").total - 1 | ||
827 | provisioned_vol = cap.provisioned_volumes | ||
828 | if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned | ||
829 | and (cap.total - cap.free) != 0): | ||
830 | ratio = provisioned_vol / float(cap.total - cap.free) | ||
831 | else: | ||
832 | ratio = conf.max_over_subscription_ratio | ||
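# Worked example (illustrative, not from the original source): with
# provisioned_volumes = 4000 and (total - free) = 1000, in the same capacity
# units, the auto-calculated ratio reported below is 4.0.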
833 | self.stats = {'QoS_support': False, | ||
834 | 'free_capacity_gb': cap.free / units.Mi, | ||
835 | 'total_capacity_gb': cap.total / units.Mi, | ||
836 | 'thin_provisioning_support': True, | ||
837 | 'sparse_copy_volume': True, | ||
838 | 'total_volumes': total_volumes, | ||
839 | 'thick_provisioning_support': False, | ||
840 | 'provisioned_capacity_gb': provisioned_vol / units.Mi, | ||
841 | 'max_oversubscription_ratio': ratio, | ||
842 | 'kaminario:thin_prov_type': 'dedup/nodedup', | ||
843 | 'replication_enabled': True, | ||
844 | 'kaminario:replication': True} | ||
845 | |||
846 | @kaminario_logger | ||
847 | def get_initiator_host_name(self, connector): | ||
848 | """Return the initiator host name. | ||
849 | |||
850 | Valid characters: 0-9, a-z, A-Z, '-', '_' | ||
851 | All other characters are replaced with '_'. | ||
852 | Total characters in initiator host name: 32 | ||
853 | """ | ||
854 | return re.sub('[^0-9a-zA-Z-_]', '_', connector.get('host', ''))[:32] | ||
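# Example (illustrative): a connector host of 'compute-1.example.com' becomes
# 'compute-1_example_com'; longer results are truncated to 32 characters.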
855 | |||
856 | @kaminario_logger | ||
857 | def get_volume_group_name(self, vid): | ||
858 | """Return the volume group name.""" | ||
859 | return "cvg-{0}".format(vid) | ||
860 | |||
861 | @kaminario_logger | ||
862 | def get_volume_name(self, vid): | ||
863 | """Return the volume name.""" | ||
864 | return "cv-{0}".format(vid) | ||
865 | |||
866 | @kaminario_logger | ||
867 | def get_session_name(self, vid): | ||
868 | """Return the volume name.""" | ||
869 | return "ssn-{0}".format(vid) | ||
870 | |||
871 | @kaminario_logger | ||
872 | def get_snap_name(self, sid): | ||
873 | """Return the snapshot name.""" | ||
874 | return "cs-{0}".format(sid) | ||
875 | |||
876 | @kaminario_logger | ||
877 | def get_view_name(self, vid): | ||
878 | """Return the view name.""" | ||
879 | return "cview-{0}".format(vid) | ||
880 | |||
881 | @kaminario_logger | ||
882 | def get_rep_name(self, name): | ||
883 | """Return the corresponding replication names.""" | ||
884 | return "r{0}".format(name) | ||
885 | |||
886 | @kaminario_logger | ||
887 | def _delete_host_by_name(self, name): | ||
888 | """Deleting host by name.""" | ||
889 | host_rs = self.client.search("hosts", name=name) | ||
890 | if hasattr(host_rs, "hits") and host_rs.total != 0: | ||
891 | host = host_rs.hits[0] | ||
892 | host.delete() | ||
893 | |||
894 | @kaminario_logger | ||
895 | def get_policy(self): | ||
896 | """Return the retention policy.""" | ||
897 | try: | ||
898 | LOG.debug("Searching for retention_policy in K2.") | ||
899 | return self.client.search("retention_policies", | ||
900 | name="Best_Effort_Retention").hits[0] | ||
901 | except Exception as ex: | ||
902 | LOG.exception(_LE("Retention policy search failed in K2.")) | ||
903 | raise exception.KaminarioCinderDriverException( | ||
904 | reason=six.text_type(ex.message)) | ||
905 | |||
906 | @kaminario_logger | ||
907 | def _get_volume_object(self, volume): | ||
908 | vol_name = self.get_volume_name(volume.id) | ||
909 | if volume.replication_status == K2_REP_FAILED_OVER: | ||
910 | vol_name = self.get_rep_name(vol_name) | ||
911 | LOG.debug("Searching volume: %s in K2.", vol_name) | ||
912 | vol_rs = self.client.search("volumes", name=vol_name) | ||
913 | if not hasattr(vol_rs, 'hits') or vol_rs.total == 0: | ||
914 | msg = _("Unable to find volume: %s from K2.") % vol_name | ||
915 | LOG.error(msg) | ||
916 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
917 | return vol_rs.hits[0] | ||
918 | |||
919 | @kaminario_logger | ||
920 | def _get_lun_number(self, vol, host): | ||
921 | volsnap = None | ||
922 | LOG.debug("Searching volsnaps in K2.") | ||
923 | volsnap_rs = self.client.search("volsnaps", snapshot=vol) | ||
924 | if hasattr(volsnap_rs, 'hits') and volsnap_rs.total != 0: | ||
925 | volsnap = volsnap_rs.hits[0] | ||
926 | |||
927 | LOG.debug("Searching mapping of volsnap in K2.") | ||
928 | map_rs = self.client.search("mappings", volume=volsnap, host=host) | ||
929 | return map_rs.hits[0].lun | ||
930 | |||
931 | def initialize_connection(self, volume, connector): | ||
932 | pass | ||
933 | |||
934 | @kaminario_logger | ||
935 | def terminate_connection(self, volume, connector): | ||
936 | """Terminate connection of volume from host.""" | ||
937 | # Get volume object | ||
938 | if type(volume).__name__ != 'RestObject': | ||
939 | vol_name = self.get_volume_name(volume.id) | ||
940 | if volume.replication_status == K2_REP_FAILED_OVER: | ||
941 | vol_name = self.get_rep_name(vol_name) | ||
942 | LOG.debug("Searching volume: %s in K2.", vol_name) | ||
943 | volume_rs = self.client.search("volumes", name=vol_name) | ||
944 | if hasattr(volume_rs, "hits") and volume_rs.total != 0: | ||
945 | volume = volume_rs.hits[0] | ||
946 | else: | ||
947 | vol_name = volume.name | ||
948 | |||
949 | # Get host object. | ||
950 | host_name = self.get_initiator_host_name(connector) | ||
951 | host_rs = self.client.search("hosts", name=host_name) | ||
952 | if hasattr(host_rs, "hits") and host_rs.total != 0 and volume: | ||
953 | host = host_rs.hits[0] | ||
954 | LOG.debug("Searching and deleting mapping of volume: %(name)s to " | ||
955 | "host: %(host)s", {'host': host_name, 'name': vol_name}) | ||
956 | map_rs = self.client.search("mappings", volume=volume, host=host) | ||
957 | if hasattr(map_rs, "hits") and map_rs.total != 0: | ||
958 | map_rs.hits[0].delete() | ||
959 | if self.client.search("mappings", host=host).total == 0: | ||
960 | LOG.debug("Deleting initiator hostname: %s in K2.", host_name) | ||
961 | host.delete() | ||
962 | else: | ||
963 | LOG.warning(_LW("Host: %s not found on K2."), host_name) | ||
964 | |||
965 | def k2_initialize_connection(self, volume, connector): | ||
966 | # Get volume object. | ||
967 | if type(volume).__name__ != 'RestObject': | ||
968 | vol = self._get_volume_object(volume) | ||
969 | else: | ||
970 | vol = volume | ||
971 | # Get host object. | ||
972 | host, host_rs, host_name = self._get_host_object(connector) | ||
973 | try: | ||
974 | # Map volume object to host object. | ||
975 | LOG.debug("Mapping volume: %(vol)s to host: %(host)s", | ||
976 | {'host': host_name, 'vol': vol.name}) | ||
977 | mapping = self.client.new("mappings", volume=vol, host=host).save() | ||
978 | except Exception as ex: | ||
979 | if host_rs.total == 0: | ||
980 | self._delete_host_by_name(host_name) | ||
981 | LOG.exception(_LE("Unable to map volume: %(vol)s to host: " | ||
982 | "%(host)s"), {'host': host_name, | ||
983 | 'vol': vol.name}) | ||
984 | raise exception.KaminarioCinderDriverException( | ||
985 | reason=six.text_type(ex.message)) | ||
986 | # Get lun number. | ||
987 | if type(volume).__name__ == 'RestObject': | ||
988 | return self._get_lun_number(vol, host) | ||
989 | else: | ||
990 | return mapping.lun | ||
991 | |||
992 | def _get_host_object(self, connector): | ||
993 | pass | ||
994 | |||
995 | def _get_is_dedup(self, vol_type): | ||
996 | if vol_type: | ||
997 | specs_val = vol_type.get('extra_specs', {}).get( | ||
998 | 'kaminario:thin_prov_type') | ||
999 | if specs_val == 'nodedup': | ||
1000 | return False | ||
1001 | elif CONF.kaminario_nodedup_substring in vol_type.get('name'): | ||
1002 | LOG.info(_LI("'kaminario_nodedup_substring' option is " | ||
1003 | "deprecated in favour of 'kaminario:thin_prov_" | ||
1004 | "type' in extra-specs and will be removed in " | ||
1005 | "the 10.0.0 release.")) | ||
1006 | return False | ||
1007 | else: | ||
1008 | return True | ||
1009 | else: | ||
1010 | return True | ||
1011 | |||
1012 | def _get_is_replica(self, vol_type): | ||
1013 | replica = False | ||
1014 | if vol_type and vol_type.get('extra_specs'): | ||
1015 | specs = vol_type.get('extra_specs') | ||
1016 | if (specs.get('kaminario:replication') == 'enabled' and | ||
1017 | self.replica): | ||
1018 | replica = True | ||
1019 | return replica | ||
1020 | |||
1021 | def _get_replica_status(self, vg_name): | ||
1022 | vg_rs = self.client.search("volume_groups", name=vg_name) | ||
1023 | if vg_rs.total: | ||
1024 | vg = vg_rs.hits[0] | ||
1025 | if self.client.search("replication/sessions", | ||
1026 | local_volume_group=vg).total: | ||
1027 | return True | ||
1028 | return False | ||
1029 | |||
1030 | def manage_existing(self, volume, existing_ref): | ||
1031 | vol_name = existing_ref['source-name'] | ||
1032 | new_name = self.get_volume_name(volume.id) | ||
1033 | vg_new_name = self.get_volume_group_name(volume.id) | ||
1034 | vg_name = None | ||
1035 | is_dedup = self._get_is_dedup(volume.get('volume_type')) | ||
1036 | try: | ||
1037 | LOG.debug("Searching volume: %s in K2.", vol_name) | ||
1038 | vol = self.client.search("volumes", name=vol_name).hits[0] | ||
1039 | vg = vol.volume_group | ||
1040 | vg_replica = self._get_replica_status(vg.name) | ||
1041 | vol_map = False | ||
1042 | if self.client.search("mappings", volume=vol).total != 0: | ||
1043 | vol_map = True | ||
1044 | if is_dedup != vg.is_dedup or vg_replica or vol_map: | ||
1045 | raise exception.ManageExistingInvalidReference( | ||
1046 | existing_ref=existing_ref, | ||
1047 | reason=_('Manage volume type invalid.')) | ||
1048 | vol.name = new_name | ||
1049 | vg_name = vg.name | ||
1050 | LOG.debug("Manage new volume name: %s", new_name) | ||
1051 | vg.name = vg_new_name | ||
1052 | LOG.debug("Manage volume group name: %s", vg_new_name) | ||
1053 | vg.save() | ||
1054 | LOG.debug("Manage volume: %s in K2.", vol_name) | ||
1055 | vol.save() | ||
1056 | except Exception as ex: | ||
1057 | vg_rs = self.client.search("volume_groups", name=vg_new_name) | ||
1058 | if hasattr(vg_rs, 'hits') and vg_rs.total != 0: | ||
1059 | vg = vg_rs.hits[0] | ||
1060 | if vg_name and vg.name == vg_new_name: | ||
1061 | vg.name = vg_name | ||
1062 | LOG.debug("Updating vg new name to old name: %s ", vg_name) | ||
1063 | vg.save() | ||
1064 | LOG.exception(_LE("manage volume: %s failed."), vol_name) | ||
1065 | raise exception.ManageExistingInvalidReference( | ||
1066 | existing_ref=existing_ref, | ||
1067 | reason=six.text_type(ex.message)) | ||
1068 | |||
1069 | def manage_existing_get_size(self, volume, existing_ref): | ||
1070 | vol_name = existing_ref['source-name'] | ||
1071 | v_rs = self.client.search("volumes", name=vol_name) | ||
1072 | if hasattr(v_rs, 'hits') and v_rs.total != 0: | ||
1073 | vol = v_rs.hits[0] | ||
1074 | size = vol.size / units.Mi | ||
1075 | return math.ceil(size) | ||
1076 | else: | ||
1077 | raise exception.ManageExistingInvalidReference( | ||
1078 | existing_ref=existing_ref, | ||
1079 | reason=_('Unable to get size of manage volume.')) | ||
1080 | |||
1081 | def after_volume_copy(self, ctxt, volume, new_volume, remote=None): | ||
1082 | self.delete_volume(volume) | ||
1083 | vg_name_old = self.get_volume_group_name(volume.id) | ||
1084 | vol_name_old = self.get_volume_name(volume.id) | ||
1085 | vg_name_new = self.get_volume_group_name(new_volume.id) | ||
1086 | vol_name_new = self.get_volume_name(new_volume.id) | ||
1087 | vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0] | ||
1088 | vg_new.name = vg_name_old | ||
1089 | vg_new.save() | ||
1090 | vol_new = self.client.search("volumes", name=vol_name_new).hits[0] | ||
1091 | vol_new.name = vol_name_old | ||
1092 | vol_new.save() | ||
1093 | |||
1094 | def retype(self, ctxt, volume, new_type, diff, host): | ||
1095 | old_type = volume.get('volume_type') | ||
1096 | vg_name = self.get_volume_group_name(volume.id) | ||
1097 | vol_name = self.get_volume_name(volume.id) | ||
1098 | vol_rs = self.client.search("volumes", name=vol_name) | ||
1099 | if vol_rs.total: | ||
1100 | vol = vol_rs.hits[0] | ||
1101 | vmap = self.client.search("mappings", volume=vol).total | ||
1102 | old_rep_type = self._get_replica_status(vg_name) | ||
1103 | new_rep_type = self._get_is_replica(new_type) | ||
1104 | new_prov_type = self._get_is_dedup(new_type) | ||
1105 | old_prov_type = self._get_is_dedup(old_type) | ||
1106 | # Change dedup<->nodedup with add/remove replication is complex in K2 | ||
1107 | # since K2 does not have an API to change dedup<->nodedup. | ||
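# NOTE (illustrative summary, added comment): when the provisioning type is
# unchanged, replication is added or removed in place and True is returned;
# a dedup<->nodedup change requires a volume migration, so False is returned.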
1108 | if new_prov_type == old_prov_type: | ||
1109 | if not old_rep_type and new_rep_type: | ||
1110 | self._add_replication(volume) | ||
1111 | return True | ||
1112 | elif old_rep_type and not new_rep_type: | ||
1113 | self._delete_replication(volume) | ||
1114 | return True | ||
1115 | elif not new_rep_type and not old_rep_type: | ||
1116 | msg = ("Use '--migration-policy on-demand' to change 'dedup " | ||
1117 | "without replication'<->'nodedup without replication'.") | ||
1118 | if vol_rs.total and vmap: | ||
1119 | msg = "Unattach volume and {0}".format(msg) | ||
1120 | LOG.debug(msg) | ||
1121 | return False | ||
1122 | else: | ||
1123 | LOG.error(_LE('Change from type1: %(type1)s to type2: %(type2)s ' | ||
1124 | 'is not supported directly in K2.'), | ||
1125 | {'type1': old_type, 'type2': new_type}) | ||
1126 | return False | ||
1127 | |||
1128 | def _add_replication(self, volume): | ||
1129 | vg_name = self.get_volume_group_name(volume.id) | ||
1130 | vol_name = self.get_volume_name(volume.id) | ||
1131 | if volume.replication_status == K2_REP_FAILED_OVER: | ||
1132 | self._create_failover_volume_replica(volume, vg_name, vol_name) | ||
1133 | else: | ||
1134 | LOG.debug("Searching volume group with name: %(name)s", | ||
1135 | {'name': vg_name}) | ||
1136 | vg = self.client.search("volume_groups", name=vg_name).hits[0] | ||
1137 | LOG.debug("Searching volume with name: %(name)s", | ||
1138 | {'name': vol_name}) | ||
1139 | vol = self.client.search("volumes", name=vol_name).hits[0] | ||
1140 | self._create_volume_replica(volume, vg, vol, self.replica.rpo) | ||
1141 | |||
1142 | def _delete_replication(self, volume): | ||
1143 | vg_name = self.get_volume_group_name(volume.id) | ||
1144 | vol_name = self.get_volume_name(volume.id) | ||
1145 | if volume.replication_status == K2_REP_FAILED_OVER: | ||
1146 | self._delete_failover_volume_replica(volume, vg_name, vol_name) | ||
1147 | else: | ||
1148 | self._delete_volume_replica(volume, vg_name, vol_name) | ||
1149 | |||
1150 | def _kaminario_disconnect_volume(self, *attach_info): | ||
1151 | for info in attach_info: | ||
1152 | if (info and info.get('connector') and | ||
1153 | info.get('conn', {}).get('data') and info.get('device')): | ||
1154 | info['connector'].disconnect_volume(info['conn']['data'], | ||
1155 | info['device']) | ||
diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py deleted file mode 100644 index 202be92..0000000 --- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_fc.py +++ /dev/null | |||
@@ -1,196 +0,0 @@ | |||
1 | # Copyright (c) 2016 by Kaminario Technologies, Ltd. | ||
2 | # All Rights Reserved. | ||
3 | # | ||
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may | ||
5 | # not use this file except in compliance with the License. You may obtain | ||
6 | # a copy of the License at | ||
7 | # | ||
8 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | # | ||
10 | # Unless required by applicable law or agreed to in writing, software | ||
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | ||
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | ||
13 | # License for the specific language governing permissions and limitations | ||
14 | # under the License. | ||
15 | """Volume driver for Kaminario K2 all-flash arrays.""" | ||
16 | import six | ||
17 | |||
18 | from oslo_log import log as logging | ||
19 | |||
20 | from cinder import exception | ||
21 | from cinder import utils | ||
22 | from cinder.i18n import _, _LE | ||
23 | from cinder.objects import fields | ||
24 | from cinder.volume.drivers.kaminario import kaminario_common as common | ||
25 | from cinder.zonemanager import utils as fczm_utils | ||
26 | |||
27 | K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER | ||
28 | LOG = logging.getLogger(__name__) | ||
29 | kaminario_logger = common.kaminario_logger | ||
30 | |||
31 | |||
32 | class KaminarioFCDriver(common.KaminarioCinderDriver): | ||
33 | """Kaminario K2 FC Volume Driver. | ||
34 | |||
35 | Version history: | ||
36 | 1.0.2.0 - Initial driver | ||
37 | """ | ||
38 | |||
39 | VERSION = '1.0.2.0' | ||
40 | |||
41 | # ThirdPartySystems wiki page name | ||
42 | CI_WIKI_NAME = "Kaminario_K2_CI" | ||
43 | |||
44 | @kaminario_logger | ||
45 | def __init__(self, *args, **kwargs): | ||
46 | super(KaminarioFCDriver, self).__init__(*args, **kwargs) | ||
47 | self._protocol = 'FC' | ||
48 | self.lookup_service = fczm_utils.create_lookup_service() | ||
49 | |||
50 | @fczm_utils.AddFCZone | ||
51 | @kaminario_logger | ||
52 | @utils.synchronized(common.K2_LOCK_NAME, external=True) | ||
53 | def initialize_connection(self, volume, connector): | ||
54 | """Attach K2 volume to host.""" | ||
55 | # Check wwpns in host connector. | ||
56 | if not connector.get('wwpns'): | ||
57 | msg = _("No wwpns found in host connector.") | ||
58 | LOG.error(msg) | ||
59 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
60 | # To support replication failback | ||
61 | temp_client = None | ||
62 | if (hasattr(volume, 'replication_status') and | ||
63 | volume.replication_status == K2_REP_FAILED_OVER): | ||
64 | temp_client = self.client | ||
65 | self.client = self.target | ||
66 | # Get target wwpns. | ||
67 | target_wwpns = self.get_target_info(volume) | ||
68 | # Map volume. | ||
69 | lun = self.k2_initialize_connection(volume, connector) | ||
70 | # Create initiator-target mapping. | ||
71 | target_wwpns, init_target_map = self._build_initiator_target_map( | ||
72 | connector, target_wwpns) | ||
73 | # To support replication failback | ||
74 | if temp_client: | ||
75 | self.client = temp_client | ||
76 | # Return target volume information. | ||
77 | return {'driver_volume_type': 'fibre_channel', | ||
78 | 'data': {"target_discovered": True, | ||
79 | "target_lun": lun, | ||
80 | "target_wwn": target_wwpns, | ||
81 | "initiator_target_map": init_target_map}} | ||
82 | |||
83 | @fczm_utils.RemoveFCZone | ||
84 | @kaminario_logger | ||
85 | @utils.synchronized(common.K2_LOCK_NAME, external=True) | ||
86 | def terminate_connection(self, volume, connector, **kwargs): | ||
87 | # To support replication failback | ||
88 | temp_client = None | ||
89 | if (hasattr(volume, 'replication_status') and | ||
90 | volume.replication_status == K2_REP_FAILED_OVER): | ||
91 | temp_client = self.client | ||
92 | self.client = self.target | ||
93 | super(KaminarioFCDriver, self).terminate_connection(volume, connector) | ||
94 | properties = {"driver_volume_type": "fibre_channel", "data": {}} | ||
95 | host_name = self.get_initiator_host_name(connector) | ||
96 | host_rs = self.client.search("hosts", name=host_name) | ||
97 | # In terminate_connection, the host entry is deleted if the host | ||
98 | # is not attached to any volume. | ||
99 | if host_rs.total == 0: | ||
100 | # Get target wwpns. | ||
101 | target_wwpns = self.get_target_info(volume) | ||
102 | target_wwpns, init_target_map = self._build_initiator_target_map( | ||
103 | connector, target_wwpns) | ||
104 | properties["data"] = {"target_wwn": target_wwpns, | ||
105 | "initiator_target_map": init_target_map} | ||
106 | # To support replication failback | ||
107 | if temp_client: | ||
108 | self.client = temp_client | ||
109 | return properties | ||
110 | |||
111 | @kaminario_logger | ||
112 | def get_target_info(self, volume): | ||
113 | LOG.debug("Searching target wwpns in K2.") | ||
114 | fc_ports_rs = self.client.search("system/fc_ports") | ||
115 | target_wwpns = [] | ||
116 | if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0: | ||
117 | for port in fc_ports_rs.hits: | ||
118 | if port.pwwn: | ||
119 | target_wwpns.append((port.pwwn).replace(':', '')) | ||
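# e.g. (illustrative) a pwwn of '50:01:43:80:16:7c:1a:00' is returned
# as '50014380167c1a00'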
120 | if not target_wwpns: | ||
121 | msg = _("Unable to get FC target wwpns from K2.") | ||
122 | LOG.error(msg) | ||
123 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
124 | return target_wwpns | ||
125 | |||
126 | @kaminario_logger | ||
127 | def _get_host_object(self, connector): | ||
128 | host_name = self.get_initiator_host_name(connector) | ||
129 | LOG.debug("Searching initiator hostname: %s in K2.", host_name) | ||
130 | host_rs = self.client.search("hosts", name=host_name) | ||
131 | host_wwpns = connector['wwpns'] | ||
132 | if host_rs.total == 0: | ||
133 | try: | ||
134 | LOG.debug("Creating initiator hostname: %s in K2.", host_name) | ||
135 | host = self.client.new("hosts", name=host_name, | ||
136 | type="Linux").save() | ||
137 | except Exception as ex: | ||
138 | LOG.exception(_LE("Unable to create host : %s in K2."), | ||
139 | host_name) | ||
140 | raise exception.KaminarioCinderDriverException( | ||
141 | reason=six.text_type(ex.message)) | ||
142 | else: | ||
143 | # Use existing host. | ||
144 | LOG.debug("Use existing initiator hostname: %s in K2.", host_name) | ||
145 | host = host_rs.hits[0] | ||
146 | # Adding host wwpn. | ||
147 | for wwpn in host_wwpns: | ||
148 | wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)]) | ||
149 | if self.client.search("host_fc_ports", pwwn=wwpn, | ||
150 | host=host).total == 0: | ||
151 | LOG.debug("Adding wwpn: %(wwpn)s to host: " | ||
152 | "%(host)s in K2.", {'wwpn': wwpn, | ||
153 | 'host': host_name}) | ||
154 | try: | ||
155 | self.client.new("host_fc_ports", pwwn=wwpn, | ||
156 | host=host).save() | ||
157 | except Exception as ex: | ||
158 | if host_rs.total == 0: | ||
159 | self._delete_host_by_name(host_name) | ||
160 | LOG.exception(_LE("Unable to add wwpn : %(wwpn)s to " | ||
161 | "host: %(host)s in K2."), | ||
162 | {'wwpn': wwpn, 'host': host_name}) | ||
163 | raise exception.KaminarioCinderDriverException( | ||
164 | reason=six.text_type(ex.message)) | ||
165 | return host, host_rs, host_name | ||
166 | |||
167 | @kaminario_logger | ||
168 | def _build_initiator_target_map(self, connector, all_target_wwns): | ||
169 | """Build the target_wwns and the initiator target map.""" | ||
170 | target_wwns = [] | ||
171 | init_targ_map = {} | ||
172 | |||
173 | if self.lookup_service is not None: | ||
174 | # use FC san lookup. | ||
175 | dev_map = self.lookup_service.get_device_mapping_from_network( | ||
176 | connector.get('wwpns'), | ||
177 | all_target_wwns) | ||
178 | |||
179 | for fabric_name in dev_map: | ||
180 | fabric = dev_map[fabric_name] | ||
181 | target_wwns += fabric['target_port_wwn_list'] | ||
182 | for initiator in fabric['initiator_port_wwn_list']: | ||
183 | if initiator not in init_targ_map: | ||
184 | init_targ_map[initiator] = [] | ||
185 | init_targ_map[initiator] += fabric['target_port_wwn_list'] | ||
186 | init_targ_map[initiator] = list(set( | ||
187 | init_targ_map[initiator])) | ||
188 | target_wwns = list(set(target_wwns)) | ||
189 | else: | ||
190 | initiator_wwns = connector.get('wwpns', []) | ||
191 | target_wwns = all_target_wwns | ||
192 | |||
193 | for initiator in initiator_wwns: | ||
194 | init_targ_map[initiator] = target_wwns | ||
195 | |||
196 | return target_wwns, init_targ_map | ||
diff --git a/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py b/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py deleted file mode 100644 index dae1634..0000000 --- a/deployment_scripts/puppet/modules/kaminario/files/kaminario_iscsi.py +++ /dev/null | |||
@@ -1,137 +0,0 @@ | |||
1 | # Copyright (c) 2016 by Kaminario Technologies, Ltd. | ||
2 | # All Rights Reserved. | ||
3 | # | ||
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may | ||
5 | # not use this file except in compliance with the License. You may obtain | ||
6 | # a copy of the License at | ||
7 | # | ||
8 | # http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | # | ||
10 | # Unless required by applicable law or agreed to in writing, software | ||
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | ||
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | ||
13 | # License for the specific language governing permissions and limitations | ||
14 | # under the License. | ||
15 | """Volume driver for Kaminario K2 all-flash arrays.""" | ||
16 | import six | ||
17 | |||
18 | from oslo_log import log as logging | ||
19 | |||
20 | from cinder import exception | ||
21 | from cinder import utils | ||
22 | from cinder.i18n import _, _LE | ||
23 | from cinder.objects import fields | ||
24 | from cinder.volume.drivers.kaminario import kaminario_common as common | ||
25 | |||
26 | ISCSI_TCP_PORT = "3260" | ||
27 | K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER | ||
28 | LOG = logging.getLogger(__name__) | ||
29 | kaminario_logger = common.kaminario_logger | ||
30 | |||
31 | |||
32 | class KaminarioISCSIDriver(common.KaminarioCinderDriver): | ||
33 | """Kaminario K2 iSCSI Volume Driver. | ||
34 | |||
35 | Version history: | ||
36 | 1.0.2.0 - Initial driver | ||
37 | """ | ||
38 | |||
39 | VERSION = '1.0.2.0' | ||
40 | |||
41 | # ThirdPartySystems wiki page name | ||
42 | CI_WIKI_NAME = "Kaminario_K2_CI" | ||
43 | |||
44 | @kaminario_logger | ||
45 | def __init__(self, *args, **kwargs): | ||
46 | super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) | ||
47 | self._protocol = 'iSCSI' | ||
48 | |||
49 | @kaminario_logger | ||
50 | @utils.synchronized(common.K2_LOCK_NAME, external=True) | ||
51 | def initialize_connection(self, volume, connector): | ||
52 | """Attach K2 volume to host.""" | ||
53 | # To support replication failback | ||
54 | temp_client = None | ||
55 | if (hasattr(volume, 'replication_status') and | ||
56 | volume.replication_status == K2_REP_FAILED_OVER): | ||
57 | temp_client = self.client | ||
58 | self.client = self.target | ||
59 | # Get target_portal and target iqn. | ||
60 | iscsi_portal, target_iqn = self.get_target_info(volume) | ||
61 | # Map volume. | ||
62 | lun = self.k2_initialize_connection(volume, connector) | ||
63 | # To support replication failback | ||
64 | if temp_client: | ||
65 | self.client = temp_client | ||
66 | # Return target volume information. | ||
67 | return {"driver_volume_type": "iscsi", | ||
68 | "data": {"target_iqn": target_iqn, | ||
69 | "target_portal": iscsi_portal, | ||
70 | "target_lun": lun, | ||
71 | "target_discovered": True}} | ||
72 | |||
73 | @kaminario_logger | ||
74 | @utils.synchronized(common.K2_LOCK_NAME, external=True) | ||
75 | def terminate_connection(self, volume, connector, **kwargs): | ||
76 | # To support replication failback | ||
77 | temp_client = None | ||
78 | if (hasattr(volume, 'replication_status') and | ||
79 | volume.replication_status == K2_REP_FAILED_OVER): | ||
80 | temp_client = self.client | ||
81 | self.client = self.target | ||
82 | super(KaminarioISCSIDriver, self).terminate_connection(volume, | ||
83 | connector) | ||
84 | # To support replication failback | ||
85 | if temp_client: | ||
86 | self.client = temp_client | ||
87 | |||
88 | @kaminario_logger | ||
89 | def get_target_info(self, volume): | ||
90 | LOG.debug("Searching for the first iSCSI port IP without a WAN port in K2.") | ||
91 | iscsi_ip_rs = self.client.search("system/net_ips", wan_port="") | ||
92 | iscsi_ip = target_iqn = None | ||
93 | if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0: | ||
94 | iscsi_ip = iscsi_ip_rs.hits[0].ip_address | ||
95 | if not iscsi_ip: | ||
96 | msg = _("Unable to get ISCSI IP address from K2.") | ||
97 | LOG.error(msg) | ||
98 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
99 | iscsi_portal = "{0}:{1}".format(iscsi_ip, ISCSI_TCP_PORT) | ||
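# e.g. (illustrative) '192.0.2.10:3260'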
100 | LOG.debug("Searching system state for target iqn in K2.") | ||
101 | sys_state_rs = self.client.search("system/state") | ||
102 | |||
103 | if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0: | ||
104 | target_iqn = sys_state_rs.hits[0].iscsi_qualified_target_name | ||
105 | |||
106 | if not target_iqn: | ||
107 | msg = _("Unable to get target iqn from K2.") | ||
108 | LOG.error(msg) | ||
109 | raise exception.KaminarioCinderDriverException(reason=msg) | ||
110 | return iscsi_portal, target_iqn | ||
111 | |||
112 | @kaminario_logger | ||
113 | def _get_host_object(self, connector): | ||
114 | host_name = self.get_initiator_host_name(connector) | ||
115 | LOG.debug("Searching initiator hostname: %s in K2.", host_name) | ||
116 | host_rs = self.client.search("hosts", name=host_name) | ||
117 | """Create a host if not exists.""" | ||
118 | if host_rs.total == 0: | ||
119 | try: | ||
120 | LOG.debug("Creating initiator hostname: %s in K2.", host_name) | ||
121 | host = self.client.new("hosts", name=host_name, | ||
122 | type="Linux").save() | ||
123 | LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.", | ||
124 | {'iqn': connector['initiator'], 'host': host_name}) | ||
125 | iqn = self.client.new("host_iqns", iqn=connector['initiator'], | ||
126 | host=host) | ||
127 | iqn.save() | ||
128 | except Exception as ex: | ||
129 | self._delete_host_by_name(host_name) | ||
130 | LOG.exception(_LE("Unable to create host: %s in K2."), | ||
131 | host_name) | ||
132 | raise exception.KaminarioCinderDriverException( | ||
133 | reason=six.text_type(ex.message)) | ||
134 | else: | ||
135 | LOG.debug("Use existing initiator hostname: %s in K2.", host_name) | ||
136 | host = host_rs.hits[0] | ||
137 | return host, host_rs, host_name | ||
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp b/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp index 3b9022c..ceb35e0 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/controller_config.pp | |||
@@ -5,7 +5,7 @@ $plugin_settings = hiera('cinder_kaminario') | |||
5 | 5 | ||
6 | if $plugin_settings['scheduler_default_filters'] != '' | 6 | if $plugin_settings['scheduler_default_filters'] != '' |
7 | { | 7 | { |
8 | ini_subsetting {"scheduler_default_filters": | 8 | ini_subsetting {'scheduler_default_filters': |
9 | ensure => present, | 9 | ensure => present, |
10 | section => 'DEFAULT', | 10 | section => 'DEFAULT', |
11 | key_val_separator => '=', | 11 | key_val_separator => '=', |
@@ -18,18 +18,18 @@ $plugin_settings = hiera('cinder_kaminario') | |||
18 | if $plugin_settings['scheduler_default_weighers'] != '' | 18 | if $plugin_settings['scheduler_default_weighers'] != '' |
19 | { | 19 | { |
20 | cinder_config { | 20 | cinder_config { |
21 | "DEFAULT/scheduler_default_weighers" : value => $plugin_settings['scheduler_default_weighers']; | 21 | 'DEFAULT/scheduler_default_weighers' : value => $plugin_settings['scheduler_default_weighers']; |
22 | } | 22 | } |
23 | } | 23 | } |
24 | if $plugin_settings['rpc_response_timeout'] != '' | 24 | if $plugin_settings['rpc_response_timeout'] != '' |
25 | { | 25 | { |
26 | cinder_config { | 26 | cinder_config { |
27 | "DEFAULT/rpc_response_timeout" : value => $plugin_settings['rpc_response_timeout']; | 27 | 'DEFAULT/rpc_response_timeout' : value => $plugin_settings['rpc_response_timeout']; |
28 | } | 28 | } |
29 | } | 29 | } |
30 | 30 | ||
31 | cinder_config { | 31 | cinder_config { |
32 | "DEFAULT/default_volume_type" : value => $default_volume_type | 32 | 'DEFAULT/default_volume_type' : value => $default_volume_type |
33 | }~> Exec[cinder_api] | 33 | }~> Exec[cinder_api] |
34 | 34 | ||
35 | exec {'cinder_api': | 35 | exec {'cinder_api': |
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp b/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp index ef583e7..c4150e4 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/driver.pp | |||
@@ -1,39 +1,35 @@ | |||
1 | class kaminario::driver{ | 1 | class kaminario::driver{ |
2 | 2 | ||
3 | file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario': | 3 | $source_directory = '/tmp/openstack-cinder-driver/source/kaminario' |
4 | ensure => 'directory', | 4 | $target_directory = '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario' |
5 | owner => 'root', | 5 | vcsrepo { '/tmp/openstack-cinder-driver': |
6 | group => 'root', | 6 | ensure => present, |
7 | mode => '0755',} | 7 | provider => git, |
8 | source => 'https://github.com/Kaminario/openstack-cinder-driver.git', | ||
9 | user => 'root', | ||
10 | revision => 'Mitaka', | ||
11 | } | ||
12 | file {$target_directory: | ||
13 | ensure => 'directory', | ||
14 | recurse => true, | ||
15 | source => "file:///${source_directory}", | ||
16 | } | ||
8 | 17 | ||
9 | file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/__init__.py': | 18 | file {'/usr/lib/python2.7/dist-packages/cinder/tests/unit/volume/drivers/': |
10 | mode => '0644', | 19 | ensure => 'file', |
11 | owner => root, | 20 | recurse => true, |
12 | group => root, | 21 | source => 'file:///tmp/openstack-cinder-driver/test', |
13 | source => 'puppet:///modules/kaminario/__init__.py'} | 22 | } |
14 | 23 | ||
15 | file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_common.py': | 24 | file { '/tmp/exception.sh': |
16 | mode => '0644', | 25 | source => 'puppet:///modules/kaminario/exception.sh', |
17 | owner => root, | 26 | recurse => true, |
18 | group => root, | 27 | mode => '0744', |
19 | source => 'puppet:///modules/kaminario/kaminario_common.py'} | 28 | notify => Exec['modify_exception'], |
20 | 29 | } | |
21 | file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_fc.py': | 30 | exec { 'modify_exception': |
22 | mode => '0644', | 31 | command => '/tmp/exception.sh', |
23 | owner => root, | 32 | refreshonly => true, |
24 | group => root, | 33 | } |
25 | source => 'puppet:///modules/kaminario/kaminario_fc.py'} | ||
26 | |||
27 | file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario/kaminario_iscsi.py': | ||
28 | mode => '0644', | ||
29 | owner => root, | ||
30 | group => root, | ||
31 | source => 'puppet:///modules/kaminario/kaminario_iscsi.py'} | ||
32 | |||
33 | file { '/usr/lib/python2.7/dist-packages/cinder/exception.py': | ||
34 | mode => '0644', | ||
35 | owner => root, | ||
36 | group => root, | ||
37 | source => 'puppet:///modules/kaminario/exception.py'} | ||
38 | 34 | ||
39 | } | 35 | } |
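The rewritten driver.pp no longer ships the driver sources with the plugin; it clones the Kaminario openstack-cinder-driver repository at the Mitaka revision and copies the checkout into the Cinder driver tree. The clone-then-copy pattern, with the ordering between the two resources written out explicitly, looks roughly like this sketch:

    # Sketch of the clone-then-copy pattern above, with an explicit
    # ordering arrow so the copy only runs once the checkout exists.
    vcsrepo { '/tmp/openstack-cinder-driver':
      ensure   => present,
      provider => git,
      source   => 'https://github.com/Kaminario/openstack-cinder-driver.git',
      revision => 'Mitaka',
    }
    -> file { '/usr/lib/python2.7/dist-packages/cinder/volume/drivers/kaminario':
      ensure  => directory,
      recurse => true,
      source  => 'file:///tmp/openstack-cinder-driver/source/kaminario',
    }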
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/init.pp b/deployment_scripts/puppet/modules/kaminario/manifests/init.pp index 23ac189..f7960c1 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/init.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/init.pp | |||
@@ -30,9 +30,9 @@ $plugin_settings = hiera('cinder_kaminario') | |||
30 | num => $value | 30 | num => $value |
31 | } | 31 | } |
32 | $minus1 = inline_template('<%= @value.to_i - 1 %>') | 32 | $minus1 = inline_template('<%= @value.to_i - 1 %>') |
33 | if "${minus1}" < '0' { | 33 | if $minus1 < '0' { |
34 | 34 | ||
35 | } else { | 35 | } else { |
36 | recursion { "value-${minus1}": | 36 | recursion { "value-${minus1}": |
37 | value => $minus1, | 37 | value => $minus1, |
38 | } | 38 | } |
@@ -44,7 +44,7 @@ $plugin_settings = hiera('cinder_kaminario') | |||
44 | define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) { | 44 | define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storage_password,$storage_ip,$num,$cinder_node,$enable_replication,$replication_ip,$replication_login,$replication_rpo,$replication_password,$enable_multipath,$suppress_logs,$filter_function,$oversubscription_ratio,$goodness_function) { |
45 | 45 | ||
46 | $sec_name = section_name( $storage_ip , $backend_name ) | 46 | $sec_name = section_name( $storage_ip , $backend_name ) |
47 | $config_file = "/etc/cinder/cinder.conf" | 47 | $config_file = '/etc/cinder/cinder.conf' |
48 | if $cinder_node == hiera(user_node_name) { | 48 | if $cinder_node == hiera(user_node_name) { |
49 | if $add_backend == true { | 49 | if $add_backend == true { |
50 | 50 | ||
@@ -56,58 +56,58 @@ define config($add_backend,$storage_protocol,$backend_name,$storage_user,$storag | |||
56 | setting => 'enabled_backends', | 56 | setting => 'enabled_backends', |
57 | subsetting => $sec_name, | 57 | subsetting => $sec_name, |
58 | subsetting_separator => ',', | 58 | subsetting_separator => ',', |
59 | }-> | 59 | }-> |
60 | cinder_config { | 60 | cinder_config { |
61 | "$sec_name/volume_backend_name" : value => $backend_name; | 61 | "${sec_name}/volume_backend_name" : value => $backend_name; |
62 | "$sec_name/san_ip" : value => $storage_ip; | 62 | "${sec_name}/san_ip" : value => $storage_ip; |
63 | "$sec_name/san_login" : value => $storage_user; | 63 | "${sec_name}/san_login" : value => $storage_user; |
64 | "$sec_name/san_password" : value => $storage_password; | 64 | "${sec_name}/san_password" : value => $storage_password; |
65 | } | 65 | } |
66 | 66 | ||
67 | if $storage_protocol == 'FC'{ | 67 | if $storage_protocol == 'FC'{ |
68 | cinder_config { | 68 | cinder_config { |
69 | "$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver"; | 69 | "${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver'; |
70 | } | 70 | } |
71 | } | 71 | } |
72 | elsif $storage_protocol == 'ISCSI'{ | 72 | elsif $storage_protocol == 'ISCSI'{ |
73 | cinder_config { | 73 | cinder_config { |
74 | "$sec_name/volume_driver" : value => "cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver"; | 74 | "${sec_name}/volume_driver" : value => 'cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver'; |
75 | } | 75 | } |
76 | } | 76 | } |
77 | if $enable_replication == true { | 77 | if $enable_replication == true { |
78 | $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo) | 78 | $replication_device = get_replication_device($replication_ip, $replication_login , $replication_password , $replication_rpo) |
79 | cinder_config { | 79 | cinder_config { |
80 | "$sec_name/replication_device" : value => $replication_device; | 80 | "${sec_name}/replication_device" : value => $replication_device; |
81 | } | 81 | } |
82 | } | 82 | } |
83 | 83 | ||
84 | if $enable_multipath == true { | 84 | if $enable_multipath == true { |
85 | cinder_config { | 85 | cinder_config { |
86 | "$sec_name/use_multipath_for_image_xfer" : value => "True"; | 86 | "${sec_name}/use_multipath_for_image_xfer" : value => 'True'; |
87 | "$sec_name/enforce_multipath_for_image_xfer" : value => "True"; | 87 | "${sec_name}/enforce_multipath_for_image_xfer" : value => 'True'; |
88 | } | 88 | } |
89 | } | 89 | } |
90 | if $suppress_logs == true { | 90 | if $suppress_logs == true { |
91 | cinder_config { | 91 | cinder_config { |
92 | "$sec_name/suppress_requests_ssl_warnings" : value => "True"; | 92 | "${sec_name}/suppress_requests_ssl_warnings" : value => 'True'; |
93 | } | 93 | } |
94 | } | 94 | } |
95 | 95 | ||
96 | if $filter_function != '' { | 96 | if $filter_function != '' { |
97 | cinder_config { | 97 | cinder_config { |
98 | "$sec_name/filter_function" : value => $filter_function; | 98 | "${sec_name}/filter_function" : value => $filter_function; |
99 | } | 99 | } |
100 | } | 100 | } |
101 | 101 | ||
102 | if $goodness_function != '' { | 102 | if $goodness_function != '' { |
103 | cinder_config { | 103 | cinder_config { |
104 | "$sec_name/goodness_function" : value => $goodness_function; | 104 | "${sec_name}/goodness_function" : value => $goodness_function; |
105 | } | 105 | } |
106 | } | 106 | } |
107 | 107 | ||
108 | if $oversubscription_ratio == true { | 108 | if $oversubscription_ratio == true { |
109 | cinder_config { | 109 | cinder_config { |
110 | "$sec_name/auto_calc_max_oversubscription_ratio" : value => "True"; | 110 | "${sec_name}/auto_calc_max_oversubscription_ratio" : value => 'True'; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | } | 113 | } |
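Each backend handled by the config define above becomes its own section in /etc/cinder/cinder.conf, named from the storage IP and backend name, and is appended to enabled_backends. A hypothetical invocation of the define with placeholder values (none of the values below come from the plugin settings):

    # Hypothetical example only; all values are placeholders.
    config { 'backend-0':
      add_backend            => true,
      storage_protocol       => 'ISCSI',
      backend_name           => 'kaminario-1',
      storage_user           => 'admin',
      storage_password       => 'secret',
      storage_ip             => '192.0.2.10',
      num                    => '0',
      cinder_node            => 'node-1',
      enable_replication     => false,
      replication_ip         => '',
      replication_login      => '',
      replication_rpo        => '',
      replication_password   => '',
      enable_multipath       => true,
      suppress_logs          => false,
      filter_function        => '',
      oversubscription_ratio => true,
      goodness_function      => '',
    }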
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp b/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp index 6fcb046..cf4b18f 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/krest.pp | |||
@@ -5,4 +5,6 @@ package { 'krest': | |||
5 | ensure => installed, | 5 | ensure => installed, |
6 | provider => pip, | 6 | provider => pip, |
7 | require => Package['python-pip'],} | 7 | require => Package['python-pip'],} |
8 | package { 'git': | ||
9 | ensure => installed,} | ||
8 | } | 10 | } |
diff --git a/deployment_scripts/puppet/modules/kaminario/manifests/type.pp b/deployment_scripts/puppet/modules/kaminario/manifests/type.pp index 43357f9..9c6c782 100644 --- a/deployment_scripts/puppet/modules/kaminario/manifests/type.pp +++ b/deployment_scripts/puppet/modules/kaminario/manifests/type.pp | |||
@@ -15,42 +15,42 @@ define recursion( | |||
15 | type_name => $plugin_settings["type_name_${value}"] | 15 | type_name => $plugin_settings["type_name_${value}"] |
16 | } | 16 | } |
17 | $minus1 = inline_template('<%= @value.to_i - 1 %>') | 17 | $minus1 = inline_template('<%= @value.to_i - 1 %>') |
18 | if "${minus1}" < '0' { | 18 | if $minus1 < '0' { |
19 | 19 | ||
20 | } else { | 20 | } else { |
21 | recursion { "value-${minus1}": | 21 | recursion { "value-${minus1}": |
22 | value => $minus1, | 22 | value => $minus1, |
23 | } | 23 | } |
24 | } | 24 | } |
25 | } | 25 | } |
26 | } | 26 | } |
27 | 27 | ||
28 | define kaminario_type ($create_type,$options,$backend_name,$type_name) { | 28 | define kaminario_type ($create_type,$options,$backend_name,$type_name) { |
29 | if $create_type == true { | 29 | if $create_type == true { |
30 | case $options { | 30 | case $options { |
31 | "enable_replication_type": { | 31 | 'enable_replication_type': { |
32 | cinder_type {$type_name: | 32 | cinder_type {$type_name: |
33 | ensure => present, | 33 | ensure => present, |
34 | properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'], | 34 | properties => ["volume_backend_name=${backend_name}",'kaminario:replication=enabled'], |
35 | } | 35 | } |
36 | } | 36 | } |
37 | "enable_dedup": { | 37 | 'enable_dedup': { |
38 | cinder_type {$type_name: | 38 | cinder_type {$type_name: |
39 | ensure => present, | 39 | ensure => present, |
40 | properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'], | 40 | properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup'], |
41 | } | 41 | } |
42 | } | 42 | } |
43 | "replication_dedup": { | 43 | 'replication_dedup': { |
44 | cinder_type {$type_name: | 44 | cinder_type {$type_name: |
45 | ensure => present, | 45 | ensure => present, |
46 | properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'], | 46 | properties => ["volume_backend_name=${backend_name}",'kaminario:thin_prov_type=nodedup','kaminario:replication=enabled'], |
47 | } | 47 | } |
48 | } | 48 | } |
49 | "default": { | 49 | 'default': { |
50 | cinder_type {$type_name: | 50 | cinder_type {$type_name: |
51 | ensure => present, | 51 | ensure => present, |
52 | properties => ["volume_backend_name=${backend_name}"], | 52 | properties => ["volume_backend_name=${backend_name}"], |
53 | } | 53 | } |
54 | } | 54 | } |
55 | 55 | ||
56 | } | 56 | } |
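The kaminario_type define above creates a Cinder volume type per backend and tags it with Kaminario extra specs according to the selected option. A short hypothetical invocation (the title and all parameter values are examples only):

    # Hypothetical example: a replication-enabled volume type
    # bound to a backend named 'kaminario-1'.
    kaminario_type { 'type-0':
      create_type  => true,
      options      => 'enable_replication_type',
      backend_name => 'kaminario-1',
      type_name    => 'k2-replicated',
    }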
diff --git a/deployment_scripts/puppet/modules/multipath/manifests/init.pp b/deployment_scripts/puppet/modules/multipath/manifests/init.pp index ac0b41f..bda78a1 100644 --- a/deployment_scripts/puppet/modules/multipath/manifests/init.pp +++ b/deployment_scripts/puppet/modules/multipath/manifests/init.pp | |||
@@ -1,12 +1,21 @@ | |||
1 | class multipath { | 1 | class multipath { |
2 | |||
3 | include ::nova::params | ||
4 | |||
2 | $multipath_packages = [ 'sg3-utils', 'multipath-tools' ] | 5 | $multipath_packages = [ 'sg3-utils', 'multipath-tools' ] |
3 | package { $multipath_packages: ensure => 'installed' } | 6 | package { $multipath_packages: ensure => 'installed' } |
4 | 7 | ||
5 | nova_config { | 8 | nova_config { |
6 | 'libvirt/iscsi_use_multipath' : value => True, | 9 | 'libvirt/iscsi_use_multipath' : value => True, |
7 | }~> Exec[cinder_volume] | 10 | } |
8 | 11 | ||
9 | exec {'cinder_volume': | 12 | service { 'nova_compute': |
10 | command => '/usr/sbin/service nova-compute restart',} | 13 | ensure => running, |
14 | name => $::nova::params::compute_service_name, | ||
15 | enable => true, | ||
16 | hasstatus => true, | ||
17 | hasrestart => true, | ||
18 | } | ||
11 | 19 | ||
20 | Nova_config<||> ~> Service['nova-compute'] | ||
12 | } | 21 | } |
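The multipath class now manages the nova-compute service directly and refreshes it whenever any nova_config resource changes, via the resource collector on the last added line, instead of restarting the service unconditionally through an exec. A minimal sketch of that refresh pattern, assuming puppet-nova's nova::params class is available:

    # Sketch of the collector-based refresh used above; the service
    # name is resolved from nova::params rather than hard-coded.
    include ::nova::params

    service { 'nova-compute':
      ensure => running,
      name   => $::nova::params::compute_service_name,
      enable => true,
    }

    nova_config { 'libvirt/iscsi_use_multipath':
      value => true,
    }

    Nova_config<| |> ~> Service['nova-compute']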
diff --git a/deployment_tasks.yaml b/deployment_tasks.yaml index 803ea95..49eafc7 100644 --- a/deployment_tasks.yaml +++ b/deployment_tasks.yaml | |||
@@ -41,7 +41,7 @@ | |||
41 | type: puppet | 41 | type: puppet |
42 | version: 2.1.0 | 42 | version: 2.1.0 |
43 | groups: [compute] | 43 | groups: [compute] |
44 | requires: [top-role-compute] | 44 | requires: [top-role-compute,enable_nova_compute_service] |
45 | required_for: [deploy_end] | 45 | required_for: [deploy_end] |
46 | parameters: | 46 | parameters: |
47 | puppet_manifest: puppet/manifests/cinder_multipath.pp | 47 | puppet_manifest: puppet/manifests/cinder_multipath.pp |