Adding boot from volume to osc

Adds boot from volume support to the python-saharaclient OSC CLI: node group template create/update commands gain boot-from-volume options when the data processing API version is set to 2.

Change-Id: I302aaeb7a0f831730301162f8c51e0eebcbd66f8
Story: #2001820
Task: #12559
Telles Nobrega 2018-07-18 14:22:50 -03:00
parent db3ade4fc7
commit 851b2621b6
25 changed files with 1125 additions and 323 deletions

View File

@@ -34,7 +34,7 @@ netifaces==0.10.4
openstacksdk==0.11.2
os-client-config==1.28.0
os-service-types==1.2.0
osc-lib==1.8.0
osc-lib==1.11.0
oslo.config==5.2.0
oslo.context==2.19.2
oslo.i18n==3.15.3

View File

@@ -6,7 +6,7 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
Babel!=2.4.0,>=2.3.4 # BSD
keystoneauth1>=3.4.0 # Apache-2.0
osc-lib>=1.8.0 # Apache-2.0
osc-lib>=1.11.0 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0

View File

@@ -24,7 +24,8 @@ DEFAULT_DATA_PROCESSING_API_VERSION = "1.1"
API_VERSION_OPTION = "os_data_processing_api_version"
API_NAME = "data_processing"
API_VERSIONS = {
"1.1": "saharaclient.api.client.Client"
"1.1": "saharaclient.api.client.Client",
"2": "saharaclient.api.client.ClientV2"
}
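
For reference, the new "2" entry maps the OSC data processing API version to the v2 client added by this change, while "1.1" keeps the existing client. A minimal sketch of building the v2 client directly, assuming a keystoneauth1 session and a placeholder Sahara endpoint (the values are illustrative only; the new unit tests further down construct ClientV2 the same way):

    from keystoneauth1.session import Session

    from saharaclient.api import client

    # Illustrative placeholders: a real session carries an auth plugin and the
    # endpoint is normally discovered from the service catalog.
    session = Session()
    sahara_url = 'http://sahara.example.com:8386/v2'

    # "2" in API_VERSIONS maps to the new v2 client; "1.1" maps to client.Client.
    sahara = client.ClientV2(session=session, sahara_url=sahara_url)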

saharaclient/osc/utils.py Normal file (373 additions)
View File

@@ -0,0 +1,373 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
from oslo_utils import uuidutils
from saharaclient.api import base
def get_resource(manager, name_or_id, **kwargs):
if uuidutils.is_uuid_like(name_or_id):
return manager.get(name_or_id, **kwargs)
else:
resource = manager.find_unique(name=name_or_id)
if kwargs:
# we really need additional call to apply kwargs
resource = manager.get(resource.id, **kwargs)
return resource
def created_at_sorted(objs, reverse=False):
return sorted(objs, key=created_at_key, reverse=reverse)
def random_name(prefix=None):
return "%s-%s" % (prefix, uuidutils.generate_uuid()[:8])
def created_at_key(obj):
return timeutils.parse_isotime(obj["created_at"])
def get_resource_id(manager, name_or_id):
if uuidutils.is_uuid_like(name_or_id):
return name_or_id
else:
return manager.find_unique(name=name_or_id).id
def create_dict_from_kwargs(**kwargs):
return {k: v for (k, v) in kwargs.items() if v is not None}
def prepare_data(data, fields):
new_data = {}
for f in fields:
if f in data:
new_data[f.replace('_', ' ').capitalize()] = data[f]
return new_data
def unzip(data):
return zip(*data)
def extend_columns(columns, items):
return unzip(list(unzip(columns)) + [('', '')] + items)
def prepare_column_headers(columns, remap=None):
remap = remap if remap else {}
new_columns = []
for c in columns:
for old, new in remap.items():
c = c.replace(old, new)
new_columns.append(c.replace('_', ' ').capitalize())
return new_columns
def get_by_name_substring(data, name):
return [obj for obj in data if name in obj.name]
def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
try:
manager.get(obj_id)
except base.APIException as ex:
if ex.error_code == 404:
return True
raise
time.sleep(sleep_time)
return False
def create_node_group_templates(client, app, parsed_args, flavor_id, configs,
shares):
if app.api_version['data_processing'] == '2':
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
plugin_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix,
boot_from_volume=parsed_args.boot_from_volume).to_dict()
else:
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix).to_dict()
return data
class NodeGroupTemplatesUtils(object):
def _create_take_action(self, client, app, parsed_args):
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.create(**template).to_dict()
else:
if (not parsed_args.name or not parsed_args.plugin or
not parsed_args.plugin_version or not parsed_args.flavor or
not parsed_args.processes):
raise exceptions.CommandError(
'At least --name, --plugin, --plugin-version, --processes,'
' --flavor arguments should be specified or json template '
'should be provided with --json argument')
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
compute_client = app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
data = create_node_group_templates(client, app, parsed_args,
flavor_id, configs, shares)
return data
def _list_take_action(self, client, app, parsed_args):
search_opts = {}
if parsed_args.plugin:
search_opts['plugin_name'] = parsed_args.plugin
if parsed_args.plugin_version:
search_opts['hadoop_version'] = parsed_args.plugin_version
data = client.node_group_templates.list(search_opts=search_opts)
if parsed_args.name:
data = get_by_name_substring(data, parsed_args.name)
if app.api_version['data_processing'] == '2':
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'plugin_version',
'node_processes', 'description')
column_headers = prepare_column_headers(columns)
else:
columns = ('name', 'id', 'plugin_name', 'plugin_version')
column_headers = prepare_column_headers(columns)
else:
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'hadoop_version',
'node_processes', 'description')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
else:
columns = ('name', 'id', 'plugin_name', 'hadoop_version')
column_headers = prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
return (
column_headers,
(osc_utils.get_item_properties(
s,
columns,
formatters={
'node_processes': osc_utils.format_list
}
) for s in data)
)
def _update_take_action(self, client, app, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.update(
ngt_id, **template).to_dict()
else:
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
flavor_id = None
if parsed_args.flavor:
compute_client = self.app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
update_dict = create_dict_from_kwargs(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.use_auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.is_proxy_gateway,
volume_local_to_instance=parsed_args.volume_locality,
use_autoconfig=parsed_args.use_autoconfig,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix
)
if app.api_version['data_processing'] == '2':
if 'hadoop_version' in update_dict:
update_dict.pop('hadoop_version')
update_dict['plugin_version'] = parsed_args.plugin_version
if parsed_args.boot_from_volume is not None:
update_dict['boot_from_volume'] = (
parsed_args.boot_from_volume)
data = client.node_group_templates.update(
ngt_id, **update_dict).to_dict()
return data
def _import_take_action(self, client, parsed_args):
if (not parsed_args.image_id or
not parsed_args.flavor_id):
raise exceptions.CommandError(
'At least --image_id and --flavor_id should be specified')
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
template['node_group_template']['floating_ip_pool'] = (
parsed_args.floating_ip_pool)
template['node_group_template']['image_id'] = (
parsed_args.image_id)
template['node_group_template']['flavor_id'] = (
parsed_args.flavor_id)
template['node_group_template']['security_groups'] = (
parsed_args.security_groups)
if parsed_args.name:
template['node_group_template']['name'] = parsed_args.name
data = client.node_group_templates.create(
**template['node_group_template']).to_dict()
return data
def _export_take_action(self, client, parsed_args):
ngt_id = get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
response = client.node_group_templates.export(ngt_id)
result = json.dumps(response._info, indent=4)+"\n"
if parsed_args.file:
with open(parsed_args.file, "w+") as file:
file.write(result)
else:
sys.stdout.write(result)
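
To make the v2 branch of create_node_group_templates above concrete, here is a hedged sketch of the equivalent direct API call. The plugin, flavor, and process values mirror the new unit tests later in this change and are placeholders; the sketch assumes a reachable Sahara v2 endpoint and keeps every argument not shown at its default:

    from keystoneauth1.session import Session

    from saharaclient.api import client

    # Placeholder session/endpoint, as in the earlier ClientV2 sketch.
    sahara = client.ClientV2(session=Session(),
                             sahara_url='http://sahara.example.com:8386/v2')

    ngt = sahara.node_group_templates.create(
        name='template',
        plugin_name='fake',                  # placeholder plugin/flavor values
        plugin_version='0.1',                # v2 uses plugin_version, not hadoop_version
        flavor_id='flavor_id',
        node_processes=['namenode', 'tasktracker'],
        boot_from_volume=True,               # the field this change introduces
    )
    print(ngt.to_dict())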

View File

@@ -21,7 +21,7 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
CT_FIELDS = ['id', 'name', 'plugin_name', 'plugin_version', 'description',
'node_groups', 'anti_affinity', 'use_autoconfig', 'is_default',

View File

@@ -21,7 +21,7 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
CLUSTER_FIELDS = ["cluster_template_id", "use_autoconfig", "user_keypair_id",
"status", "image", "node_groups", "id", "info",

View File

@@ -19,7 +19,7 @@ from osc_lib.command import command
from osc_lib import utils as osc_utils
from oslo_log import log as logging
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
DATA_SOURCE_FIELDS = ['name', 'id', 'type', 'url', 'description', 'is_public',
'is_protected']

View File

@@ -19,7 +19,7 @@ from osc_lib.command import command
from osc_lib import utils as osc_utils
from oslo_log import log as logging
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
IMAGE_FIELDS = ['name', 'id', 'username', 'tags', 'status', 'description']

View File

@@ -23,7 +23,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.api import base
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
JOB_BINARY_FIELDS = ['name', 'id', 'url', 'description', 'is_public',
'is_protected']

View File

@@ -21,7 +21,7 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
JOB_TEMPLATE_FIELDS = ['name', 'id', 'type', 'mains', 'libs', 'description',
'is_public', 'is_protected']

View File

@@ -22,8 +22,8 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc import utils
from saharaclient.osc.v1.job_templates import JOB_TYPES_CHOICES
from saharaclient.osc.v1 import utils
class ListJobTypes(command.Lister):

View File

@@ -21,7 +21,7 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
JOB_FIELDS = ['id', 'job_template_id', 'cluster_id', 'input_id', 'output_id',
'start_time', 'end_time', 'status', 'is_public', 'is_protected',

View File

@@ -16,12 +16,10 @@
import sys
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
NGT_FIELDS = ['id', 'name', 'plugin_name', 'plugin_version', 'node_processes',
'description', 'auto_security_group', 'security_groups',
@@ -43,7 +41,7 @@ def _format_ngt_output(data):
del data['volumes_size']
class CreateNodeGroupTemplate(command.ShowOne):
class CreateNodeGroupTemplate(command.ShowOne, utils.NodeGroupTemplatesUtils):
"""Creates node group template"""
log = logging.getLogger(__name__ + ".CreateNodeGroupTemplate")
@@ -202,73 +200,7 @@ class CreateNodeGroupTemplate(command.ShowOne):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.create(**template).to_dict()
else:
if (not parsed_args.name or not parsed_args.plugin or
not parsed_args.plugin_version or not parsed_args.flavor or
not parsed_args.processes):
raise exceptions.CommandError(
'At least --name, --plugin, --plugin-version, --processes,'
' --flavor arguments should be specified or json template '
'should be provided with --json argument')
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
compute_client = self.app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
data = client.node_group_templates.create(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.proxy_gateway,
volume_local_to_instance=parsed_args.volumes_locality,
use_autoconfig=parsed_args.autoconfig,
is_public=parsed_args.public,
is_protected=parsed_args.protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix
).to_dict()
data = self._create_take_action(client, self.app, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
@@ -276,7 +208,7 @@ class CreateNodeGroupTemplate(command.ShowOne):
return self.dict2columns(data)
class ListNodeGroupTemplates(command.Lister):
class ListNodeGroupTemplates(command.Lister, utils.NodeGroupTemplatesUtils):
"""Lists node group templates"""
log = logging.getLogger(__name__ + ".ListNodeGroupTemplates")
@@ -314,41 +246,10 @@ class ListNodeGroupTemplates(command.Lister):
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
search_opts = {}
if parsed_args.plugin:
search_opts['plugin_name'] = parsed_args.plugin
if parsed_args.plugin_version:
search_opts['hadoop_version'] = parsed_args.plugin_version
data = client.node_group_templates.list(search_opts=search_opts)
if parsed_args.name:
data = utils.get_by_name_substring(data, parsed_args.name)
if parsed_args.long:
columns = ('name', 'id', 'plugin_name', 'hadoop_version',
'node_processes', 'description')
column_headers = utils.prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
else:
columns = ('name', 'id', 'plugin_name', 'hadoop_version')
column_headers = utils.prepare_column_headers(
columns, {'hadoop_version': 'plugin_version'})
return (
column_headers,
(osc_utils.get_item_properties(
s,
columns,
formatters={
'node_processes': osc_utils.format_list
}
) for s in data)
)
return self._list_take_action(client, self.app, parsed_args)
class ShowNodeGroupTemplate(command.ShowOne):
class ShowNodeGroupTemplate(command.ShowOne, utils.NodeGroupTemplatesUtils):
"""Display node group template details"""
log = logging.getLogger(__name__ + ".ShowNodeGroupTemplate")
@@ -378,7 +279,7 @@ class ShowNodeGroupTemplate(command.ShowOne):
return self.dict2columns(data)
class DeleteNodeGroupTemplate(command.Command):
class DeleteNodeGroupTemplate(command.Command, utils.NodeGroupTemplatesUtils):
"""Deletes node group template"""
log = logging.getLogger(__name__ + ".DeleteNodeGroupTemplate")
@@ -406,7 +307,7 @@ class DeleteNodeGroupTemplate(command.Command):
'successfully.\n'.format(ngt=ngt))
class UpdateNodeGroupTemplate(command.ShowOne):
class UpdateNodeGroupTemplate(command.ShowOne, utils.NodeGroupTemplatesUtils):
"""Updates node group template"""
log = logging.getLogger(__name__ + ".UpdateNodeGroupTemplate")
@@ -620,74 +521,7 @@ class UpdateNodeGroupTemplate(command.ShowOne):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
ngt_id = utils.get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
if parsed_args.json:
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
data = client.node_group_templates.update(
ngt_id, **template).to_dict()
else:
configs = None
if parsed_args.configs:
blob = osc_utils.read_blob_file_contents(parsed_args.configs)
try:
configs = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'configs from file %s: %s' % (parsed_args.configs, e))
shares = None
if parsed_args.shares:
blob = osc_utils.read_blob_file_contents(parsed_args.shares)
try:
shares = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'shares from file %s: %s' % (parsed_args.shares, e))
flavor_id = None
if parsed_args.flavor:
compute_client = self.app.client_manager.compute
flavor_id = osc_utils.find_resource(
compute_client.flavors, parsed_args.flavor).id
update_dict = utils.create_dict_from_kwargs(
name=parsed_args.name,
plugin_name=parsed_args.plugin,
hadoop_version=parsed_args.plugin_version,
flavor_id=flavor_id,
description=parsed_args.description,
volumes_per_node=parsed_args.volumes_per_node,
volumes_size=parsed_args.volumes_size,
node_processes=parsed_args.processes,
floating_ip_pool=parsed_args.floating_ip_pool,
security_groups=parsed_args.security_groups,
auto_security_group=parsed_args.use_auto_security_group,
availability_zone=parsed_args.availability_zone,
volume_type=parsed_args.volumes_type,
is_proxy_gateway=parsed_args.is_proxy_gateway,
volume_local_to_instance=parsed_args.volume_locality,
use_autoconfig=parsed_args.use_autoconfig,
is_public=parsed_args.is_public,
is_protected=parsed_args.is_protected,
node_configs=configs,
shares=shares,
volumes_availability_zone=(
parsed_args.volumes_availability_zone),
volume_mount_prefix=parsed_args.volumes_mount_prefix
)
data = client.node_group_templates.update(
ngt_id, **update_dict).to_dict()
data = self._update_take_action(client, self.app, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
@@ -695,7 +529,7 @@ class UpdateNodeGroupTemplate(command.ShowOne):
return self.dict2columns(data)
class ImportNodeGroupTemplate(command.ShowOne):
class ImportNodeGroupTemplate(command.ShowOne, utils.NodeGroupTemplatesUtils):
"""Imports node group template"""
log = logging.getLogger(__name__ + ".ImportNodeGroupTemplate")
@@ -740,29 +574,8 @@ class ImportNodeGroupTemplate(command.ShowOne):
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
if (not parsed_args.image_id or
not parsed_args.flavor_id):
raise exceptions.CommandError(
'At least --image_id and --flavor_id should be specified')
blob = osc_utils.read_blob_file_contents(parsed_args.json)
try:
template = json.loads(blob)
except ValueError as e:
raise exceptions.CommandError(
'An error occurred when reading '
'template from file %s: %s' % (parsed_args.json, e))
template['node_group_template']['floating_ip_pool'] = (
parsed_args.floating_ip_pool)
template['node_group_template']['image_id'] = (
parsed_args.image_id)
template['node_group_template']['flavor_id'] = (
parsed_args.flavor_id)
template['node_group_template']['security_groups'] = (
parsed_args.security_groups)
if parsed_args.name:
template['node_group_template']['name'] = parsed_args.name
data = client.node_group_templates.create(
**template['node_group_template']).to_dict()
data = self._import_take_action(client, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
@@ -770,7 +583,7 @@ class ImportNodeGroupTemplate(command.ShowOne):
return self.dict2columns(data)
class ExportNodeGroupTemplate(command.Command):
class ExportNodeGroupTemplate(command.Command, utils.NodeGroupTemplatesUtils):
"""Export node group template to JSON"""
log = logging.getLogger(__name__ + ".ExportNodeGroupTemplate")
@@ -794,12 +607,4 @@ class ExportNodeGroupTemplate(command.Command):
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
ngt_id = utils.get_resource_id(
client.node_group_templates, parsed_args.node_group_template)
response = client.node_group_templates.export(ngt_id)
result = json.dumps(response._info, indent=4)+"\n"
if parsed_args.file:
with open(parsed_args.file, "w+") as file:
file.write(result)
else:
sys.stdout.write(result)
self._export_take_action(client, parsed_args)

View File

@@ -22,7 +22,7 @@ from osc_lib import utils as osc_utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
def _serialize_label_items(plugin):

View File

@@ -1,101 +0,0 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from oslo_utils import timeutils
from oslo_utils import uuidutils
from saharaclient.api import base
def get_resource(manager, name_or_id, **kwargs):
if uuidutils.is_uuid_like(name_or_id):
return manager.get(name_or_id, **kwargs)
else:
resource = manager.find_unique(name=name_or_id)
if kwargs:
# we really need additional call to apply kwargs
resource = manager.get(resource.id, **kwargs)
return resource
def created_at_sorted(objs, reverse=False):
return sorted(objs, key=created_at_key, reverse=reverse)
def random_name(prefix=None):
return "%s-%s" % (prefix, uuidutils.generate_uuid()[:8])
def created_at_key(obj):
return timeutils.parse_isotime(obj["created_at"])
def get_resource_id(manager, name_or_id):
if uuidutils.is_uuid_like(name_or_id):
return name_or_id
else:
return manager.find_unique(name=name_or_id).id
def create_dict_from_kwargs(**kwargs):
return {k: v for (k, v) in kwargs.items() if v is not None}
def prepare_data(data, fields):
new_data = {}
for f in fields:
if f in data:
new_data[f.replace('_', ' ').capitalize()] = data[f]
return new_data
def unzip(data):
return zip(*data)
def extend_columns(columns, items):
return unzip(list(unzip(columns)) + [('', '')] + items)
def prepare_column_headers(columns, remap=None):
remap = remap if remap else {}
new_columns = []
for c in columns:
for old, new in remap.items():
c = c.replace(old, new)
new_columns.append(c.replace('_', ' ').capitalize())
return new_columns
def get_by_name_substring(data, name):
return [obj for obj in data if name in obj.name]
def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
try:
manager.get(obj_id)
except base.APIException as ex:
if ex.error_code == 404:
return True
raise
time.sleep(sleep_time)
return False

View File

View File

@@ -0,0 +1,176 @@
# Copyright (c) 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from osc_lib import utils as osc_utils
from saharaclient.osc import utils
from saharaclient.osc.v1 import node_group_templates as ngt_v1
NGT_FIELDS = ['id', 'name', 'plugin_name', 'plugin_version', 'node_processes',
'description', 'auto_security_group', 'security_groups',
'availability_zone', 'flavor_id', 'floating_ip_pool',
'volumes_per_node', 'volumes_size',
'volume_type', 'volume_local_to_instance', 'volume_mount_prefix',
'volumes_availability_zone', 'use_autoconfig',
'is_proxy_gateway', 'is_default', 'is_protected', 'is_public',
'boot_from_volume']
def _format_ngt_output(data):
data['node_processes'] = osc_utils.format_list(data['node_processes'])
if data['volumes_per_node'] == 0:
del data['volume_local_to_instance']
del data['volume_mount_prefix']
del data['volume_type'],
del data['volumes_availability_zone']
del data['volumes_size']
class CreateNodeGroupTemplate(ngt_v1.CreateNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Creates node group template"""
def get_parser(self, prog_name):
parser = super(CreateNodeGroupTemplate, self).get_parser(prog_name)
parser.add_argument(
'--boot-from-volume',
action='store_true',
default=False,
help="Make the node group bootable from volume",
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = self._create_take_action(client, self.app, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
return self.dict2columns(data)
class ListNodeGroupTemplates(ngt_v1.ListNodeGroupTemplates,
utils.NodeGroupTemplatesUtils):
"""Lists node group templates"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
return self._list_take_action(client, self.app, parsed_args)
class ShowNodeGroupTemplate(ngt_v1.ShowNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Display node group template details"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = utils.get_resource(
client.node_group_templates,
parsed_args.node_group_template).to_dict()
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
return self.dict2columns(data)
class DeleteNodeGroupTemplate(ngt_v1.DeleteNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Deletes node group template"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
for ngt in parsed_args.node_group_template:
ngt_id = utils.get_resource_id(
client.node_group_templates, ngt)
client.node_group_templates.delete(ngt_id)
sys.stdout.write(
'Node group template "{ngt}" has been removed '
'successfully.\n'.format(ngt=ngt))
class UpdateNodeGroupTemplate(ngt_v1.UpdateNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Updates node group template"""
def get_parser(self, prog_name):
parser = super(UpdateNodeGroupTemplate, self).get_parser(prog_name)
bootfromvolume = parser.add_mutually_exclusive_group()
bootfromvolume.add_argument(
'--boot-from-volume-enable',
action='store_true',
help='Makes node group bootable from volume.',
dest='boot_from_volume'
)
bootfromvolume.add_argument(
'--boot-from-volume-disable',
action='store_false',
help='Makes node group not bootable from volume.',
dest='boot_from_volume'
)
parser.set_defaults(is_public=None, is_protected=None,
is_proxy_gateway=None, volume_locality=None,
use_auto_security_group=None, use_autoconfig=None,
boot_from_volume=None)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = self._update_take_action(client, self.app, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
return self.dict2columns(data)
class ImportNodeGroupTemplate(ngt_v1.ImportNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Imports node group template"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
data = self._import_take_action(client, parsed_args)
_format_ngt_output(data)
data = utils.prepare_data(data, NGT_FIELDS)
return self.dict2columns(data)
class ExportNodeGroupTemplate(ngt_v1.ExportNodeGroupTemplate,
utils.NodeGroupTemplatesUtils):
"""Export node group template to JSON"""
def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args)
client = self.app.client_manager.data_processing
self._export_take_action(client, parsed_args)
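
A note on the UpdateNodeGroupTemplate parser above: the enable/disable pair shares a single dest and the parser-level default is None, which is what lets _update_take_action distinguish "enable", "disable", and "leave unchanged". A standalone sketch of that argparse pattern (hypothetical script, not saharaclient code):

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--boot-from-volume-enable', dest='boot_from_volume',
                       action='store_true')
    group.add_argument('--boot-from-volume-disable', dest='boot_from_volume',
                       action='store_false')
    # The parser-level default wins over the actions' implicit defaults, giving
    # a tri-state value: None (flag not passed), True (enable), False (disable).
    parser.set_defaults(boot_from_volume=None)

    print(parser.parse_args([]).boot_from_volume)                             # None
    print(parser.parse_args(['--boot-from-volume-enable']).boot_from_volume)  # True
    print(parser.parse_args(['--boot-from-volume-disable']).boot_from_volume) # False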

View File

@@ -30,6 +30,8 @@ class BaseTestCase(testtools.TestCase):
super(BaseTestCase, self).setUp()
self.responses = self.useFixture(fixture.Fixture())
self.client = client.Client(session=self.SESSION, sahara_url=self.URL)
self.client_v2 = client.ClientV2(session=self.SESSION,
sahara_url=self.URL)
def assertFields(self, body, obj):
for key, value in body.items():

View File

@@ -36,3 +36,19 @@ class TestDataProcessingPlugin(base.BaseTestCase):
region_name='region_name',
sahara_url='url',
endpoint_type='public')
@mock.patch("saharaclient.api.client.ClientV2")
def test_make_client_v2(self, p_client):
instance = mock.Mock()
instance._api_version = {"data_processing": '2'}
instance.session = 'session'
instance._region_name = 'region_name'
instance._cli_options.data_processing_url = 'url'
instance._interface = 'public'
plugin.make_client(instance)
p_client.assert_called_with(session='session',
region_name='region_name',
sahara_url='url',
endpoint_type='public')

View File

@@ -59,6 +59,7 @@ class TestNodeGroupTemplates(fakes.TestDataProcessing):
self.ngt_mock = (
self.app.client_manager.data_processing.node_group_templates)
self.ngt_mock.reset_mock()
self.app.api_version['data_processing'] = '1'
class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):

View File

@@ -15,7 +15,7 @@
import mock
from saharaclient.osc.v1 import utils
from saharaclient.osc import utils
from saharaclient.tests.unit import base

View File

@@ -0,0 +1,412 @@
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from osc_lib.tests import utils as osc_utils
from saharaclient.api import node_group_templates as api_ngt
from saharaclient.osc.v2 import node_group_templates as osc_ngt
from saharaclient.tests.unit.osc.v1 import fakes
NGT_INFO = {
"node_processes": [
"namenode",
"tasktracker"
],
"name": "template",
"tenant_id": "tenant_id",
"availability_zone": 'av_zone',
"use_autoconfig": True,
"plugin_version": "0.1",
"shares": None,
"is_default": False,
"description": 'description',
"node_configs": {},
"is_proxy_gateway": False,
"auto_security_group": True,
"volume_type": None,
"volumes_size": 2,
"volume_mount_prefix": "/volumes/disk",
"plugin_name": "fake",
"is_protected": False,
"security_groups": None,
"floating_ip_pool": "floating_pool",
"is_public": True,
"id": "ng_id",
"flavor_id": "flavor_id",
"volumes_availability_zone": None,
"volumes_per_node": 2,
"volume_local_to_instance": False,
"boot_from_volume": False
}
class TestNodeGroupTemplates(fakes.TestDataProcessing):
def setUp(self):
super(TestNodeGroupTemplates, self).setUp()
self.ngt_mock = (
self.app.client_manager.data_processing.node_group_templates)
self.ngt_mock.reset_mock()
self.app.api_version['data_processing'] = '2'
class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):
# TODO(apavlov): check for creation with --json
def setUp(self):
super(TestCreateNodeGroupTemplate, self).setUp()
self.ngt_mock.create.return_value = api_ngt.NodeGroupTemplate(
None, NGT_INFO)
self.fl_mock = self.app.client_manager.compute.flavors
self.fl_mock.get.return_value = mock.Mock(id='flavor_id')
self.fl_mock.reset_mock()
# Command to test
self.cmd = osc_ngt.CreateNodeGroupTemplate(self.app, None)
def test_ngt_create_minimum_options(self):
arglist = ['--name', 'template', '--plugin', 'fake',
'--plugin-version', '0.1', '--processes', 'namenode',
'tasktracker', '--flavor', 'flavor_id']
verifylist = [('name', 'template'), ('plugin', 'fake'),
('plugin_version', '0.1'), ('flavor', 'flavor_id'),
('processes', ['namenode', 'tasktracker'])]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.create.assert_called_once_with(
auto_security_group=False, availability_zone=None,
description=None, flavor_id='flavor_id', floating_ip_pool=None,
plugin_version='0.1', is_protected=False, is_proxy_gateway=False,
is_public=False, name='template',
node_processes=['namenode', 'tasktracker'], plugin_name='fake',
security_groups=None, use_autoconfig=False,
volume_local_to_instance=False,
volume_type=None, volumes_availability_zone=None,
volumes_per_node=None, volumes_size=None, shares=None,
node_configs=None, volume_mount_prefix=None,
boot_from_volume=False)
def test_ngt_create_all_options(self):
arglist = ['--name', 'template', '--plugin', 'fake',
'--plugin-version', '0.1', '--processes', 'namenode',
'tasktracker', '--security-groups', 'secgr',
'--auto-security-group', '--availability-zone', 'av_zone',
'--flavor', 'flavor_id', '--floating-ip-pool',
'floating_pool', '--volumes-per-node',
'2', '--volumes-size', '2', '--volumes-type', 'type',
'--volumes-availability-zone', 'vavzone',
'--volumes-mount-prefix', '/volume/asd',
'--volumes-locality', '--description', 'descr',
'--autoconfig', '--proxy-gateway', '--public',
'--protected', '--boot-from-volume']
verifylist = [('name', 'template'), ('plugin', 'fake'),
('plugin_version', '0.1'),
('processes', ['namenode', 'tasktracker']),
('security_groups', ['secgr']),
('auto_security_group', True),
('availability_zone', 'av_zone'),
('flavor', 'flavor_id'),
('floating_ip_pool', 'floating_pool'),
('volumes_per_node', 2), ('volumes_size', 2),
('volumes_type', 'type'),
('volumes_availability_zone', 'vavzone'),
('volumes_mount_prefix', '/volume/asd'),
('volumes_locality', True), ('description', 'descr'),
('autoconfig', True), ('proxy_gateway', True),
('public', True), ('protected', True),
('boot_from_volume', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.create.assert_called_once_with(
auto_security_group=True, availability_zone='av_zone',
description='descr', flavor_id='flavor_id',
floating_ip_pool='floating_pool', plugin_version='0.1',
is_protected=True, is_proxy_gateway=True, is_public=True,
name='template', node_processes=['namenode', 'tasktracker'],
plugin_name='fake', security_groups=['secgr'], use_autoconfig=True,
volume_local_to_instance=True, volume_type='type',
volumes_availability_zone='vavzone', volumes_per_node=2,
volumes_size=2, shares=None, node_configs=None,
volume_mount_prefix='/volume/asd', boot_from_volume=True)
# Check that columns are correct
expected_columns = (
'Auto security group', 'Availability zone', 'Boot from volume',
'Description', 'Flavor id', 'Floating ip pool', 'Id',
'Is default', 'Is protected', 'Is proxy gateway', 'Is public',
'Name', 'Node processes', 'Plugin name', 'Plugin version',
'Security groups', 'Use autoconfig', 'Volume local to instance',
'Volume mount prefix', 'Volume type', 'Volumes availability zone',
'Volumes per node', 'Volumes size')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = (
True, 'av_zone', False, 'description', 'flavor_id',
'floating_pool', 'ng_id', False, False, False, True,
'template', 'namenode, tasktracker', 'fake', '0.1', None, True,
False, '/volumes/disk', None, None, 2, 2)
self.assertEqual(expected_data, data)
class TestListNodeGroupTemplates(TestNodeGroupTemplates):
def setUp(self):
super(TestListNodeGroupTemplates, self).setUp()
self.ngt_mock.list.return_value = [api_ngt.NodeGroupTemplate(
None, NGT_INFO)]
# Command to test
self.cmd = osc_ngt.ListNodeGroupTemplates(self.app, None)
def test_ngt_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that columns are correct
expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version']
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = [('template', 'ng_id', 'fake', '0.1')]
self.assertEqual(expected_data, list(data))
def test_ngt_list_long(self):
arglist = ['--long']
verifylist = [('long', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that columns are correct
expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version',
'Node processes', 'Description']
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = [('template', 'ng_id', 'fake', '0.1',
'namenode, tasktracker', 'description')]
self.assertEqual(expected_data, list(data))
def test_ngt_list_extra_search_opts(self):
arglist = ['--plugin', 'fake', '--plugin-version', '0.1', '--name',
'templ']
verifylist = [('plugin', 'fake'), ('plugin_version', '0.1'),
('name', 'templ')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that columns are correct
expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version']
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = [('template', 'ng_id', 'fake', '0.1')]
self.assertEqual(expected_data, list(data))
class TestShowNodeGroupTemplate(TestNodeGroupTemplates):
def setUp(self):
super(TestShowNodeGroupTemplate, self).setUp()
self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
None, NGT_INFO)
# Command to test
self.cmd = osc_ngt.ShowNodeGroupTemplate(self.app, None)
def test_ngt_show(self):
arglist = ['template']
verifylist = [('node_group_template', 'template')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.find_unique.assert_called_once_with(name='template')
# Check that columns are correct
expected_columns = (
'Auto security group', 'Availability zone', 'Boot from volume',
'Description', 'Flavor id', 'Floating ip pool', 'Id',
'Is default', 'Is protected', 'Is proxy gateway', 'Is public',
'Name', 'Node processes', 'Plugin name', 'Plugin version',
'Security groups', 'Use autoconfig', 'Volume local to instance',
'Volume mount prefix', 'Volume type', 'Volumes availability zone',
'Volumes per node', 'Volumes size')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = (
True, 'av_zone', False, 'description', 'flavor_id',
'floating_pool', 'ng_id', False, False, False, True,
'template', 'namenode, tasktracker', 'fake', '0.1', None, True,
False, '/volumes/disk', None, None, 2, 2)
self.assertEqual(expected_data, data)
class TestDeleteNodeGroupTemplate(TestNodeGroupTemplates):
def setUp(self):
super(TestDeleteNodeGroupTemplate, self).setUp()
self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
None, NGT_INFO)
# Command to test
self.cmd = osc_ngt.DeleteNodeGroupTemplate(self.app, None)
def test_ngt_delete(self):
arglist = ['template']
verifylist = [('node_group_template', ['template'])]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.delete.assert_called_once_with('ng_id')
class TestUpdateNodeGroupTemplate(TestNodeGroupTemplates):
# TODO(apavlov): check for update with --json
def setUp(self):
super(TestUpdateNodeGroupTemplate, self).setUp()
self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
None, NGT_INFO)
self.ngt_mock.update.return_value = api_ngt.NodeGroupTemplate(
None, NGT_INFO)
self.fl_mock = self.app.client_manager.compute.flavors
self.fl_mock.get.return_value = mock.Mock(id='flavor_id')
self.fl_mock.reset_mock()
# Command to test
self.cmd = osc_ngt.UpdateNodeGroupTemplate(self.app, None)
def test_ngt_update_no_options(self):
arglist = []
verifylist = []
self.assertRaises(osc_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_ngt_update_nothing_updated(self):
arglist = ['template']
verifylist = [('node_group_template', 'template')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.update.assert_called_once_with('ng_id')
def test_ngt_update_all_options(self):
arglist = ['template', '--name', 'template', '--plugin', 'fake',
'--plugin-version', '0.1', '--processes', 'namenode',
'tasktracker', '--security-groups', 'secgr',
'--auto-security-group-enable',
'--availability-zone', 'av_zone', '--flavor', 'flavor_id',
'--floating-ip-pool', 'floating_pool', '--volumes-per-node',
'2', '--volumes-size', '2', '--volumes-type', 'type',
'--volumes-availability-zone', 'vavzone',
'--volumes-mount-prefix', '/volume/asd',
'--volumes-locality-enable', '--description', 'descr',
'--autoconfig-enable', '--proxy-gateway-enable', '--public',
'--protected', '--boot-from-volume-enable']
verifylist = [('node_group_template', 'template'),
('name', 'template'), ('plugin', 'fake'),
('plugin_version', '0.1'),
('processes', ['namenode', 'tasktracker']),
('security_groups', ['secgr']),
('use_auto_security_group', True),
('availability_zone', 'av_zone'),
('flavor', 'flavor_id'),
('floating_ip_pool', 'floating_pool'),
('volumes_per_node', 2), ('volumes_size', 2),
('volumes_type', 'type'),
('volumes_availability_zone', 'vavzone'),
('volumes_mount_prefix', '/volume/asd'),
('volume_locality', True),
('description', 'descr'), ('use_autoconfig', True),
('is_proxy_gateway', True),
('is_public', True), ('is_protected', True),
('boot_from_volume', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.update.assert_called_once_with(
'ng_id',
auto_security_group=True, availability_zone='av_zone',
description='descr', flavor_id='flavor_id',
floating_ip_pool='floating_pool', plugin_version='0.1',
is_protected=True, is_proxy_gateway=True, is_public=True,
name='template', node_processes=['namenode', 'tasktracker'],
plugin_name='fake', security_groups=['secgr'], use_autoconfig=True,
volume_local_to_instance=True, volume_type='type',
volumes_availability_zone='vavzone', volumes_per_node=2,
volumes_size=2, volume_mount_prefix='/volume/asd',
boot_from_volume=True)
# Check that columns are correct
expected_columns = (
'Auto security group', 'Availability zone', 'Boot from volume',
'Description', 'Flavor id', 'Floating ip pool', 'Id',
'Is default', 'Is protected', 'Is proxy gateway', 'Is public',
'Name', 'Node processes', 'Plugin name', 'Plugin version',
'Security groups', 'Use autoconfig', 'Volume local to instance',
'Volume mount prefix', 'Volume type', 'Volumes availability zone',
'Volumes per node', 'Volumes size')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = (
True, 'av_zone', False, 'description', 'flavor_id',
'floating_pool', 'ng_id', False, False, False, True,
'template', 'namenode, tasktracker', 'fake', '0.1', None, True,
False, '/volumes/disk', None, None, 2, 2)
self.assertEqual(expected_data, data)
def test_ngt_update_private_unprotected(self):
arglist = ['template', '--private', '--unprotected']
verifylist = [('node_group_template', 'template'),
('is_public', False), ('is_protected', False)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that correct arguments were passed
self.ngt_mock.update.assert_called_once_with(
'ng_id', is_protected=False, is_public=False)

View File

@@ -154,3 +154,111 @@ class NodeGroupTemplateTest(base.BaseTestCase):
self.assertEqual(url, self.responses.last_request.url)
self.assertIsInstance(resp, ng.NodeGroupTemplate)
self.assertDictsEqual(self.body, resp.__dict__[u'node_group_template'])
class NodeGroupTemplateTestV2(base.BaseTestCase):
body = {
"name": "name",
"plugin_name": "plugin",
"plugin_version": "1",
"flavor_id": "2",
"description": "description",
"volumes_per_node": "3",
"volumes_size": "4",
"node_processes": ["datanode"],
"use_autoconfig": True,
"volume_mount_prefix": '/volumes/disk',
"boot_from_volume": False
}
update_json = {
"node_group_template": {
"name": "UpdatedName",
"plugin_name": "new_plugin",
"plugin_version": "2",
"flavor_id": "7",
"description": "description",
"volumes_per_node": "3",
"volumes_size": "4",
"node_processes": ["datanode", "namenode"],
"use_autoconfig": False,
"volume_mount_prefix": '/volumes/newdisk',
"boot_from_volume": True
}
}
def test_create_node_group_template_v2(self):
url = self.URL + '/node-group-templates'
self.responses.post(url, status_code=202,
json={'node_group_template': self.body})
resp = self.client_v2.node_group_templates.create(**self.body)
self.assertEqual(url, self.responses.last_request.url)
self.assertEqual(self.body,
json.loads(self.responses.last_request.body))
self.assertIsInstance(resp, ng.NodeGroupTemplate)
self.assertFields(self.body, resp)
def test_update_node_group_template_v2(self):
url = self.URL + '/node-group-templates'
self.responses.post(url, status_code=202,
json={'node_group_template': self.body})
resp = self.client_v2.node_group_templates.create(**self.body)
update_url = self.URL + '/node-group-templates/id'
self.responses.patch(update_url, status_code=202,
json=self.update_json)
# check that all parameters will be updated
updated = self.client_v2.node_group_templates.update(
"id",
resp.name,
resp.plugin_name,
resp.plugin_version,
resp.flavor_id,
description=getattr(resp, "description", None),
volumes_per_node=getattr(resp, "volumes_per_node", None),
node_configs=getattr(resp, "node_configs", None),
floating_ip_pool=getattr(resp, "floating_ip_pool", None),
security_groups=getattr(resp, "security_groups", None),
auto_security_group=getattr(resp, "auto_security_group", None),
availability_zone=getattr(resp, "availability_zone", None),
volumes_availability_zone=getattr(resp,
"volumes_availability_zone",
None),
volume_type=getattr(resp, "volume_type", None),
image_id=getattr(resp, "image_id", None),
is_proxy_gateway=getattr(resp, "is_proxy_gateway", None),
volume_local_to_instance=getattr(resp,
"volume_local_to_instance",
None),
use_autoconfig=False,
boot_from_volume=getattr(resp, "boot_from_volume", None)
)
self.assertIsInstance(updated, ng.NodeGroupTemplate)
self.assertFields(self.update_json["node_group_template"], updated)
# check that parameters will not be updated
self.client_v2.node_group_templates.update("id")
self.assertEqual(update_url, self.responses.last_request.url)
self.assertEqual({},
json.loads(self.responses.last_request.body))
# check that all parameters will be unset
unset_json = {
'auto_security_group': None, 'availability_zone': None,
'description': None, 'flavor_id': None, 'floating_ip_pool': None,
'plugin_version': None, 'image_id': None, 'is_protected': None,
'is_proxy_gateway': None, 'is_public': None, 'name': None,
'node_configs': None, 'node_processes': None, 'plugin_name': None,
'security_groups': None, 'shares': None, 'use_autoconfig': None,
'volume_local_to_instance': None, 'volume_mount_prefix': None,
'volume_type': None, 'volumes_availability_zone': None,
'volumes_per_node': None, 'volumes_size': None,
'boot_from_volume': None}
self.client_v2.node_group_templates.update("id", **unset_json)
self.assertEqual(update_url, self.responses.last_request.url)
self.assertEqual(unset_json,
json.loads(self.responses.last_request.body))

View File

@@ -96,5 +96,14 @@ openstack.data_processing.v1 =
dataprocessing_job_binary_delete = saharaclient.osc.v1.job_binaries:DeleteJobBinary
dataprocessing_job_binary_download = saharaclient.osc.v1.job_binaries:DownloadJobBinary
openstack.data_processing.v2 =
dataprocessing_node_group_template_create = saharaclient.osc.v2.node_group_templates:CreateNodeGroupTemplate
dataprocessing_node_group_template_list = saharaclient.osc.v2.node_group_templates:ListNodeGroupTemplates
dataprocessing_node_group_template_show = saharaclient.osc.v2.node_group_templates:ShowNodeGroupTemplate
dataprocessing_node_group_template_update = saharaclient.osc.v2.node_group_templates:UpdateNodeGroupTemplate
dataprocessing_node_group_template_delete = saharaclient.osc.v2.node_group_templates:DeleteNodeGroupTemplate
dataprocessing_node_group_template_import = saharaclient.osc.v2.node_group_templates:ImportNodeGroupTemplate
dataprocessing_node_group_template_export = saharaclient.osc.v2.node_group_templates:ExportNodeGroupTemplate
[wheel]
universal = 1