Change-Id: I837ab1b70e7b3b45d53902b00c0f0e2f6b0c6818
This commit is contained in:
Fabio 2016-04-21 14:31:04 +02:00
parent b15fad7a67
commit f99dd45c62
40 changed files with 213 additions and 6215 deletions

View File

@ -1,29 +0,0 @@
#!/bin/bash
# Thin curl-based CLI for the Iotronic "nodes" REST resource.
#
# Sub-commands:
#   list                               - list all registered nodes
#   create-node CODE NAME LAT LON ALT  - register a new arduino node
#   delete-node UUID                   - delete a node by UUID
#   show UUID                          - show a single node
HOST='localhost'
PORT='1288'
VERSION='v1'
BASE="http://$HOST:$PORT/$VERSION"

# No sub-command given: print usage and stop.  (Previously the script fell
# through into the case statement with an empty "$1" and printed the usage
# line a second time via the catch-all branch.)
if [ $# -lt 1 ]
then
    echo "list - create-node - delete-node - show"
    exit 1
fi

case "$1" in
    list) curl -sS "$BASE/nodes/" | python -m json.tool
        echo "";
        ;;
    create-node) curl -sS -H "Content-Type: application/json" -X POST "$BASE/nodes/" \
        -d '{"device":"arduino","code":"'"$2"'","name":"'"$3"'","location":[{"latitude":"'"$4"'","longitude":"'"$5"'","altitude":"'"$6"'"}]}' | python -m json.tool
        echo "";
        ;;
    delete-node) curl -sS -X DELETE "$BASE/nodes/$2" | python -m json.tool
        echo "";
        ;;
    show) curl -sS "$BASE/nodes/$2" | python -m json.tool
        echo "";
        ;;
    *) echo "list - create-node - delete-node - show"
        ;;
esac

View File

@ -4,15 +4,13 @@ transport_url=rabbit://<user>:<password>@<host>:5672/
debug=True
verbose=False
#
# Options defined in ironic.api.app
#
# Authentication strategy used by ironic-api: one of
# Authentication strategy used by iotronic-api: one of
# "keystone" or "noauth". "noauth" should not be used in a
# production environment because all authentication will be
# disabled. (string value)
auth_strategy=noauth
#auth_strategy=keystone
# Enable pecan debug mode. WARNING: this is insecure and
# should not be used in a production environment. (boolean
@ -20,10 +18,26 @@ auth_strategy=noauth
#pecan_debug=false
[wamp]
wamp_ip = <wamp_router_ip>
wamp_port = <port>
wamp_realm = s4t
[database]
connection = mysql://<user>:<password>@<host>/iotronic
[wamp]
#wamp_ip = <ip>
#wamp_port = <port>
#wamp_realm = s4t
[oslo_messaging_rabbit]
#rabbit_host = controller
#rabbit_userid = openstack
#rabbit_password = RABBIT_PASS
[keystone_authtoken]
auth_uri = http://<keystone_host>:5000
auth_url = http://<keystone_host>:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = iotronic
password = <password>

File diff suppressed because it is too large Load Diff

View File

@ -1,17 +0,0 @@
# Install the system packages and Python libraries required by Iotronic:
# database (MariaDB), message broker (RabbitMQ), web stack (httpd/mod_wsgi,
# memcached) and the oslo/pecan/WSME Python dependencies.
yum install mariadb mariadb-server MySQL-python
yum install rabbitmq-server
yum install httpd mod_wsgi memcached python-memcached
yum install gcc python-devel pip
pip install eventlet
yum install python-oslo-config
pip install pecan
pip install keystonemiddleware
yum install python-oslo-log
yum install python-oslo-concurrency
pip install paramiko
# NOTE: python-oslo-policy was listed twice in the original script;
# it only needs to be installed once.
yum install python-oslo-policy
yum install python-wsme
yum install python-oslo-messaging
yum install python-oslo-db
pip install jsonpatch

View File

@ -30,8 +30,6 @@ app = {
'acl_public_routes': [
'/',
'/v1',
# '/v1/drivers/[a-z_]*/vendor_passthru/lookup',
# '/v1/nodes/[a-z0-9\-]+/vendor_passthru/heartbeat',
'/v1/nodes/[a-z0-9\-]',
],
}

View File

@ -52,7 +52,7 @@ class Link(base.APIBase):
@classmethod
def sample(cls):
sample = cls(href="http://localhost:6385/chassis/"
sample = cls(href="http://localhost:1288/node/"
"eaaca217-e7d8-47b4-bb41-3f99f20eed89",
rel="bookmark")
return sample

View File

@ -60,8 +60,8 @@ class Root(base.APIBase):
def convert():
root = Root()
root.name = "OpenStack Iotronic API"
root.description = ("Iotronic is an OpenStack project which aims to "
"provision baremetal machines.")
root.description = ("IoTronic is an Internet of Things resource \
management service for OpenStack clouds.")
root.versions = [Version.convert('v1')]
root.default_version = Version.convert('v1')
return root

View File

@ -26,12 +26,6 @@ from pecan import rest
from webob import exc
from wsme import types as wtypes
'''
# from iotronic.api.controllers.v1 import chassis
# from iotronic.api.controllers.v1 import driver
# from iotronic.api.controllers.v1 import port
'''
BASE_VERSION = 1
@ -45,18 +39,6 @@ MIN_VER = base.Version({base.Version.string: MIN_VER_STR},
MAX_VER = base.Version({base.Version.string: MAX_VER_STR},
MIN_VER_STR, MAX_VER_STR)
'''
class MediaType(base.APIBase):
"""A media type representation."""
base = wtypes.text
type = wtypes.text
def __init__(self, base, type):
self.base = base
self.type = type
'''
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
@ -64,24 +46,12 @@ class V1(base.APIBase):
id = wtypes.text
"""The ID of the version, also acts as the release number"""
# media_types = [MediaType]
"""An array of supported media types for this version"""
# links = [link.Link]
"""Links that point to a specific URL for this version and documentation"""
# chassis = [link.Link]
"""Links to the chassis resource"""
nodes = [link.Link]
"""Links to the nodes resource"""
# ports = [link.Link]
"""Links to the ports resource"""
# drivers = [link.Link]
"""Links to the drivers resource"""
@staticmethod
def convert():
v1 = V1()
@ -104,33 +74,6 @@ class V1(base.APIBase):
'api-spec-v1.html',
bookmark=True, type='text/html')
]
v1.media_types = [MediaType('application/json',
'application/vnd.openstack.iotronic.v1+json')]
v1.chassis = [link.Link.make_link('self', pecan.request.host_url,
'chassis', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'chassis', '',
bookmark=True)
]
'''
'''
v1.ports = [link.Link.make_link('self', pecan.request.host_url,
'ports', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'ports', '',
bookmark=True)
]
v1.drivers = [link.Link.make_link('self', pecan.request.host_url,
'drivers', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'drivers', '',
bookmark=True)
]
'''
return v1
@ -139,9 +82,6 @@ class Controller(rest.RestController):
"""Version 1 API controller root."""
nodes = node.NodesController()
# ports = port.PortsController()
# chassis = chassis.ChassisController()
# drivers = driver.DriversController()
@expose.expose(V1)
def get(self):

View File

@ -43,23 +43,3 @@ class Location(base.APIBase):
for l in list:
list_locations.append(Location(**l.as_dict()))
return list_locations
'''
class LocationCollection(collection.Collection):
"""API representation of a collection of locations."""
locations = [Location]
"""A list containing locations objects"""
def __init__(self, **kwargs):
self._type = 'locations'
@staticmethod
def convert_with_locates(locations,
limit, url=None, expand=False, **kwargs):
collection = LocationCollection()
collection.locations = [Location.convert_with_locates(n, expand)
for n in locations]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
'''

View File

@ -59,27 +59,6 @@ class Node(base.APIBase):
list_loc = objects.Location({}).list_by_node_id({}, node.id)
node.location = loc.Location.convert_with_list(list_loc)
'''
else:
if not show_password:
node.driver_info = ast.literal_eval(strutils.mask_password(
node.driver_info,
"******"))
node.ports = [link.Link.make_link('self', url, 'nodes',
node.uuid + "/ports"),
link.Link.make_link('bookmark', url, 'nodes',
node.uuid + "/ports",
bookmark=True)
]
node.chassis_id = wtypes.Unset
node.links = [link.Link.make_link('self', url, 'nodes',
node.uuid),
link.Link.make_link('bookmark', url, 'nodes',
node.uuid, bookmark=True)
]
'''
return node
@classmethod
@ -124,8 +103,7 @@ class NodesController(rest.RestController):
invalid_sort_key_list = ['properties']
def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated,
maintenance, marker, limit, sort_key, sort_dir,
def _get_nodes_collection(self, marker, limit, sort_key, sort_dir,
expand=False, resource_url=None):
limit = api_utils.validate_limit(limit)
@ -141,58 +119,28 @@ class NodesController(rest.RestController):
_("The sort_key value %(key)s is an invalid field for "
"sorting") % {'key': sort_key})
if instance_uuid:
nodes = self._get_nodes_by_instance(instance_uuid)
else:
filters = {}
'''
if chassis_uuid:
filters['chassis_uuid'] = chassis_uuid
if associated is not None:
filters['associated'] = associated
if maintenance is not None:
filters['maintenance'] = maintenance
'''
nodes = objects.Node.list(pecan.request.context, limit, marker_obj,
sort_key=sort_key, sort_dir=sort_dir,
filters=filters)
filters = {}
nodes = objects.Node.list(pecan.request.context, limit, marker_obj,
sort_key=sort_key, sort_dir=sort_dir,
filters=filters)
parameters = {'sort_key': sort_key, 'sort_dir': sort_dir}
'''
if associated:
parameters['associated'] = associated
if maintenance:
parameters['maintenance'] = maintenance
'''
return NodeCollection.convert_with_locates(nodes, limit,
url=resource_url,
expand=expand,
**parameters)
@expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
types.boolean, types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
maintenance=None, marker=None, limit=None, sort_key='id',
@expose.expose(NodeCollection, types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of nodes.
:param chassis_uuid: Optional UUID of a chassis, to get only nodes for
that chassis.
:param instance_uuid: Optional UUID of an instance, to find the node
associated with that instance.
:param associated: Optional boolean whether to return a list of
associated or unassociated nodes. May be combined
with other parameters.
:param maintenance: Optional boolean value that indicates whether
to get nodes in maintenance mode ("True"), or not
in maintenance mode ("False").
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
return self._get_nodes_collection(chassis_uuid, instance_uuid,
associated, maintenance, marker,
return self._get_nodes_collection(marker,
limit, sort_key, sort_dir)
@expose.expose(Node, types.uuid_or_name)
@ -213,15 +161,8 @@ class NodesController(rest.RestController):
:param node_ident: UUID or logical name of a node.
"""
rpc_node = api_utils.get_rpc_node(node_ident)
try:
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
except exception.NoValidHost as e:
e.code = 400
raise e
pecan.request.rpcapi.destroy_node(pecan.request.context,
rpc_node.uuid, topic)
rpc_node.uuid)
@expose.expose(Node, body=Node, status_code=201)
def post(self, Node):
@ -254,5 +195,4 @@ class NodesController(rest.RestController):
new_Location.node_id = new_Node.id
new_Location.create()
# pecan.response.location = link.build_url('Nodes', new_Node.uuid)
return Node.convert_with_locates(new_Node)

View File

@ -1,42 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Mapping of boot devices used when requesting the system to boot
from an alternate device.

The options presented were based on the IPMItool chassis
bootdev command. You can find the documentation at:
http://linux.die.net/man/1/ipmitool

NOTE: This module does not include all the options from ipmitool because
they don't make sense in the limited context of Iotronic right now.
"""

PXE = 'pxe'        # Boot from PXE boot
DISK = 'disk'      # Boot from default Hard-drive
CDROM = 'cdrom'    # Boot from CD/DVD
BIOS = 'bios'      # Boot into BIOS setup
SAFE = 'safe'      # Boot from default Hard-drive, request Safe Mode

View File

@ -1,213 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LW
from iotronic.common import utils
from iotronic.openstack.common import loopingcall
# Tunables controlling how long commit() polls the block device for
# activity after the partition table has been written.
opts = [
    cfg.IntOpt(
        'check_device_interval',
        default=1,
        help='After Iotronic has completed creating the partition table, '
             'it continues to check for activity on the attached iSCSI '
             'device status at this interval prior to copying the image'
             ' to the node, in seconds'),
    cfg.IntOpt(
        'check_device_max_retries',
        default=20,
        help='The maximum number of times to check that the device is '
             'not accessed by another process. If the device is still '
             'busy after that, the disk partitioning will be treated as'
             ' having failed.'),
]

CONF = cfg.CONF
# The options above live under the [disk_partitioner] config section.
opt_group = cfg.OptGroup(name='disk_partitioner',
                         title='Options for the disk partitioner')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)

LOG = logging.getLogger(__name__)
class DiskPartitioner(object):
    """In-memory partition layout builder that commits via ``parted``.

    Partitions are accumulated with add_partition() and nothing touches
    the disk until commit() is called.
    """

    def __init__(self, device, disk_label='msdos', alignment='optimal'):
        """A convenient wrapper around the parted tool.

        :param device: The device path.
        :param disk_label: The type of the partition table. Valid types are:
                           "bsd", "dvh", "gpt", "loop", "mac", "msdos",
                           "pc98", or "sun".
        :param alignment: Set alignment for newly created partitions.
                          Valid types are: none, cylinder, minimal and
                          optimal.
        """
        self._device = device
        self._disk_label = disk_label
        self._alignment = alignment
        # Pending partition specs, in creation order (1-based numbering).
        self._partitions = []
        # Matches the whitespace-separated PID list printed by fuser.
        self._fuser_pids_re = re.compile(r'((\d)+\s*)+')

    def _exec(self, *args):
        # NOTE(lucasagomes): utils.execute() is already a wrapper on top
        #                    of processutils.execute() which raises specific
        #                    exceptions. It also logs any failure so we don't
        #                    need to log it again here.
        utils.execute('parted', '-a', self._alignment, '-s', self._device,
                      '--', 'unit', 'MiB', *args, check_exit_code=[0],
                      run_as_root=True)

    def add_partition(self, size, part_type='primary', fs_type='',
                      bootable=False):
        """Add a partition.

        :param size: The size of the partition in MiB.
        :param part_type: The type of the partition. Valid values are:
                          primary, logical, or extended.
        :param fs_type: The filesystem type. Valid types are: ext2, fat32,
                        fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
                        If blank (''), it will create a Linux native
                        partition (83).
        :param bootable: Boolean value; whether the partition is bootable
                         or not.
        :returns: The partition number.
        """
        self._partitions.append({'size': size,
                                 'type': part_type,
                                 'fs_type': fs_type,
                                 'bootable': bootable})
        # Partition numbers are 1-based and follow insertion order.
        return len(self._partitions)

    def get_partitions(self):
        """Get the partitioning layout.

        :returns: An iterator with the partition number and the
                  partition layout.
        """
        return enumerate(self._partitions, 1)

    def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
                                           stderr):
        # Periodic callback driven by a FixedIntervalLoopingCall from
        # commit(); the single-element lists act as mutable out-parameters
        # shared with the caller.
        retries[0] += 1
        if retries[0] > max_retries:
            raise loopingcall.LoopingCallDone()

        try:
            # NOTE(ifarkas): fuser returns a non-zero return code if none of
            #                the specified files is accessed
            out, err = utils.execute('fuser', self._device,
                                     check_exit_code=[0, 1], run_as_root=True)
            if not out and not err:
                # Device is idle: stop polling.
                raise loopingcall.LoopingCallDone()
            else:
                if err:
                    stderr[0] = err
                if out:
                    pids_match = re.search(self._fuser_pids_re, out)
                    pids[0] = pids_match.group()
        except processutils.ProcessExecutionError as exc:
            LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
                            ' %(err)s'), {'device': self._device, 'err': exc})

    def commit(self):
        """Write to the disk."""
        LOG.debug("Committing partitions to disk.")
        cmd_args = ['mklabel', self._disk_label]
        # NOTE(lucasagomes): Lead in with 1MiB to allow room for the
        #                    partition table itself.
        start = 1
        for num, part in self.get_partitions():
            end = start + part['size']
            cmd_args.extend(['mkpart', part['type'], part['fs_type'],
                             str(start), str(end)])
            if part['bootable']:
                cmd_args.extend(['set', str(num), 'boot', 'on'])
            start = end

        self._exec(*cmd_args)

        # Poll with fuser until no other process holds the device open,
        # or until the configured retry budget is exhausted.
        retries = [0]
        pids = ['']
        fuser_err = ['']
        interval = CONF.disk_partitioner.check_device_interval
        max_retries = CONF.disk_partitioner.check_device_max_retries

        timer = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_disk_to_become_available,
            retries, max_retries, pids, fuser_err)
        timer.start(interval=interval).wait()

        if retries[0] > max_retries:
            # Report the busy PIDs when fuser found any, otherwise report
            # whatever fuser printed on stderr.
            if pids[0]:
                raise exception.InstanceDeployFailure(
                    _('Disk partitioning failed on device %(device)s. '
                      'Processes with the following PIDs are holding it: '
                      '%(pids)s. Time out waiting for completion.')
                    % {'device': self._device, 'pids': pids[0]})
            else:
                raise exception.InstanceDeployFailure(
                    _('Disk partitioning failed on device %(device)s. Fuser '
                      'exited with "%(fuser_err)s". Time out waiting for '
                      'completion.')
                    % {'device': self._device, 'fuser_err': fuser_err[0]})
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
"([\d\.]+)MiB:([\d\.]+)MiB:(\w*)::(\w*)")
def list_partitions(device):
    """Get partitions information from given device.

    :param device: The device path.
    :returns: list of dictionaries (one per partition) with keys:
              number, start, end, size (in MiB), filesystem, flags
    """
    report = utils.execute(
        'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
        use_standard_locale=True, run_as_root=True)[0]
    if isinstance(report, bytes):
        report = report.decode("utf-8")
    # Drop blank lines; the first two remaining lines are the header and
    # the disk summary, the partitions follow.
    partition_lines = [ln for ln in report.split('\n') if ln.strip()][2:]
    # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
    keys = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
    partitions = []
    for ln in partition_lines:
        match = _PARTED_PRINT_RE.match(ln)
        if match is None:
            LOG.warn(_LW("Partition information from parted for device "
                         "%(device)s does not match "
                         "expected format: %(line)s"),
                     dict(device=device, line=ln))
            continue
        # The first four groups are numeric; floats are rounded down.
        values = [int(float(grp)) if idx < 4 else grp
                  for idx, grp in enumerate(match.groups())]
        partitions.append(dict(zip(keys, values)))
    return partitions

View File

@ -1,144 +0,0 @@
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log
from stevedore import dispatch
from iotronic.common import exception
from iotronic.common.i18n import _LI
LOG = log.getLogger(__name__)

# Entry-point names of the drivers to load at conductor start-up; any
# enabled driver that is missing or fails to load aborts service start.
driver_opts = [
    cfg.ListOpt('enabled_drivers',
                default=['pxe_ipmitool'],
                help='Specify the list of drivers to load during service '
                     'initialization. Missing drivers, or drivers which '
                     'fail to initialize, will prevent the conductor '
                     'service from starting. The option default is a '
                     'recommended set of production-oriented drivers. A '
                     'complete list of drivers present on your system may '
                     'be found by enumerating the "iotronic.drivers" '
                     'entrypoint. An example may be found in the '
                     'developer documentation online.'),
]

CONF = cfg.CONF
CONF.register_opts(driver_opts)

# Lock name used to serialize creation of the stevedore extension manager.
EM_SEMAPHORE = 'extension_manager'
def get_driver(driver_name):
    """Simple method to get a ref to an instance of a driver.

    Driver loading is handled by the DriverFactory class. This method
    conveniently wraps that class and returns the actual driver object.

    :param driver_name: the name of the driver class to load
    :returns: An instance of a class which implements
              iotronic.drivers.base.BaseDriver
    :raises: DriverNotFound if the requested driver_name could not be
             found in the "iotronic.drivers" namespace.
    """
    factory = DriverFactory()
    try:
        extension = factory[driver_name]
    except KeyError:
        # Translate the lookup failure into the project's exception type.
        raise exception.DriverNotFound(driver_name=driver_name)
    return extension.obj
def drivers():
    """Get all drivers as a dict name -> driver object."""
    factory = DriverFactory()
    loaded = {}
    for driver_name in factory.names:
        loaded[driver_name] = factory[driver_name].obj
    return loaded
class DriverFactory(object):
    """Discover, load and manage the drivers available."""

    # NOTE(deva): loading the _extension_manager as a class member will break
    #             stevedore when it loads a driver, because the driver will
    #             import this file (and thus instantiate another factory).
    #             Instead, we instantiate a NameDispatchExtensionManager only
    #             once, the first time DriverFactory.__init__ is called.
    _extension_manager = None

    def __init__(self):
        # Lazily build the shared extension manager on first use; all
        # instances share the same class-level manager.
        if not DriverFactory._extension_manager:
            DriverFactory._init_extension_manager()

    def __getitem__(self, name):
        # Returns the stevedore Extension for `name` (use .obj for the
        # driver instance); raises KeyError for unknown names.
        return self._extension_manager[name]

    # NOTE(deva): Use lockutils to avoid a potential race in eventlet
    #             that might try to create two driver factories.
    @classmethod
    @lockutils.synchronized(EM_SEMAPHORE, 'iotronic-')
    def _init_extension_manager(cls):
        # NOTE(deva): In case multiple greenthreads queue up on this lock
        #             before _extension_manager is initialized, prevent
        #             creation of multiple NameDispatchExtensionManagers.
        if cls._extension_manager:
            return

        # NOTE(deva): Drivers raise "DriverLoadError" if they are unable to be
        #             loaded, eg. due to missing external dependencies.
        #             We capture that exception, and, only if it is for an
        #             enabled driver, raise it from here. If enabled driver
        #             raises other exception type, it is wrapped in
        #             "DriverLoadError", providing the name of the driver that
        #             caused it, and raised. If the exception is for a
        #             non-enabled driver, we suppress it.
        def _catch_driver_not_found(mgr, ep, exc):
            # NOTE(deva): stevedore loads plugins *before* evaluating
            #             _check_func, so we need to check here, too.
            if ep.name in CONF.enabled_drivers:
                if not isinstance(exc, exception.DriverLoadError):
                    raise exception.DriverLoadError(driver=ep.name, reason=exc)
                raise exc

        def _check_func(ext):
            # Only load entry points explicitly enabled in the config.
            return ext.name in CONF.enabled_drivers

        cls._extension_manager = (
            dispatch.NameDispatchExtensionManager(
                'iotronic.drivers',
                _check_func,
                invoke_on_load=True,
                on_load_failure_callback=_catch_driver_not_found))

        # NOTE(deva): if we were unable to load any configured driver, perhaps
        #             because it is not present on the system, raise an error.
        if (sorted(CONF.enabled_drivers) !=
                sorted(cls._extension_manager.names())):
            found = cls._extension_manager.names()
            names = [n for n in CONF.enabled_drivers if n not in found]
            # just in case more than one could not be found ...
            names = ', '.join(names)
            raise exception.DriverNotFound(driver_name=names)

        LOG.info(_LI("Loaded the following drivers: %s"),
                 cls._extension_manager.names())

    @property
    def names(self):
        """The list of driver names available."""
        return self._extension_manager.names()

View File

@ -147,10 +147,6 @@ class MACAlreadyExists(Conflict):
message = _("A port with MAC address %(mac)s already exists.")
class ChassisAlreadyExists(Conflict):
message = _("A chassis with UUID %(uuid)s already exists.")
class PortAlreadyExists(Conflict):
message = _("A port with UUID %(uuid)s already exists.")
@ -281,10 +277,6 @@ class FailedToUpdateMacOnPort(IotronicException):
message = _("Update MAC address on port: %(port_id)s failed.")
class ChassisNotFound(NotFound):
message = _("Chassis %(chassis)s could not be found.")
class NoDriversLoaded(IotronicException):
message = _("Conductor %(conductor)s cannot be started "
"because no drivers were loaded.")
@ -321,11 +313,6 @@ class NodeInMaintenance(Invalid):
"%(node)s because it's in maintenance mode.")
class ChassisNotEmpty(Invalid):
message = _("Cannot complete the requested action because chassis "
"%(chassis)s contains nodes.")
class IPMIFailure(IotronicException):
message = _("IPMI call failed: %(cmd)s.")

View File

@ -94,7 +94,7 @@ def get_keystone_url(auth_url, auth_version):
return parse.urljoin(auth_url.rstrip('/'), api_version)
def get_service_url(service_type='baremetal', endpoint_type='internal'):
def get_service_url(service_type='iot', endpoint_type='internal'):
"""Wrapper for get service url from keystone service catalog.
Given a service_type and an endpoint_type, this method queries keystone

View File

@ -1,285 +0,0 @@
#
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import jinja2
from oslo_config import cfg
from oslo_log import log as logging
from iotronic.common import dhcp_factory
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common import utils
from iotronic.drivers.modules import deploy_utils
from iotronic.drivers import utils as driver_utils
from iotronic.openstack.common import fileutils
CONF = cfg.CONF

LOG = logging.getLogger(__name__)

# Directory (under the TFTP/HTTP root) holding the per-MAC/per-IP links
# to each node's PXE configuration file.
PXE_CFG_DIR_NAME = 'pxelinux.cfg'
def get_root_dir():
    """Returns the directory where the config files and images will live."""
    # iPXE serves artifacts over HTTP; classic PXE serves them over TFTP.
    return CONF.pxe.http_root if CONF.pxe.ipxe_enabled else CONF.pxe.tftp_root
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.
    """
    base_dir = get_root_dir()
    for subdir in (node_uuid, PXE_CFG_DIR_NAME):
        fileutils.ensure_tree(os.path.join(base_dir, subdir))
def _build_pxe_config(pxe_options, template):
    """Build the PXE boot configuration file.

    This method builds the PXE boot configuration file by rendering the
    template with the given parameters.

    :param pxe_options: A dict of values to set on the configuration file.
    :param template: The PXE configuration template.
    :returns: A formatted string with the file content.
    """
    template_dir, template_name = os.path.split(template)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    # ROOT and DISK_IDENTIFIER are rendered back as literal placeholders
    # so they can be substituted later (at deploy time).
    context = {'pxe_options': pxe_options,
               'ROOT': '{{ ROOT }}',
               'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
    return env.get_template(template_name).render(context)
def _link_mac_pxe_configs(task):
    """Link each MAC address with the PXE configuration file.

    :param task: A TaskManager instance.
    """
    pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)

    def _relink(mac_path):
        # Drop any stale link first; both helpers swallow filesystem errors.
        utils.unlink_without_raise(mac_path)
        utils.create_link_without_raise(pxe_config_file_path, mac_path)

    for mac in driver_utils.get_node_mac_addresses(task):
        _relink(_get_pxe_mac_path(mac))
        # TODO(lucasagomes): Backward compatibility with :hexraw,
        #                    to be removed in M.
        # see: https://bugs.launchpad.net/iotronic/+bug/1441710
        if CONF.pxe.ipxe_enabled:
            _relink(_get_pxe_mac_path(mac, delimiter=''))
def _link_ip_address_pxe_configs(task):
    """Link each IP address with the PXE configuration file.

    :param task: A TaskManager instance.
    :raises: FailedToGetIPAddressOnPort
    :raises: InvalidIPv4Address
    """
    config_path = get_pxe_config_file_path(task.node.uuid)
    provider = dhcp_factory.DHCPFactory().provider
    addresses = provider.get_ip_addresses(task)
    if not addresses:
        raise exception.FailedToGetIPAddressOnPort(_(
            "Failed to get IP address for any port on node %s.") %
            task.node.uuid)
    for address in addresses:
        link_path = _get_pxe_ip_address_path(address)
        utils.unlink_without_raise(link_path)
        utils.create_link_without_raise(config_path, link_path)
def _get_pxe_mac_path(mac, delimiter=None):
    """Convert a MAC address into a PXE config file name.

    :param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
    :param delimiter: The MAC address delimiter. Defaults to dash ('-').
    :returns: the path to the config file.
    """
    sep = '-' if delimiter is None else delimiter
    file_name = mac.replace(':', sep).lower()
    # Syslinux-style PXE expects the ARP hardware-type prefix ("01-");
    # iPXE file names omit it.
    if not CONF.pxe.ipxe_enabled:
        file_name = '01-' + file_name
    return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, file_name)
def _get_pxe_ip_address_path(ip_address):
    """Convert an ipv4 address into a PXE config file name.

    :param ip_address: A valid IPv4 address string in the format 'n.n.n.n'.
    :returns: the path to the config file.
    """
    # PXELINUX looks for a file named after the client IP rendered as
    # uppercase hexadecimal, e.g. 10.0.0.1 -> 0A000001.conf
    octets = [int(part) for part in ip_address.split('.')]
    hex_ip = ''.join('{0:02X}'.format(octet) for octet in octets)
    return os.path.join(CONF.pxe.tftp_root, hex_ip + ".conf")
def get_deploy_kr_info(node_uuid, driver_info):
    """Get href and tftp path for deploy kernel and ramdisk.

    Note: driver_info should be validated outside of this method.
    """
    root_dir = get_root_dir()
    return {
        label: (str(driver_info[label]),
                os.path.join(root_dir, node_uuid, label))
        for label in ('deploy_kernel', 'deploy_ramdisk')
    }
def get_pxe_config_file_path(node_uuid):
    """Generate the path for the node's PXE configuration file.

    :param node_uuid: the UUID of the node.
    :returns: The path to the node's PXE configuration file.
    """
    # Each node owns a directory named after its UUID under the PXE root;
    # the rendered configuration always lives in a file called "config".
    node_dir = os.path.join(get_root_dir(), node_uuid)
    return os.path.join(node_dir, 'config')
def create_pxe_config(task, pxe_options, template=None):
    """Generate PXE configuration file and MAC address links for it.

    This method will generate the PXE configuration file for the task's
    node under a directory named with the UUID of that node. For each
    MAC address (port) of that node, a symlink for the configuration file
    will be created under the PXE configuration directory, so regardless
    of which port boots first they'll get the same PXE configuration.

    :param task: A TaskManager instance.
    :param pxe_options: A dictionary with the PXE configuration
                        parameters.
    :param template: The PXE configuration template. If no template is
                     given the CONF.pxe.pxe_config_template will be used.
    """
    LOG.debug("Building PXE config for node %s", task.node.uuid)

    if template is None:
        template = CONF.pxe.pxe_config_template

    _ensure_config_dirs_exist(task.node.uuid)

    pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
    pxe_config = _build_pxe_config(pxe_options, template)
    utils.write_to_file(pxe_config_file_path, pxe_config)

    # UEFI firmware fetches its config by client IP address; BIOS PXE
    # fetches it by MAC address.
    if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
        _link_ip_address_pxe_configs(task)
    else:
        _link_mac_pxe_configs(task)
def clean_up_pxe_config(task):
    """Clean up the TFTP environment for the task's node.

    Removes the per-IP (UEFI) or per-MAC (BIOS) config links and the
    node's PXE config directory. All removals are best-effort (the
    *_without_raise helpers swallow filesystem errors).

    :param task: A TaskManager instance.
    """
    LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)

    if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
        # UEFI links are named after the ports' IP addresses.
        api = dhcp_factory.DHCPFactory().provider
        ip_addresses = api.get_ip_addresses(task)
        if not ip_addresses:
            return

        for port_ip_address in ip_addresses:
            try:
                ip_address_path = _get_pxe_ip_address_path(port_ip_address)
            except exception.InvalidIPv4Address:
                # Skip addresses that cannot be rendered as a hex file name.
                continue
            utils.unlink_without_raise(ip_address_path)
    else:
        # BIOS links are named after the ports' MAC addresses.
        for mac in driver_utils.get_node_mac_addresses(task):
            utils.unlink_without_raise(_get_pxe_mac_path(mac))
            # TODO(lucasagomes): Backward compatibility with :hexraw,
            #                    to be removed in M.
            # see: https://bugs.launchpad.net/iotronic/+bug/1441710
            if CONF.pxe.ipxe_enabled:
                utils.unlink_without_raise(_get_pxe_mac_path(mac,
                                                             delimiter=''))

    utils.rmtree_without_raise(os.path.join(get_root_dir(),
                                            task.node.uuid))
def dhcp_options_for_instance(task):
    """Retrieve the DHCP PXE boot options for the task's node.

    :param task: A TaskManager instance.
    :returns: a list of dicts with 'opt_name'/'opt_value' keys suitable
        for handing to the DHCP provider.
    """
    opts = []
    if CONF.pxe.ipxe_enabled:
        script_name = os.path.basename(CONF.pxe.ipxe_boot_script)
        ipxe_script_url = '/'.join([CONF.pxe.http_url, script_name])
        if dhcp_factory.CONF.dhcp.dhcp_provider == 'neutron':
            # Neutron uses dnsmasq as its default DHCP agent; it needs
            # the extra config "dhcp-match=set:ipxe,175" so the tags
            # below can tell iPXE clients apart from dumb firmware.
            plain_name = 'tag:!ipxe,bootfile-name'
            ipxe_name = 'tag:ipxe,bootfile-name'
        else:
            # !175 == non-iPXE.
            # http://ipxe.org/howto/dhcpd#ipxe-specific_options
            plain_name = '!175,bootfile-name'
            ipxe_name = 'bootfile-name'
        # Dumb firmware gets the PXE chainload image; iPXE clients get
        # the boot script URL.
        opts.append({'opt_name': plain_name,
                     'opt_value': CONF.pxe.pxe_bootfile_name})
        opts.append({'opt_name': ipxe_name,
                     'opt_value': ipxe_script_url})
    else:
        boot_file = (CONF.pxe.uefi_pxe_bootfile_name
                     if deploy_utils.get_boot_mode_for_deploy(
                         task.node) == 'uefi'
                     else CONF.pxe.pxe_bootfile_name)
        opts.append({'opt_name': 'bootfile-name',
                     'opt_value': boot_file})

    opts.append({'opt_name': 'server-ip-address',
                 'opt_value': CONF.pxe.tftp_server})
    opts.append({'opt_name': 'tftp-server',
                 'opt_value': CONF.pxe.tftp_server})
    return opts

File diff suppressed because it is too large Load Diff

View File

@ -18,8 +18,6 @@
Client side of the conductor RPC API.
"""
import random
import oslo_messaging as messaging
from iotronic.common import hash_ring
@ -56,45 +54,6 @@ class ConductorAPI(object):
# NOTE(deva): this is going to be buggy
self.ring_manager = hash_ring.HashRingManager()
def get_topic_for(self, node):
    """Get the RPC topic for the conductor service the node is mapped to.

    :param node: a node object.
    :returns: an RPC topic string.
    :raises: NoValidHost
    """
    # NOTE(review): the hash-ring lookup below is deliberately disabled
    # (wrapped in a string literal), so this method currently always
    # returns None. The docstring above describes the original,
    # currently-inactive contract — confirm before relying on it.
    '''
    self.ring_manager.reset()
    try:
        ring = self.ring_manager[node.driver]
        dest = ring.get_hosts(node.uuid)
        return self.topic + "." + dest[0]
    except exception.DriverNotFound:
        reason = (_('No conductor service registered which supports '
                    'driver %s.') % node.driver)
        raise exception.NoValidHost(reason=reason)
    '''
def get_topic_for_driver(self, driver_name):
    """Get RPC topic name for a conductor supporting the given driver.

    The topic is used to route messages to the conductor supporting
    the specified driver. A conductor is selected at random from the
    set of qualified conductors.

    :param driver_name: the name of the driver to route to.
    :returns: an RPC topic string.
    :raises: DriverNotFound
    """
    self.ring_manager.reset()
    # Renamed from ``hash_ring``: the previous local name shadowed the
    # ``iotronic.common.hash_ring`` module imported at the top of the
    # file, which is confusing and error-prone.
    ring = self.ring_manager[driver_name]
    host = random.choice(list(ring.hosts))
    return self.topic + "." + host
def update_node(self, context, node_obj, topic=None):
"""Synchronously, have a conductor update the node's information.
@ -116,212 +75,6 @@ class ConductorAPI(object):
cctxt = self.client.prepare(topic=topic or self.topic, version='1.1')
return cctxt.call(context, 'update_node', node_obj=node_obj)
def change_node_power_state(self, context, node_id, new_state, topic=None):
    """Change a node's power state.

    Synchronously acquires the node lock and starts the conductor
    background task that performs the power transition.

    :param context: request context.
    :param node_id: node id or uuid.
    :param new_state: one of iotronic.common.states power state values
    :param topic: RPC topic. Defaults to self.topic.
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    """
    target_topic = topic or self.topic
    prepared = self.client.prepare(topic=target_topic, version='1.6')
    return prepared.call(context, 'change_node_power_state',
                         node_id=node_id, new_state=new_state)
def vendor_passthru(self, context, node_id, driver_method, http_method,
                    info, topic=None):
    """Receive requests for vendor-specific actions.

    Synchronously validates driver-specific info and, on success,
    invokes the vendor method; async methods are handed off to a
    conductor background worker.

    :param context: request context.
    :param node_id: node id or uuid.
    :param driver_method: name of method for driver.
    :param http_method: the HTTP method used for the request.
    :param info: info for node driver.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: InvalidParameterValue if supplied info is not valid.
    :raises: MissingParameterValue if a required parameter is missing
    :raises: UnsupportedDriverExtension if current driver does not have
        vendor interface.
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    :raises: NodeLocked if node is locked by another conductor.
    :returns: A tuple of (response, is_async); response is None when the
        method was invoked asynchronously.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.20')
    return prepared.call(context, 'vendor_passthru',
                         node_id=node_id,
                         driver_method=driver_method,
                         http_method=http_method,
                         info=info)
def driver_vendor_passthru(self, context, driver_name, driver_method,
                           http_method, info, topic=None):
    """Pass vendor-specific calls which don't specify a node to a driver.

    Handles driver-level vendor passthru calls: no node UUID is
    required and the call runs on a random conductor that supports the
    given driver. Async methods are handed off to a background worker.

    :param context: request context.
    :param driver_name: name of the driver on which to call the method.
    :param driver_method: name of the vendor method, for use by the driver.
    :param http_method: the HTTP method used for the request.
    :param info: data to pass through to the driver.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: InvalidParameterValue for parameter errors.
    :raises: MissingParameterValue if a required parameter is missing
    :raises: UnsupportedDriverExtension if the driver doesn't have a vendor
        interface, or does not support the specified driver_method.
    :raises: DriverNotFound if the supplied driver is not loaded.
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    :returns: A tuple of (response, is_async); response is None when the
        method was invoked asynchronously.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.20')
    return prepared.call(context, 'driver_vendor_passthru',
                         driver_name=driver_name,
                         driver_method=driver_method,
                         http_method=http_method,
                         info=info)
def get_node_vendor_passthru_methods(self, context, node_id, topic=None):
    """Retrieve information about vendor methods of the given node.

    :param context: an admin context.
    :param node_id: the id or uuid of a node.
    :param topic: RPC topic. Defaults to self.topic.
    :returns: dictionary of <method name>:<method metadata> entries.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.21')
    return prepared.call(context, 'get_node_vendor_passthru_methods',
                         node_id=node_id)
def get_driver_vendor_passthru_methods(self, context, driver_name,
                                       topic=None):
    """Retrieve information about vendor methods of the given driver.

    :param context: an admin context.
    :param driver_name: name of the driver.
    :param topic: RPC topic. Defaults to self.topic.
    :returns: dictionary of <method name>:<method metadata> entries.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.21')
    return prepared.call(context, 'get_driver_vendor_passthru_methods',
                         driver_name=driver_name)
def do_node_deploy(self, context, node_id, rebuild, configdrive,
                   topic=None):
    """Signal to conductor service to perform a deployment.

    The node must already be configured and in the appropriate
    undeployed state before this method is called.

    :param context: request context.
    :param node_id: node id or uuid.
    :param rebuild: True if this is a rebuild request.
    :param configdrive: A gzipped and base64 encoded configdrive.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: InstanceDeployFailure
    :raises: InvalidParameterValue if validation fails
    :raises: MissingParameterValue if a required parameter is missing
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.22')
    return prepared.call(context, 'do_node_deploy', node_id=node_id,
                         rebuild=rebuild, configdrive=configdrive)
def do_node_tear_down(self, context, node_id, topic=None):
    """Signal to conductor service to tear down a deployment.

    The node must already be configured and in the appropriate
    deployed state before this method is called.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: InstanceDeployFailure
    :raises: InvalidParameterValue if validation fails
    :raises: MissingParameterValue if a required parameter is missing
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.6')
    return prepared.call(context, 'do_node_tear_down', node_id=node_id)
def do_provisioning_action(self, context, node_id, action, topic=None):
    """Signal to conductor service to perform the given action on a node.

    This encapsulates some provisioning actions in a single call.

    :param context: request context.
    :param node_id: node id or uuid.
    :param action: an action. One of iotronic.common.states.VERBS
    :param topic: RPC topic. Defaults to self.topic.
    :raises: InvalidParameterValue
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    :raises: InvalidStateRequested if the requested action can not
        be performed.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.23')
    return prepared.call(context, 'do_provisioning_action',
                         node_id=node_id, action=action)
def continue_node_clean(self, context, node_id, topic=None):
    """Signal to conductor service to start the next cleaning action.

    NOTE(JoshNang) this is an RPC cast, there will be no response or
    exception raised by the conductor for this RPC.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.27')
    # Fire-and-forget: cast, not call.
    return prepared.cast(context, 'continue_node_clean', node_id=node_id)
def validate_driver_interfaces(self, context, node_id, topic=None):
    """Validate the `core` and `standardized` interfaces for drivers.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    :returns: a dictionary containing the results of each
        interface validation.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.5')
    return prepared.call(context, 'validate_driver_interfaces',
                         node_id=node_id)
def destroy_node(self, context, node_id, topic=None):
"""Delete a node.
@ -335,168 +88,3 @@ class ConductorAPI(object):
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
return cctxt.call(context, 'destroy_node', node_id=node_id)
def get_console_information(self, context, node_id, topic=None):
    """Get connection information about the console.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support console.
    :raises: InvalidParameterValue when the wrong driver info is specified.
    :raises: MissingParameterValue if a required parameter is missing
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.11')
    return prepared.call(context, 'get_console_information',
                         node_id=node_id)
def set_console_mode(self, context, node_id, enabled, topic=None):
    """Enable/Disable the console.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    :param enabled: Boolean value; whether the console is enabled or
        disabled.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support console.
    :raises: InvalidParameterValue when the wrong driver info is specified.
    :raises: MissingParameterValue if a required parameter is missing
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.11')
    return prepared.call(context, 'set_console_mode', node_id=node_id,
                         enabled=enabled)
def update_port(self, context, port_obj, topic=None):
    """Synchronously, have a conductor update the port's information.

    Update the port's information in the database and return a port
    object. The conductor will lock related node and trigger specific
    driver actions if they are needed.

    :param context: request context.
    :param port_obj: a changed (but not saved) port object.
    :param topic: RPC topic. Defaults to self.topic.
    :returns: updated port object, including all fields.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.13')
    return prepared.call(context, 'update_port', port_obj=port_obj)
def get_driver_properties(self, context, driver_name, topic=None):
    """Get the properties of the driver.

    :param context: request context.
    :param driver_name: name of the driver.
    :param topic: RPC topic. Defaults to self.topic.
    :returns: a dictionary with <property name>:<property description>
        entries.
    :raises: DriverNotFound.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.16')
    return prepared.call(context, 'get_driver_properties',
                         driver_name=driver_name)
def set_boot_device(self, context, node_id, device, persistent=False,
                    topic=None):
    """Set the boot device for a node.

    Set the boot device to use on next reboot of the node. Be aware
    that not all drivers support this.

    :param context: request context.
    :param node_id: node id or uuid.
    :param device: the boot device, one of
        :mod:`iotronic.common.boot_devices`.
    :param persistent: Whether to set next-boot, or make the change
        permanent. Default: False.
    :raises: NodeLocked if node is locked by another conductor.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support management.
    :raises: InvalidParameterValue when the wrong driver info is
        specified or an invalid boot device is specified.
    :raises: MissingParameterValue if missing supplied info.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.17')
    return prepared.call(context, 'set_boot_device', node_id=node_id,
                         device=device, persistent=persistent)
def get_boot_device(self, context, node_id, topic=None):
    """Get the current boot device.

    Returns the current boot device of a node.

    :param context: request context.
    :param node_id: node id or uuid.
    :raises: NodeLocked if node is locked by another conductor.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support management.
    :raises: InvalidParameterValue when the wrong driver info is
        specified.
    :raises: MissingParameterValue if missing supplied info.
    :returns: a dictionary containing:
        :boot_device: the boot device, one of
            :mod:`iotronic.common.boot_devices` or None if it is unknown.
        :persistent: Whether the boot device will persist to all
            future boots or not, None if it is unknown.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.17')
    return prepared.call(context, 'get_boot_device', node_id=node_id)
def get_supported_boot_devices(self, context, node_id, topic=None):
    """Get the list of supported devices.

    Returns the list of supported boot devices of a node.

    :param context: request context.
    :param node_id: node id or uuid.
    :raises: NodeLocked if node is locked by another conductor.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support management.
    :raises: InvalidParameterValue when the wrong driver info is
        specified.
    :raises: MissingParameterValue if missing supplied info.
    :returns: A list with the supported boot devices defined
        in :mod:`iotronic.common.boot_devices`.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.17')
    return prepared.call(context, 'get_supported_boot_devices',
                         node_id=node_id)
def inspect_hardware(self, context, node_id, topic=None):
    """Signals the conductor service to perform hardware introspection.

    :param context: request context.
    :param node_id: node id or uuid.
    :param topic: RPC topic. Defaults to self.topic.
    :raises: NodeLocked if node is locked by another conductor.
    :raises: HardwareInspectionFailure
    :raises: NoFreeConductorWorker when there is no free worker to start
        async task.
    :raises: UnsupportedDriverExtension if the node's driver doesn't
        support inspection.
    :raises: InvalidStateRequested if 'inspect' is not a valid
        action to do in the current state.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.24')
    return prepared.call(context, 'inspect_hardware', node_id=node_id)
def destroy_port(self, context, port, topic=None):
    """Delete a port.

    :param context: request context.
    :param port: port object
    :param topic: RPC topic. Defaults to self.topic.
    :raises: NodeLocked if node is locked by another conductor.
    :raises: NodeNotFound if the node associated with the port does not
        exist.
    """
    prepared = self.client.prepare(topic=topic or self.topic,
                                   version='1.25')
    return prepared.call(context, 'destroy_port', port=port)

View File

@ -48,18 +48,6 @@ attributes that you may access:
'shared' kwarg arg of TaskManager())
task.node
The Node object
task.ports
Ports belonging to the Node
task.driver
The Driver for the Node, or the Driver based on the
'driver_name' kwarg of TaskManager().
Example usage:
::
with task_manager.acquire(context, node_id) as task:
task.driver.power.power_on(task.node)
If you need to execute task-requiring code in a background thread, the
TaskManager instance provides an interface to handle this for you, making
@ -175,7 +163,6 @@ class TaskManager(object):
self._on_error_method = None
self.context = context
# self.node = None
self.node = None
self.shared = shared
@ -200,19 +187,7 @@ class TaskManager(object):
else:
"""
self.node = objects.Node.get(context, node_id)
# self.ports = objects.Port.list_by_node_id(context, self.node.id)
# self.driver = driver_factory.get_driver(driver_name or
# self.node.driver)
# NOTE(deva): this handles the Juno-era NOSTATE state
# and should be deleted after Kilo is released
'''
if self.node.provision_state is states.NOSTATE:
self.node.provision_state = states.AVAILABLE
self.node.save()
self.fsm.initialize(self.node.provision_state)
'''
except Exception:
with excutils.save_and_reraise_exception():
self.release_resources()

View File

@ -57,8 +57,6 @@ class Connection(object):
:associated: True | False
:reserved: True | False
:maintenance: True | False
:chassis_uuid: uuid of chassis
:driver: driver's name
:provision_state: provision state of node
:provisioned_before:
nodes with provision_updated_at field before this
@ -82,8 +80,6 @@ class Connection(object):
:associated: True | False
:reserved: True | False
:maintenance: True | False
:chassis_uuid: uuid of chassis
:driver: driver's name
:provision_state: provision state of node
:provisioned_before:
nodes with provision_updated_at field before this
@ -95,33 +91,7 @@ class Connection(object):
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
'''
@abc.abstractmethod
def reserve_node(self, tag, node_id):
"""Reserve a node.
To prevent other ManagerServices from manipulating the given
Node while a Task is performed, mark it reserved by this host.
:param tag: A string uniquely identifying the reservation holder.
:param node_id: A node id or uuid.
:returns: A Node object.
:raises: NodeNotFound if the node is not found.
:raises: NodeLocked if the node is already reserved.
"""
@abc.abstractmethod
def release_node(self, tag, node_id):
"""Release the reservation on a node.
:param tag: A string uniquely identifying the reservation holder.
:param node_id: A node id or uuid.
:raises: NodeNotFound if the node is not found.
:raises: NodeLocked if the node is reserved by another host.
:raises: NodeNotLocked if the node was found to not have a
reservation at all.
"""
'''
@abc.abstractmethod
def create_node(self, values):
"""Create a new node.
@ -137,8 +107,6 @@ class Connection(object):
'instance_uuid': None,
'power_state': states.POWER_OFF,
'provision_state': states.AVAILABLE,
'driver': 'pxe_ipmitool',
'driver_info': { ... },
'properties': { ... },
'extra': { ... },
}
@ -168,15 +136,6 @@ class Connection(object):
:param node_name: The logical name of a node.
:returns: A node.
"""
'''
@abc.abstractmethod
def get_node_by_instance(self, instance):
"""Return a node.
:param instance: The instance name or uuid to search for.
:returns: A node.
"""
'''
@abc.abstractmethod
def get_node_by_code(self, instance):
@ -199,171 +158,10 @@ class Connection(object):
:param node_id: The id or uuid of a node.
:param values: Dict of values to update.
May be a partial list, eg. when setting the
properties for a driver. For example:
::
{
'driver_info':
{
'my-field-1': val1,
'my-field-2': val2,
}
}
:returns: A node.
:raises: NodeAssociated
:raises: NodeNotFound
"""
'''
@abc.abstractmethod
def get_port_by_id(self, port_id):
"""Return a network port representation.
:param port_id: The id of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_by_uuid(self, port_uuid):
"""Return a network port representation.
:param port_uuid: The uuid of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_by_address(self, address):
"""Return a network port representation.
:param address: The MAC address of a port.
:returns: A port.
"""
@abc.abstractmethod
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of ports.
:param limit: Maximum number of ports to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List all the ports for a given node.
:param node_id: The integer node ID.
:param limit: Maximum number of ports to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
:returns: A list of ports.
"""
@abc.abstractmethod
def create_port(self, values):
"""Create a new port.
:param values: Dict of values.
"""
@abc.abstractmethod
def update_port(self, port_id, values):
"""Update properties of an port.
:param port_id: The id or MAC of a port.
:param values: Dict of values to update.
:returns: A port.
"""
@abc.abstractmethod
def destroy_port(self, port_id):
"""Destroy an port.
:param port_id: The id or MAC of a port.
"""
@abc.abstractmethod
def create_chassis(self, values):
"""Create a new chassis.
:param values: Dict of values.
"""
@abc.abstractmethod
def get_chassis_by_id(self, chassis_id):
"""Return a chassis representation.
:param chassis_id: The id of a chassis.
:returns: A chassis.
"""
@abc.abstractmethod
def get_chassis_by_uuid(self, chassis_uuid):
"""Return a chassis representation.
:param chassis_uuid: The uuid of a chassis.
:returns: A chassis.
"""
@abc.abstractmethod
def get_chassis_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of chassis.
:param limit: Maximum number of chassis to return.
:param marker: the last item of the previous page; we return the next
result set.
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
"""
@abc.abstractmethod
def update_chassis(self, chassis_id, values):
"""Update properties of an chassis.
:param chassis_id: The id or the uuid of a chassis.
:param values: Dict of values to update.
:returns: A chassis.
"""
@abc.abstractmethod
def destroy_chassis(self, chassis_id):
"""Destroy a chassis.
:param chassis_id: The id or the uuid of a chassis.
"""
'''
@abc.abstractmethod
def register_conductor(self, values, update_existing=False):
"""Register an active conductor with the cluster.
:param values: A dict of values which must contain the following:
::
{
'hostname': the unique hostname which identifies
this Conductor service.
'drivers': a list of supported drivers.
}
:param update_existing: When false, registration will raise an
exception when a conflicting online record
is found. When true, will overwrite the
existing record. Default: False.
:returns: A conductor.
:raises: ConductorAlreadyRegistered
"""
@abc.abstractmethod
def get_conductor(self, hostname):
@ -390,23 +188,6 @@ class Connection(object):
:raises: ConductorNotFound
"""
@abc.abstractmethod
def get_active_driver_dict(self, interval):
"""Retrieve drivers for the registered and active conductors.
:param interval: Seconds since last check-in of a conductor.
:returns: A dict which maps driver names to the set of hosts
which support them. For example:
::
{driverA: set([host1, host2]),
driverB: set([host2, host3])}
"""
# ##################### NEW #############################
@abc.abstractmethod
def create_session(self, values):
"""Create a new location.

View File

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add inspection_started_at and inspection_finished_at
Revision ID: 1e1d5ace7dc6
Revises: 3ae36a5f5131
Create Date: 2015-02-26 10:46:46.861927
"""
# revision identifiers, used by Alembic.
revision = '1e1d5ace7dc6'
down_revision = '3ae36a5f5131'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add nullable timestamps recording when hardware inspection
    # started and finished on each node.
    op.add_column('nodes', sa.Column('inspection_started_at',
                                     sa.DateTime(),
                                     nullable=True))
    op.add_column('nodes', sa.Column('inspection_finished_at',
                                     sa.DateTime(),
                                     nullable=True))


def downgrade():
    # Drop both inspection timestamp columns added by upgrade().
    op.drop_column('nodes', 'inspection_started_at')
    op.drop_column('nodes', 'inspection_finished_at')

View File

@ -1,35 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add provision_updated_at
Revision ID: 21b331f883ef
Revises: 2581ebaf0cb2
Create Date: 2014-02-19 13:45:30.150632
"""
# revision identifiers, used by Alembic.
revision = '21b331f883ef'
down_revision = '2581ebaf0cb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add a nullable timestamp recording the last provision-state update.
    op.add_column('nodes', sa.Column('provision_updated_at', sa.DateTime(),
                                     nullable=True))


def downgrade():
    # Remove the column added by upgrade().
    op.drop_column('nodes', 'provision_updated_at')

View File

@ -1,36 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add Node.maintenance_reason
Revision ID: 242cc6a923b3
Revises: 487deb87cc9d
Create Date: 2014-10-15 23:00:43.164061
"""
# revision identifiers, used by Alembic.
revision = '242cc6a923b3'
down_revision = '487deb87cc9d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add a free-form text column for the reason a node entered
    # maintenance mode.
    op.add_column('nodes', sa.Column('maintenance_reason',
                                     sa.Text(),
                                     nullable=True))


def downgrade():
    # Remove the column added by upgrade().
    op.drop_column('nodes', 'maintenance_reason')

View File

@ -1,106 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""initial migration
Revision ID: 2581ebaf0cb2
Revises: None
Create Date: 2014-01-17 12:14:07.754448
"""
# revision identifiers, used by Alembic.
revision = '2581ebaf0cb2'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
# commands auto generated by Alembic - please adjust!
op.create_table(
'conductors',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('hostname', sa.String(length=255), nullable=False),
sa.Column('drivers', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'chassis',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'nodes',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=True),
sa.Column('instance_uuid', sa.String(length=36), nullable=True),
sa.Column('chassis_id', sa.Integer(), nullable=True),
sa.Column('power_state', sa.String(length=15), nullable=True),
sa.Column('target_power_state', sa.String(length=15), nullable=True),
sa.Column('provision_state', sa.String(length=15), nullable=True),
sa.Column('target_provision_state', sa.String(length=15),
nullable=True),
sa.Column('last_error', sa.Text(), nullable=True),
sa.Column('properties', sa.Text(), nullable=True),
sa.Column('driver', sa.String(length=15), nullable=True),
sa.Column('driver_info', sa.Text(), nullable=True),
sa.Column('reservation', sa.String(length=255), nullable=True),
sa.Column('maintenance', sa.Boolean(), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['chassis_id'], ['chassis.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'],
unique=False)
op.create_table(
'ports',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=36), nullable=True),
sa.Column('address', sa.String(length=18), nullable=True),
sa.Column('node_id', sa.Integer(), nullable=True),
sa.Column('extra', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('address', name='uniq_ports0address'),
sa.UniqueConstraint('uuid', name='uniq_ports0uuid'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
# end Alembic commands
def downgrade():
    """Refuse to downgrade past the initial schema migration."""
    # The initial migration creates the entire schema; there is nothing
    # meaningful to roll back to, so downgrading is explicitly rejected.
    message = 'Downgrade from initial migration is unsupported.'
    raise NotImplementedError(message)

View File

@ -1,42 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""increase-node-name-length
Revision ID: 2fb93ffd2af1
Revises: 4f399b21ae71
Create Date: 2015-03-18 17:08:11.470791
"""
# revision identifiers, used by Alembic.
revision = '2fb93ffd2af1'
down_revision = '4f399b21ae71'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Widen nodes.name from VARCHAR(63) to VARCHAR(255)."""
    alter_kwargs = {
        'existing_type': mysql.VARCHAR(length=63),
        'type_': sa.String(length=255),
        'existing_nullable': True,
    }
    op.alter_column('nodes', 'name', **alter_kwargs)
def downgrade():
    """Shrink nodes.name back from VARCHAR(255) to VARCHAR(63)."""
    alter_kwargs = {
        'existing_type': sa.String(length=255),
        'type_': mysql.VARCHAR(length=63),
        'existing_nullable': True,
    }
    op.alter_column('nodes', 'name', **alter_kwargs)

View File

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add Node instance info
Revision ID: 31baaf680d2b
Revises: 3cb628139ea4
Create Date: 2014-03-05 21:09:32.372463
"""
# revision identifiers, used by Alembic.
revision = '31baaf680d2b'
down_revision = '3cb628139ea4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable text column nodes.instance_info."""
    instance_info = sa.Column('instance_info', sa.Text(), nullable=True)
    op.add_column('nodes', instance_info)
def downgrade():
    """Remove the nodes.instance_info column added by upgrade()."""
    op.drop_column('nodes', 'instance_info')

View File

@ -1,37 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_logical_name
Revision ID: 3ae36a5f5131
Revises: bb59b63f55a
Create Date: 2014-12-10 14:27:26.323540
"""
# revision identifiers, used by Alembic.
revision = '3ae36a5f5131'
down_revision = 'bb59b63f55a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a unique, nullable nodes.name column for logical names."""
    name_column = sa.Column('name', sa.String(length=63), nullable=True)
    op.add_column('nodes', name_column)
    # Logical names must be unique across all nodes.
    op.create_unique_constraint('uniq_nodes0name', 'nodes', ['name'])
def downgrade():
    """Drop the nodes.name column and its unique constraint."""
    # The constraint must be removed before the column it references.
    op.drop_constraint('uniq_nodes0name', 'nodes', type_='unique')
    op.drop_column('nodes', 'name')

View File

@ -1,39 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add unique constraint to instance_uuid
Revision ID: 3bea56f25597
Revises: 31baaf680d2b
Create Date: 2014-06-05 11:45:07.046670
"""
# revision identifiers, used by Alembic.
revision = '3bea56f25597'
down_revision = '31baaf680d2b'
from alembic import op
def upgrade():
    """Replace the instance_uuid index with a unique constraint."""
    op.create_unique_constraint(
        "uniq_nodes0instance_uuid", "nodes", ["instance_uuid"])
    # The plain index is redundant once the unique constraint exists.
    op.drop_index('node_instance_uuid', 'nodes')
def downgrade():
    """Restore the plain instance_uuid index, dropping the constraint."""
    op.drop_constraint("uniq_nodes0instance_uuid", "nodes", type_='unique')
    # Recreate the non-unique index that the upgrade removed.
    op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'])

View File

@ -1,34 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nodes add console enabled
Revision ID: 3cb628139ea4
Revises: 21b331f883ef
Create Date: 2014-02-26 11:24:11.318023
"""
# revision identifiers, used by Alembic.
revision = '3cb628139ea4'
down_revision = '21b331f883ef'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the boolean column nodes.console_enabled.

    The original passed the bare ``sa.Boolean`` class; instantiating it
    (``sa.Boolean()``) declares the column type the same way every other
    migration in this series does.
    """
    op.add_column('nodes', sa.Column('console_enabled', sa.Boolean()))
def downgrade():
    """Drop the nodes.console_enabled column added by upgrade()."""
    op.drop_column('nodes', 'console_enabled')

View File

@ -1,45 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add conductor_affinity and online
Revision ID: 487deb87cc9d
Revises: 3bea56f25597
Create Date: 2014-09-26 16:16:30.988900
"""
# revision identifiers, used by Alembic.
revision = '487deb87cc9d'
down_revision = '3bea56f25597'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add conductors.online and nodes.conductor_affinity columns."""
    online_column = sa.Column('online', sa.Boolean(), default=True)
    op.add_column('conductors', online_column)
    # conductor_affinity points at the conductor that last prepared
    # local state for the node.
    affinity_fk = sa.ForeignKey('conductors.id',
                                name='nodes_conductor_affinity_fk')
    affinity_column = sa.Column('conductor_affinity', sa.Integer(),
                                affinity_fk, nullable=True)
    op.add_column('nodes', affinity_column)
def downgrade():
    """Undo upgrade(): drop the affinity FK/column and conductors.online."""
    # Drop the named FK constraint first so the column can be removed.
    op.drop_constraint('nodes_conductor_affinity_fk', 'nodes',
                       type_='foreignkey')
    for table_name, column_name in (('nodes', 'conductor_affinity'),
                                    ('conductors', 'online')):
        op.drop_column(table_name, column_name)

View File

@ -1,35 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add node.clean_step
Revision ID: 4f399b21ae71
Revises: 1e1d5ace7dc6
Create Date: 2015-02-18 01:21:46.062311
"""
# revision identifiers, used by Alembic.
revision = '4f399b21ae71'
down_revision = '1e1d5ace7dc6'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable text column nodes.clean_step."""
    clean_step = sa.Column('clean_step', sa.Text(), nullable=True)
    op.add_column('nodes', clean_step)
def downgrade():
    """Remove the nodes.clean_step column added by upgrade()."""
    op.drop_column('nodes', 'clean_step')

View File

@ -1,52 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""replace NOSTATE with AVAILABLE
Revision ID: 5674c57409b9
Revises: 242cc6a923b3
Create Date: 2015-01-14 16:55:44.718196
"""
# revision identifiers, used by Alembic.
revision = '5674c57409b9'
down_revision = '242cc6a923b3'
from alembic import op
from sqlalchemy import String
from sqlalchemy.sql import table, column
node = table('nodes',
column('uuid', String(36)),
column('provision_state', String(15)))
# NOTE(deva): We must represent the states as static strings in this migration
# file, rather than import iotronic.common.states, because that file may change
# in the future. This migration script must still be able to be run with
# future versions of the code and still produce the same results.
AVAILABLE = 'available'
def upgrade():
    """Set provision_state to 'available' on rows where it is NULL.

    Bug fix: the original used the Python identity test
    ``node.c.provision_state is None``, which is evaluated eagerly to the
    constant ``False`` instead of producing a SQL ``IS NULL`` clause, so
    the UPDATE matched no rows.  ``Column.is_(None)`` renders the intended
    ``provision_state IS NULL`` predicate.
    """
    op.execute(
        node.update().where(
            node.c.provision_state.is_(None)).values(
            {'provision_state': op.inline_literal(AVAILABLE)}))
def downgrade():
    """Reset provision_state back to NULL where it equals 'available'."""
    is_available = node.c.provision_state == op.inline_literal(AVAILABLE)
    statement = node.update().where(is_available).values(
        {'provision_state': None})
    op.execute(statement)

View File

@ -1,36 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add_node_driver_internal_info
Revision ID: bb59b63f55a
Revises: 5674c57409b9
Create Date: 2015-01-28 14:28:22.212790
"""
# revision identifiers, used by Alembic.
revision = 'bb59b63f55a'
down_revision = '5674c57409b9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable text column nodes.driver_internal_info."""
    internal_info = sa.Column('driver_internal_info', sa.Text(),
                              nullable=True)
    op.add_column('nodes', internal_info)
def downgrade():
    """Remove the nodes.driver_internal_info column added by upgrade()."""
    op.drop_column('nodes', 'driver_internal_info')

View File

@ -16,9 +16,6 @@
"""SQLAlchemy storage backend."""
import collections
import datetime
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
@ -31,9 +28,7 @@ from sqlalchemy.orm.exc import NoResultFound
from iotronic.common import exception
from iotronic.common.i18n import _
from iotronic.common.i18n import _LW
from iotronic.common import states
from iotronic.common import utils
from iotronic.db import api
from iotronic.db.sqlalchemy import models
@ -99,40 +94,6 @@ def add_identity_filter(query, value):
raise exception.InvalidIdentity(identity=value)
def add_port_filter(query, value):
    """Add a port-specific filter to a query.

    If *value* looks like a MAC address the query is filtered on the
    port's address column; otherwise the generic identity filter
    (numeric id or UUID) is applied instead.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    """
    if not utils.is_valid_mac(value):
        return add_identity_filter(query, value)
    return query.filter_by(address=value)
def add_port_filter_by_node(query, value):
    """Filter a Port query by its owning node (numeric id or UUID)."""
    if strutils.is_int_like(value):
        # An integer value is the node's primary key; filter directly.
        return query.filter_by(node_id=value)
    # Otherwise treat the value as a node UUID and join through Node.
    joined = query.join(models.Node,
                        models.Port.node_id == models.Node.id)
    return joined.filter(models.Node.uuid == value)
def add_node_filter_by_chassis(query, value):
    """Filter a Node query by its chassis (numeric id or UUID)."""
    if strutils.is_int_like(value):
        # An integer value is the chassis primary key; filter directly.
        return query.filter_by(chassis_id=value)
    # Otherwise treat the value as a chassis UUID and join through Chassis.
    joined = query.join(models.Chassis,
                        models.Node.chassis_id == models.Chassis.id)
    return joined.filter(models.Chassis.uuid == value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
@ -149,8 +110,6 @@ def _paginate_query(model, limit=None, marker=None, sort_key=None,
% {'key': sort_key})
return query.all()
# NEW
def add_location_filter_by_node(query, value):
if strutils.is_int_like(value):
@ -171,38 +130,11 @@ class Connection(api.Connection):
if filters is None:
filters = []
if 'chassis_uuid' in filters:
# get_chassis_by_uuid() to raise an exception if the chassis
# is not found
chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid'])
query = query.filter_by(chassis_id=chassis_obj.id)
if 'associated' in filters:
if filters['associated']:
query = query.filter(models.Node.instance_uuid is not None)
else:
query = query.filter(models.Node.instance_uuid is None)
"""
if 'reserved' in filters:
if filters['reserved']:
query = query.filter(models.Node.reservation != None)
else:
query = query.filter(models.Node.reservation == None)
"""
if 'maintenance' in filters:
query = query.filter_by(maintenance=filters['maintenance'])
if 'driver' in filters:
query = query.filter_by(driver=filters['driver'])
if 'provision_state' in filters:
query = query.filter_by(provision_state=filters['provision_state'])
if 'provisioned_before' in filters:
limit = (timeutils.utcnow() -
datetime.timedelta(seconds=filters['provisioned_before']))
query = query.filter(models.Node.provision_updated_at < limit)
if 'inspection_started_before' in filters:
limit = ((timeutils.utcnow()) -
(datetime.timedelta(
seconds=filters['inspection_started_before'])))
query = query.filter(models.Node.inspection_started_at < limit)
return query
@ -227,46 +159,6 @@ class Connection(api.Connection):
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
"""
def reserve_node(self, tag, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
# be optimistic and assume we usually create a reservation
count = query.filter_by(reservation=None).update(
{'reservation': tag}, synchronize_session=False)
try:
node = query.one()
if count != 1:
# Nothing updated and node exists. Must already be
# locked.
raise exception.NodeLocked(node=node_id,
host=node['reservation'])
return node
except NoResultFound:
raise exception.NodeNotFound(node_id)
def release_node(self, tag, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
# be optimistic and assume we usually release a reservation
count = query.filter_by(reservation=tag).update(
{'reservation': None}, synchronize_session=False)
try:
if count != 1:
node = query.one()
if node['reservation'] is None:
raise exception.NodeNotLocked(node=node_id)
else:
raise exception.NodeLocked(node=node_id,
host=node['reservation'])
except NoResultFound:
raise exception.NodeNotFound(node_id)
"""
def create_node(self, values):
# ensure defaults are present for new nodes
if 'uuid' not in values:
@ -311,21 +203,6 @@ class Connection(api.Connection):
return query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_code)
'''
def get_node_by_instance(self, instance):
if not uuidutils.is_uuid_like(instance):
raise exception.InvalidUUID(uuid=instance)
query = (model_query(models.Node)
.filter_by(instance_uuid=instance))
try:
result = query.one()
except NoResultFound:
raise exception.InstanceNotFound(instance=instance)
return result
'''
def destroy_node(self, node_id):
@ -349,28 +226,6 @@ class Connection(api.Connection):
location_query.delete()
query.delete()
"""
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
try:
node_ref = query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
if uuidutils.is_uuid_like(node_id):
node_id = node_ref['id']
#port_query = model_query(models.Port, session=session)
#port_query = add_port_filter_by_node(port_query, node_id)
#port_query.delete()
query.delete()
"""
def update_node(self, node_id, values):
# NOTE(dtantsur): this can lead to very strange errors
@ -422,149 +277,6 @@ class Connection(api.Connection):
ref.update(values)
return ref
"""
def get_port_by_id(self, port_id):
query = model_query(models.Port).filter_by(id=port_id)
try:
return query.one()
except NoResultFound:
raise exception.PortNotFound(port=port_id)
def get_port_by_uuid(self, port_uuid):
query = model_query(models.Port).filter_by(uuid=port_uuid)
try:
return query.one()
except NoResultFound:
raise exception.PortNotFound(port=port_uuid)
def get_port_by_address(self, address):
query = model_query(models.Port).filter_by(address=address)
try:
return query.one()
except NoResultFound:
raise exception.PortNotFound(port=address)
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir)
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Port)
query = query.filter_by(node_id=node_id)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
def create_port(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
port = models.Port()
port.update(values)
try:
port.save()
except db_exc.DBDuplicateEntry as exc:
if 'address' in exc.columns:
raise exception.MACAlreadyExists(mac=values['address'])
raise exception.PortAlreadyExists(uuid=values['uuid'])
return port
def update_port(self, port_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Port.")
raise exception.InvalidParameterValue(err=msg)
session = get_session()
try:
with session.begin():
query = model_query(models.Port, session=session)
query = add_port_filter(query, port_id)
ref = query.one()
ref.update(values)
except NoResultFound:
raise exception.PortNotFound(port=port_id)
except db_exc.DBDuplicateEntry:
raise exception.MACAlreadyExists(mac=values['address'])
return ref
def destroy_port(self, port_id):
session = get_session()
with session.begin():
query = model_query(models.Port, session=session)
query = add_port_filter(query, port_id)
count = query.delete()
if count == 0:
raise exception.PortNotFound(port=port_id)
def get_chassis_by_id(self, chassis_id):
query = model_query(models.Chassis).filter_by(id=chassis_id)
try:
return query.one()
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_id)
def get_chassis_by_uuid(self, chassis_uuid):
query = model_query(models.Chassis).filter_by(uuid=chassis_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_uuid)
def get_chassis_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.Chassis, limit, marker,
sort_key, sort_dir)
def create_chassis(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
chassis = models.Chassis()
chassis.update(values)
try:
chassis.save()
except db_exc.DBDuplicateEntry:
raise exception.ChassisAlreadyExists(uuid=values['uuid'])
return chassis
def update_chassis(self, chassis_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Chassis.")
raise exception.InvalidParameterValue(err=msg)
session = get_session()
with session.begin():
query = model_query(models.Chassis, session=session)
query = add_identity_filter(query, chassis_id)
count = query.update(values)
if count != 1:
raise exception.ChassisNotFound(chassis=chassis_id)
ref = query.one()
return ref
def destroy_chassis(self, chassis_id):
def chassis_not_empty(session):
#Checks whether the chassis does not have nodes.
query = model_query(models.Node, session=session)
query = add_node_filter_by_chassis(query, chassis_id)
return query.count() != 0
session = get_session()
with session.begin():
if chassis_not_empty(session):
raise exception.ChassisNotEmpty(chassis=chassis_id)
query = model_query(models.Chassis, session=session)
query = add_identity_filter(query, chassis_id)
count = query.delete()
if count != 1:
raise exception.ChassisNotFound(chassis=chassis_id)
"""
def register_conductor(self, values, update_existing=False):
session = get_session()
@ -615,39 +327,6 @@ class Connection(api.Connection):
if count == 0:
raise exception.ConductorNotFound(conductor=hostname)
def clear_node_reservations_for_conductor(self, hostname):
session = get_session()
nodes = []
with session.begin():
query = (model_query(models.Node, session=session)
.filter_by(reservation=hostname))
nodes = [node['uuid'] for node in query]
query.update({'reservation': None})
if nodes:
nodes = ', '.join(nodes)
LOG.warn(_LW('Cleared reservations held by %(hostname)s: '
'%(nodes)s'), {'hostname': hostname, 'nodes': nodes})
def get_active_driver_dict(self, interval=None):
if interval is None:
interval = CONF.conductor.heartbeat_timeout
limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
result = (model_query(models.Conductor)
.filter_by(online=True)
.filter(models.Conductor.updated_at >= limit)
.all())
# build mapping of drivers to the set of hosts which support them
d2c = collections.defaultdict(set)
for row in result:
for driver in row['drivers']:
d2c[driver].add(row['hostname'])
return d2c
# ##################### NEW #############################
def create_session(self, values):
session = models.SessionWP()
session.update(values)

View File

@ -15,7 +15,7 @@
# under the License.
"""
SQLAlchemy models for baremetal data.
SQLAlchemy models for iot data.
"""
import json
@ -112,20 +112,6 @@ class IotronicBase(models.TimestampMixin,
Base = declarative_base(cls=IotronicBase)
class Chassis(Base):
    """Represents a hardware chassis."""

    __tablename__ = 'chassis'
    __table_args__ = (
        # uuid is the externally visible identifier; keep it unique.
        schema.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
        table_args()
    )
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Public identifier (uniqueness enforced by the constraint above).
    uuid = Column(String(36))
    # Free-form metadata serialized as JSON.
    extra = Column(JSONEncodedDict)
    description = Column(String(255), nullable=True)
class Conductor(Base):
"""Represents a conductor service entry."""
@ -136,7 +122,6 @@ class Conductor(Base):
)
id = Column(Integer, primary_key=True)
hostname = Column(String(255), nullable=False)
drivers = Column(JSONEncodedList)
online = Column(Boolean, default=True)
@ -158,60 +143,6 @@ class Node(Base):
session = Column(String(255), nullable=True)
mobile = Column(Boolean, default=False)
extra = Column(JSONEncodedDict)
"""
__tablename__ = 'nodes'
'''
__table_args__ = (
schema.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_nodes0instance_uuid'),
schema.UniqueConstraint('name', name='uniq_nodes0name'),
table_args())
'''
id = Column(Integer, primary_key=True)
uuid = Column(String(36))
# NOTE(deva): we store instance_uuid directly on the node so that we can
# filter on it more efficiently, even though it is
# user-settable, and would otherwise be in node.properties.
uuid = Column(String(36), nullable=True)
name = Column(String(255), nullable=True)
status = Column(String(10), nullable=True)
#chassis_id = Column(Integer, ForeignKey('chassis.id'), nullable=True)
#power_state = Column(String(15), nullable=True)
#target_power_state = Column(String(15), nullable=True)
#provision_state = Column(String(15), nullable=True)
#target_provision_state = Column(String(15), nullable=True)
#provision_updated_at = Column(DateTime, nullable=True)
#last_error = Column(Text, nullable=True)
#instance_info = Column(JSONEncodedDict)
#properties = Column(JSONEncodedDict)
#driver = Column(String(15))
#driver_info = Column(JSONEncodedDict)
#driver_internal_info = Column(JSONEncodedDict)
#clean_step = Column(JSONEncodedDict)
# NOTE(deva): this is the host name of the conductor which has
# acquired a TaskManager lock on the node.
# We should use an INT FK (conductors.id) in the future.
reservation = Column(String(255), nullable=True)
# NOTE(deva): this is the id of the last conductor which prepared local
# state for the node (eg, a PXE config file).
# When affinity and the hash ring's mapping do not match,
# this indicates that a conductor should rebuild local state.
'''
conductor_affinity = Column(Integer,
ForeignKey('conductors.id',
name='nodes_conductor_affinity_fk'),
nullable=True)
'''
#maintenance = Column(Boolean, default=False)
#maintenance_reason = Column(Text, nullable=True)
#console_enabled = Column(Boolean, default=False)
#inspection_finished_at = Column(DateTime, nullable=True)
#inspection_started_at = Column(DateTime, nullable=True)
#extra = Column(JSONEncodedDict)
"""
class Location(Base):
@ -244,18 +175,3 @@ class SessionWP(Base):
session_id = Column(String(15))
node_uuid = Column(String(36))
node_id = Column(Integer, ForeignKey('nodes.id'))
class Port(Base):
"""Represents a network port of a bare metal node."""
__tablename__ = 'ports'
__table_args__ = (
schema.UniqueConstraint('address', name='uniq_ports0address'),
schema.UniqueConstraint('uuid', name='uniq_ports0uuid'),
table_args())
id = Column(Integer, primary_key=True)
uuid = Column(String(36))
address = Column(String(18))
node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
extra = Column(JSONEncodedDict)

View File

@ -12,26 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
# from iotronic.objects import chassis
from iotronic.objects import conductor
from iotronic.objects import location
from iotronic.objects import node
from iotronic.objects import sessionwp
# from iotronic.objects import port
# Chassis = chassis.Chassis
Conductor = conductor.Conductor
Node = node.Node
Location = location.Location
SessionWP = sessionwp.SessionWP
# Port = port.Port
__all__ = (
# Chassis,
Conductor,
Node,
Location,
SessionWP,
# Port
)

View File

@ -17,7 +17,6 @@
from iotronic.common.i18n import _
from iotronic.db import api as db_api
from iotronic.objects import base
from iotronic.objects import utils
class Conductor(base.IotronicObject):
@ -26,7 +25,6 @@ class Conductor(base.IotronicObject):
fields = {
'id': int,
'drivers': utils.list_or_none,
'hostname': str,
}
@ -60,8 +58,7 @@ class Conductor(base.IotronicObject):
"""Loads and applies updates for this Conductor.
Loads a :class:`Conductor` with the same uuid from the database and
checks for updated attributes. Updates are applied from
the loaded chassis column by column, if there are any updates.
checks for updated attributes.
:param context: Security context. NOTE: This should only
be used internally by the indirection_api.

View File

@ -106,17 +106,6 @@ class Node(base.IotronicObject):
node = Node._from_db_object(cls(context), db_node)
return node
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
"""Find a node based on the instance uuid and return a Node object.
:param uuid: the uuid of the instance.
:returns: a :class:`Node` object.
"""
db_node = cls.dbapi.get_node_by_instance(instance_uuid)
node = Node._from_db_object(cls(context), db_node)
return node
@base.remotable_classmethod
def list(cls, context, limit=None, marker=None, sort_key=None,
sort_dir=None, filters=None):
@ -218,10 +207,6 @@ class Node(base.IotronicObject):
object, e.g.: Node(context)
"""
updates = self.obj_get_changes()
if 'driver' in updates and 'driver_internal_info' not in updates:
# Clean driver_internal_info when changes driver
self.driver_internal_info = {}
updates = self.obj_get_changes()
self.dbapi.update_node(self.uuid, updates)
self.obj_reset_changes()

View File

@ -1,108 +0,0 @@
-- MySQL dump 10.14 Distrib 5.5.44-MariaDB, for Linux (x86_64)
--
-- Host: localhost Database: iotronic
-- ------------------------------------------------------
-- Server version 5.5.44-MariaDB
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
CREATE DATABASE /*!32312 IF NOT EXISTS*/ `iotronic` /*!40100 DEFAULT CHARACTER SET utf8 */;
USE `iotronic`;
--
-- Table structure for table `nodes`
--
DROP TABLE IF EXISTS `nodes`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- IoT nodes (boards) registered with Iotronic.
CREATE TABLE `nodes` (
`created_at` datetime DEFAULT NULL,
`updated_at` datetime DEFAULT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
-- Public identifier; unique (see key below).
`uuid` varchar(36) NOT NULL,
-- Registration code; unique (see key below).
`code` varchar(25) NOT NULL,
`status` varchar(15) DEFAULT NULL,
`name` varchar(255) DEFAULT NULL,
`device` varchar(255) NOT NULL,
-- Identifier of the current WAMP session, if any.
`session` varchar(255) DEFAULT NULL,
`mobile` tinyint(1) DEFAULT 0 NOT NULL,
-- Free-form metadata (JSON-encoded text).
`extra` text,
PRIMARY KEY (`id`),
UNIQUE KEY `uuid` (`uuid`),
UNIQUE KEY `code` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=127 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
DROP TABLE IF EXISTS `sessions`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- WAMP sessions associated with nodes.
CREATE TABLE `sessions` (
`created_at` datetime DEFAULT NULL,
`updated_at` datetime DEFAULT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
-- 1 while the session is active, 0 once closed.
`valid` tinyint(1) DEFAULT 1 NOT NULL,
`session_id` varchar(18) NOT NULL,
`node_uuid` varchar(36) NOT NULL,
-- Owning node (FK below).
`node_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `session_id` (`session_id`),
CONSTRAINT `session_node_id` FOREIGN KEY (`node_id`) REFERENCES `nodes` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
DROP TABLE IF EXISTS `locations`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- Geographic positions of nodes; coordinates stored as strings.
CREATE TABLE `locations` (
`created_at` datetime DEFAULT NULL,
`updated_at` datetime DEFAULT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`longitude` varchar(18) DEFAULT NULL,
`latitude` varchar(18) DEFAULT NULL,
`altitude` varchar(18) DEFAULT NULL,
-- Owning node (FK below).
`node_id` int(11) NOT NULL,
PRIMARY KEY (`id`),
KEY `node_id` (`node_id`),
CONSTRAINT `location_ibfk_1` FOREIGN KEY (`node_id`) REFERENCES `nodes` (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
--
-- Table structure for table `conductors`
--
DROP TABLE IF EXISTS `conductors`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
-- Conductor service instances; hostname must be unique.
CREATE TABLE `conductors` (
`created_at` datetime DEFAULT NULL,
`updated_at` datetime DEFAULT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`hostname` varchar(255) NOT NULL,
-- JSON-encoded list of driver names (text).
`drivers` text,
-- 1 while the conductor is alive, NULL/0 otherwise.
`online` tinyint(1) DEFAULT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_conductors0hostname` (`hostname`)
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8;
/*!40101 SET character_set_client = @saved_cs_client */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-- Dump completed on 2015-10-12 16:35:22

169
utils/iotronic.sql Normal file
View File

@ -0,0 +1,169 @@
-- MySQL Script generated by MySQL Workbench
-- lun 04 apr 2016 15:41:37 CEST
-- Model: New Model Version: 1.0
-- MySQL Workbench Forward Engineering
-- Save the client's session settings and relax unique/FK checks so the
-- tables below can be (re)created in any order; the saved values are
-- restored at the end of the script.
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='TRADITIONAL,ALLOW_INVALID_DATES';
-- -----------------------------------------------------
-- Schema iotronic
-- -----------------------------------------------------
-- WARNING: this performs a full rebuild, not a migration -- dropping the
-- schema destroys any existing iotronic data.
DROP SCHEMA IF EXISTS `iotronic` ;
-- -----------------------------------------------------
-- Schema iotronic
-- -----------------------------------------------------
CREATE SCHEMA IF NOT EXISTS `iotronic` DEFAULT CHARACTER SET utf8 ;
USE `iotronic` ;
-- -----------------------------------------------------
-- Table `iotronic`.`conductors`
-- -----------------------------------------------------
-- Conductor service registrations; `hostname` is unique so a restarting
-- conductor re-uses its row. NOTE(review): unlike the older dump, this
-- definition has no `drivers` column -- confirm that is intentional.
-- AUTO_INCREMENT = 6 is residue of the source database, not meaningful.
DROP TABLE IF EXISTS `iotronic`.`conductors` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`conductors` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`hostname` VARCHAR(255) NOT NULL,
`online` TINYINT(1) NULL DEFAULT NULL,   -- liveness flag; semantics defined by service code
PRIMARY KEY (`id`),
UNIQUE INDEX `uniq_conductors0hostname` (`hostname` ASC))
ENGINE = InnoDB
AUTO_INCREMENT = 6
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`nodes`
-- -----------------------------------------------------
-- Registered IoT boards. Both `uuid` and `code` are unique external
-- handles for a node; `session` presumably stores the id of the node's
-- current WAMP session -- verify against the conductor code.
DROP TABLE IF EXISTS `iotronic`.`nodes` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`nodes` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`uuid` VARCHAR(36) NOT NULL,             -- canonical 36-char UUID
`code` VARCHAR(25) NOT NULL,             -- short registration code
`status` VARCHAR(15) NULL DEFAULT NULL,
`name` VARCHAR(255) NULL DEFAULT NULL,
`device` VARCHAR(255) NOT NULL,          -- device type (e.g. board model)
`session` VARCHAR(255) NULL DEFAULT NULL,
`mobile` TINYINT(1) NOT NULL DEFAULT '0', -- 0 = fixed installation by default
`extra` TEXT NULL DEFAULT NULL,          -- free-form extra data
PRIMARY KEY (`id`),
UNIQUE INDEX `uuid` (`uuid` ASC),
UNIQUE INDEX `code` (`code` ASC))
ENGINE = InnoDB
AUTO_INCREMENT = 132
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`locations`
-- -----------------------------------------------------
-- Geographic position of a node. Coordinates are varchar(18) strings,
-- presumably stored verbatim as reported by the device. Rows follow the
-- owning node's lifecycle (ON DELETE/UPDATE CASCADE).
DROP TABLE IF EXISTS `iotronic`.`locations` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`locations` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`longitude` VARCHAR(18) NULL DEFAULT NULL,
`latitude` VARCHAR(18) NULL DEFAULT NULL,
`altitude` VARCHAR(18) NULL DEFAULT NULL,
`node_id` INT(11) NOT NULL,
PRIMARY KEY (`id`),
INDEX `node_id` (`node_id` ASC),
CONSTRAINT `location_ibfk_1`
FOREIGN KEY (`node_id`)
REFERENCES `iotronic`.`nodes` (`id`)
ON DELETE CASCADE
ON UPDATE CASCADE)
ENGINE = InnoDB
AUTO_INCREMENT = 6
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`sessions`
-- -----------------------------------------------------
-- WAMP session records for nodes. `session_id` is unique; `valid`
-- defaults to 1 (rows presumably get invalidated rather than deleted on
-- disconnect -- verify against the conductor code). `node_uuid` is
-- denormalized alongside the `node_id` foreign key.
DROP TABLE IF EXISTS `iotronic`.`sessions` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`sessions` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`valid` TINYINT(1) NOT NULL DEFAULT '1',
`session_id` VARCHAR(18) NOT NULL,
`node_uuid` VARCHAR(36) NOT NULL,
`node_id` INT(11) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE INDEX `session_id` (`session_id` ASC),
INDEX `session_node_id` (`node_id` ASC),
CONSTRAINT `session_node_id`
FOREIGN KEY (`node_id`)
REFERENCES `iotronic`.`nodes` (`id`)
ON DELETE CASCADE
ON UPDATE CASCADE)
ENGINE = InnoDB
AUTO_INCREMENT = 10
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`plugins`
-- -----------------------------------------------------
-- Plugin catalogue: `jsonschema` holds the plugin's parameter schema and
-- `code` its source, both as LONGTEXT blobs. NOTE(review): `name` is not
-- declared unique -- confirm whether duplicates are acceptable.
DROP TABLE IF EXISTS `iotronic`.`plugins` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`plugins` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`name` VARCHAR(20) NOT NULL,
`category` VARCHAR(20) NOT NULL,
`jsonschema` LONGTEXT NOT NULL,
`code` LONGTEXT NOT NULL,
PRIMARY KEY (`id`))
ENGINE = InnoDB
AUTO_INCREMENT = 10
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`plugins_injected`
-- -----------------------------------------------------
-- Association table recording which plugins have been injected on which
-- nodes, with the injection `state`.
--
-- Fix: the original definition had no primary key and no foreign keys,
-- so duplicate (node, plugin) rows and orphaned references to deleted
-- nodes/plugins were possible. A composite primary key plus cascading
-- foreign keys (matching the style of `locations` and `sessions`)
-- enforce integrity; existing valid rows remain insertable.
DROP TABLE IF EXISTS `iotronic`.`plugins_injected` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`plugins_injected` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`node_id` INT(11) NOT NULL,
`plugin_id` INT(11) NOT NULL,
`state` VARCHAR(20) NOT NULL,
PRIMARY KEY (`node_id`, `plugin_id`),
INDEX `injected_plugin_id_idx` (`plugin_id` ASC),
CONSTRAINT `injected_node_id`
FOREIGN KEY (`node_id`)
REFERENCES `iotronic`.`nodes` (`id`)
ON DELETE CASCADE
ON UPDATE CASCADE,
CONSTRAINT `injected_plugin_id`
FOREIGN KEY (`plugin_id`)
REFERENCES `iotronic`.`plugins` (`id`)
ON DELETE CASCADE
ON UPDATE CASCADE)
ENGINE = InnoDB
DEFAULT CHARACTER SET = utf8;
-- -----------------------------------------------------
-- Table `iotronic`.`sensors`
-- -----------------------------------------------------
-- Sensor catalogue (type, measurement unit, manufacturer, model).
--
-- Fix: `id` was declared `INT NOT NULL` without AUTO_INCREMENT, unlike
-- every other surrogate-key table in this schema, which would force
-- callers to allocate ids by hand. Adding AUTO_INCREMENT is
-- backward-compatible: explicit ids still insert unchanged.
DROP TABLE IF EXISTS `iotronic`.`sensors` ;
CREATE TABLE IF NOT EXISTS `iotronic`.`sensors` (
`created_at` DATETIME NULL DEFAULT NULL,
`updated_at` DATETIME NULL DEFAULT NULL,
`id` INT(11) NOT NULL AUTO_INCREMENT,
`type` VARCHAR(45) NOT NULL,
`unit` VARCHAR(45) NOT NULL,
`fabric_name` VARCHAR(45) NULL DEFAULT NULL,  -- manufacturer name
`model` VARCHAR(45) NULL DEFAULT NULL,
PRIMARY KEY (`id`))
ENGINE = InnoDB
DEFAULT CHARACTER SET = utf8;
-- Restore the session settings saved at the top of the script.
SET SQL_MODE=@OLD_SQL_MODE;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS;