summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--networking_zte/common/__init__.py0
-rw-r--r--networking_zte/common/rest/__init__.py0
-rw-r--r--networking_zte/common/rest/servermanager.py471
-rw-r--r--networking_zte/common/rest/znic_l2/__init__.py0
-rw-r--r--networking_zte/common/rest/znic_l2/config.py84
-rw-r--r--networking_zte/common/rest/znic_l2/mech_znic_l2.py114
-rw-r--r--networking_zte/common/rest/znic_l2/znic_l2restconf.py495
-rw-r--r--networking_zte/fwaas/__init__.py0
-rw-r--r--networking_zte/l3/__init__.py0
-rw-r--r--networking_zte/lbaas/__init__.py0
-rw-r--r--networking_zte/ml2/__init__.py0
-rw-r--r--tools/neutron-zenic-agent10
-rw-r--r--tools/neutron-zenic-agent-script147
-rw-r--r--tools/neutron-zenic-agent.conf23
-rw-r--r--tools/neutron-zenic-agent.service12
15 files changed, 1356 insertions, 0 deletions
diff --git a/networking_zte/common/__init__.py b/networking_zte/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/common/__init__.py
diff --git a/networking_zte/common/rest/__init__.py b/networking_zte/common/rest/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/common/rest/__init__.py
diff --git a/networking_zte/common/rest/servermanager.py b/networking_zte/common/rest/servermanager.py
new file mode 100644
index 0000000..ebc69a3
--- /dev/null
+++ b/networking_zte/common/rest/servermanager.py
@@ -0,0 +1,471 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2# Copyright 2014 Big Switch Networks, Inc.
3# All Rights Reserved.
4#
5# Licensed under the Apache License, Version 2.0 (the "License"); you may
6# not use this file except in compliance with the License. You may obtain
7# a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14# License for the specific language governing permissions and limitations
15# under the License.
16#
17
18"""
19This module manages the HTTP and HTTPS connections to the backend controllers.
20
21The main class it provides for external use is ServerPool which manages a set
22of ServerProxy objects that correspond to individual backend controllers.
23
24The following functionality is handled by this module:
25- Translation of rest_* function calls to HTTP/HTTPS calls to the controllers
26- Automatic failover between controllers
27- SSL Certificate enforcement
28- HTTP Authentication
29
30"""
31import base64
32import eventlet
33import httplib
34import os
35import socket
36import ssl
37
38from oslo_config import cfg
39from oslo_log import log
40from oslo_serialization import jsonutils
41
42from neutron.common import exceptions
43from neutron.i18n import _LE
44from neutron.i18n import _LI
45from neutron.i18n import _LW
46
47LOG = log.getLogger(__name__)
48
49TOPOLOGY_PATH = "/topology"
50SUCCESS_CODES = range(200, 207)
51FAILURE_CODES = [301, 302, 303]
52BASE_URI = '/'
53
54
class RemoteRestError(exceptions.NeutronException):
    """Raised when a REST call to the backend network controller fails.

    Carries the HTTP status of the failed call (or None for
    connection-level failures) in addition to the textual reason.
    """
    message = _("Error in REST call to remote network "
                "controller: %(reason)s")
    # HTTP status code of the failed call; None when the failure happened
    # before a response was received.
    status = None

    def __init__(self, **kwargs):
        # 'status' is consumed here because the NeutronException base
        # class does not expect it as a message interpolation key.
        self.status = kwargs.pop('status', None)
        # 'reason' is left in kwargs so it interpolates into `message`.
        self.reason = kwargs.get('reason')
        super(RemoteRestError, self).__init__(**kwargs)
65
class ServerProxy(object):
    """REST proxy for a single backend network controller.

    Wraps one controller endpoint (host/port) and issues JSON REST calls
    over HTTP or HTTPS, adding the authentication and agent headers
    expected by the ZENIC controller.
    """

    def __init__(self, server, port, ssl, auth, timeout,
                 base_uri, success_codes, name, combined_cert):
        """
        :param server: controller host name or IP address
        :param port: controller TCP port
        :param ssl: use HTTPS when True, plain HTTP otherwise
        :param auth: 'user:password' string for HTTP Basic auth, or None
        :param timeout: socket timeout (seconds) for each request
        :param base_uri: prefix prepended to every resource path
        :param success_codes: iterable of HTTP statuses treated as success
        :param name: value sent in the NeutronProxy-Agent header
        :param combined_cert: path to a combined CA/host cert file, or None
        """
        self.server = server
        self.port = port
        self.ssl = ssl
        self.base_uri = base_uri
        self.timeout = timeout
        self.name = name
        self.success_codes = success_codes
        self.auth = None
        # Set by ServerPool when a call to this server fails so the pool
        # can prefer healthy servers on subsequent calls.
        self.failed = False
        self.capabilities = []
        if auth:
            # base64.encodestring (Python 2 spelling) appends a trailing
            # newline, hence the strip().
            self.auth = 'Basic ' + base64.encodestring(auth).strip()
        self.combined_cert = combined_cert

    def rest_call(self, action, resource, data='', headers=None,
                  timeout=False, reconnect=False):
        """Issue one REST request and return a result 4-tuple.

        :param action: HTTP method ('GET', 'POST', ...)
        :param resource: path appended to base_uri
        :param data: python object serialized into the JSON body
        :param headers: optional extra headers; the caller's dict is
            copied, never mutated
        :param timeout: unused; kept for interface compatibility
        :param reconnect: unused; kept for interface compatibility
        :returns: (status, reason, raw-body, decoded-body).  Status 0
            means a connection-level failure, status 9 a socket timeout.
        """
        uri = self.base_uri + resource
        body = jsonutils.dumps(data)
        # Copy instead of mutating the caller-supplied mapping (the
        # previous implementation wrote headers into the caller's dict).
        headers = dict(headers) if headers else {}
        headers['Content-type'] = 'application/json'
        headers['Accept'] = 'application/json'
        headers['NeutronProxy-Agent'] = self.name

        if self.auth:
            headers['Authorization'] = self.auth
            headers['Realm'] = 'ZENIC'

        LOG.info(_LI("ServerProxy: server=%(server)s, port=%(port)d, "
                     "ssl=%(ssl)r"),
                 {'server': self.server, 'port': self.port, 'ssl': self.ssl})
        LOG.info(_LI("ServerProxy: resource=%(resource)s, data=%(data)r, "
                     "headers=%(headers)r, action=%(action)s"),
                 {'resource': resource, 'data': data, 'headers': headers,
                  'action': action})

        # httplib constructors never return None, so no None-check is
        # needed here (the previous dead checks were removed).
        if self.ssl:
            conn = httplib.HTTPSConnection(
                self.server, self.port, timeout=self.timeout)
        else:
            conn = httplib.HTTPConnection(
                self.server, self.port, timeout=self.timeout)

        try:
            conn.request(action, uri, body, headers)
            response = conn.getresponse()
            respstr = response.read()
            respdata = respstr
            if response.status in self.success_codes:
                try:
                    respdata = jsonutils.loads(respstr)
                except ValueError:
                    # response was not JSON; hand back the raw string
                    pass
            ret = (response.status, response.reason, respstr, respdata)
        except socket.timeout as e1:
            LOG.error(_LE('ServerProxy: %(action)s failure, %(el)r'),
                      {"action": action, "el": e1})
            ret = 9, None, None, None
        except socket.error as e:
            LOG.error(_LE("ServerProxy: %(action)s failure, %(e)r"),
                      {"action": action, "e": e})
            ret = 0, None, None, None
        finally:
            # Close in all cases -- including unexpected exceptions such
            # as httplib.HTTPException -- so sockets are never leaked.
            conn.close()
        return ret
207
208
class ServerPool(object):
    """Pool of ServerProxy objects with automatic failover.

    REST calls are attempted against each configured controller in turn
    (healthy servers first) until one responds with a non-failure
    status.
    """

    def __init__(self, servers, auth, ssl, no_ssl_validation, ssl_sticky,
                 ssl_cert_directory, consistency_interval,
                 timeout=False, cache_connections=False,
                 base_uri=BASE_URI, success_codes=SUCCESS_CODES,
                 failure_codes=FAILURE_CODES, name='NeutronRestProxy'):
        """
        :param servers: list of 'host' or 'host:port' REST endpoints,
            used in order specified until one succeeds
        :param auth: 'user:password' for HTTP Basic auth, or None
        :param ssl: use HTTPS when True
        :param no_ssl_validation: skip certificate validation entirely
        :param ssl_sticky: trust-on-first-use for unknown controllers
        :param ssl_cert_directory: base dir for ca/host/combined certs
        :param consistency_interval: interval handed to the watchdog
        :param timeout: per-request socket timeout in seconds (False
            leaves the socket default in place)
        :param cache_connections: when False, reconnect on every call
        :raises cfg.Error: when servers is empty or malformed
        """
        LOG.debug(_("ServerPool: initializing"))
        self.auth = auth
        self.ssl = ssl
        self.base_uri = base_uri
        self.success_codes = success_codes
        self.failure_codes = failure_codes
        self.name = name
        self.timeout = timeout
        self.always_reconnect = not cache_connections
        self.consistency_interval = consistency_interval
        self.no_ssl_validation = no_ssl_validation
        self.ssl_cert_directory = ssl_cert_directory
        self.ssl_sticky = ssl_sticky

        # Port appended to any endpoint given without an explicit port.
        default_port = 8000

        # Function to use to retrieve topology for consistency syncs.
        # Needs to be set by the module that uses the servermanager.
        self.get_topo_function = None
        self.get_topo_function_args = {}

        if not servers:
            raise cfg.Error(_('Servers not defined. Aborting server manager.'))
        servers = [s if len(s.rsplit(':', 1)) == 2
                   else "%s:%d" % (s, default_port)
                   for s in servers]
        if any((len(spl) != 2 or not spl[1].isdigit())
               for spl in [sp.rsplit(':', 1)
                           for sp in servers]):
            raise cfg.Error(_('Servers must be defined as <ip>:<port>. '
                              'Configuration was %s') % servers)
        self.servers = [
            self.server_proxy_for(server, int(port))
            for server, port in (s.rsplit(':', 1) for s in servers)
        ]
        eventlet.spawn(self._consistency_watchdog, self.consistency_interval)
        LOG.debug("ServerPool: initialization done")

    def server_proxy_for(self, server, port):
        """Build a ServerProxy (with its combined cert) for one endpoint."""
        combined_cert = self._get_combined_cert_for_server(server, port)
        return ServerProxy(server, port, self.ssl, self.auth,
                           self.timeout, self.base_uri, self.success_codes,
                           self.name, combined_cert=combined_cert)

    def _get_combined_cert_for_server(self, server, port):
        """Return the path of a combined cert file for `server`, or None.

        The ssl library requires a single file with all trusted certs,
        so the trusted CAs and the host-specific cert for this server
        are concatenated into one PEM under the 'combined' directory.
        """
        combined_cert = None
        if self.ssl and not self.no_ssl_validation:
            base_ssl = self.ssl_cert_directory
            host_dir = os.path.join(base_ssl, 'host_certs')
            ca_dir = os.path.join(base_ssl, 'ca_certs')
            combined_dir = os.path.join(base_ssl, 'combined')
            combined_cert = os.path.join(combined_dir, '%s.pem' % server)
            if not os.path.exists(base_ssl):
                raise cfg.Error(_('ssl_cert_directory [%s] does not exist. '
                                  'Create it or disable ssl.') % base_ssl)
            for automake in [combined_dir, ca_dir, host_dir]:
                if not os.path.exists(automake):
                    os.makedirs(automake)

            # get all CA certs
            certs = self._get_ca_cert_paths(ca_dir)

            # check for a host specific cert
            hcert, exists = self._get_host_cert_path(host_dir, server)
            if exists:
                certs.append(hcert)
            elif self.ssl_sticky:
                # trust-on-first-use: fetch and persist the cert now
                self._fetch_and_store_cert(server, port, hcert)
                certs.append(hcert)
            if not certs:
                raise cfg.Error(_('No certificates were found to verify '
                                  'controller %s') % (server))
            self._combine_certs_to_file(certs, combined_cert)
        return combined_cert

    def _combine_certs_to_file(self, certs, cfile):
        """Concatenate certificate files into one combined PEM.

        Writes the contents of each certificate path in `certs` to
        `cfile` for use with ssl sockets.
        """
        with open(cfile, 'w') as combined:
            for c in certs:
                with open(c, 'r') as cert_handle:
                    combined.write(cert_handle.read())

    def _get_host_cert_path(self, host_dir, server):
        """Return (full path, exists) for the host-specific cert."""
        hcert = os.path.join(host_dir, '%s.pem' % server)
        if os.path.exists(hcert):
            return hcert, True
        return hcert, False

    def _get_ca_cert_paths(self, ca_dir):
        """Return the paths of all '*.pem' files below `ca_dir`."""
        certs = [os.path.join(root, name)
                 for name in [
                     name for (root, dirs, files) in os.walk(ca_dir)
                     for name in files
                 ]
                 if name.endswith('.pem')]
        return certs

    def _fetch_and_store_cert(self, server, port, path):
        """Grab a certificate from a server and write it to `path`."""
        try:
            cert = ssl.get_server_certificate((server, port))
        except Exception as e:
            raise cfg.Error(_('Could not retrieve initial '
                              'certificate from controller %(server)s. '
                              'Error details: %(error)s') %
                            {'server': server, 'error': str(e)})

        # Lazy %-args (not eager "%") per the logging convention used
        # throughout this module.
        LOG.warning(_LW("Storing to certificate for host %(server)s "
                        "at %(path)s"), {'server': server,
                                         'path': path})
        self._file_put_contents(path, cert)

        return cert

    def _file_put_contents(self, path, contents):
        # Simple method to write to file.
        # Created for easy Mocking
        with open(path, 'w') as handle:
            handle.write(contents)

    def server_failure(self, resp, ignore_codes=None):
        """Return True when resp's status is a non-ignored failure code.

        Note: We assume 301-303 is a failure, and try the next server in
        the server pool.
        """
        ignore_codes = ignore_codes or []
        return resp[0] in self.failure_codes and resp[0] not in ignore_codes

    def action_success(self, resp):
        """Return True for a successful response.

        Note: We assume any valid 2xx as being successful response.
        """
        return resp[0] in self.success_codes

    def rest_call(self, action, resource, data, headers, ignore_codes,
                  timeout=False):
        """Try the call against each server until one does not fail.

        Healthy servers (failed == False) are tried first.  When every
        server fails, the first server's response is returned, since a
        good server's error is the most meaningful to surface.
        """
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            ret = active_server.rest_call(action, resource, data, headers,
                                          timeout,
                                          reconnect=self.always_reconnect)
            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                return ret
            else:
                try:
                    # unicode() raises on non-string payloads (e.g. a
                    # decoded dict); the except below keeps logging
                    # failures from masking the real error.
                    LOG.error(_LE('ServerProxy: %(action)s failure for '
                                  'servers:%(server)r Response:'
                                  '%(response)s'),
                              {'action': action,
                               'server': (active_server.server,
                                          active_server.port),
                               'response': unicode(ret[3], "utf-8")})
                    LOG.error(_LE("ServerProxy: Error details: "
                                  "status=%(status)d, reason=%(reason)r, "
                                  "ret=%(ret)s, data=%(data)r"),
                              {'status': ret[0], 'reason': ret[1],
                               'ret': unicode(ret[2], "utf-8"),
                               'data': unicode(ret[3], "utf-8")})
                except Exception as e:
                    LOG.error(_LE("fail to display info, err: %(e)s"),
                              {'e': e})
                active_server.failed = True

        # All servers failed, reset server list and try again next time
        LOG.error(_LE('ServerProxy: %(action)s failure for all servers: '
                      '%(server)r'),
                  {'action': action,
                   'server': tuple((s.server,
                                    s.port) for s in self.servers)})
        return first_response

    def rest_action(self, action, resource, data='', errstr='%s',
                    ignore_codes=None, headers=None, timeout=False):
        """Wrapper for rest_call that verifies success.

        Raises RemoteRestError on failure with the provided error string.
        By default, 404 errors on DELETE calls are ignored because the
        resource already does not exist on the backend.
        """
        LOG.debug(_("rest_action: %(action)s action to "
                    "resource %(resource)s %(data)s"),
                  {'action': action, 'resource': resource, 'data': data})

        # None sentinels instead of mutable default arguments.
        if headers is None:
            headers = {}
        if not ignore_codes and action == 'DELETE':
            ignore_codes = [404]
        elif ignore_codes is None:
            ignore_codes = []
        resp = self.rest_call(action, resource, data, headers, ignore_codes,
                              timeout)
        if self.server_failure(resp, ignore_codes):
            try:
                LOG.error(errstr, unicode(resp[2], "utf-8"))
            except Exception as e:
                LOG.error(_LE("fail to display info, err: %(e)s"),
                          {'e': e})
            raise RemoteRestError(reason=resp[2], status=resp[0])
        if resp[0] in ignore_codes:
            # resp[0] is the status code; the previous code logged the
            # response body (resp[2]) as the "code" by mistake.
            LOG.warning(_("NeutronRestProxyV2: Received and ignored error "
                          "code %(code)s on %(action)s action to resource "
                          "%(resource)s"),
                        {'code': resp[0], 'action': action,
                         'resource': resource})
        return resp

    def _consistency_watchdog(self, polling_interval=60):
        # Consistency checking is not implemented for this backend.
        return
451
class HTTPSConnectionWithValidation(httplib.HTTPSConnection):
    """HTTPS connection that can validate the server certificate.

    When `combined_cert` names a PEM bundle, the TLS handshake requires
    the peer certificate to verify against it; when it is None the
    socket is wrapped without any certificate validation.
    """

    # Path of a combined CA/host cert file; None disables validation.
    combined_cert = None

    def connect(self):
        raw_sock = socket.create_connection((self.host, self.port),
                                            self.timeout,
                                            self.source_address)
        if self._tunnel_host:
            # Establish the proxy tunnel over the plain socket before
            # TLS is negotiated.
            self.sock = raw_sock
            self._tunnel()

        if self.combined_cert:
            self.sock = ssl.wrap_socket(raw_sock,
                                        self.key_file,
                                        self.cert_file,
                                        cert_reqs=ssl.CERT_REQUIRED,
                                        ca_certs=self.combined_cert)
        else:
            self.sock = ssl.wrap_socket(raw_sock,
                                        self.key_file,
                                        self.cert_file,
                                        cert_reqs=ssl.CERT_NONE)
diff --git a/networking_zte/common/rest/znic_l2/__init__.py b/networking_zte/common/rest/znic_l2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/common/rest/znic_l2/__init__.py
diff --git a/networking_zte/common/rest/znic_l2/config.py b/networking_zte/common/rest/znic_l2/config.py
new file mode 100644
index 0000000..ad49dad
--- /dev/null
+++ b/networking_zte/common/rest/znic_l2/config.py
@@ -0,0 +1,84 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2# Copyright 2014 Big Switch Networks, Inc.
3# All Rights Reserved.
4#
5# Licensed under the Apache License, Version 2.0 (the "License"); you may
6# not use this file except in compliance with the License. You may obtain
7# a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14# License for the specific language governing permissions and limitations
15# under the License.
16#
17
18
19"""
20This module manages configuration options
21"""
22
23from oslo_config import cfg
24
25from neutron.agent.common import config as agconfig
26from neutron.common import utils
27
# Configuration options registered under the [RESTPROXY] group.
restproxy_opts = [
    cfg.ListOpt('servers', default=['localhost:8800'],
                # The original text ran words together across the string
                # continuations ("oneserver", "todeploy"); fixed here.
                help=_("A comma separated list of Big Switch or Floodlight "
                       "servers and port numbers. The plugin proxies the "
                       "requests to the Big Switch/Floodlight server, "
                       "which performs the networking configuration. Only "
                       "one server is needed per deployment, but you may "
                       "wish to deploy multiple servers to support "
                       "failover.")),
    cfg.StrOpt('server_auth', default=None, secret=True,
               help=_("The username and password for authenticating against "
                      "the Big Switch or Floodlight controller.")),
    cfg.BoolOpt('server_ssl', default=False,
                help=_("If True, Use SSL when connecting to the Big Switch or "
                       "Floodlight controller.")),
    cfg.BoolOpt('ssl_sticky', default=True,
                help=_("Trust and store the first certificate received for "
                       "each controller address and use it to validate future "
                       "connections to that address.")),
    cfg.BoolOpt('no_ssl_validation', default=False,
                help=_("Disables SSL certificate validation for controllers")),
    cfg.BoolOpt('cache_connections', default=True,
                help=_("Re-use HTTP/HTTPS connections to the controller.")),
    cfg.StrOpt('ssl_cert_directory',
               default='/etc/neutron/plugins/proxyagent/znic/ssl',
               help=_("Directory containing ca_certs and host_certs "
                      "certificate directories.")),
    cfg.BoolOpt('sync_data', default=False,
                help=_("Sync data on connect")),
    cfg.BoolOpt('auto_sync_on_failure', default=True,
                help=_("If neutron fails to create a resource because "
                       "the backend controller doesn't know of a dependency, "
                       "the plugin automatically triggers a full data "
                       "synchronization to the controller.")),
    cfg.IntOpt('consistency_interval', default=60,
               help=_("Time between verifications that the backend controller "
                      "database is consistent with Neutron")),
    cfg.IntOpt('server_timeout', default=10,
               help=_("Maximum number of seconds to wait for proxy request "
                      "to connect and complete.")),
    cfg.IntOpt('thread_pool_size', default=4,
               help=_("Maximum number of threads to spawn to handle large "
                      "volumes of port creations.")),
    cfg.StrOpt('neutron_id', default='neutron-' + utils.get_hostname(),
               deprecated_name='quantum_id',
               help=_("User defined identifier for this Neutron deployment")),
    cfg.BoolOpt('add_meta_server_route', default=True,
                help=_("Flag to decide if a route to the metadata server "
                       "should be injected into the VM")),
    cfg.StrOpt('zenic_version', default="50.1",
               help=_("Version number of the zenic controller corresponding "
                      "to the ml2-plugin")),
]


def register_config():
    """Register the RESTPROXY options and the root helper with oslo.config."""
    cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")
    agconfig.register_root_helper(cfg.CONF)
diff --git a/networking_zte/common/rest/znic_l2/mech_znic_l2.py b/networking_zte/common/rest/znic_l2/mech_znic_l2.py
new file mode 100644
index 0000000..2303dfe
--- /dev/null
+++ b/networking_zte/common/rest/znic_l2/mech_znic_l2.py
@@ -0,0 +1,114 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2#
3# Copyright 2014 Big Switch Networks, Inc.
4# All Rights Reserved.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17
18from neutron.plugins.proxydriver.common.rest.znic_l2 \
19 import config as pl_config
20from neutron.plugins.proxydriver.common.rest.znic_l2 \
21 import znic_l2restconf as restconf
22from oslo_log import log
23
24LOG = log.getLogger(__name__)
25
26
class ZnicL2Driver(object):
    """Mechanism Driver for Znic Networks Controller.

    Relays network, subnet, port and security-group create/update/delete
    operations to the Znic controller through a REST server pool.
    """

    def __init__(self):
        LOG.debug(_('Initializing driver'))
        # Register the [RESTPROXY] config options before reading them.
        pl_config.register_config()
        # The backend does not support bulk operations yet.
        self.native_bulk_support = False
        # Build the pool of controller connections from configuration.
        conf = pl_config.cfg.CONF.RESTPROXY
        self.servers = restconf.ZnicServerPool(
            conf.servers,
            conf.server_auth,
            conf.zenic_version,
            conf.server_ssl,
            conf.no_ssl_validation,
            conf.ssl_sticky,
            conf.ssl_cert_directory,
            conf.consistency_interval,
            conf.server_timeout,
            conf.cache_connections)
        LOG.debug(_("Initialization done"))

    def set_enable_security_group(self, en_security_group):
        """Propagate the enable-security-group flag to the server pool."""
        self.servers.set_enable_security_group(en_security_group)

    def create_network(self, mech_context):
        """Relay network creation to the network controller."""
        self.servers.rest_create_network(mech_context)

    def update_network(self, mech_context):
        """Relay network update to the network controller."""
        self.servers.rest_update_network(mech_context)

    def delete_network(self, mech_context):
        """Relay network deletion to the network controller."""
        self.servers.rest_delete_network(mech_context)

    def create_subnet(self, mech_context):
        """Relay subnet creation to the network controller."""
        self.servers.rest_create_subnet(mech_context)

    def update_subnet(self, mech_context):
        """Relay subnet update to the network controller."""
        self.servers.rest_update_subnet(mech_context)

    def delete_subnet(self, mech_context):
        """Relay subnet deletion to the network controller."""
        self.servers.rest_delete_subnet(mech_context)

    def create_port(self, mech_context):
        """Relay port creation to the network controller."""
        self.servers.rest_create_port(mech_context)

    def update_port(self, mech_context):
        """Relay port update to the network controller."""
        self.servers.rest_update_port(mech_context)

    def delete_port(self, mech_context):
        """Relay port deletion to the network controller."""
        self.servers.rest_delete_port(mech_context)

    def create_security_group(self, mech_context):
        """Relay security group creation to the network controller."""
        self.servers.rest_create_securitygroup(mech_context)

    def update_security_group(self, mech_context):
        """Relay security group update to the network controller."""
        self.servers.rest_update_securitygroup(mech_context)

    def delete_security_group(self, mech_context):
        """Relay security group deletion to the network controller."""
        self.servers.rest_delete_securitygroup(mech_context)

    def create_security_group_rule(self, mech_context):
        """Relay security group rule creation to the network controller."""
        self.servers.rest_create_securitygroup_rule(mech_context)

    def update_security_group_rule(self, mech_context):
        """Relay security group rule update to the network controller."""
        self.servers.rest_update_securitygroup_rule(mech_context)

    def delete_security_group_rule(self, mech_context):
        """Relay security group rule deletion to the network controller."""
        self.servers.rest_delete_securitygroup_rule(mech_context)
diff --git a/networking_zte/common/rest/znic_l2/znic_l2restconf.py b/networking_zte/common/rest/znic_l2/znic_l2restconf.py
new file mode 100644
index 0000000..96fd162
--- /dev/null
+++ b/networking_zte/common/rest/znic_l2/znic_l2restconf.py
@@ -0,0 +1,495 @@
1# vim: tabstop=4 shiftwidth=4 softtabstop=4
2#
3# Copyright 2014 Big Switch Networks, Inc.
4# All Rights Reserved.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17
18
19from neutron.db import common_db_mixin as base_db
20# from neutron.openstack.common import log
21from neutron.plugins.proxydriver.common.rest import servermanager
22from oslo_log import log
23
24from neutron.extensions import portsecurity as psec
25
26LOG = log.getLogger(__name__)
27
28# The following are used to invoke the API on the external controller
29TENANT = 'tenant'
30NETWORK = 'network'
31SUBNET = 'subnet'
32PORT = 'port'
33ROUTER = 'router'
34FLOATING_IP = 'floating-ip'
35VXLAN_TUNNEL = 'vxlan-tunnel'
36SECURITY_GROUP = 'sg'
37SECURITY_GROUP_RULE = 'sg-rule'
38CLASSIFIER = 'classifier'
39
40BASE_URI = '/restconf/operations/zenic-vdcapp-model:'
41SUCCESS_CODES = range(200, 207)
42FAILURE_CODES = [0, 9, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
43 504, 505]
44
45
class ZnicServerPool(servermanager.ServerPool, base_db.CommonDbMixin):
    """REST server pool used by the Znic (ZENIC) ML2 mechanism driver.

    Serializes Neutron networks, subnets, ports, security groups and
    security-group rules into the controller's ``{"input": {...}}``
    payload format and issues the corresponding create/update/delete/get
    REST calls against the ZENIC controller.
    """
52 def __init__(self, servers, auth, zenic_version, ssl, no_ssl_validation,
53 ssl_sticky, ssl_cert_directory, consistency_interval,
54 timeout=False, cache_connections=True, base_uri=BASE_URI,
55 success_codes=SUCCESS_CODES,
56 failure_codes=FAILURE_CODES, name='ZnicRestProxy'):
57 super(ZnicServerPool, self).__init__(
58 servers, auth, ssl, no_ssl_validation, ssl_sticky,
59 ssl_cert_directory, consistency_interval, timeout,
60 cache_connections, base_uri, success_codes, failure_codes, name)
61 version = zenic_version.split('.')
62 version = version[0] + version[1]
63 if (not version.isdigit()) or (int(version) < 403):
64 LOG.error(_("zenic_version error!zenic_version = %s"), version)
65 self.zenic_version = int(version)
66
67 def validate_dict(self, instance, key, default_val):
68 return instance[key] if (key in instance and
69 instance[key] is not None) else default_val
70
71 def validate_ipv4(self, ip_in, default_val):
72 return ip_in if (ip_in != 0 and (ip_in is not None)) else default_val
73
    def construct_network_info(self, mech_context, action):
        """Build the REST payload for a network operation.

        :param mech_context: ML2 NetworkContext; ``current`` holds the
            network dict and ``_plugin_context`` the request context.
        :param action: 'POST' (create), 'PUT' (update), 'DELETE' or 'GET';
            selects how much of the network is serialized.
        :returns: dict of the form ``{"input": {...}}`` ready to send to
            the controller.
        """
        network = mech_context.current
        context = mech_context._plugin_context
        # validate tenant
        tenant_id = self._get_tenant_id_for_create(context, network)

        if action == 'DELETE' or action == 'GET':
            # Delete/get only need the identifier.
            network_info = {
                "input": {
                    "id": network['id']
                }
            }
        else:
            network_info = {
                "input": {
                    "id": network['id'],
                    "name": network['name'],
                    "admin_state_up": network['admin_state_up'],
                    "tenant_id": tenant_id,
                    "shared": network['shared'],
                    # QoS attributes default to 0 when the extension is
                    # absent from the network dict.
                    "band_width": self.validate_dict(network, 'bandwidth', 0),
                    "burst_size": self.validate_dict(network, 'cbs', 0),
                    "dscp": self.validate_dict(network, 'dscp', 0),
                    "external":
                    self.validate_dict(network, 'router:external', False),
                }
            }

        # NOTE(review): the following runs for every action, so
        # segmentation_id (and mtu) are also attached to the minimal
        # DELETE/GET payload — confirm that is intended.
        input = network_info['input']  # shadows the builtin input()
        if network.get('provider:network_type') != "flat":
            input['segmentation_id'] = \
                mech_context.network_segments[0]['segmentation_id']

        if self.zenic_version > 403:
            if 'mtu' in network:
                input['mtu'] = self.validate_dict(network, "mtu", 0)

        if action == 'POST':
            input = network_info['input']
            if self.zenic_version > 403:
                # Port security is only forwarded when SG handling is on.
                if 'port_security_enabled' in network and \
                        self.en_security_group:
                    input['port_security_enabled'] = \
                        self.validate_dict(network, psec.PORTSECURITY, True)
        return network_info
119
    def construct_subnet_info(self, mech_context, action):
        """Build the REST payload for a subnet operation.

        :param mech_context: ML2 SubnetContext; ``current`` is the
            subnet dict.
        :param action: 'POST', 'PUT', 'DELETE' or 'GET'.
        :returns: ``{"input": {...}}`` payload for the controller.
        """
        subnet = mech_context.current
        context = mech_context._plugin_context
        # validate tenant
        tenant_id = self._get_tenant_id_for_create(context, subnet)

        if action == 'DELETE' or action == 'GET':
            # Delete/get only need the identifier.
            subnet_info = {
                "input": {
                    "id": subnet['id']
                }
            }
        else:
            # Placeholder gateway for "no gateway", per IP version.
            if subnet['ip_version'] == 6:
                gateway_default = "::"
            else:
                gateway_default = "0.0.0.0"

            subnet_info = {
                "input": {
                    "id": subnet['id'],
                    "subnet_name": subnet['name'],
                    # "network_id": subnet['network_id'],
                    # "tenant_id": tenant_id,
                    "dns_nameservers": ','.join(subnet['dns_nameservers']),
                    "allocation_pools": subnet['allocation_pools'],
                    # One "destination,nexthop" pair per CRLF-separated line.
                    "host_routes":
                    '\r\n'.join(','.join([route.get("destination", ""),
                                          route.get("nexthop", "")])
                                for route in self.validate_dict(
                                    subnet, 'host_routes', [])),
                    # "ip_version": subnet['ip_version'],
                    "gateway_ip": self.validate_ipv4(
                        subnet['gateway_ip'], gateway_default),
                    # "cidr": subnet['cidr']
                }
            }

        # Immutable attributes are only sent on create.
        if action == 'POST':
            input = subnet_info['input']
            input['network_id'] = subnet['network_id']
            input['tenant_id'] = tenant_id
            input['cidr'] = subnet['cidr']
            input['ip_version'] = subnet['ip_version']

        return subnet_info
166
    def construct_port_info(self, mech_context, action):
        """Build the REST payload for a port operation.

        :param mech_context: ML2 PortContext; ``current`` is the port dict.
        :param action: 'POST', 'PUT', 'DELETE' or 'GET'.
        :returns: ``{"input": {...}}`` payload for the controller.
        """
        port = mech_context.current
        context = mech_context._plugin_context
        # validate tenant
        tenant_id = self._get_tenant_id_for_create(context, port)

        if action == 'DELETE' or action == 'GET':
            # Delete/get only need the identifier.
            port_info = {
                "input": {
                    "id": port["id"]
                }
            }
        else:
            # When SG handling is disabled, strip the groups from the
            # payload (note: mutates the caller's port dict).
            if not self.en_security_group:
                port["security_groups"] = []
            port_info = {
                "input": {
                    "id": port['id'],
                    "name": port['name'],
                    "allowed_address_pairs":
                    [{'ip_address': pairs['ip_address'],
                      'mac_address': pairs['mac_address']}
                     for pairs in port['allowed_address_pairs']],
                    "admin_state_up": port["admin_state_up"],
                    # "network_id": port["network_id"],
                    # "tenant_id": tenant_id,
                    # "mac_address": port["mac_address"],
                    "binding_profile": str(port['binding:profile']),
                    "device_owner": port["device_owner"],
                    "fixed_ips": [{'subnet_id': ip["subnet_id"],
                                   'ip_address': ip["ip_address"]}
                                  for ip in port["fixed_ips"]],
                    "security_groups": port["security_groups"],
                    # QoS attributes default to 0 when absent.
                    "band_width": self.validate_dict(port, 'bandwidth', 0),
                    "burst_size": self.validate_dict(port, 'cbs', 0),
                    "dscp": self.validate_dict(port, 'dscp', 0),
                }
            }

        if action == 'POST' or action == 'PUT':
            input = port_info['input']  # shadows the builtin input()
            if self.zenic_version > 403:
                if 'extra_dhcp_opts' in port:
                    input['extra_dhcp_opts'] = [{'opt_value':
                                                 dhcp["opt_value"],
                                                 'ip_version':
                                                 dhcp["ip_version"],
                                                 'opt_name':
                                                 dhcp["opt_name"]}
                                                for dhcp in
                                                port["extra_dhcp_opts"]]

        # Immutable attributes are only sent on create.
        if action == 'POST':
            input = port_info['input']
            input['network_id'] = port['network_id']
            input['tenant_id'] = tenant_id
            input['mac_address'] = port['mac_address']
            if self.zenic_version > 403:
                # Port security is only forwarded when SG handling is on.
                if 'port_security_enabled' in port and self.en_security_group:
                    input['port_security_enabled'] = \
                        self.validate_dict(port, psec.PORTSECURITY, True)
        return port_info
229
230 def construct_securitygroup_info(self, mech_context, action):
231 sg = mech_context.current
232 context = mech_context._plugin_context
233 # validate tenant
234 tenant_id = self._get_tenant_id_for_create(context, sg)
235
236 if action == 'DELETE' or action == 'GET':
237 securitygroup_info = {"input": {"id": sg["id"]}}
238 elif action == 'PUT':
239 securitygroup_info = {
240 "input": {
241 "id": sg['id'],
242 "name": sg['name'],
243 "description": sg["description"],
244 }
245 }
246 else:
247 securitygroup_info = {
248 "input": {
249 "id": sg['id'],
250 "name": sg['name'],
251 "description": sg["description"],
252 "tenant_id": tenant_id
253 }
254 }
255
256 if action == "POST":
257 securitygroup_rules = self.validate_dict(
258 sg, 'security_group_rules', None)
259 if securitygroup_rules is not None:
260 security_group_rules = []
261 for rule in securitygroup_rules:
262 ethertype = self.validate_dict(rule, 'ethertype', None)
263 ipv4 = None
264 ipv6 = None
265 if ethertype and ethertype.find('4') != -1:
266 ipv4 = self.validate_dict(
267 rule, 'remote_ip_prefix', None)
268 elif ethertype and ethertype.find('6') != -1:
269 ipv6 = self.validate_dict(
270 rule, 'remote_ip_prefix', None)
271 else:
272 LOG.error("ethertype:%s is error!" % ethertype)
273
274 sg_rule = {
275 "id": rule['id'],
276 "port_range_max":
277 self.validate_dict(rule, 'port_range_max', 0),
278 "port_range_min":
279 self.validate_dict(rule, 'port_range_min', 0),
280 "protocol":
281 self.validate_dict(rule, 'protocol', None),
282 "remote_group_id":
283 self.validate_dict(rule, 'remote_group_id', None),
284 "remote_ipv4_prefix": ipv4,
285 "remote_ipv6_prefix": ipv6,
286 "direction":
287 self.validate_dict(rule, 'direction', None),
288 "ethertype": ethertype,
289 "tenant_id": tenant_id,
290 "security_group_id":
291 self.validate_dict(rule, 'security_group_id', None)
292 }
293 security_group_rules.append(sg_rule)
294 securitygroup_info['input']['security_group_rules'] = \
295 security_group_rules
296 return securitygroup_info
297
298 def construct_securitygroup_rule_info(self, mech_context, action):
299 rule = mech_context.current
300 context = mech_context._plugin_context
301 # validate tenant
302 tenant_id = self._get_tenant_id_for_create(context, rule)
303 ethertype = self.validate_dict(rule, 'ethertype', None)
304 ipv4 = None
305 ipv6 = None
306 if ethertype and ethertype.find('4') != -1:
307 ipv4 = self.validate_dict(rule, 'remote_ip_prefix', None)
308 elif ethertype and ethertype.find('6') != -1:
309 ipv6 = self.validate_dict(rule, 'remote_ip_prefix', None)
310 else:
311 LOG.error("ethertype:%s is error!" % ethertype)
312
313 if action == 'DELETE' or action == 'GET':
314 securitygroup_rule_info = {"input": {"id": rule["id"]}}
315 else:
316 securitygroup_rule_info = {
317 "input": {
318 "id": rule['id'],
319 "port_range_max":
320 self.validate_dict(rule, 'port_range_max', 0),
321 "port_range_min":
322 self.validate_dict(rule, 'port_range_min', 0),
323 "protocol":
324 self.validate_dict(rule, 'protocol', None),
325 "remote_group_id":
326 self.validate_dict(rule, 'remote_group_id', None),
327 "remote_ipv4_prefix": ipv4,
328 "remote_ipv6_prefix": ipv6,
329 "security_group_id":
330 self.validate_dict(rule, 'security_group_id', None)
331 }
332 }
333
334 if action == 'POST':
335 input = securitygroup_rule_info['input']
336 input['direction'] = self.validate_dict(rule, 'direction', None)
337 input['ethertype'] = ethertype
338 input['tenant_id'] = tenant_id
339
340 return securitygroup_rule_info
341
    def set_enable_security_group(self, en_security_group):
        # Record whether security-group data should be sent to the
        # controller; consulted by construct_network_info and
        # construct_port_info.
        self.en_security_group = en_security_group
344
345 def rest_create_tenant(self, tenant_id, tenant_name, description):
346 tenant_data = {"id": tenant_id,
347 "name": tenant_name,
348 "description": description}
349 data = {"input": tenant_data}
350 resource = 'add-' + TENANT
351 errstr = _("Unable to create tenant: %s")
352 self.rest_action('POST', resource, data, errstr)
353
354 def rest_update_tenant(self, tenant_id, tenant_name, description):
355 tenant_data = {"id": tenant_id,
356 "name": tenant_name,
357 "description": description}
358 data = {"input": tenant_data}
359 resource = 'update-' + TENANT
360 errstr = _("Unable to update tenant: %s")
361 self.rest_action('POST', resource, data, errstr)
362
363 def rest_delete_tenant(self, tenant_id):
364 tenant_data = {"id": tenant_id}
365 data = {"input": tenant_data}
366 resource = 'del-' + TENANT
367 errstr = _("Unable to delete tenant: %s")
368 self.rest_action('POST', resource, data, errstr)
369
370 def rest_get_tenant(self, tenant_id):
371 tenant_data = {"id": tenant_id}
372 data = {"input": tenant_data}
373 resource = 'get-' + TENANT
374 errstr = _("Unable to get tenant: %s")
375 return self.rest_action('POST', resource, data, errstr)
376
377 def rest_create_network(self, mech_context):
378 data = self.construct_network_info(mech_context, 'POST')
379 resource = 'add-' + NETWORK
380 errstr = _("Unable to create remote network: %s")
381 self.rest_action('POST', resource, data, errstr)
382
383 def rest_update_network(self, mech_context):
384 data = self.construct_network_info(mech_context, 'PUT')
385 resource = 'update-' + NETWORK
386 errstr = _("Unable to update remote network: %s")
387 self.rest_action('POST', resource, data, errstr)
388
389 def rest_delete_network(self, mech_context):
390 data = self.construct_network_info(mech_context, 'DELETE')
391 resource = 'del-' + NETWORK
392 errstr = _("Unable to delete remote network: %s")
393 self.rest_action('POST', resource, data, errstr)
394
395 def rest_get_network(self, mech_context):
396 data = self.construct_network_info(mech_context, 'GET')
397 resource = 'get-' + NETWORK
398 errstr = _("Unable to get remote network: %s")
399 return self.rest_action('POST', resource, data, errstr)
400
401 def rest_create_subnet(self, mech_context):
402 data = self.construct_subnet_info(mech_context, 'POST')
403 resource = 'add-' + SUBNET
404 errstr = _("Unable to create remote subnet: %s")
405 self.rest_action('POST', resource, data, errstr)
406
407 def rest_update_subnet(self, mech_context):
408 data = self.construct_subnet_info(mech_context, 'PUT')
409 resource = 'update-' + SUBNET
410 errstr = _("Unable to update remote subnet: %s")
411 self.rest_action('POST', resource, data, errstr)
412
413 def rest_delete_subnet(self, mech_context):
414 data = self.construct_subnet_info(mech_context, 'DELETE')
415 resource = 'del-' + SUBNET
416 errstr = _("Unable to delete remote subnet: %s")
417 self.rest_action('POST', resource, data, errstr)
418
419 def rest_get_subnet(self, mech_context):
420 data = self.construct_subnet_info(mech_context, 'GET')
421 resource = 'get-' + SUBNET
422 errstr = _("Unable to get remote subnet: %s")
423 return self.rest_action('POST', resource, data, errstr)
424
425 def rest_create_port(self, mech_context):
426 data = self.construct_port_info(mech_context, 'POST')
427 resource = 'add-' + PORT
428 errstr = _("Unable to create remote port: %s")
429 self.rest_action('POST', resource, data, errstr)
430
431 def rest_update_port(self, mech_context):
432 data = self.construct_port_info(mech_context, 'PUT')
433 resource = 'update-' + PORT
434 errstr = _("Unable to update remote port: %s")
435 self.rest_action('POST', resource, data, errstr)
436
437 def rest_delete_port(self, mech_context):
438 data = self.construct_port_info(mech_context, 'DELETE')
439 resource = 'del-' + PORT
440 errstr = _("Unable to delete remote port: %s")
441 self.rest_action('POST', resource, data, errstr)
442
443 def rest_get_port(self, mech_context):
444 data = self.construct_port_info(mech_context, 'GET')
445 resource = 'get-' + PORT
446 errstr = _("Unable to get remote port: %s")
447 return self.rest_action('POST', resource, data, errstr)
448
449 def rest_create_securitygroup(self, mech_context):
450 data = self.construct_securitygroup_info(mech_context, 'POST')
451 resource = 'add-' + SECURITY_GROUP
452 errstr = _("Unable to create remote securitygroup: %s")
453 self.rest_action('POST', resource, data, errstr)
454
455 def rest_update_securitygroup(self, mech_context):
456 data = self.construct_securitygroup_info(mech_context, 'PUT')
457 resource = 'update-' + SECURITY_GROUP
458 errstr = _("Unable to update remote securitygroup: %s")
459 self.rest_action('POST', resource, data, errstr)
460
461 def rest_delete_securitygroup(self, mech_context):
462 data = self.construct_securitygroup_info(mech_context, 'DELETE')
463 resource = 'del-' + SECURITY_GROUP
464 errstr = _("Unable to delete remote securitygroup: %s")
465 self.rest_action('POST', resource, data, errstr)
466
467 def rest_get_securitygroup(self, mech_context):
468 data = self.construct_securitygroup_info(mech_context, 'GET')
469 resource = 'get-' + SECURITY_GROUP
470 errstr = _("Unable to get remote securitygroup: %s")
471 return self.rest_action('POST', resource, data, errstr)
472
473 def rest_create_securitygroup_rule(self, mech_context):
474 data = self.construct_securitygroup_rule_info(mech_context, 'POST')
475 resource = 'add-' + SECURITY_GROUP_RULE
476 errstr = _("Unable to create remote securitygroup_rule: %s")
477 self.rest_action('POST', resource, data, errstr)
478
479 def rest_update_securitygroup_rule(self, mech_context):
480 data = self.construct_securitygroup_rule_info(mech_context, 'PUT')
481 resource = 'update-' + SECURITY_GROUP_RULE
482 errstr = _("Unable to update remote securitygroup_rule: %s")
483 self.rest_action('POST', resource, data, errstr)
484
485 def rest_delete_securitygroup_rule(self, mech_context):
486 data = self.construct_securitygroup_rule_info(mech_context, 'DELETE')
487 resource = 'del-' + SECURITY_GROUP_RULE
488 errstr = _("Unable to delete remote securitygroup_rule: %s")
489 self.rest_action('POST', resource, data, errstr)
490
491 def rest_get_securitygroup_rule(self, mech_context):
492 data = self.construct_securitygroup_rule_info(mech_context, 'GET')
493 resource = 'get-' + SECURITY_GROUP_RULE
494 errstr = _("Unable to get remote securitygroup_rule: %s")
495 return self.rest_action('POST', resource, data, errstr)
diff --git a/networking_zte/fwaas/__init__.py b/networking_zte/fwaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/fwaas/__init__.py
diff --git a/networking_zte/l3/__init__.py b/networking_zte/l3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/l3/__init__.py
diff --git a/networking_zte/lbaas/__init__.py b/networking_zte/lbaas/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/lbaas/__init__.py
diff --git a/networking_zte/ml2/__init__.py b/networking_zte/ml2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/networking_zte/ml2/__init__.py
diff --git a/tools/neutron-zenic-agent b/tools/neutron-zenic-agent
new file mode 100644
index 0000000..4073a9a
--- /dev/null
+++ b/tools/neutron-zenic-agent
@@ -0,0 +1,10 @@
#!/usr/bin/python
# PBR Generated from u'console_scripts'
# Console-script shim: delegates to the zenic agent's main() and
# propagates its return value as the process exit status.

import sys

from neutron.agent.zenic_agent import main


if __name__ == "__main__":
    sys.exit(main())
diff --git a/tools/neutron-zenic-agent-script b/tools/neutron-zenic-agent-script
new file mode 100644
index 0000000..d1e05c6
--- /dev/null
+++ b/tools/neutron-zenic-agent-script
@@ -0,0 +1,147 @@
#!/bin/sh
### BEGIN INIT INFO
# Provides: neutron-zenic-agent
# Required-Start: $network $local_fs $remote_fs $syslog
# Required-Stop: $remote_fs
# Should-Start: mysql postgresql rabbitmq-server keystone openvswitch-switch neutron-ovs-cleanup
# Should-Stop: mysql postgresql rabbitmq-server keystone openvswitch-switch
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Neutron zenic agent
# Description: Provide zenic agent for neutron
### END INIT INFO

# Authors: Mehdi Abaakouk <sileht@sileht.net>
#          Thomas Goirand <zigo@debian.org>

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="OpenStack Neutron zenic agent"
PROJECT_NAME=neutron
NAME=${PROJECT_NAME}-zenic-agent
# --config-file=/etc/neutron/neutron.conf will be happened
# to DAEMON_ARGS later by openstack-pkg-tools
DAEMON_ARGS="--config-file=/etc/neutron/plugin.ini"

# The content after this line comes from openstack-pkg-tools
# and has been automatically added to a .init.in script, which
# contains only the descriptive part for the daemon. Everything
# else is standardized as a single unique script.
# (A stray duplicate "#!/bin/sh" left by the concatenation was removed.)

# Author: Thomas Goirand <zigo@debian.org>

# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin

if [ -z "${DAEMON}" ] ; then
	DAEMON=/usr/bin/${NAME}
fi
PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
if [ -z "${SCRIPTNAME}" ] ; then
	SCRIPTNAME=/etc/init.d/${NAME}
fi
if [ -z "${SYSTEM_USER}" ] ; then
	SYSTEM_USER=${PROJECT_NAME}
fi
# Bug fix: this guard previously re-tested SYSTEM_USER (always non-empty
# after the default above), so SYSTEM_GROUP was never defaulted and
# --chuid below could end up as "user:" with an empty group.
if [ -z "${SYSTEM_GROUP}" ] ; then
	SYSTEM_GROUP=${PROJECT_NAME}
fi
if [ "${SYSTEM_USER}" != "root" ] ; then
	STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
fi
if [ -z "${CONFIG_FILE}" ] ; then
	CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
fi
LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
	DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
fi

# Exit if the package is not installed
[ -x $DAEMON ] || exit 0

# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
if [ `whoami` = "root" ] ; then
	for i in lock run log lib ; do
		mkdir -p /var/$i/${PROJECT_NAME}
		chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME}
	done
fi

# This defines init_is_upstart which we use later on (+ more...)
. /lib/lsb/init-functions

# Manage log options: logfile and/or syslog, depending on user's choosing
[ -r /etc/default/openstack ] && . /etc/default/openstack
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
# Start the agent via start-stop-daemon. The first invocation uses
# --test to verify the daemon could be started (fails if it is already
# running); the second actually launches it with DAEMON_ARGS.
# Returns 1 if already running, 2 if the start failed.
do_start() {
	start-stop-daemon --start ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
		--test > /dev/null || return 1
	start-stop-daemon --start ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
		-- $DAEMON_ARGS || return 2
}
88
# Stop the agent: send TERM, wait up to 30s, then KILL and wait 5s more.
# Removes the pidfile and returns start-stop-daemon's exit status.
do_stop() {
	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
	RETVAL=$?
	rm -f $PIDFILE
	return "$RETVAL"
}
95
# Foreground start for systemd (Type=simple): replace this shell with
# the daemon so systemd tracks it directly.
do_systemd_start() {
	exec $DAEMON $DAEMON_ARGS
}
99
# Dispatch on the init verb. Under upstart the sysv actions defer to
# upstart (init_is_upstart) instead of double-managing the daemon.
case "$1" in
start)
	init_is_upstart > /dev/null 2>&1 && exit 1
	log_daemon_msg "Starting $DESC" "$NAME"
	do_start
	case $? in
		0|1) log_end_msg 0 ;;
		2) log_end_msg 1 ;;
	esac
;;
stop)
	init_is_upstart > /dev/null 2>&1 && exit 0
	log_daemon_msg "Stopping $DESC" "$NAME"
	do_stop
	case $? in
		0|1) log_end_msg 0 ;;
		2) log_end_msg 1 ;;
	esac
;;
status)
	status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
systemd-start)
	do_systemd_start
;;
restart|force-reload)
	init_is_upstart > /dev/null 2>&1 && exit 1
	log_daemon_msg "Restarting $DESC" "$NAME"
	do_stop
	case $? in
		0|1)
			do_start
			case $? in
				0) log_end_msg 0 ;;
				1) log_end_msg 1 ;; # Old process is still running
				*) log_end_msg 1 ;; # Failed to start
			esac
		;;
		*) log_end_msg 1 ;; # Failed to stop
	esac
;;
*)
	echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
	exit 3
;;
esac

exit 0
diff --git a/tools/neutron-zenic-agent.conf b/tools/neutron-zenic-agent.conf
new file mode 100644
index 0000000..ded10da
--- /dev/null
+++ b/tools/neutron-zenic-agent.conf
@@ -0,0 +1,23 @@
1# vim:set ft=upstart ts=2 et:
2description "Neutron zenic Agent"
3author "Chuck Short <zulcss@ubuntu.com>"
4
5start on runlevel [2345]
6stop on runlevel [!2345]
7
8respawn
9
10chdir /var/run
11
12pre-start script
13 mkdir -p /var/run/neutron
14 chown neutron:root /var/run/neutron
15 # Check to see if openvswitch plugin in use by checking
16 # status of cleanup upstart configuration
17 if status neutron-ovs-cleanup; then
18 start wait-for-state WAIT_FOR=neutron-ovs-cleanup WAIT_STATE=running WAITER=neutron-zenic-agent
19 fi
20end script
21
22exec start-stop-daemon --start --chuid neutron --exec /usr/bin/neutron-zenic-agent -- \
23--config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugin.ini --log-file=/var/log/neutron/zenic-agent.log
diff --git a/tools/neutron-zenic-agent.service b/tools/neutron-zenic-agent.service
new file mode 100644
index 0000000..ed597f2
--- /dev/null
+++ b/tools/neutron-zenic-agent.service
@@ -0,0 +1,12 @@
1[Unit]
Description=OpenStack Neutron Zenic Agent
3After=syslog.target network.target
4
5[Service]
6Type=simple
7User=neutron
8ExecStart=/usr/bin/neutron-zenic-agent --log-file /var/log/neutron/zenic-agent.log
9PrivateTmp=false
10
11[Install]
12WantedBy=multi-user.target