From 08bd072d3a21374167193c40f764e66dca305085 Mon Sep 17 00:00:00 2001
From: Jon Proulx
Date: Fri, 13 Mar 2015 16:04:35 -0400
Subject: [PATCH] sanitized config dump

---
 MIT_CSAIL/compute/etc/neutron/api-paste.ini | 30 +
 .../compute/etc/neutron/fwaas_driver.ini | 3 +
 MIT_CSAIL/compute/etc/neutron/l3_agent.ini | 102 ++
 MIT_CSAIL/compute/etc/neutron/neutron.conf | 494 +++++++
 .../etc/neutron/neutron.conf.dpkg-dist | 637 +++++++++
 .../etc/neutron/plugins/ml2/ml2_conf.ini | 83 ++
 .../plugins/ml2/ml2_conf.ini.dpkg-dist | 71 +
 .../neutron/plugins/ml2/ml2_conf_arista.ini | 100 ++
 .../neutron/plugins/ml2/ml2_conf_brocade.ini | 15 +
 .../neutron/plugins/ml2/ml2_conf_cisco.ini | 118 ++
 .../neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 +
 .../etc/neutron/plugins/ml2/ml2_conf_mlnx.ini | 4 +
 .../etc/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 +
 .../etc/neutron/plugins/ml2/ml2_conf_odl.ini | 30 +
 .../etc/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 +
 .../neutron/plugins/ml2/ml2_conf_sriov.ini | 31 +
 MIT_CSAIL/compute/etc/neutron/policy.json | 138 ++
 MIT_CSAIL/compute/etc/neutron/rootwrap.conf | 34 +
 .../etc/neutron/rootwrap.d/debug.filters | 14 +
 .../neutron/rootwrap.d/ipset-firewall.filters | 12 +
 .../rootwrap.d/iptables-firewall.filters | 21 +
 .../compute/etc/neutron/rootwrap.d/l3.filters | 49 +
 .../rootwrap.d/openvswitch-plugin.filters | 22 +
 .../etc/neutron/rootwrap.d/vpnaas.filters | 13 +
 MIT_CSAIL/compute/etc/neutron/vpn_agent.ini | 14 +
 MIT_CSAIL/compute/etc/nova/api-paste.ini | 127 ++
 MIT_CSAIL/compute/etc/nova/logging.conf | 81 ++
 MIT_CSAIL/compute/etc/nova/nova-compute.conf | 4 +
 MIT_CSAIL/compute/etc/nova/nova.conf | 85 ++
 .../compute/etc/nova/nova.conf.dpkg-dist | 12 +
 MIT_CSAIL/compute/etc/nova/policy.json | 348 +++++
 MIT_CSAIL/compute/etc/nova/rootwrap.conf | 27 +
 .../etc/nova/rootwrap.d/compute.filters | 228 +++
 MIT_CSAIL/compute/etc/nova/secret.xml | 6 +
 MIT_CSAIL/compute/etc/nova/virsh.secret | 1 +
 .../NON-OPENSTACK-SPECIFIC-BITS-REMOVED | 0
 .../apache2/conf.d/openstack-dashboard.conf | 10 +
 .../sites-enabled/25-keystone_wsgi_admin.conf | 46 +
 .../sites-enabled/25-keystone_wsgi_main.conf | 49 +
 MIT_CSAIL/controller/etc/cinder/api-paste.ini | 64 +
 .../etc/cinder/api-paste.ini.dpkg-dist | 60 +
 MIT_CSAIL/controller/etc/cinder/cinder.conf | 68 +
 .../etc/cinder/cinder.conf.dpkg-dist | 11 +
 MIT_CSAIL/controller/etc/cinder/logging.conf | 76 +
 MIT_CSAIL/controller/etc/cinder/policy.json | 80 ++
 MIT_CSAIL/controller/etc/cinder/rootwrap.conf | 27 +
 .../etc/cinder/rootwrap.d/volume.filters | 157 +++
 .../etc/glance/glance-api-paste.ini | 66 +
 .../controller/etc/glance/glance-api.conf | 361 +++++
 .../etc/glance/glance-cache-paste.ini | 15 +
 .../controller/etc/glance/glance-cache.conf | 148 ++
 .../etc/glance/glance-registry-paste.ini | 28 +
 .../etc/glance/glance-registry.conf | 98 ++
 .../etc/glance/glance-scrubber-paste.ini | 3 +
 .../etc/glance/glance-scrubber.conf | 108 ++
 MIT_CSAIL/controller/etc/glance/glance.sqlite | 0
 MIT_CSAIL/controller/etc/glance/policy.json | 52 +
 .../controller/etc/glance/schema-image.json | 28 +
 MIT_CSAIL/controller/etc/heat/api-paste.ini | 94 ++
 .../etc/heat/environment.d/default.yaml | 9 +
 MIT_CSAIL/controller/etc/heat/heat.conf | 1241 +++++++++++++++++
 MIT_CSAIL/controller/etc/heat/policy.json | 70 +
 .../etc/keystone/default_catalog.templates | 27 +
 .../etc/keystone/keystone-paste.ini | 121 ++
 .../controller/etc/keystone/keystone-ssl.conf | 385 +++++
 .../controller/etc/keystone/keystone.conf | 386 +++++
 .../etc/keystone/keystone.conf.back | 89 ++
 .../controller/etc/keystone/logging.conf | 39 +
 .../etc/keystone/logging.conf.sample | 39 +
 MIT_CSAIL/controller/etc/keystone/policy.json | 171 +++
 .../controller/etc/neutron/api-paste.ini | 32 +
 .../controller/etc/neutron/dhcp_agent.ini | 80 ++
 MIT_CSAIL/controller/etc/neutron/dnsmasq.conf | 3 +
 .../controller/etc/neutron/fwaas_driver.ini | 3 +
 MIT_CSAIL/controller/etc/neutron/l3_agent.ini | 75 +
 .../controller/etc/neutron/lbaas_agent.ini | 31 +
 .../controller/etc/neutron/metadata_agent.ini | 44 +
 MIT_CSAIL/controller/etc/neutron/neutron.conf | 412 ++++++
 .../etc/neutron/plugins/ml2/ml2_conf.ini | 88 ++
 .../plugins/ml2/ml2_conf.ini.dpkg-dist | 71 +
 .../neutron/plugins/ml2/ml2_conf_arista.ini | 100 ++
 .../neutron/plugins/ml2/ml2_conf_brocade.ini | 15 +
 .../neutron/plugins/ml2/ml2_conf_cisco.ini | 118 ++
 .../neutron/plugins/ml2/ml2_conf_fslsdn.ini | 52 +
 .../etc/neutron/plugins/ml2/ml2_conf_mlnx.ini | 4 +
 .../etc/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 +
 .../etc/neutron/plugins/ml2/ml2_conf_odl.ini | 30 +
 .../etc/neutron/plugins/ml2/ml2_conf_ofa.ini | 13 +
 .../neutron/plugins/ml2/ml2_conf_sriov.ini | 31 +
 .../openvswitch/ovs_neutron_plugin.ini | 169 +++
 MIT_CSAIL/controller/etc/neutron/policy.json | 140 ++
 .../controller/etc/neutron/rootwrap.conf | 34 +
 .../etc/neutron/rootwrap.d/debug.filters | 14 +
 .../etc/neutron/rootwrap.d/dhcp.filters | 36 +
 .../neutron/rootwrap.d/ipset-firewall.filters | 12 +
 .../rootwrap.d/iptables-firewall.filters | 21 +
 .../etc/neutron/rootwrap.d/l3.filters | 49 +
 .../neutron/rootwrap.d/lbaas-haproxy.filters | 26 +
 .../rootwrap.d/openvswitch-plugin.filters | 22 +
 .../etc/neutron/rootwrap.d/vpnaas.filters | 13 +
 .../controller/etc/neutron/vpn_agent.ini | 14 +
 MIT_CSAIL/controller/etc/nova/api-paste.ini | 107 ++
 MIT_CSAIL/controller/etc/nova/logging.conf | 81 ++
 MIT_CSAIL/controller/etc/nova/nova.conf | 103 ++
 .../controller/etc/nova/nova.conf.pre-quantum | 50 +
 MIT_CSAIL/controller/etc/nova/policy.json | 348 +++++
 MIT_CSAIL/controller/etc/nova/rootwrap.conf | 27 +
 .../etc/nova/rootwrap.d/api-metadata.filters | 13 +
 .../etc/openstack-dashboard/local_settings.py | 550 ++++++++
 .../csail/overrides.py | 131 ++
 110 files changed, 10463 insertions(+)
 create mode 100644 MIT_CSAIL/compute/etc/neutron/api-paste.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/fwaas_driver.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/l3_agent.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/neutron.conf
 create mode 100644 MIT_CSAIL/compute/etc/neutron/neutron.conf.dpkg-dist
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_arista.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_brocade.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_cisco.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ncs.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_odl.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ofa.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_sriov.ini
 create mode 100644 MIT_CSAIL/compute/etc/neutron/policy.json
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.conf
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/debug.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/ipset-firewall.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/iptables-firewall.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/l3.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/openvswitch-plugin.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/rootwrap.d/vpnaas.filters
 create mode 100644 MIT_CSAIL/compute/etc/neutron/vpn_agent.ini
 create mode 100644 MIT_CSAIL/compute/etc/nova/api-paste.ini
 create mode 100644 MIT_CSAIL/compute/etc/nova/logging.conf
 create mode 100644 MIT_CSAIL/compute/etc/nova/nova-compute.conf
 create mode 100644 MIT_CSAIL/compute/etc/nova/nova.conf
 create mode 100644 MIT_CSAIL/compute/etc/nova/nova.conf.dpkg-dist
 create mode 100644 MIT_CSAIL/compute/etc/nova/policy.json
 create mode 100644 MIT_CSAIL/compute/etc/nova/rootwrap.conf
 create mode 100644 MIT_CSAIL/compute/etc/nova/rootwrap.d/compute.filters
 create mode 100644 MIT_CSAIL/compute/etc/nova/secret.xml
 create mode 100644 MIT_CSAIL/compute/etc/nova/virsh.secret
 create mode 100644 MIT_CSAIL/controller/etc/apache2/NON-OPENSTACK-SPECIFIC-BITS-REMOVED
 create mode 100644 MIT_CSAIL/controller/etc/apache2/conf.d/openstack-dashboard.conf
 create mode 100644 MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_admin.conf
 create mode 100644 MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_main.conf
 create mode 100644 MIT_CSAIL/controller/etc/cinder/api-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/cinder/api-paste.ini.dpkg-dist
 create mode 100644 MIT_CSAIL/controller/etc/cinder/cinder.conf
 create mode 100644 MIT_CSAIL/controller/etc/cinder/cinder.conf.dpkg-dist
 create mode 100644 MIT_CSAIL/controller/etc/cinder/logging.conf
 create mode 100644 MIT_CSAIL/controller/etc/cinder/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/cinder/rootwrap.conf
 create mode 100644 MIT_CSAIL/controller/etc/cinder/rootwrap.d/volume.filters
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-api-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-api.conf
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-cache-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-cache.conf
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-registry-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-registry.conf
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-scrubber-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance-scrubber.conf
 create mode 100644 MIT_CSAIL/controller/etc/glance/glance.sqlite
 create mode 100644 MIT_CSAIL/controller/etc/glance/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/glance/schema-image.json
 create mode 100644 MIT_CSAIL/controller/etc/heat/api-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/heat/environment.d/default.yaml
 create mode 100644 MIT_CSAIL/controller/etc/heat/heat.conf
 create mode 100644 MIT_CSAIL/controller/etc/heat/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/keystone/default_catalog.templates
 create mode 100644 MIT_CSAIL/controller/etc/keystone/keystone-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/keystone/keystone-ssl.conf
 create mode 100644 MIT_CSAIL/controller/etc/keystone/keystone.conf
 create mode 100644 MIT_CSAIL/controller/etc/keystone/keystone.conf.back
 create mode 100644 MIT_CSAIL/controller/etc/keystone/logging.conf
 create mode 100644 MIT_CSAIL/controller/etc/keystone/logging.conf.sample
 create mode 100644 MIT_CSAIL/controller/etc/keystone/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/neutron/api-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/dhcp_agent.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/dnsmasq.conf
 create mode 100644 MIT_CSAIL/controller/etc/neutron/fwaas_driver.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/l3_agent.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/lbaas_agent.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/metadata_agent.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/neutron.conf
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_arista.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_brocade.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_cisco.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ncs.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_odl.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ofa.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_sriov.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
 create mode 100644 MIT_CSAIL/controller/etc/neutron/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.conf
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/debug.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/dhcp.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/ipset-firewall.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/iptables-firewall.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/l3.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/lbaas-haproxy.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/openvswitch-plugin.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/rootwrap.d/vpnaas.filters
 create mode 100644 MIT_CSAIL/controller/etc/neutron/vpn_agent.ini
 create mode 100644 MIT_CSAIL/controller/etc/nova/api-paste.ini
 create mode 100644 MIT_CSAIL/controller/etc/nova/logging.conf
 create mode 100644 MIT_CSAIL/controller/etc/nova/nova.conf
 create mode 100644 MIT_CSAIL/controller/etc/nova/nova.conf.pre-quantum
 create mode 100644 MIT_CSAIL/controller/etc/nova/policy.json
 create mode 100644 MIT_CSAIL/controller/etc/nova/rootwrap.conf
 create mode 100644 MIT_CSAIL/controller/etc/nova/rootwrap.d/api-metadata.filters
 create mode 100644 MIT_CSAIL/controller/etc/openstack-dashboard/local_settings.py
 create mode 100644 MIT_CSAIL/controller/usr/local/lib/python2.7/dist-packages/horizon_csail-0.1-py2.7.egg/csail/overrides.py

diff --git a/MIT_CSAIL/compute/etc/neutron/api-paste.ini b/MIT_CSAIL/compute/etc/neutron/api-paste.ini
new file mode 100644
index 0000000..bbcd415
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/api-paste.ini
@@ -0,0 +1,30 @@
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = request_id catch_errors extensions neutronapiapp_v2_0
+keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
+
+[filter:request_id]
+paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:catch_errors]
+paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.api.versions:Versions.factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
diff --git a/MIT_CSAIL/compute/etc/neutron/fwaas_driver.ini b/MIT_CSAIL/compute/etc/neutron/fwaas_driver.ini
new file mode 100644
index 0000000..41f761a
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/fwaas_driver.ini
@@ -0,0 +1,3 @@
+[fwaas]
+#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+#enabled = True
diff --git a/MIT_CSAIL/compute/etc/neutron/l3_agent.ini b/MIT_CSAIL/compute/etc/neutron/l3_agent.ini
new file mode 100644
index 0000000..94c9714
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/l3_agent.ini
@@ -0,0 +1,102 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+# use_namespaces = True
+
+# If use_namespaces is set as False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+# handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# empty value for the linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+# external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+# metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+# send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+# periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+# periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
+
+# The working mode for the agent. Allowed values are:
+# - legacy: this preserves the existing behavior where the L3 agent is
+#   deployed on a centralized networking node to provide L3 services
+#   like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
+# - dvr: this mode enables DVR functionality, and must be used for an L3
+#   agent that runs on a compute host.
+# - dvr_snat: this enables centralized SNAT support in conjunction with
+#   DVR. This mode must be used for an L3 agent running on a centralized
+#   node (or in single-host deployments, e.g. devstack).
+# agent_mode = legacy
+
+# Location to store keepalived and all HA configurations
+# ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type AH/PASS
+# ha_vrrp_auth_type = PASS
+
+# VRRP authentication password
+# ha_vrrp_auth_password =
+
+# The advertisement interval in seconds
+# ha_vrrp_advert_int = 2
diff --git a/MIT_CSAIL/compute/etc/neutron/neutron.conf b/MIT_CSAIL/compute/etc/neutron/neutron.conf
new file mode 100644
index 0000000..bf2ca76
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/neutron.conf
@@ -0,0 +1,494 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+verbose = True
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+debug = True
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+# dhcp_lease_duration = 86400
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = neutron.openstack.common.rpc.impl_kombu
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# IP address of the RabbitMQ installation
+# rabbit_host = localhost
+rabbit_host = .csail.mit.edu
+# Password of the RabbitMQ server
+# rabbit_password = guest
+rabbit_password =
+# Port where RabbitMQ server is running/listening
+# rabbit_port = 5672
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+rabbit_hosts = .csail.mit.edu:5672
+# User ID used for RabbitMQ connections
+# rabbit_userid = guest
+rabbit_userid = REDACTED
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+rabbit_ha_queues = False
+
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+# default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+log_dir=/var/log/neutron
+rabbit_use_ssl=False
+kombu_reconnect_delay=1.0
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+connection = sqlite:////var/lib/neutron/neutron.sqlite
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/MIT_CSAIL/compute/etc/neutron/neutron.conf.dpkg-dist b/MIT_CSAIL/compute/etc/neutron/neutron.conf.dpkg-dist
new file mode 100644
index 0000000..f96b23c
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/neutron.conf.dpkg-dist
@@ -0,0 +1,637 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+# verbose = False
+
+# =========Start Global Config Option for Distributed L3 Router===============
+# Setting the "router_distributed" flag to "True" will default to the creation
+# of distributed tenant routers. The admin can override this flag by specifying
+# the type of the router on the create request (admin-only attribute). Default
+# value is "False" to support legacy mode (centralized) routers.
+#
+# router_distributed = False
+#
+# ===========End Global Config Option for Distributed L3 Router===============
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+# debug = False
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+# state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server to
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MAC's allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True
+
+# Default maximum number of items returned in a single response,
+# value == infinite and value < 0 means no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# Maximum number of routes per router
+# max_routes = 30
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Allow automatic rescheduling of routers from dead L3 agents with
+# admin_state_up set to True to alive agents.
+# allow_automatic_l3agent_failover = False
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== items for l3 extension ==============
+# Enable high availability for virtual routers.
+# l3_ha = False
+#
+# Maximum number of l3 agents which a HA router will be scheduled on. If it
+# is set to 0 the router will be scheduled on every agent.
+# max_l3_agents_per_router = 3
+#
+# Minimum number of l3 agents which a HA router will be scheduled on. The
+# default value is 2.
+# min_l3_agents_per_router = 2
+#
+# CIDR of the administrative network if HA mode is enabled
+# l3_ha_net_cidr = 169.254.192.0/18
+# =========== end of items for l3 extension =======
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+# api_workers = 0
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+# rpc_workers = 0
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+# notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+# notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+# nova_url = http://127.0.0.1:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+# nova_region_name =
+
+# Username for connection to nova in admin context
+# nova_admin_username =
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+# nova_admin_password =
+
+# Authorization URL for connection to nova in admin context.
+# nova_admin_auth_url =
+
+# CA file for novaclient to verify server certificates
+# nova_ca_certificates_file =
+
+# Boolean to control ignoring SSL errors on the nova url
+# nova_api_insecure = False
+
+# Number of seconds between sending events to nova if there are any events to send
+# send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=oslo
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+[quotas]
+# Default driver to use for quota checks
+# quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+# default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# quota_port = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group = 10
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+# quota_security_group_rule = 100
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitor = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+# Number of firewalls allowed per tenant. A negative value means unlimited.
+# quota_firewall = 1
+
+# Number of firewall policies allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_policy = 1
+
+# Number of firewall rules allowed per tenant. A negative value means
+# unlimited.
+# quota_firewall_rule = 100
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+connection = sqlite:////var/lib/neutron/neutron.sqlite
+# NOTE: In deployment the [database] section and its connection attribute may
+# be set in the corresponding core plugin '.ini' file. However, it is suggested
+# to put the [database] section and its connection attribute in this
+# configuration file.
+
+# Database engine for which script will be generated when using offline
+# migration
+# engine =
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# setting this to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
+# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
+#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
+# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
+# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini
new file mode 100644
index 0000000..48ae22e
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini
@@ -0,0 +1,83 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+type_drivers = gre,vlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+tenant_network_types = gre
+# Example: tenant_network_types = vlan,gre,vxlan
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers=openvswitch
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+# flat_networks =
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+network_vlan_ranges=trunk:2112:2114
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+# tunnel_id_ranges =
+tunnel_id_ranges=1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+# vni_ranges =
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+# vxlan_group =
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+enable_security_group = True
+firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+
+[agent]
+tunnel_types=gre
+l2_population=False
+polling_interval=30
+veth_mtu=9134
+
+[ovs]
+enable_tunneling=True
+tenant_network_type=gre
+bridge_mappings=trunk:eth1-br
+local_ip=
+network_vlan_ranges=trunk:2112:2114
+tunnel_id_ranges=1:1000
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist
new file mode 100644
index 0000000..4fb1a4a
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist
@@ -0,0 +1,71 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+
+# (ListOpt) Ordered list of extension driver entrypoints
+# to be loaded from the neutron.ml2.extension_drivers namespace.
+# extension_drivers =
+# Example: extension_drivers = anewextensiondriver
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+# flat_networks =
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+# network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+# tunnel_id_ranges =
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+# vni_ranges =
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+# vxlan_group =
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+
+# Use ipset to speed-up the iptables security groups. Enabling ipset support
+# requires that ipset is installed on L2 agent node.
+# enable_ipset = True
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_arista.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_arista.ini
new file mode 100644
index 0000000..abaf5bc
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_arista.ini
@@ -0,0 +1,100 @@
+# Defines configuration options specific to the Arista ML2 Mechanism driver
+
+[ml2_arista]
+# (StrOpt) EOS IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail.
+#
+# eapi_host =
+# Example: eapi_host = 192.168.0.1
+#
+# (StrOpt) EOS command API username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_username =
+# Example: arista_eapi_username = admin
+#
+# (StrOpt) EOS command API password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# eapi_password =
+# Example: eapi_password = my_password
+#
+# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
+# ("node1.domain.com") or as short names ("node1"). This is
+# optional. If not set, a value of "True" is assumed.
+#
+# use_fqdn =
+# Example: use_fqdn = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# sync_interval =
+# Example: sync_interval = 60
+#
+# (StrOpt) Defines the Region Name that is assigned to this OpenStack Controller.
+# This is useful when multiple OpenStack/Neutron controllers are
+# managing the same Arista HW clusters. Note that this name must
+# match the region name registered (or known) to the keystone
+# service. Authentication with Keystone is performed by EOS.
+# This is optional. If not set, a value of "RegionOne" is assumed.
+#
+# region_name =
+# Example: region_name = RegionOne
+
+
+[l3_arista]
+
+# (StrOpt) Primary host IP address. This is a required field. If not set, all
+# communications to Arista EOS will fail. This is the host where
+# the primary router is created.
+#
+# primary_l3_host =
+# Example: primary_l3_host = 192.168.10.10
+#
+# (StrOpt) Primary host username. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_username =
+# Example: arista_primary_l3_username = admin
+#
+# (StrOpt) Primary host password. This is a required field.
+# If not set, all communications to Arista EOS will fail.
+#
+# primary_l3_host_password =
+# Example: primary_l3_password = my_password
+#
+# (StrOpt) IP address of the second Arista switch paired as
+# MLAG (Multi-chassis Link Aggregation) with the first.
+# This is an optional field; however, if the mlag_config flag is set,
+# then this is a required field. If not set, all
+# communications to Arista EOS will fail. If mlag_config is set
+# to False, then this field is ignored.
+#
+# secondary_l3_host =
+# Example: secondary_l3_host = 192.168.10.20
+#
+# (BoolOpt) Defines if Arista switches are configured in MLAG mode.
+# If yes, all L3 configuration is pushed to both switches
+# automatically. If this flag is set, ensure that secondary_l3_host
+# is set to the second switch's IP.
+# This flag is optional. If not set, a value of "False" is assumed.
+#
+# mlag_config =
+# Example: mlag_config = True
+#
+# (BoolOpt) Defines if the router is created in the default VRF or a
+# specific VRF. This is optional.
+# If not set, a value of "False" is assumed.
+#
+# Example: use_vrf = True
+#
+# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
+# This field defines how often the synchronization is performed.
+# This is an optional field. If not set, a value of 180 seconds
+# is assumed.
+#
+# l3_sync_interval =
+# Example: l3_sync_interval = 60
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_brocade.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_brocade.ini
new file mode 100644
index 0000000..6757411
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_brocade.ini
@@ -0,0 +1,15 @@
+[ml2_brocade]
+# username =
+# password =
+# address =
+# ostype = NOS
+# osversion = autodetect | n.n.n
+# physical_networks = physnet1,physnet2
+#
+# Example:
+# username = admin
+# password = password
+# address = 10.24.84.38
+# ostype = NOS
+# osversion = 4.1.1
+# physical_networks = physnet1,physnet2
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_cisco.ini
new file mode 100644
index 0000000..1b69100
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_cisco.ini
@@ -0,0 +1,118 @@
+[ml2_cisco]
+
+# (StrOpt) A short prefix to prepend to the VLAN number when creating a
+# VLAN interface. For example, if an interface is being created for
+# VLAN 2001 it will be named 'q-2001' using the default prefix.
+#
+# vlan_name_prefix = q-
+# Example: vlan_name_prefix = vnet-
+
+# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
+# svi_round_robin = False
+
+#
+# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
+# This string value must be present in the ml2_conf.ini network_vlan_ranges
+# variable.
+#
+# managed_physical_network =
+# Example: managed_physical_network = physnet1
+
+# Cisco Nexus Switch configurations.
+# Each switch to be managed by OpenStack Neutron must be configured here.
+#
+# Cisco Nexus Switch Format.
+# [ml2_mech_cisco_nexus:<IP address of switch>]
+# <hostname>=<intf_type:port> (1)
+# ssh_port=<ssh port> (2)
+# username=<credential username> (3)
+# password=<credential password> (4)
+#
+# (1) For each host connected to a port on the switch, specify the hostname
+# and the Nexus physical port (interface) it is connected to.
+# Valid intf_type's are 'ethernet' and 'port-channel'.
+# The default setting for <intf_type> is 'ethernet' and need not be
+# added to this setting.
+# (2) The TCP port for connecting via SSH to manage the switch. This is
+# port number 22 unless the switch has been configured otherwise.
+# (3) The username for logging into the switch to manage it.
+# (4) The password for logging into the switch to manage it.
+#
+# Example:
+# [ml2_mech_cisco_nexus:1.1.1.1]
+# compute1=1/1
+# compute2=ethernet:1/2
+# compute3=port-channel:1
+# ssh_port=22
+# username=admin
+# password=mySecretPassword
+
+[ml2_cisco_apic]
+
+# Hostname:port list of APIC controllers
+# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
+
+# Username for the APIC controller
+# apic_username = user
+
+# Password for the APIC controller
+# apic_password = password
+
+# Whether to use SSL for connecting to the APIC controller or not
+# apic_use_ssl = True
+
+# How to map names to APIC: use_uuid or use_name
+# apic_name_mapping = use_name
+
+# Names for APIC objects used by Neutron
+# Note: When deploying multiple clouds against one APIC,
+# these names must be unique between the clouds.
+# apic_vmm_domain = openstack
+# apic_vlan_ns_name = openstack_ns
+# apic_node_profile = openstack_profile
+# apic_entity_profile = openstack_entity
+# apic_function_profile = openstack_function
+# apic_app_profile_name = openstack_app
+# Agent timers for State reporting and topology discovery
+# apic_sync_interval = 30
+# apic_agent_report_interval = 30
+# apic_agent_poll_interval = 2
+
+# Specify your network topology.
+# This section indicates how your compute nodes are connected to the fabric's
+# switches and ports. The format is as follows:
+#
+# [apic_switch:<switch_id_from_the_apic>]
+# <compute_host>,<compute_host> = <switchport_the_hosts_are_connected_to>
+#
+# You can have multiple sections, one for each switch in your fabric that is
+# participating in OpenStack. e.g.
+#
+# [apic_switch:17]
+# ubuntu,ubuntu1 = 1/10
+# ubuntu2,ubuntu3 = 1/11
+#
+# [apic_switch:18]
+# ubuntu5,ubuntu6 = 1/1
+# ubuntu7,ubuntu8 = 1/2
+
+# Describe external connectivity.
+# In this section you can specify the external network configuration in order
+# for the plugin to be able to teach the fabric how to route the internal
+# traffic to the outside world. The external connectivity configuration
+# format is as follows:
+#
+# [apic_external_network:<network_name>]
+# switch = <switch_id_from_the_apic>
+# port = <switchport_the_external_router_is_connected_to>
+# encap = <encapsulation>
+# cidr_exposed = <cidr_exposed_to_the_external_router>
+# gateway_ip = <gateway_ip_of_the_external_router>
+#
+# An example follows:
+# [apic_external_network:network_ext]
+# switch=203
+# port=1/34
+# encap=vlan-100
+# cidr_exposed=10.10.40.2/16
+# gateway_ip=10.10.40.1
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
new file mode 100644
index 0000000..6ee4a4e
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
@@ -0,0 +1,52 @@
+# Defines configuration options for FSL SDN OS Mechanism Driver
+# Cloud Resource Discovery (CRD) authorization credentials
+[ml2_fslsdn]
+#(StrOpt) User name for authentication to CRD.
+# e.g.: user12
+#
+# crd_user_name =
+
+#(StrOpt) Password for authentication to CRD.
+# e.g.: secret
+#
+# crd_password =
+
+#(StrOpt) Tenant name for CRD service.
+# e.g.: service
+#
+# crd_tenant_name =
+
+#(StrOpt) CRD auth URL.
+# e.g.: http://127.0.0.1:5000/v2.0/
+#
+# crd_auth_url =
+
+#(StrOpt) URL for connecting to CRD Service.
+# e.g.: http://127.0.0.1:9797
+#
+# crd_url=
+
+#(IntOpt) Timeout value for connecting to CRD service
+# in seconds, e.g.: 30
+#
+# crd_url_timeout=
+
+#(StrOpt) Region name for connecting to CRD in
+# admin context, e.g.: RegionOne
+#
+# crd_region_name=
+
+#(BoolOpt) If set, ignore any SSL validation issues (boolean value)
+# e.g.: False
+#
+# crd_api_insecure=
+
+#(StrOpt) Authorization strategy for connecting to CRD in admin
+# context, e.g.: keystone
+#
+# crd_auth_strategy=
+
+#(StrOpt) Location of CA certificates file to use for CRD client
+# requests.
+#
+# crd_ca_certificates_file=
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
new file mode 100644
index 0000000..46139ae
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
@@ -0,0 +1,4 @@
+[eswitch]
+# (StrOpt) Type of Network Interface to allocate for VM:
+# mlnx_direct or hostdev according to libvirt terminology
+# vnic_type = mlnx_direct
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ncs.ini
new file mode 100644
index 0000000..dbbfcbd
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ncs.ini
@@ -0,0 +1,28 @@
+# Defines configuration options specific to the Tail-f NCS Mechanism Driver
+
+[ml2_ncs]
+# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
+# subtree.
+# If this is not set then no HTTP requests will be made.
+#
+# url =
+# Example: url = http://ncs/api/running/services/openstack
+
+# (StrOpt) Username for HTTP basic authentication to NCS.
+# This is an optional parameter. If unspecified then no authentication is used.
+#
+# username =
+# Example: username = admin
+
+# (StrOpt) Password for HTTP basic authentication to NCS.
+# This is an optional parameter. If unspecified then no authentication is used.
+#
+# password =
+# Example: password = admin
+
+# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
+# This is an optional parameter; the default value is 10 seconds.
+#
+# timeout =
+# Example: timeout = 15
+
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_odl.ini
new file mode 100644
index 0000000..9e88c1b
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_odl.ini
@@ -0,0 +1,30 @@
+# Configuration for the OpenDaylight MechanismDriver
+
+[ml2_odl]
+# (StrOpt) OpenDaylight REST URL
+# If this is not set then no HTTP requests will be made.
+#
+# url =
+# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
+
+# (StrOpt) Username for HTTP basic authentication to ODL.
+#
+# username =
+# Example: username = admin
+
+# (StrOpt) Password for HTTP basic authentication to ODL.
+#
+# password =
+# Example: password = admin
+
+# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
+# This is an optional parameter; the default value is 10 seconds.
+#
+# timeout = 10
+# Example: timeout = 15
+
+# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
+# This is an optional parameter; the default value is 30 minutes.
+#
+# session_timeout = 30
+# Example: session_timeout = 60
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ofa.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ofa.ini
new file mode 100644
index 0000000..4a94b98
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_ofa.ini
@@ -0,0 +1,13 @@
+# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
+
+[ovs]
+# Please refer to the Open vSwitch configuration options.
+
+[agent]
+# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
+# This is an optional parameter; the default value is 60 seconds.
+#
+# get_datapath_retry_times =
+# Example: get_datapath_retry_times = 30
+
+# For options other than the above, please refer to the Open vSwitch configuration options.
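+#
+# For illustration only: the [ovs] section here reuses the same keys as the
+# Open vSwitch agent configuration elsewhere in this dump (enable_tunneling,
+# local_ip, bridge_mappings). The values below are placeholders, not
+# settings from this deployment:
+# [ovs]
+# enable_tunneling = True
+# local_ip = 192.0.2.10
+# bridge_mappings = physnet1:br-eth1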
diff --git a/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_sriov.ini b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_sriov.ini
new file mode 100644
index 0000000..9566f54
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/plugins/ml2/ml2_conf_sriov.ini
@@ -0,0 +1,31 @@
+# Defines configuration options for SRIOV NIC Switch MechanismDriver
+# and Agent
+
+[ml2_sriov]
+# (ListOpt) Comma-separated list of
+# supported Vendor PCI Devices, in format vendor_id:product_id
+#
+# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
+# Example: supported_pci_vendor_devs = 15b3:1004
+#
+# (BoolOpt) Requires running SRIOV neutron agent for port binding
+# agent_required = True
+
+[sriov_nic]
+# (ListOpt) Comma-separated list of <physical_network>:<network_device>
+# tuples mapping physical network names to the agent's node-specific
+# physical network device interfaces of SR-IOV physical function to be used
+# for VLAN networks. All physical networks listed in network_vlan_ranges on
+# the server should have mappings to appropriate interfaces on each agent.
+#
+# physical_device_mappings =
+# Example: physical_device_mappings = physnet1:eth1
+#
+# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
+# tuples, mapping network_device to the agent's node-specific list of virtual
+# functions that should not be used for virtual networking.
+# vfs_to_exclude is a semicolon-separated list of virtual
+# functions to exclude from network_device. The network_device in the
+# mapping should appear in the physical_device_mappings list.
+# exclude_devices =
+# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
diff --git a/MIT_CSAIL/compute/etc/neutron/policy.json b/MIT_CSAIL/compute/etc/neutron/policy.json
new file mode 100644
index 0000000..e7db435
--- /dev/null
+++ b/MIT_CSAIL/compute/etc/neutron/policy.json
@@ -0,0 +1,138 @@
+{
+    "context_is_admin": "role:admin",
+    "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
+    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
+    "admin_only": "rule:context_is_admin",
+    "regular_user": "",
+    "shared": "field:networks:shared=True",
+    "shared_firewalls": "field:firewalls:shared=True",
+    "external": "field:networks:router:external=True",
+    "default": "rule:admin_or_owner",
+
+    "create_subnet": "rule:admin_or_network_owner",
+    "get_subnet": "rule:admin_or_owner or rule:shared",
+    "update_subnet": "rule:admin_or_network_owner",
+    "delete_subnet": "rule:admin_or_network_owner",
+
+    "create_network": "",
+    "get_network": "rule:admin_or_owner or rule:shared or rule:external",
+    "get_network:router:external": "rule:regular_user",
+    "get_network:segments": "rule:admin_only",
+    "get_network:provider:network_type": "rule:admin_only",
+    "get_network:provider:physical_network": "rule:admin_only",
+    "get_network:provider:segmentation_id": "rule:admin_only",
+    "get_network:queue_id": "rule:admin_only",
+    "create_network:shared": "rule:admin_only",
+    "create_network:router:external": "rule:admin_only",
+    "create_network:segments": "rule:admin_only",
+    "create_network:provider:network_type": "rule:admin_only",
+    "create_network:provider:physical_network": "rule:admin_only",
+    "create_network:provider:segmentation_id": "rule:admin_only",
+    "update_network": "rule:admin_or_owner",
+    "update_network:segments": "rule:admin_only",
+    "update_network:shared": "rule:admin_only",
+    "update_network:provider:network_type": "rule:admin_only",
+    "update_network:provider:physical_network": "rule:admin_only",
+    "update_network:provider:segmentation_id": "rule:admin_only",
+    
"update_network:router:external": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:admin_or_network_owner", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": "rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "get_router:ha": "rule:admin_only", + "create_router": "rule:regular_user", + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "create_router:distributed": "rule:admin_only", + "create_router:ha": "rule:admin_only", + "get_router": "rule:admin_or_owner", + "get_router:distributed": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:distributed": "rule:admin_only", + "update_router:ha": "rule:admin_only", + "delete_router": "rule:admin_or_owner", + + "add_router_interface": "rule:admin_or_owner", + "remove_router_interface": "rule:admin_or_owner", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "update_firewall:shared": "rule:admin_only", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + 
"update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.conf b/MIT_CSAIL/compute/etc/neutron/rootwrap.conf new file mode 100644 index 0000000..dee1dd9 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. +xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/debug.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 0000000..b61d960 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/ipset-firewall.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/ipset-firewall.filters new file mode 100644 index 0000000..52c6637 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/ipset-firewall.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] +# neutron/agent/linux/iptables_firewall.py +# "ipset", "-A", ... 
+ipset: CommandFilter, ipset, root diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/iptables-firewall.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 0000000..b8a6ab5 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... +iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/l3.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 0000000..d9e4a13 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,49 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root +radvd: CommandFilter, radvd, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. 
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 +kill_metadata6: KillFilter, root, python2.6, -9 +kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP +kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# Keepalived +keepalived: CommandFilter, keepalived, root +kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9 + +# l3 agent to delete floatingip's conntrack state +conntrack: CommandFilter, conntrack, root diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 0000000..b63a83b --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/MIT_CSAIL/compute/etc/neutron/rootwrap.d/vpnaas.filters b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 0000000..7848136 --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/MIT_CSAIL/compute/etc/neutron/vpn_agent.ini b/MIT_CSAIL/compute/etc/neutron/vpn_agent.ini new file mode 100644 index 0000000..c3089df --- /dev/null +++ b/MIT_CSAIL/compute/etc/neutron/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. 
+# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/MIT_CSAIL/compute/etc/nova/api-paste.ini b/MIT_CSAIL/compute/etc/nova/api-paste.ini new file mode 100644 index 0000000..f16e36c --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/api-paste.ini @@ -0,0 +1,127 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: meta + +[pipeline:meta] +pipeline = ec2faultwrap logrequest metaapp + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/services/Cloud: ec2cloud + +[composite:ec2cloud] +use = call:nova.api.auth:pipeline_factory +noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor +keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor + +[filter:ec2faultwrap] +paste.filter_factory = nova.api.ec2:FaultWrapper.factory + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:ec2keystoneauth] +paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory + +[filter:ec2noauth] +paste.filter_factory = nova.api.ec2:NoAuth.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[filter:validator] +paste.filter_factory = nova.api.ec2:Validator.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +############# +# OpenStack # +############# + +[composite:osapi_compute] +use = call:nova.api.openstack.urlmap:urlmap_factory +/: oscomputeversions +/v1.1: openstack_compute_api_v2 +/v2: openstack_compute_api_v2 +/v2.1: openstack_compute_api_v21 +/v3: openstack_compute_api_v3 + +[composite:openstack_compute_api_v2] +use = call:nova.api.auth:pipeline_factory +noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 +keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2 +keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 + +[composite:openstack_compute_api_v21] +use = call:nova.api.auth:pipeline_factory_v21 +noauth = request_id faultwrap sizelimit noauth osapi_compute_app_v21 +keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21 + +[composite:openstack_compute_api_v3] +use = call:nova.api.auth:pipeline_factory_v21 +noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3 +keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3 + +[filter:request_id] +paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:compute_req_id] +paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +[filter:noauth_v3] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory + +[filter:ratelimit] +paste.filter_factory = 
nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory + +[app:osapi_compute_app_v2] +paste.app_factory = nova.api.openstack.compute:APIRouter.factory + +[app:osapi_compute_app_v21] +paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory + +[app:osapi_compute_app_v3] +paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory + +[pipeline:oscomputeversions] +pipeline = faultwrap oscomputeversionapp + +[app:oscomputeversionapp] +paste.app_factory = nova.api.openstack.compute.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/MIT_CSAIL/compute/etc/nova/logging.conf b/MIT_CSAIL/compute/etc/nova/logging.conf new file mode 100644 index 0000000..5482a04 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/logging.conf @@ -0,0 +1,81 @@ +[loggers] +keys = root, nova + +[handlers] +keys = stderr, stdout, watchedfile, syslog, null + +[formatters] +keys = context, default + +[logger_root] +level = WARNING +handlers = null + +[logger_nova] +level = INFO +handlers = stderr +qualname = nova + +[logger_amqp] +level = WARNING +handlers = stderr +qualname = amqp + +[logger_amqplib] +level = WARNING +handlers = stderr +qualname = amqplib + +[logger_sqlalchemy] +level = WARNING +handlers = stderr +qualname = sqlalchemy +# "level = INFO" logs SQL queries. +# "level = DEBUG" logs SQL queries and results. +# "level = WARNING" logs neither. (Recommended for production systems.) + +[logger_boto] +level = WARNING +handlers = stderr +qualname = boto + +[logger_suds] +level = INFO +handlers = stderr +qualname = suds + +[logger_eventletwsgi] +level = WARNING +handlers = stderr +qualname = eventlet.wsgi.server + +[handler_stderr] +class = StreamHandler +args = (sys.stderr,) +formatter = context + +[handler_stdout] +class = StreamHandler +args = (sys.stdout,) +formatter = context + +[handler_watchedfile] +class = handlers.WatchedFileHandler +args = ('nova.log',) +formatter = context + +[handler_syslog] +class = handlers.SysLogHandler +args = ('/dev/log', handlers.SysLogHandler.LOG_USER) +formatter = context + +[handler_null] +class = nova.openstack.common.log.NullHandler +formatter = default +args = () + +[formatter_context] +class = nova.openstack.common.log.ContextFormatter + +[formatter_default] +format = %(message)s diff --git a/MIT_CSAIL/compute/etc/nova/nova-compute.conf b/MIT_CSAIL/compute/etc/nova/nova-compute.conf new file mode 100644 index 0000000..48ad489 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/nova-compute.conf @@ -0,0 +1,4 @@ +[DEFAULT] +compute_driver=libvirt.LibvirtDriver +[libvirt] +virt_type=kvm diff --git a/MIT_CSAIL/compute/etc/nova/nova.conf b/MIT_CSAIL/compute/etc/nova/nova.conf new file mode 100644 index 0000000..5cbb9e4 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/nova.conf @@ -0,0 +1,85 @@ +[DEFAULT] +state_path=/var/lib/nova +lock_path=/var/lock/nova +verbose=True +network_api_class=nova.network.neutronv2.api.API +debug=False +log_dir=/var/log/nova +amqp_durable_queues=False +vncserver_proxyclient_address= +rabbit_hosts=.csail.mit.edu:5672 +notify_api_faults=False +live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE +rabbit_use_ssl=False +live_migration_bandwidth=0 
+notification_driver=nova.openstack.common.notifier.log_notifier +rabbit_userid= +rabbit_ha_queues=False +network_device_mtu=9134 +rabbit_password= +report_interval=10 +security_group_api=neutron +live_migration_uri=qemu+ssh://nova@%s/system?keyfile=/var/lib/nova/.ssh/id_rsa&no_tty=1&no_verify=1 +rabbit_host=.csail.mit.edu +vnc_enabled=True +rabbit_virtual_host=/ +image_service=nova.image.glance.GlanceImageService +heal_instance_info_cache_interval=0 +firewall_driver=nova.virt.firewall.NoopFirewallDriver +rabbit_port=5672 +live_migration_retry_count=30 +vif_plugging_is_fatal=True +novncproxy_base_url=http://.csail.mit.edu:6080/vnc_auto.html +service_down_time=60 +vncserver_listen=0.0.0.0 +notification_topics=notifications +auth_strategy=keystone +compute_driver=libvirt.LibvirtDriver +rootwrap_config=/etc/nova/rootwrap.conf +rpc_backend=nova.openstack.common.rpc.impl_kombu +vif_plugging_timeout=300 +libvirt_cpu_mode=host-passthrough +use_syslog=False +compute_monitors=ComputeDriverCPUMonitor +force_raw_images=True +vnc_keymap=en-us +force_snat_range=0.0.0.0/0 +dhcp_domain=novalocal + +[conductor] +use_local=False + +[libvirt] +vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver +cpu_mode=host-model +virt_type=kvm +images_rbd_pool=vms +rbd_secret_uuid=cf58e08b-3c51-410f-b043-619c616c6f44 +images_type=rbd +images_rbd_ceph_conf=/etc/ceph/ceph.conf +rbd_user=openstack + +[database] + +[LIBVIRT] +volume_clear=none + + + +[Default] + +[neutron] +admin_auth_url=http://.csail.mit.edu:35357/v2.0 +extension_sync_interval=600 +admin_username=neutron +admin_tenant_name=openstack +url_timeout=30 +admin_password= +auth_strategy=keystone +default_tenant_id=default +url=http://.csail.mit.edu:9696 +ovs_bridge=br-int +region_name=RegionOne + +[glance] +api_servers=.csail.mit.edu:9292 diff --git a/MIT_CSAIL/compute/etc/nova/nova.conf.dpkg-dist b/MIT_CSAIL/compute/etc/nova/nova.conf.dpkg-dist new file mode 100644 index 0000000..a2a354d --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/nova.conf.dpkg-dist @@ -0,0 +1,12 @@ +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +libvirt_use_virtio_for_bridges=True +verbose=True +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +enabled_apis=ec2,osapi_compute,metadata diff --git a/MIT_CSAIL/compute/etc/nova/policy.json b/MIT_CSAIL/compute/etc/nova/policy.json new file mode 100644 index 0000000..cdb8014 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/policy.json @@ -0,0 +1,348 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "compute:create": "", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", + "compute:get_all": "", + "compute:get_all_tenants": "", + "compute:start": "rule:admin_or_owner", + "compute:stop": "rule:admin_or_owner", + "compute:unlock_override": "rule:admin_api", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "admin_api": "is_admin:True", + "compute:v3:servers:start": "rule:admin_or_owner", + "compute:v3:servers:stop": "rule:admin_or_owner", + "compute_extension:v3:os-access-ips:discoverable": "", + "compute_extension:v3:os-access-ips": "", 
+ "compute_extension:accounts": "rule:admin_api", + "compute_extension:admin_actions": "rule:admin_api", + "compute_extension:admin_actions:pause": "rule:admin_or_owner", + "compute_extension:admin_actions:unpause": "rule:admin_or_owner", + "compute_extension:admin_actions:suspend": "rule:admin_or_owner", + "compute_extension:admin_actions:resume": "rule:admin_or_owner", + "compute_extension:admin_actions:lock": "rule:admin_or_owner", + "compute_extension:admin_actions:unlock": "rule:admin_or_owner", + "compute_extension:admin_actions:resetNetwork": "rule:admin_api", + "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", + "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", + "compute_extension:admin_actions:migrateLive": "rule:admin_api", + "compute_extension:admin_actions:resetState": "rule:admin_api", + "compute_extension:admin_actions:migrate": "rule:admin_api", + "compute_extension:v3:os-admin-actions": "rule:admin_api", + "compute_extension:v3:os-admin-actions:discoverable": "", + "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api", + "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api", + "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api", + "compute_extension:v3:os-admin-password": "", + "compute_extension:v3:os-admin-password:discoverable": "", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:v3:os-aggregates:discoverable": "", + "compute_extension:v3:os-aggregates:index": "rule:admin_api", + "compute_extension:v3:os-aggregates:create": "rule:admin_api", + "compute_extension:v3:os-aggregates:show": "rule:admin_api", + "compute_extension:v3:os-aggregates:update": "rule:admin_api", + "compute_extension:v3:os-aggregates:delete": "rule:admin_api", + "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", + "compute_extension:agents": "rule:admin_api", + "compute_extension:v3:os-agents": "rule:admin_api", + "compute_extension:v3:os-agents:discoverable": "", + "compute_extension:attach_interfaces": "", + "compute_extension:v3:os-attach-interfaces": "", + "compute_extension:v3:os-attach-interfaces:discoverable": "", + "compute_extension:baremetal_nodes": "rule:admin_api", + "compute_extension:v3:os-block-device-mapping-v1:discoverable": "", + "compute_extension:cells": "rule:admin_api", + "compute_extension:cells:create": "rule:admin_api", + "compute_extension:cells:delete": "rule:admin_api", + "compute_extension:cells:update": "rule:admin_api", + "compute_extension:cells:sync_instances": "rule:admin_api", + "compute_extension:v3:os-cells": "rule:admin_api", + "compute_extension:v3:os-cells:create": "rule:admin_api", + "compute_extension:v3:os-cells:delete": "rule:admin_api", + "compute_extension:v3:os-cells:update": "rule:admin_api", + "compute_extension:v3:os-cells:sync_instances": "rule:admin_api", + "compute_extension:v3:os-cells:discoverable": "", + "compute_extension:certificates": "", + "compute_extension:v3:os-certificates:create": "", + "compute_extension:v3:os-certificates:show": "", + "compute_extension:v3:os-certificates:discoverable": "", + "compute_extension:cloudpipe": "rule:admin_api", + "compute_extension:cloudpipe_update": "rule:admin_api", + "compute_extension:console_output": "", + "compute_extension:v3:consoles:discoverable": "", + "compute_extension:v3:os-console-output:discoverable": "", + 
"compute_extension:v3:os-console-output": "", + "compute_extension:consoles": "", + "compute_extension:v3:os-remote-consoles": "", + "compute_extension:v3:os-remote-consoles:discoverable": "", + "compute_extension:createserverext": "", + "compute_extension:v3:os-create-backup:discoverable": "", + "compute_extension:v3:os-create-backup": "rule:admin_or_owner", + "compute_extension:deferred_delete": "", + "compute_extension:v3:os-deferred-delete": "", + "compute_extension:v3:os-deferred-delete:discoverable": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate:discoverable": "", + "compute_extension:extended_server_attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes:discoverable": "", + "compute_extension:extended_status": "", + "compute_extension:v3:os-extended-status": "", + "compute_extension:v3:os-extended-status:discoverable": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:v3:os-extended-availability-zone": "", + "compute_extension:v3:os-extended-availability-zone:discoverable": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:v3:extension_info:discoverable": "", + "compute_extension:extended_volumes": "", + "compute_extension:v3:os-extended-volumes": "", + "compute_extension:v3:os-extended-volumes:swap": "", + "compute_extension:v3:os-extended-volumes:discoverable": "", + "compute_extension:v3:os-extended-volumes:attach": "", + "compute_extension:v3:os-extended-volumes:detach": "", + "compute_extension:fixed_ips": "rule:admin_api", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:v3:os-flavor-access": "", + "compute_extension:v3:os-flavor-access:discoverable": "", + "compute_extension:v3:os-flavor-access:remove_tenant_access": "rule:admin_api", + "compute_extension:v3:os-flavor-access:add_tenant_access": "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:v3:os-flavor-rxtx": "", + "compute_extension:v3:os-flavor-rxtx:discoverable": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + "compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "rule:admin_api", + "compute_extension:flavorextraspecs:update": "rule:admin_api", + "compute_extension:flavorextraspecs:delete": "rule:admin_api", + "compute_extension:v3:flavors:discoverable": "", + "compute_extension:v3:flavor-extra-specs:discoverable": "", + "compute_extension:v3:flavor-extra-specs:index": "", + "compute_extension:v3:flavor-extra-specs:show": "", + "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", + "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:v3:flavor-manage:discoverable": "", + "compute_extension:v3:flavor-manage": "rule:admin_api", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + 
"compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "rule:admin_api", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "rule:admin_api", + "compute_extension:hide_server_addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses:discoverable": "", + "compute_extension:hosts": "rule:admin_api", + "compute_extension:v3:os-hosts": "rule:admin_api", + "compute_extension:v3:os-hosts:discoverable": "", + "compute_extension:hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors:discoverable": "", + "compute_extension:image_size": "", + "compute_extension:v3:images:discoverable": "", + "compute_extension:v3:image-size": "", + "compute_extension:v3:image-size:discoverable": "", + "compute_extension:instance_actions": "", + "compute_extension:v3:os-instance-actions": "", + "compute_extension:v3:os-instance-actions:discoverable": "", + "compute_extension:instance_actions:events": "rule:admin_api", + "compute_extension:v3:os-instance-actions:events": "rule:admin_api", + "compute_extension:instance_usage_audit_log": "rule:admin_api", + "compute_extension:v3:ips:discoverable": "", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + "compute_extension:v3:os-keypairs:discoverable": "", + "compute_extension:v3:os-keypairs": "", + "compute_extension:v3:os-keypairs:index": "", + "compute_extension:v3:os-keypairs:show": "", + "compute_extension:v3:os-keypairs:create": "", + "compute_extension:v3:os-keypairs:delete": "", + "compute_extension:v3:limits:discoverable": "", + "compute_extension:v3:os-lock-server:discoverable": "", + "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner", + "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner", + "compute_extension:v3:os-migrate-server:discoverable": "", + "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api", + "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api", + "compute_extension:multinic": "", + "compute_extension:v3:os-multinic": "", + "compute_extension:v3:os-multinic:discoverable": "", + "compute_extension:networks": "rule:admin_api", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "rule:admin_api", + "compute_extension:v3:os-pause-server:discoverable": "", + "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner", + "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner", + "compute_extension:v3:os-pci:pci_servers": "", + "compute_extension:v3:os-pci:discoverable": "", + "compute_extension:v3:os-pci:index": "rule:admin_api", + "compute_extension:v3:os-pci:detail": "rule:admin_api", + "compute_extension:v3:os-pci:show": "rule:admin_api", + "compute_extension:quotas:show": "", + "compute_extension:quotas:update": "rule:admin_api", + "compute_extension:quotas:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:discoverable": "", + "compute_extension:v3:os-quota-sets:show": "", + "compute_extension:v3:os-quota-sets:update": "rule:admin_api", + "compute_extension:v3:os-quota-sets:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:detail": "rule:admin_api", + "compute_extension:quota_classes": "", + "compute_extension:rescue": "", + 
"compute_extension:v3:os-rescue": "", + "compute_extension:v3:os-rescue:discoverable": "", + "compute_extension:v3:os-scheduler-hints:discoverable": "", + "compute_extension:security_group_default_rules": "rule:admin_api", + "compute_extension:security_groups": "", + "compute_extension:v3:os-security-groups": "", + "compute_extension:v3:os-security-groups:discoverable": "", + "compute_extension:server_diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics:discoverable": "", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:v3:os-server-password": "", + "compute_extension:v3:os-server-password:discoverable": "", + "compute_extension:server_usage": "", + "compute_extension:v3:os-server-usage": "", + "compute_extension:v3:os-server-usage:discoverable": "", + "compute_extension:v3:os-server-groups": "", + "compute_extension:v3:os-server-groups:discoverable": "", + "compute_extension:services": "rule:admin_api", + "compute_extension:v3:os-services": "rule:admin_api", + "compute_extension:v3:os-services:discoverable": "", + "compute_extension:v3:server-metadata:discoverable": "", + "compute_extension:v3:servers:discoverable": "", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "rule:admin_api", + "compute_extension:v3:os-shelve:shelve": "", + "compute_extension:v3:os-shelve:shelve:discoverable": "", + "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api", + "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", + "compute_extension::v3:os-simple-tenant-usage:discoverable": "", + "compute_extension::v3:os-simple-tenant-usage:show": "rule:admin_or_owner", + "compute_extension::v3:os-simple-tenant-usage:list": "rule:admin_api", + "compute_extension:v3:os-suspend-server:discoverable": "", + "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner", + "compute_extension:simple_tenant_usage:list": "rule:admin_api", + "compute_extension:unshelve": "", + "compute_extension:v3:os-shelve:unshelve": "", + "compute_extension:users": "rule:admin_api", + "compute_extension:v3:os-user-data:discoverable": "", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + "compute_extension:volume_attachments:delete": "", + "compute_extension:v3:os-volumes": "", + "compute_extension:v3:os-volumes:discoverable": "", + "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:v3:os-availability-zone:list": "", + "compute_extension:v3:os-availability-zone:discoverable": "", + "compute_extension:availability_zone:detail": "rule:admin_api", + "compute_extension:v3:os-availability-zone:detail": "rule:admin_api", + "compute_extension:used_limits_for_admin": "rule:admin_api", + "compute_extension:v3:os-used-limits": "rule:admin_api", + "compute_extension:v3:os-used-limits:discoverable": "", + "compute_extension:migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:discoverable": "", + 
"compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", + "compute_extension:console_auth_tokens": "rule:admin_api", + "compute_extension:v3:os-console-auth-tokens": "rule:admin_api", + "compute_extension:os-server-external-events:create": "rule:admin_api", + "compute_extension:v3:os-server-external-events:create": "rule:admin_api", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + "network:get_backdoor_port": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": "", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:deallocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "", + "network:attach_external_network": "rule:admin_api" +} diff --git a/MIT_CSAIL/compute/etc/nova/rootwrap.conf b/MIT_CSAIL/compute/etc/nova/rootwrap.conf new file mode 100644 index 0000000..aa466c5 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for nova-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... 
+# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR diff --git a/MIT_CSAIL/compute/etc/nova/rootwrap.d/compute.filters b/MIT_CSAIL/compute/etc/nova/rootwrap.d/compute.filters new file mode 100644 index 0000000..f4424ae --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/rootwrap.d/compute.filters @@ -0,0 +1,228 @@ +# nova-rootwrap command filters for compute nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/virt/disk/mount/api.py: 'kpartx', '-a', device +# nova/virt/disk/mount/api.py: 'kpartx', '-d', device +kpartx: CommandFilter, kpartx, root + +# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path +# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path +tune2fs: CommandFilter, tune2fs, root + +# nova/virt/disk/mount/api.py: 'mount', mapped_device +# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target +# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. +# nova/virt/configdrive.py: 'mount', device, mountdir +# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ... +mount: CommandFilter, mount, root + +# nova/virt/disk/mount/api.py: 'umount', mapped_device +# nova/virt/disk/api.py: 'umount' target +# nova/virt/xenapi/vm_utils.py: 'umount', dev_path +# nova/virt/configdrive.py: 'umount', mountdir +umount: CommandFilter, umount, root + +# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image +# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device +qemu-nbd: CommandFilter, qemu-nbd, root + +# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image +# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device +losetup: CommandFilter, losetup, root + +# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path +# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device +blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* + +# nova/virt/disk/vfs/localfs.py: 'tee', canonpath +tee: CommandFilter, tee, root + +# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath +mkdir: CommandFilter, mkdir, root + +# nova/virt/disk/vfs/localfs.py: 'chown' +# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log +# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log +# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') +chown: CommandFilter, chown, root + +# nova/virt/disk/vfs/localfs.py: 'chmod' +chmod: CommandFilter, chmod, root + +# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' +# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev +# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. +# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. +# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. +# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) +# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] +# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge +# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. +# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. +# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... 
+# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. +# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' +# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' +# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. +# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' +# nova/network/linux_net.py: 'ip', 'route', 'add', .. +# nova/network/linux_net.py: 'ip', 'route', 'del', . +# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev +ip: CommandFilter, ip, root + +# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev +# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev +tunctl: CommandFilter, tunctl, root + +# nova/virt/libvirt/vif.py: 'ovs-vsctl', ... +# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... +# nova/network/linux_net.py: 'ovs-vsctl', .... +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# nova/network/linux_net.py: 'ovs-ofctl', .... +ovs-ofctl: CommandFilter, ovs-ofctl, root + +# nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ... +dd: CommandFilter, dd, root + +# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... +iscsiadm: CommandFilter, iscsiadm, root + +# nova/virt/libvirt/volume.py: 'aoe-revalidate', aoedev +# nova/virt/libvirt/volume.py: 'aoe-discover' +aoe-revalidate: CommandFilter, aoe-revalidate, root +aoe-discover: CommandFilter, aoe-discover, root + +# nova/virt/xenapi/vm_utils.py: parted, --script, ... +# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. +parted: CommandFilter, parted, root + +# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path +pygrub: CommandFilter, pygrub, root + +# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s +fdisk: CommandFilter, fdisk, root + +# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path +# nova/virt/disk/api.py: e2fsck, -f, -p, image +e2fsck: CommandFilter, e2fsck, root + +# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path +# nova/virt/disk/api.py: resize2fs, image +resize2fs: CommandFilter, resize2fs, root + +# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... +iptables-save: CommandFilter, iptables-save, root +ip6tables-save: CommandFilter, ip6tables-save, root + +# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... +# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. +arping: CommandFilter, arping, root + +# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address +dhcp_release: CommandFilter, dhcp_release, root + +# nova/network/linux_net.py: 'kill', '-9', pid +# nova/network/linux_net.py: 'kill', '-HUP', pid +kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +# nova/network/linux_net.py: 'kill', pid +kill_radvd: KillFilter, root, /usr/sbin/radvd + +# nova/network/linux_net.py: dnsmasq call +dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq + +# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. 
+radvd: CommandFilter, radvd, root + +# nova/network/linux_net.py: 'brctl', 'addbr', bridge +# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 +# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' +# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface +brctl: CommandFilter, brctl, root + +# nova/virt/libvirt/utils.py: 'mkswap' +# nova/virt/xenapi/vm_utils.py: 'mkswap' +mkswap: CommandFilter, mkswap, root + +# nova/virt/xenapi/vm_utils.py: 'mkfs' +# nova/utils.py: 'mkfs', fs, path, label +mkfs: CommandFilter, mkfs, root + +# nova/virt/libvirt/utils.py: 'qemu-img' +qemu-img: CommandFilter, qemu-img, root + +# nova/virt/disk/vfs/localfs.py: 'readlink', '-e' +readlink: CommandFilter, readlink, root + +# nova/virt/disk/api.py: 'touch', target +touch: CommandFilter, touch, root + +# nova/virt/disk/api.py: +mkfs.ext3: CommandFilter, mkfs.ext3, root +mkfs.ntfs: CommandFilter, mkfs.ntfs, root + +# nova/virt/libvirt/connection.py: +read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi + +# nova/virt/libvirt/connection.py: +lvremove: CommandFilter, lvremove, root + +# nova/virt/libvirt/utils.py: +lvcreate: CommandFilter, lvcreate, root + +# nova/virt/libvirt/utils.py: +lvs: CommandFilter, lvs, root + +# nova/virt/libvirt/utils.py: +vgs: CommandFilter, vgs, root + +# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ... +tgtadm: CommandFilter, tgtadm, root + +# nova/utils.py:read_file_as_root: 'cat', file_path +# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file) +read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd +read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow + +# nova/virt/libvirt/volume.py: 'multipath' '-R' +multipath: CommandFilter, multipath, root + +# nova/virt/libvirt/utils.py: +systool: CommandFilter, systool, root + +# nova/virt/libvirt/volume.py: +sginfo: CommandFilter, sginfo, root +sg_scan: CommandFilter, sg_scan, root +ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.*, /dev/disk/by-path/ip-.*-iscsi-iqn.* + +# nova/volume/encryptors.py: +# nova/virt/libvirt/dmcrypt.py: +cryptsetup: CommandFilter, cryptsetup, root + +# nova/virt/xenapi/vm_utils.py: +xenstore-read: CommandFilter, xenstore-read, root + +# nova/virt/baremetal/tilera.py: 'rpc.mountd' +rpc.mountd: CommandFilter, rpc.mountd, root + +# nova/virt/libvirt/utils.py: +rbd: CommandFilter, rbd, root + +# nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path +shred: CommandFilter, shred, root + +# nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control.. 
+cp: CommandFilter, cp, root + +# nova/virt/xenapi/vm_utils.py: +sync: CommandFilter, sync, root + diff --git a/MIT_CSAIL/compute/etc/nova/secret.xml b/MIT_CSAIL/compute/etc/nova/secret.xml new file mode 100644 index 0000000..e0ae7d3 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/secret.xml @@ -0,0 +1,6 @@ +<secret ephemeral='no' private='no'> + <usage type='ceph'> + <name>/etc/ceph/ceph.client.openstack.keyring secret</name> + </usage> + <uuid>REDACTED</uuid> +</secret> diff --git a/MIT_CSAIL/compute/etc/nova/virsh.secret b/MIT_CSAIL/compute/etc/nova/virsh.secret new file mode 100644 index 0000000..e138e06 --- /dev/null +++ b/MIT_CSAIL/compute/etc/nova/virsh.secret @@ -0,0 +1 @@ + diff --git a/MIT_CSAIL/controller/etc/apache2/NON-OPENSTACK-SPECIFIC-BITS-REMOVED b/MIT_CSAIL/controller/etc/apache2/NON-OPENSTACK-SPECIFIC-BITS-REMOVED new file mode 100644 index 0000000..e69de29 diff --git a/MIT_CSAIL/controller/etc/apache2/conf.d/openstack-dashboard.conf b/MIT_CSAIL/controller/etc/apache2/conf.d/openstack-dashboard.conf new file mode 100644 index 0000000..e3a26c2 --- /dev/null +++ b/MIT_CSAIL/controller/etc/apache2/conf.d/openstack-dashboard.conf @@ -0,0 +1,10 @@ +# +WSGIScriptAlias /horizon /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi +WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10 +Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ +<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi> + Order allow,deny + Allow from all +</Directory> +# +RedirectMatch permanent ^/$ /horizon/ diff --git a/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_admin.conf b/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_admin.conf new file mode 100644 index 0000000..f736a54 --- /dev/null +++ b/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_admin.conf @@ -0,0 +1,46 @@ +# ************************************ +# Vhost template in module puppetlabs-apache +# Managed by Puppet +# ************************************ +Listen 35358 +<VirtualHost *:35358> + ServerName .csail.mit.edu + + ## Vhost docroot + DocumentRoot "/usr/share/pyshared/keystone/httpd/" + + + + ## Directories, there should at least be a declaration for /usr/lib/cgi-bin/keystone + + + <Directory "/usr/lib/cgi-bin/keystone"> + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Require all granted + </Directory> + + ## Load additional static includes + + + ## Logging + ErrorLog "/var/log/apache2/keystone_wsgi_admin_error_ssl.log" + ServerSignature Off + CustomLog "/var/log/apache2/keystone_wsgi_admin_access_ssl.log" combined + + ## SSL directives + SSLEngine on + SSLCACertificatePath ssl.crt/ + SSLCADNRequestFile ssl.crt/csail-client.crt + SSLCARevocationPath ssl.crl/ + SSLCertificateFile ssl.crt/.csail.mit.edu.crt + SSLCertificateKeyFile ssl.key/.csail.mit.edu.key + SSLCipherSuite HIGH,!ADH + SSLProtocol all -SSLv2 + # SSLUserName SSL_CLIENT_S_DN_Email + SSLVerifyDepth 3 + WSGIDaemonProcess keystone-admin-ssl group=keystone processes=16 threads=16 user=keystone + WSGIProcessGroup keystone-admin-ssl + WSGIChunkedRequest On + WSGIScriptAlias / "/usr/share/pyshared/keystone/httpd/admin" +</VirtualHost> diff --git a/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_main.conf b/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_main.conf new file mode 100644 index 0000000..44410f4 --- /dev/null +++ b/MIT_CSAIL/controller/etc/apache2/sites-enabled/25-keystone_wsgi_main.conf @@ -0,0 +1,49 @@ +# ************************************ +# Vhost template in module puppetlabs-apache +# Managed by Puppet +# ************************************ +Listen 5001 +<VirtualHost *:5001> + ServerName .csail.mit.edu + + ## Vhost docroot + DocumentRoot
"/usr/share/pyshared/keystone/httpd/" + + + + ## Directories, there should at least be a declaration for /usr/lib/cgi-bin/keystone + + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None + Require all granted + + + ## Load additional static includes + + + ## Logging + ErrorLog "/var/log/apache2/keystone_wsgi_main_error_ssl.log" + ServerSignature Off + CustomLog "/var/log/apache2/keystone_wsgi_main_access_ssl.log" combined + + + + + ## SSL directives + SSLEngine on + SSLCACertificatePath ssl.crt/ + SSLCADNRequestFile ssl.crt/csail-client.crt + SSLCARevocationPath ssl.crl/ + SSLCertificateFile ssl.crt/.csail.mit.edu.crt + SSLCertificateKeyFile ssl.key/.csail.mit.edu.key + SSLCipherSuite HIGH,!ADH + SSLProtocol all -SSLv2 + # SSLUserName SSL_CLIENT_S_DN_Email + SSLVerifyDepth 3 + + WSGIDaemonProcess keystone-main-ssl group=keystone processes=16 threads=16 user=keystone + WSGIProcessGroup keystone-main-ssl + WSGIScriptAlias / "/usr/share/pyshared/keystone/httpd//main" + diff --git a/MIT_CSAIL/controller/etc/cinder/api-paste.ini b/MIT_CSAIL/controller/etc/cinder/api-paste.ini new file mode 100644 index 0000000..572313b --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/api-paste.ini @@ -0,0 +1,64 @@ +############# +# OpenStack # +############# + +[composite:osapi_volume] +use = call:cinder.api:root_app_factory +/: apiversions +/v1: openstack_volume_api_v1 +/v2: openstack_volume_api_v2 + +[composite:openstack_volume_api_v1] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = faultwrap sizelimit noauth apiv1 +keystone = faultwrap sizelimit authtoken keystonecontext apiv1 +keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1 + +[composite:openstack_volume_api_v2] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = faultwrap sizelimit noauth apiv2 +keystone = faultwrap sizelimit authtoken keystonecontext apiv2 +keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2 + +[filter:faultwrap] +paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory + +[app:apiv1] +paste.app_factory = cinder.api.v1.router:APIRouter.factory + +[app:apiv2] +paste.app_factory = cinder.api.v2.router:APIRouter.factory + +[pipeline:apiversions] +pipeline = faultwrap osvolumeversionapp + +[app:osvolumeversionapp] +paste.app_factory = cinder.api.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +service_protocol = http +service_host = +service_port = 5000 +auth_host = +auth_port = 35357 +auth_version = v2.0 +auth_protocol = http +admin_tenant_name = services +admin_user = cinder +admin_password = +signing_dir = /var/lib/cinder +auth_uri=http://:5000/v2.0/ diff --git a/MIT_CSAIL/controller/etc/cinder/api-paste.ini.dpkg-dist b/MIT_CSAIL/controller/etc/cinder/api-paste.ini.dpkg-dist new file mode 100644 index 0000000..ba922d5 --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/api-paste.ini.dpkg-dist @@ -0,0 +1,60 @@ +############# +# OpenStack # +############# + +[composite:osapi_volume] +use = call:cinder.api:root_app_factory +/: apiversions +/v1: openstack_volume_api_v1 +/v2: openstack_volume_api_v2 + 
+[composite:openstack_volume_api_v1] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv1 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 +keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 + +[composite:openstack_volume_api_v2] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv2 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 +keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 + +[filter:request_id] +paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes + +[filter:noauth] +paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory + +[app:apiv1] +paste.app_factory = cinder.api.v1.router:APIRouter.factory + +[app:apiv2] +paste.app_factory = cinder.api.v2.router:APIRouter.factory + +[pipeline:apiversions] +pipeline = faultwrap osvolumeversionapp + +[app:osvolumeversionapp] +paste.app_factory = cinder.api.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/MIT_CSAIL/controller/etc/cinder/cinder.conf b/MIT_CSAIL/controller/etc/cinder/cinder.conf new file mode 100644 index 0000000..93c0b4b --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/cinder.conf @@ -0,0 +1,68 @@ +[DEFAULT] +debug = True +verbose = True +rootwrap_config = /etc/cinder/rootwrap.conf +api_paste_confg = /etc/cinder/api-paste.ini +iscsi_helper = tgtadm +volume_name_template = volume-%s +volume_group = nova-volumes +volume_clear = none +auth_strategy = keystone +state_path = /var/lib/cinder +volumes_dir = /var/lib/cinder/volumes +rabbit_port=5672 +rabbit_virtual_host=/ +rabbit_password= +rabbit_userid= +rabbit_host= +osapi_volume_extension = cinder.api.contrib.standard_extensions +api_paste_config=/etc/cinder/api-paste.ini +osapi_volume_listen=0.0.0.0 +sql_idle_timeout=3600 +scheduler_driver=cinder.scheduler.simple.SimpleScheduler +#rabbit_hosts=:5672 +rabbit_ha_queues=False +rpc_backend=cinder.openstack.common.rpc.impl_kombu +use_syslog=False +control_exchange=openstack +default_volume_type=production +enabled_backends=ganymede,rbd +glance_api_version=2 +[ganymede] +#EQLX bits: +volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver +san_ip= +san_login=cinder +san_password= +san_thin_provision=false +#san_ssh_port=22 +# #eqlx_group_name parameter actually represents the CLI prompt message +# #without '>' ending. E.g. 
if prompt looks like 'group-0>', then the +# #parameter must be set to 'group-0' +eqlx_group_name=ganymede +eqlx_pool=Nimbus +#Seconds to wait before sending a keepalive packet +#eqlx_ssh_keepalive_interval=1200 +#Timeout for the Group Manager cli command execution +#eqlx_cli_timeout=30 +#Maximum retry count for reconnection +#eqlx_cli_max_retries=5 +#Seconds to sleep before the next reconnection retry +#eqlx_cli_retries_timeout=30 +#Use CHAP authentication for targets +#eqlx_use_chap=false +#eqlx_chap_login=admin +#eqlx_chap_password=password +#eqlx_verbose_ssh=false +volume_backend_name=eqlx +[rbd] +volume_driver=cinder.volume.drivers.rbd.RBDDriver +rbd_pool=volumes +rbd_ceph_conf=/etc/ceph/ceph.conf +rbd_flatten_volume_from_snapshot=false +rbd_max_clone_depth=5 +rbd_user=openstack +rbd_secret_uuid= +volume_backend_name=rbd +[database] +connection = mysql://cinder:@/cinder?charset=utf8 diff --git a/MIT_CSAIL/controller/etc/cinder/cinder.conf.dpkg-dist b/MIT_CSAIL/controller/etc/cinder/cinder.conf.dpkg-dist new file mode 100644 index 0000000..7094033 --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/cinder.conf.dpkg-dist @@ -0,0 +1,11 @@ +[DEFAULT] +rootwrap_config = /etc/cinder/rootwrap.conf +api_paste_confg = /etc/cinder/api-paste.ini +iscsi_helper = tgtadm +volume_name_template = volume-%s +volume_group = cinder-volumes +verbose = True +auth_strategy = keystone +state_path = /var/lib/cinder +lock_path = /var/lock/cinder +volumes_dir = /var/lib/cinder/volumes diff --git a/MIT_CSAIL/controller/etc/cinder/logging.conf b/MIT_CSAIL/controller/etc/cinder/logging.conf new file mode 100644 index 0000000..476425b --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/logging.conf @@ -0,0 +1,76 @@ +[loggers] +keys = root, cinder + +[handlers] +keys = stderr, stdout, watchedfile, syslog, null + +[formatters] +keys = legacycinder, default + +[logger_root] +level = WARNING +handlers = null + +[logger_cinder] +level = INFO +handlers = stderr +qualname = cinder + +[logger_amqplib] +level = WARNING +handlers = stderr +qualname = amqplib + +[logger_sqlalchemy] +level = WARNING +handlers = stderr +qualname = sqlalchemy +# "level = INFO" logs SQL queries. +# "level = DEBUG" logs SQL queries and results. +# "level = WARNING" logs neither. (Recommended for production systems.)
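+# Illustrative example (not in the shipped file): to actually capture SQL queries, the sqlalchemy logger must also be listed in [loggers] above, since fileConfig ignores sections absent from that list: +# [loggers] +# keys = root, cinder, sqlalchemy +# [logger_sqlalchemy] +# level = INFO +# handlers = stderr +# qualname = sqlalchemy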
+ +[logger_boto] +level = WARNING +handlers = stderr +qualname = boto + +[logger_suds] +level = INFO +handlers = stderr +qualname = suds + +[logger_eventletwsgi] +level = WARNING +handlers = stderr +qualname = eventlet.wsgi.server + +[handler_stderr] +class = StreamHandler +args = (sys.stderr,) +formatter = legacycinder + +[handler_stdout] +class = StreamHandler +args = (sys.stdout,) +formatter = legacycinder + +[handler_watchedfile] +class = handlers.WatchedFileHandler +args = ('cinder.log',) +formatter = legacycinder + +[handler_syslog] +class = handlers.SysLogHandler +args = ('/dev/log', handlers.SysLogHandler.LOG_USER) +formatter = legacycinder + +[handler_null] +class = cinder.log.NullHandler +formatter = default +args = () + +[formatter_legacycinder] +class = cinder.log.LegacyCinderFormatter + +[formatter_default] +format = %(message)s diff --git a/MIT_CSAIL/controller/etc/cinder/policy.json b/MIT_CSAIL/controller/etc/cinder/policy.json new file mode 100644 index 0000000..8f3a7b2 --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/policy.json @@ -0,0 +1,80 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "admin_api": "is_admin:True", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_volume_admin_metadata": "rule:admin_api", + "volume:delete_volume_admin_metadata": "rule:admin_api", + "volume:update_volume_admin_metadata": "rule:admin_api", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + "volume:extend": "", + "volume:update_readonly_flag": "", + "volume:retype": "", + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_type_encryption": "rule:admin_api", + "volume_extension:volume_encryption_metadata": "rule:admin_or_owner", + "volume_extension:extended_snapshot_attributes": "", + "volume_extension:volume_image_metadata": "", + + "volume_extension:quotas:show": "", + "volume_extension:quotas:update": "rule:admin_api", + "volume_extension:quota_classes": "", + + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:backup_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:force_detach": "rule:admin_api", + "volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api", + "volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api", + + "volume_extension:volume_host_attribute": "rule:admin_api", + "volume_extension:volume_tenant_attribute": "rule:admin_or_owner", + "volume_extension:volume_mig_status_attribute": "rule:admin_api", + "volume_extension:hosts": "rule:admin_api", + "volume_extension:services": "rule:admin_api", + + "volume_extension:volume_manage": "rule:admin_api", + "volume_extension:volume_unmanage": "rule:admin_api", + + "volume:services": "rule:admin_api", + + "volume:create_transfer": "", + "volume:accept_transfer": "", + "volume:delete_transfer": "", + "volume:get_all_transfers": "", + + "volume_extension:replication:promote": "rule:admin_api", + "volume_extension:replication:reenable": "rule:admin_api", + + "backup:create" : "", + "backup:delete": "", + "backup:get": "", + "backup:get_all": 
"", + "backup:restore": "", + "backup:backup-import": "rule:admin_api", + "backup:backup-export": "rule:admin_api", + + "snapshot_extension:snapshot_actions:update_snapshot_status": "", + + "consistencygroup:create" : "group:nobody", + "consistencygroup:delete": "group:nobody", + "consistencygroup:get": "group:nobody", + "consistencygroup:get_all": "group:nobody", + + "consistencygroup:create_cgsnapshot" : "", + "consistencygroup:delete_cgsnapshot": "", + "consistencygroup:get_cgsnapshot": "", + "consistencygroup:get_all_cgsnapshots": "", + + "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api" +} diff --git a/MIT_CSAIL/controller/etc/cinder/rootwrap.conf b/MIT_CSAIL/controller/etc/cinder/rootwrap.conf new file mode 100644 index 0000000..001b90a --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for cinder-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR diff --git a/MIT_CSAIL/controller/etc/cinder/rootwrap.d/volume.filters b/MIT_CSAIL/controller/etc/cinder/rootwrap.d/volume.filters new file mode 100644 index 0000000..2d23743 --- /dev/null +++ b/MIT_CSAIL/controller/etc/cinder/rootwrap.d/volume.filters @@ -0,0 +1,157 @@ +# cinder-rootwrap command filters for volume nodes +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# cinder/volume/iscsi.py: iscsi_helper '--op' ... +ietadm: CommandFilter, ietadm, root +tgtadm: CommandFilter, tgtadm, root +tgt-admin: CommandFilter, tgt-admin, root +cinder-rtstool: CommandFilter, cinder-rtstool, root + +# LVM related show commands +pvs: EnvFilter, env, root, LC_ALL=C, pvs +vgs: EnvFilter, env, root, LC_ALL=C, vgs +lvs: EnvFilter, env, root, LC_ALL=C, lvs +lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay + +# cinder/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. +# cinder/volume/driver.py: 'lvcreate', '-L', ... +lvcreate: CommandFilter, lvcreate, root + +# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... +dd: CommandFilter, dd, root + +# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... +lvremove: CommandFilter, lvremove, root + +# cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'... +lvrename: CommandFilter, lvrename, root + +# cinder/volume/driver.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... +lvextend: CommandFilter, lvextend, root + +# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' +lvchange: CommandFilter, lvchange, root + +# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... +# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... 
+iscsiadm: CommandFilter, iscsiadm, root + +# cinder/volume/drivers/lvm.py: 'shred', '-n3' +# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB' +shred: CommandFilter, shred, root + +# cinder/volume/utils.py: utils.temporary_chown(path, 0) +chown: CommandFilter, chown, root + +# cinder/volume/utils.py: copy_volume(..., ionice='...') +ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] +ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] + +# cinder/volume/utils.py: setup_blkio_cgroup() +cgcreate: CommandFilter, cgcreate, root +cgset: CommandFilter, cgset, root +cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ + +# cinder/volume/driver.py +dmsetup: CommandFilter, dmsetup, root +ln: CommandFilter, ln, root + +# cinder/image/image_utils.py +qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img +qemu-img_convert: CommandFilter, qemu-img, root + +udevadm: CommandFilter, udevadm, root + +# cinder/volume/driver.py: utils.read_file_as_root() +cat: CommandFilter, cat, root + +# cinder/volume/nfs.py +stat: CommandFilter, stat, root +mount: CommandFilter, mount, root +df: CommandFilter, df, root +du: CommandFilter, du, root +truncate: CommandFilter, truncate, root +chmod: CommandFilter, chmod, root +rm: CommandFilter, rm, root + +# cinder/volume/drivers/netapp/nfs.py: +netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ + +# cinder/volume/drivers/glusterfs.py +chgrp: CommandFilter, chgrp, root +umount: CommandFilter, umount, root + +# cinder/volumes/drivers/hds/hds.py: +hus-cmd: CommandFilter, hus-cmd, root +hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root + +# cinder/volumes/drivers/hds/hnas_backend.py +ssc: CommandFilter, ssc, root + +# cinder/brick/initiator/connector.py: +ls: CommandFilter, ls, root +tee: CommandFilter, tee, root +multipath: CommandFilter, multipath, root +systool: CommandFilter, systool, root + +# cinder/volume/drivers/block_device.py +blockdev: CommandFilter, blockdev, root + +# cinder/volume/drivers/ibm/gpfs.py +mv: CommandFilter, mv, root +mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root +mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root +mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root +mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root +mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root +mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root +mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root +mkfs: CommandFilter, mkfs, root + +# cinder/volume/drivers/ibm/gpfs.py +# cinder/volume/drivers/ibm/ibmnas.py +find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -inum, \d+ + +# cinder/brick/initiator/connector.py: +aoe-revalidate: CommandFilter, aoe-revalidate, root +aoe-discover: CommandFilter, aoe-discover, root +aoe-flush: CommandFilter, aoe-flush, root + +# cinder/brick/initiator/linuxscsi.py: +sg_scan: CommandFilter, sg_scan, root + +#cinder/backup/services/tsm.py +dsmc:CommandFilter,/usr/bin/dsmc,root + +# cinder/volume/drivers/hitachi/hbsd_horcm.py +raidqry: CommandFilter, raidqry, root +raidcom: CommandFilter, raidcom, root +pairsplit: CommandFilter, pairsplit, root +paircreate: CommandFilter, paircreate, root +pairdisplay: CommandFilter, pairdisplay, root +pairevtwait: CommandFilter, pairevtwait, root +horcmstart.sh: CommandFilter, horcmstart.sh, root +horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root +horcmgr: EnvFilter, env, root, HORCMINST=, 
/etc/horcmgr + +# cinder/volume/drivers/hitachi/hbsd_snm2.py +auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman +auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref +auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef +aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1 +auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn +auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap +autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap +aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol +auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd +auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel +auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize +auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser +autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef +autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt +autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini +auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi +audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool +aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal +aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon diff --git a/MIT_CSAIL/controller/etc/glance/glance-api-paste.ini b/MIT_CSAIL/controller/etc/glance/glance-api-paste.ini new file mode 100644 index 0000000..90102d2 --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-api-paste.ini @@ -0,0 +1,66 @@ +# Use this pipeline for no auth or image caching - DEFAULT +[pipeline:glance-api] +pipeline = versionnegotiation unauthenticated-context rootapp + +# Use this pipeline for image caching and no auth +[pipeline:glance-api-caching] +pipeline = versionnegotiation unauthenticated-context cache rootapp + +# Use this pipeline for caching w/ management interface but no auth +[pipeline:glance-api-cachemanagement] +pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp + +# Use this pipeline for keystone auth +[pipeline:glance-api-keystone] +pipeline = versionnegotiation authtoken context rootapp + +# Use this pipeline for keystone auth with image caching +[pipeline:glance-api-keystone+caching] +pipeline = 
versionnegotiation authtoken context cache rootapp + +# Use this pipeline for keystone auth with caching and cache management +[pipeline:glance-api-keystone+cachemanagement] +pipeline = versionnegotiation authtoken context cache cachemanage rootapp + +[composite:rootapp] +paste.composite_factory = glance.api:root_app_factory +/: apiversions +/v1: apiv1app +/v2: apiv2app + +[app:apiversions] +paste.app_factory = glance.api.versions:create_resource + +[app:apiv1app] +paste.app_factory = glance.api.v1.router:API.factory + +[app:apiv2app] +paste.app_factory = glance.api.v2.router:API.factory + +[filter:versionnegotiation] +paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory + +[filter:cache] +paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory + +[filter:cachemanage] +paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory + +[filter:context] +paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory + +[filter:unauthenticated-context] +paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +delay_auth_decision = true +auth_host = +auth_port = 35357 +auth_protocol = http +auth_uri = http://:5000/ +admin_tenant_name = services +admin_user = glance +admin_password = +signing_dirname = /tmp/keystone-signing-glance-api +#auth_version = v2.0 \ No newline at end of file diff --git a/MIT_CSAIL/controller/etc/glance/glance-api.conf b/MIT_CSAIL/controller/etc/glance/glance-api.conf new file mode 100644 index 0000000..6c9587d --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-api.conf @@ -0,0 +1,361 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose = False +#verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False +debug = True + +# Maximum image size (in bytes) that may be uploaded through the +# Glance API server. Defaults to 1 TB. +# WARNING: this value should only be increased after careful consideration +# and must be set to a value under 8 EB (9223372036854775808). +#image_size_cap = 1099511627776 + +# Address to bind the API server +bind_host = 0.0.0.0 + +# Port to bind the API server to +bind_port = 9292 + +# Log to this file. Make sure you do not set the same log +# file for both the API and registry servers! +log_file = /var/log/glance/api.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# Number of Glance API worker processes to start. +# On machines with more than one CPU increasing this value +# may improve performance (especially if using SSL with +# compression turned on). It is typically recommended to set +# this value to the number of CPUs present on your machine. +workers = 24 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access = False + +# Allow access to version 1 of glance api +#enable_v1_api = True + +# Allow access to version 2 of glance api +#enable_v2_api = True + +# Return the URL that references where the data is stored on +# the backend storage system.
For example, if using the +# file system store, a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. +# The default value is false. +#show_image_direct_url = False +show_image_direct_url=True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False +use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host = 127.0.0.1 + +# Port the registry server is listening on +registry_port = 9191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +registry_client_protocol = http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file = /path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file = /path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file = /path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure = False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout = 600 + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# ============ Notification System Options ===================== + +# Notifications can be sent when images are created, updated or deleted. +# There are several methods of sending notifications: logging (via the +# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid +# message queue), or noop (no notifications sent, the default) +notifier_strategy = noop + +# glance now uses the message queue for more than just logging; +# this defines the general rpc backend +rpc_backend=rabbit +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +rabbit_host= +rabbit_port=5672 +rabbit_password= +rabbit_userid= + +rabbit_use_ssl = false +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +qpid_notification_exchange = glance +qpid_notification_topic = notifications +qpid_host = localhost +qpid_port = 5672 +qpid_username = +qpid_password = +qpid_sasl_mechanisms = +qpid_reconnect_timeout = 0 +qpid_reconnect_limit = 0 +qpid_reconnect_interval_min = 0 +qpid_reconnect_interval_max = 0 +qpid_reconnect_interval = 0 +qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +qpid_protocol = tcp +qpid_tcp_nodelay = True + + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +delayed_delete = False + +# Delayed delete time in seconds +scrub_time = 43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +scrubber_datadir = /var/lib/glance/scrubber + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +image_cache_dir = /var/lib/glance/image-cache/ + + +[glance_store] + +default_store = rbd +stores = glance.store.filesystem.Store,glance.store.rbd.Store + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? 
This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = 127.0.0.1:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir = /path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format = subdomain + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +rbd_store_ceph_conf = /etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +rbd_store_user = openstack + +# RADOS pool in which images are stored +rbd_store_pool = images + +# Images will be chunked into objects of this size (in megabytes). 
+# For best performance, this should be a power of two +rbd_store_chunk_size = 8 + +[keystone_authtoken] +auth_host = +auth_port = 35358 +auth_protocol = https +admin_tenant_name = services +admin_user = glance +admin_password = +#signing_dirname = /tmp/keystone-signing-glance-registry +auth_version = v2.0 +auth_uri=https://:5001/v2.0/ + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +config_file = /etc/glance/glance-api-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +flavor=keystone+cachemanagement + +[database] + +# SQLAlchemy connection string for the reference implementation +# registry server. Any valid SQLAlchemy connection string is fine. +# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine +connection = mysql://glance:@/glance + +# Period in seconds after which SQLAlchemy should reestablish its connection +# to the database. +# +# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop +# idle connections. This can result in 'MySQL Gone Away' exceptions. If you +# notice this, you can lower this value to ensure that SQLAlchemy reconnects +# before MySQL can drop the connection. +sql_idle_timeout = 3600 diff --git a/MIT_CSAIL/controller/etc/glance/glance-cache-paste.ini b/MIT_CSAIL/controller/etc/glance/glance-cache-paste.ini new file mode 100644 index 0000000..35ab371 --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-cache-paste.ini @@ -0,0 +1,15 @@ +[app:glance-pruner] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.image_cache.pruner:Pruner + +[app:glance-prefetcher] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.image_cache.prefetcher:Prefetcher + +[app:glance-cleaner] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.image_cache.cleaner:Cleaner + +[app:glance-queue-image] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.image_cache.queue_image:Queuer diff --git a/MIT_CSAIL/controller/etc/glance/glance-cache.conf b/MIT_CSAIL/controller/etc/glance/glance-cache.conf new file mode 100644 index 0000000..8ca3bcd --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-cache.conf @@ -0,0 +1,148 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False +verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False +debug = False + +log_file = /var/log/glance/image-cache.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Directory that the Image Cache writes data to +image_cache_dir = /var/lib/glance/image-cache/ + +# Number of seconds after which we should consider an incomplete image to be +# stalled and eligible for reaping +image_cache_stall_time = 86400 + +# image_cache_invalid_entry_grace_period - seconds +# +# If an exception is raised as we're writing to the cache, the cache-entry is +# deemed invalid and moved to <image_cache_dir>/invalid so that it can be +# inspected for debugging purposes. +# +# This is the number of seconds to leave these invalid images around before they +# are eligible to be reaped.
+image_cache_invalid_entry_grace_period = 3600 + +# Max cache size in bytes +image_cache_max_size = 10737418240 + +# Address to find the registry server +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +auth_url = http://localhost:5000/v2.0 +# admin_tenant_name = %SERVICE_TENANT_NAME% +admin_tenant_name = services +# admin_user = %SERVICE_USER% +admin_user = glance +# admin_password = %SERVICE_PASSWORD% +admin_password = + +# List of which store classes and store class locations are +# currently known to glance at startup. +# known_stores = glance.store.filesystem.Store, +# glance.store.http.Store, +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = 127.0.0.1:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. 
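+# (Here "container" means an S3 bucket.)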
Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +# s3_store_object_buffer_dir = /path/to/dir + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +# metadata_encryption_key = <16, 24 or 32 char registry metadata key> diff --git a/MIT_CSAIL/controller/etc/glance/glance-registry-paste.ini b/MIT_CSAIL/controller/etc/glance/glance-registry-paste.ini new file mode 100644 index 0000000..5851d46 --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-registry-paste.ini @@ -0,0 +1,28 @@ +# Use this pipeline for no auth - DEFAULT +[pipeline:glance-registry] +pipeline = unauthenticated-context registryapp + +# Use this pipeline for keystone auth +[pipeline:glance-registry-keystone] +pipeline = authtoken context registryapp + +[app:registryapp] +paste.app_factory = glance.registry.api.v1:API.factory + +[filter:context] +paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory + +[filter:unauthenticated-context] +paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +auth_host = +auth_port = 35357 +auth_protocol = http +auth_uri = http://:5000/ +admin_tenant_name = services +admin_user = glance +admin_password = +signing_dirname = /tmp/keystone-signing-glance-registry +#auth_version = v2.0 \ No newline at end of file diff --git a/MIT_CSAIL/controller/etc/glance/glance-registry.conf b/MIT_CSAIL/controller/etc/glance/glance-registry.conf new file mode 100644 index 0000000..15a8d40 --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-registry.conf @@ -0,0 +1,98 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +debug = False +#debug = True + +# Address to bind the registry server +bind_host = 0.0.0.0 + +# Port to bind the registry server to +bind_port = 9191 + +# Log to this file. Make sure you do not set the same log +# file for both the API and registry servers! +log_file = /var/log/glance/registry.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# Limit the api to return `api_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. +api_limit_max = 1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +limit_param_default = 25 + +# Role used to identify an authenticated user as administrator +admin_role = admin + +# Whether to automatically create the database tables.
+# Default: False +#db_auto_create = False + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False +use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting registry server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +[keystone_authtoken] +auth_host = +auth_port = 35358 +auth_protocol = https +admin_tenant_name = services +admin_user = glance +admin_password = +#signing_dirname = /tmp/keystone-signing-glance-registry +auth_version = v2.0 +auth_uri=https://:5001/v2.0/ + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +config_file = /etc/glance/glance-registry-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +flavor = keystone + +[database] +# SQLAlchemy connection string for the reference implementation +# registry server. Any valid SQLAlchemy connection string is fine. +# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine +connection = mysql://glance:@/glance + +# Period in seconds after which SQLAlchemy should reestablish its connection +# to the database. +# +# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop +# idle connections. This can result in 'MySQL Gone Away' exceptions. If you +# notice this, you can lower this value to ensure that SQLAlchemy reconnects +# before MySQL can drop the connection. +sql_idle_timeout = 3600 + diff --git a/MIT_CSAIL/controller/etc/glance/glance-scrubber-paste.ini b/MIT_CSAIL/controller/etc/glance/glance-scrubber-paste.ini new file mode 100644 index 0000000..ac342f8 --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-scrubber-paste.ini @@ -0,0 +1,3 @@ +[app:glance-scrubber] +paste.app_factory = glance.common.wsgi:app_factory +glance.app_factory = glance.store.scrubber:Scrubber diff --git a/MIT_CSAIL/controller/etc/glance/glance-scrubber.conf b/MIT_CSAIL/controller/etc/glance/glance-scrubber.conf new file mode 100644 index 0000000..ecfc7bf --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/glance-scrubber.conf @@ -0,0 +1,108 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. 
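+# Setting it explicitly, as here, keeps the scrubber's log in its own file: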
+log_file = /var/log/glance/scrubber.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Should we run our own loop or rely on cron/scheduler to run us +daemon = False + +# Loop time between checking for new items to schedule for delete +wakeup_time = 300 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-api.conf +scrubber_datadir = /var/lib/glance/scrubber + +# Only one server in your deployment should be designated the cleanup host +cleanup_scrubber = False + +# pending_delete items older than this time are candidates for cleanup +cleanup_scrubber_time = 86400 + +# Address to find the registry server for cleanups +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +#data_api = glance.db.sqlalchemy.api + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ================= Database Options ===============+========== + +[database] + +# The SQLAlchemy connection string used to connect to the +# database (string value) +#connection=sqlite:////glance/openstack/common/db/$sqlite_db + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + +# timeout before idle sql connections are reaped (integer +# value) +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +#max_pool_size= + +# maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +#max_retries=10 + +# interval between retries of opening a sql connection +# (integer value) +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +#max_overflow= + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +#pool_timeout= diff --git a/MIT_CSAIL/controller/etc/glance/glance.sqlite b/MIT_CSAIL/controller/etc/glance/glance.sqlite new file mode 100644 index 0000000..e69de29 diff --git a/MIT_CSAIL/controller/etc/glance/policy.json b/MIT_CSAIL/controller/etc/glance/policy.json new file mode 100644 index 0000000..325f00b --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/policy.json @@ -0,0 +1,52 @@ +{ + "context_is_admin": "role:admin", + "default": "", + + "add_image": "", + "delete_image": "", + "get_image": "", + "get_images": "", + "modify_image": "", + "publicize_image": "role:admin", + "copy_from": "", + + "download_image": "", + "upload_image": "", + + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + + "add_member": "", + "delete_member": "", + "get_member": "", + "get_members": "", + "modify_member": "", + + "manage_image_cache": "role:admin", + + "get_task": "", + "get_tasks": "", + "add_task": "", + "modify_task": "", + + "get_metadef_namespace": "", + "get_metadef_namespaces":"", + "modify_metadef_namespace":"", + "add_metadef_namespace":"", + + "get_metadef_object":"", + "get_metadef_objects":"", + "modify_metadef_object":"", + "add_metadef_object":"", + + "list_metadef_resource_types":"", + "get_metadef_resource_type":"", + "add_metadef_resource_type_association":"", + + "get_metadef_property":"", + "get_metadef_properties":"", + "modify_metadef_property":"", + "add_metadef_property":"" + +} diff --git a/MIT_CSAIL/controller/etc/glance/schema-image.json b/MIT_CSAIL/controller/etc/glance/schema-image.json new file mode 100644 index 0000000..5aafd6b --- /dev/null +++ b/MIT_CSAIL/controller/etc/glance/schema-image.json @@ -0,0 +1,28 @@ +{ + "kernel_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." + }, + "ramdisk_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." + }, + "instance_uuid": { + "type": "string", + "description": "ID of instance used to create this image." + }, + "architecture": { + "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_distro": { + "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_version": { + "description": "Operating system version as specified by the distributor", + "type": "string" + } +} diff --git a/MIT_CSAIL/controller/etc/heat/api-paste.ini b/MIT_CSAIL/controller/etc/heat/api-paste.ini new file mode 100644 index 0000000..c82dd75 --- /dev/null +++ b/MIT_CSAIL/controller/etc/heat/api-paste.ini @@ -0,0 +1,94 @@ + +# heat-api pipeline +[pipeline:heat-api] +pipeline = faultwrap ssl versionnegotiation authurl authtoken context apiv1app + +# heat-api pipeline for standalone heat +# ie. 
uses alternative auth backend that authenticates users against keystone +# using username and password instead of validating token (which requires +# an admin/service token). +# To enable, in heat.conf: +# [paste_deploy] +# flavor = standalone +# +[pipeline:heat-api-standalone] +pipeline = faultwrap ssl versionnegotiation authurl authpassword context apiv1app + +# heat-api pipeline for custom cloud backends +# i.e. in heat.conf: +# [paste_deploy] +# flavor = custombackend +# +[pipeline:heat-api-custombackend] +pipeline = faultwrap versionnegotiation context custombackendauth apiv1app + +# heat-api-cfn pipeline +[pipeline:heat-api-cfn] +pipeline = cfnversionnegotiation ec2authtoken authtoken context apicfnv1app + +# heat-api-cfn pipeline for standalone heat +# relies exclusively on authenticating with ec2 signed requests +[pipeline:heat-api-cfn-standalone] +pipeline = cfnversionnegotiation ec2authtoken context apicfnv1app + +# heat-api-cloudwatch pipeline +[pipeline:heat-api-cloudwatch] +pipeline = versionnegotiation ec2authtoken authtoken context apicwapp + +# heat-api-cloudwatch pipeline for standalone heat +# relies exclusively on authenticating with ec2 signed requests +[pipeline:heat-api-cloudwatch-standalone] +pipeline = versionnegotiation ec2authtoken context apicwapp + +[app:apiv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.openstack.v1:API + +[app:apicfnv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.cfn.v1:API + +[app:apicwapp] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.cloudwatch:API + +[filter:versionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:version_negotiation_filter + +[filter:faultwrap] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:faultwrap_filter + +[filter:cfnversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.cfn:version_negotiation_filter + +[filter:cwversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter + +[filter:context] +paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory + +[filter:ec2authtoken] +paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory + +[filter:ssl] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:sslmiddleware_filter + +# Middleware to set auth_url header appropriately +[filter:authurl] +paste.filter_factory = heat.common.auth_url:filter_factory + +# Auth middleware that validates token against keystone +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +# Auth middleware that validates username/password against keystone +[filter:authpassword] +paste.filter_factory = heat.common.auth_password:filter_factory + +# Auth middleware that validates against custom backend +[filter:custombackendauth] +paste.filter_factory = heat.common.custom_backend_auth:filter_factory diff --git a/MIT_CSAIL/controller/etc/heat/environment.d/default.yaml b/MIT_CSAIL/controller/etc/heat/environment.d/default.yaml new file mode 100644 index 0000000..143ee48 --- /dev/null +++ b/MIT_CSAIL/controller/etc/heat/environment.d/default.yaml @@ -0,0 +1,9 @@ + +resource_registry: + # allow older templates with Quantum in them. 
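+ # (e.g. "OS::Quantum::Net" in a template resolves to "OS::Neutron::Net")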
+ "OS::Quantum*": "OS::Neutron*" + # Choose your implementation of AWS::CloudWatch::Alarm + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml" + #"AWS::CloudWatch::Alarm": "OS::Heat::CWLiteAlarm" + "OS::Metering::Alarm": "OS::Ceilometer::Alarm" + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml" diff --git a/MIT_CSAIL/controller/etc/heat/heat.conf b/MIT_CSAIL/controller/etc/heat/heat.conf new file mode 100644 index 0000000..7c6e5f0 --- /dev/null +++ b/MIT_CSAIL/controller/etc/heat/heat.conf @@ -0,0 +1,1241 @@ +[DEFAULT] + +# +# Options defined in heat.api.middleware.ssl +# + +# The HTTP Header that will be used to determine which the +# original request protocol scheme was, even if it was removed +# by an SSL terminator proxy. (string value) +#secure_proxy_ssl_header=X-Forwarded-Proto + + +# +# Options defined in heat.common.config +# + +# The default user for new instances. This option is +# deprecated and will be removed in the Juno release. If it's +# empty, Heat will use the default user set up with your cloud +# image (for OS::Nova::Server) or 'ec2-user' (for +# AWS::EC2::Instance). (string value) +#instance_user=ec2-user + +# Driver to use for controlling instances. (string value) +#instance_driver=heat.engine.nova + +# List of directories to search for plug-ins. (list value) +#plugin_dirs=/usr/lib64/heat,/usr/lib/heat + +# The directory to search for environment files. (string +# value) +#environment_dir=/etc/heat/environment.d + +# Select deferred auth method, stored password or trusts. +# (string value) +#deferred_auth_method=password + +# Subset of trustor roles to be delegated to heat. (list +# value) +#trusts_delegated_roles=heat_stack_owner + +# Maximum resources allowed per top-level stack. (integer +# value) +#max_resources_per_stack=1000 + +# Maximum number of stacks any one tenant may have active at +# one time. (integer value) +#max_stacks_per_tenant=100 + +# Controls how many events will be pruned whenever a stack's +# events exceed max_events_per_stack. Set this lower to keep +# more events at the expense of more frequent purges. (integer +# value) +#event_purge_batch_size=10 + +# Maximum events that will be available per stack. Older +# events will be deleted when this is reached. Set to 0 for +# unlimited events per stack. (integer value) +#max_events_per_stack=1000 + +# Timeout in seconds for stack action (ie. create or update). +# (integer value) +#stack_action_timeout=3600 + +# RPC timeout for the engine liveness check that is used for +# stack locking. (integer value) +#engine_life_check_timeout=2 + +# onready allows you to send a notification when the heat +# processes are ready to serve. This is either a module with +# the notify() method or a shell command. To enable +# notifications with systemd, one may use the 'systemd-notify +# --ready' shell command or the 'heat.common.systemd' +# notification module. (string value) +#onready= + +# Name of the engine node. This can be an opaque identifier. +# It is not necessarily a hostname, FQDN, or IP address. +# (string value) +#host=heat + +# Seconds between running periodic tasks. (integer value) +#periodic_interval=60 + +# URL of the Heat metadata server. (string value) +#heat_metadata_server_url= +heat_metadata_server_url=https://.csail.mit.edu:8000 + +# URL of the Heat waitcondition server. (string value) +#heat_waitcondition_server_url= +heat_waitcondition_server_url=https://.csail.mit.edu:8000/v1/waitcondition + +# URL of the Heat CloudWatch server. 
(string value) +#heat_watch_server_url= +heat_watch_server_url=https://.csail.mit.edu:8003 + +# Instance connection to CFN/CW API via https. (string value) +#instance_connection_is_secure=0 + +# Instance connection to CFN/CW API validate certs if SSL is +# used. (string value) +#instance_connection_https_validate_certificates=1 + +# Default region name used to get services endpoints. (string +# value) +#region_name_for_services= + +# Keystone role for heat template-defined users. (string +# value) +#heat_stack_user_role=heat_stack_user + +# Keystone domain ID which contains heat template-defined +# users. (string value) +#stack_user_domain= + +# Keystone username, a user with roles sufficient to manage +# users and projects in the stack_user_domain. (string value) +#stack_domain_admin= + +# Keystone password for stack_domain_admin user. (string +# value) +#stack_domain_admin_password= + +# Maximum raw byte size of any template. (integer value) +#max_template_size=524288 + +# Maximum depth allowed when using nested stacks. (integer +# value) +#max_nested_stack_depth=3 + + +# +# Options defined in heat.common.crypt +# + +# Encryption key used for authentication info in database. +# (string value) +#auth_encryption_key=notgood but just long enough i think +auth_encryption_key=Jeemahphi5aP8ahr7aiX9eirog7Zoraiph2hiosh3thohpei4eeMopeeh5Oa2Wi4 + +# +# Options defined in heat.common.heat_keystoneclient +# + +# Fully qualified class name to use as a keystone backend. +# (string value) +#keystone_backend=heat.common.heat_keystoneclient.KeystoneClientV3 + + +# +# Options defined in heat.common.wsgi +# + +# Maximum raw byte size of JSON request body. Should be larger +# than max_template_size. (integer value) +#max_json_body_size=1048576 + + +# +# Options defined in heat.db.api +# + +# The backend to use for db. (string value) +#db_backend=sqlalchemy + + +# +# Options defined in heat.engine.clients +# + +# Fully qualified class name to use as a client backend. +# (string value) +#cloud_backend=heat.engine.clients.OpenStackClients + + +# +# Options defined in heat.engine.resources.loadbalancer +# + +# Custom template for the built-in loadbalancer nested stack. +# (string value) +#loadbalancer_template= + + +# +# Options defined in heat.openstack.common.db.sqlalchemy.session +# + +# the filename to use with sqlite (string value) +#sqlite_db=heat.sqlite + +# If true, use synchronous mode for sqlite (boolean value) +#sqlite_synchronous=true + + +# +# Options defined in heat.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in heat.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in heat.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false +debug=true + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). 
(boolean value) +#verbose=false +verbose=true + +# Log output to standard error (boolean value) +#use_stderr=true + +# format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# list of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN + +# publish error events (boolean value) +#publish_errors=false + +# make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= +log_dir=/var/log/heat + +# Use syslog for logging. 
(boolean value) +#use_syslog=false + +# syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in heat.openstack.common.notifier.api +# + +# Driver or drivers to handle sending notifications (multi +# valued) +#notification_driver= + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in heat.openstack.common.notifier.list_notifier +# + +# List of drivers to send notifications (multi valued) +#list_notifier_drivers=heat.openstack.common.notifier.no_op_notifier + + +# +# Options defined in heat.openstack.common.notifier.rpc_notifier +# + +# AMQP topic used for OpenStack notifications (list value) +#notification_topics=notifications + + +# +# Options defined in heat.openstack.common.policy +# + +# JSON file containing policy (string value) +#policy_file=policy.json + +# Rule enforced when requested rule is not found (string +# value) +#policy_default_rule=default + + +# +# Options defined in heat.openstack.common.rpc +# + +# The messaging module to use, defaults to kombu. (string +# value) +#rpc_backend=heat.openstack.common.rpc.impl_kombu + +# Size of RPC thread pool (integer value) +#rpc_thread_pool_size=64 + +# Size of RPC connection pool (integer value) +#rpc_conn_pool_size=30 + +# Seconds to wait for a response from call or multicall +# (integer value) +#rpc_response_timeout=60 + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions + +# If passed, use a fake RabbitMQ provider (boolean value) +#fake_rabbit=false + +# AMQP exchange to connect to if using RabbitMQ or Qpid +# (string value) +#control_exchange=heat + + +# +# Options defined in heat.openstack.common.rpc.amqp +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + + +# +# Options defined in heat.openstack.common.rpc.impl_kombu +# + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. 
SSLv2 may be available on some +# distributions (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled) (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled) (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL enabled) +# (string value) +#kombu_ssl_ca_certs= + +# The RabbitMQ broker address where a single node is used +# (string value) +#rabbit_host=localhost + +rabbit_port=5672 +rabbit_virtual_host=/ +rabbit_password= +rabbit_userid= +rabbit_host=.csail.mit.edu + +# The RabbitMQ broker port where a single node is used +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# connect over SSL for RabbitMQ (boolean value) +#rabbit_use_ssl=false + +# the RabbitMQ userid (string value) +#rabbit_userid=guest + +# the RabbitMQ password (string value) +#rabbit_password=guest + +# the RabbitMQ virtual host (string value) +#rabbit_virtual_host=/ + +# how frequently to retry connecting with RabbitMQ (integer +# value) +#rabbit_retry_interval=1 + +# how long to backoff for between retries when connecting to +# RabbitMQ (integer value) +#rabbit_retry_backoff=2 + +# maximum retries with trying to connect to RabbitMQ (the +# default of 0 implies an infinite retry count) (integer +# value) +#rabbit_max_retries=0 + +# use H/A queues in RabbitMQ (x-ha-policy: all).You need to +# wipe RabbitMQ database when changing this option. (boolean +# value) +#rabbit_ha_queues=false + + +# +# Options defined in heat.openstack.common.rpc.impl_qpid +# + +# Qpid broker hostname (string value) +#qpid_hostname=localhost + +# Qpid broker port (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for qpid connection (string value) +#qpid_username= + +# Password for qpid connection (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl' (string value) +#qpid_protocol=tcp + +# Disable Nagle algorithm (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + + +# +# Options defined in heat.openstack.common.rpc.impl_zmq +# + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver (string value) +#rpc_zmq_matchmaker=heat.openstack.common.rpc.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1 (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. 
(string +# value) +#rpc_zmq_host=heat + + +# +# Options defined in heat.openstack.common.rpc.matchmaker +# + +# Heartbeat frequency (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + + +[auth_password] + +# +# Options defined in heat.common.config +# + +# Allow orchestration of multiple clouds. (boolean value) +#multi_cloud=false + +# Allowed keystone endpoints for auth_uri when multi_cloud is +# enabled. At least one endpoint needs to be specified. (list +# value) +#allowed_auth_uris= + + +[clients] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +insecure=false +#insecure=true + +[clients_ceilometer] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + + +[clients_cinder] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + + +[clients_heat] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + +# Optional heat url in format like +# http://0.0.0.0:8004/v1/%(tenant_id)s. (string value) +#url= + + +[clients_keystone] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. 
+# (boolean value) +#insecure=false + + +[clients_neutron] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + + +[clients_nova] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + + +[clients_swift] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. +# (boolean value) +#insecure=false + + +[clients_trove] + +# +# Options defined in heat.common.config +# + +# Type of endpoint in Identity service catalog to use for +# communication with the OpenStack service. (string value) +#endpoint_type=publicURL + +# Optional CA cert file to use in SSL connections. (string +# value) +#ca_file= + +# Optional PEM-formatted certificate chain file. (string +# value) +#cert_file= + +# Optional PEM-formatted file that contains the private key. +# (string value) +#key_file= + +# If set, then the server's certificate will not be verified. 
+# (boolean value) +#insecure=false + + +[database] + +# +# Options defined in heat.openstack.common.db.api +# + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + + +# +# Options defined in heat.openstack.common.db.sqlalchemy.session +# + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=sqlite:////heat/openstack/common/db/$sqlite_db +connection=mysql://heat:@.csail.mit.edu/heat + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + +# timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + + +[ec2authtoken] + +# +# Options defined in heat.api.aws.ec2token +# + +# Authentication Endpoint URI. (string value) +#auth_uri= +auth_uri = https://.csail.mit.edu:5001/v2.0 + +# Allow orchestration of multiple clouds. (boolean value) +#multi_cloud=false + +# Allowed keystone endpoints for auth_uri when multi_cloud is +# enabled. At least one endpoint needs to be specified. (list +# value) +#allowed_auth_uris= + + +[heat_api] + +# +# Options defined in heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a +# particular network interface. (string value) +#bind_host=0.0.0.0 + +# The port on which the server will listen. (integer value) +#bind_port=8004 + +# Number of backlog requests to configure the socket with. +# (integer value) +#backlog=4096 + +# Location of the SSL certificate file to use for SSL mode. 
+# (string value) +#cert_file= +cert_file=/etc/ssl/certs/-1.csail.mit.edu.crt +key_file=/etc/ssl/private/-1.csail.mit.edu.key + +# Location of the SSL key file to use for enabling SSL mode. +# (string value) +#key_file= + + +# Number of workers for Heat service. (integer value) +#workers=0 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + + +[heat_api_cfn] + +# +# Options defined in heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a +# particular network interface. (string value) +#bind_host=0.0.0.0 + +# The port on which the server will listen. (integer value) +#bind_port=8000 + +# Number of backlog requests to configure the socket with. +# (integer value) +#backlog=4096 + +# Location of the SSL certificate file to use for SSL mode. +# (string value) +#cert_file= +cert_file=/etc/ssl/certs/-1.csail.mit.edu.crt +key_file=/etc/ssl/private/-1.csail.mit.edu.key + +# Location of the SSL key file to use for enabling SSL mode. +# (string value) +#key_file= + +# Number of workers for Heat service. (integer value) +#workers=0 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + + +[heat_api_cloudwatch] + +# +# Options defined in heat.common.wsgi +# + +# Address to bind the server. Useful when selecting a +# particular network interface. (string value) +#bind_host=0.0.0.0 + +# The port on which the server will listen. (integer value) +#bind_port=8003 + +# Number of backlog requests to configure the socket with. +# (integer value) +#backlog=4096 + +# Location of the SSL certificate file to use for SSL mode. +# (string value) +#cert_file= +cert_file=/etc/ssl/certs/-1.csail.mit.edu.crt +key_file=/etc/ssl/private/-1.csail.mit.edu.key + +# Location of the SSL key file to use for enabling SSL mode. +# (string value) +#key_file= + +# Number of workers for Heat service. (integer value) +#workers=0 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs.) (integer value) +#max_header_line=16384 + + +[keystone_authtoken] + +# +# Options defined in keystoneclient.middleware.auth_token +# + +# Prefix to prepend at the beginning of the path (string +# value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint (string +# value) +#auth_host=127.0.0.1 + +auth_port=35358 +auth_protocol=https +auth_host=.csail.mit.edu +auth_uri=https://.csail.mit.edu:5001/v2.0 +auth_version=2.0 + +# Port of the admin Identity API endpoint (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint(http or https) +# (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +#auth_uri= + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. 
(boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# Single shared secret with the Keystone configuration used +# for bootstrapping a Keystone installation, or otherwise +# bypassing the normal authentication process. (string value) +#admin_token= + +# Keystone account username (string value) +admin_user=heat + +# Keystone account password (string value) +admin_password= + +# Keystone service account tenant name to validate user tokens +# (string value) +admin_tenant_name=services + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=300 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + + +[matchmaker_redis] + +# +# Options defined in heat.openstack.common.rpc.matchmaker_redis +# + +# Host to locate redis (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server. 
(optional) (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in heat.openstack.common.rpc.matchmaker_ring +# + +# Matchmaker ring file (JSON) (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[paste_deploy] + +# +# Options defined in heat.common.config +# + +# The flavor to use. (string value) +#flavor= + +# The API paste config file to use. (string value) +#api_paste_config=api-paste.ini + + +[revision] + +# +# Options defined in heat.common.config +# + +# Heat build revision. If you would prefer to manage your +# build revision separately, you can move this section to a +# different file and add it as another config option. (string +# value) +#heat_revision=unknown + + +[rpc_notifier2] + +# +# Options defined in heat.openstack.common.notifier.rpc_notifier2 +# + +# AMQP topic(s) used for OpenStack notifications (list value) +#topics=notifications + + +[ssl] + +# +# Options defined in heat.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#cert_file= +cert_file=/etc/ssl/certs/-1.csail.mit.edu.crt +key_file=/etc/ssl/private/-1.csail.mit.edu.key +# Private key file to use when starting the server securely +# (string value) +#key_file= + + diff --git a/MIT_CSAIL/controller/etc/heat/policy.json b/MIT_CSAIL/controller/etc/heat/policy.json new file mode 100644 index 0000000..f95144a --- /dev/null +++ b/MIT_CSAIL/controller/etc/heat/policy.json @@ -0,0 +1,70 @@ +{ + "context_is_admin": "role:admin", + "deny_stack_user": "not role:heat_stack_user", + "deny_everybody": "!", + + "cloudformation:ListStacks": "rule:deny_stack_user", + "cloudformation:CreateStack": "rule:deny_stack_user", + "cloudformation:DescribeStacks": "rule:deny_stack_user", + "cloudformation:DeleteStack": "rule:deny_stack_user", + "cloudformation:UpdateStack": "rule:deny_stack_user", + "cloudformation:CancelUpdateStack": "rule:deny_stack_user", + "cloudformation:DescribeStackEvents": "rule:deny_stack_user", + "cloudformation:ValidateTemplate": "rule:deny_stack_user", + "cloudformation:GetTemplate": "rule:deny_stack_user", + "cloudformation:EstimateTemplateCost": "rule:deny_stack_user", + "cloudformation:DescribeStackResource": "", + "cloudformation:DescribeStackResources": "rule:deny_stack_user", + "cloudformation:ListStackResources": "rule:deny_stack_user", + + "cloudwatch:DeleteAlarms": "rule:deny_stack_user", + "cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user", + "cloudwatch:DescribeAlarms": "rule:deny_stack_user", + "cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user", + "cloudwatch:DisableAlarmActions": "rule:deny_stack_user", + "cloudwatch:EnableAlarmActions": "rule:deny_stack_user", + "cloudwatch:GetMetricStatistics": "rule:deny_stack_user", + "cloudwatch:ListMetrics": "rule:deny_stack_user", + "cloudwatch:PutMetricAlarm": "rule:deny_stack_user", + "cloudwatch:PutMetricData": "", + "cloudwatch:SetAlarmState": "rule:deny_stack_user", + + "actions:action": "rule:deny_stack_user", + "build_info:build_info": "rule:deny_stack_user", + "events:index": "rule:deny_stack_user", + "events:show": "rule:deny_stack_user", + "resource:index": "rule:deny_stack_user", + "resource:metadata": "", + "resource:signal": "", + "resource:show": "rule:deny_stack_user", + "stacks:abandon": "rule:deny_stack_user", + "stacks:create": "rule:deny_stack_user", + "stacks:delete": 
"rule:deny_stack_user", + "stacks:detail": "rule:deny_stack_user", + "stacks:generate_template": "rule:deny_stack_user", + "stacks:global_index": "rule:deny_everybody", + "stacks:index": "rule:deny_stack_user", + "stacks:list_resource_types": "rule:deny_stack_user", + "stacks:lookup": "", + "stacks:preview": "rule:deny_stack_user", + "stacks:resource_schema": "rule:deny_stack_user", + "stacks:show": "rule:deny_stack_user", + "stacks:template": "rule:deny_stack_user", + "stacks:update": "rule:deny_stack_user", + "stacks:update_patch": "rule:deny_stack_user", + "stacks:validate_template": "rule:deny_stack_user", + "stacks:snapshot": "rule:deny_stack_user", + "stacks:show_snapshot": "rule:deny_stack_user", + "stacks:delete_snapshot": "rule:deny_stack_user", + "stacks:list_snapshots": "rule:deny_stack_user", + + "software_configs:create": "rule:deny_stack_user", + "software_configs:show": "rule:deny_stack_user", + "software_configs:delete": "rule:deny_stack_user", + "software_deployments:index": "rule:deny_stack_user", + "software_deployments:create": "rule:deny_stack_user", + "software_deployments:show": "rule:deny_stack_user", + "software_deployments:update": "rule:deny_stack_user", + "software_deployments:delete": "rule:deny_stack_user", + "software_deployments:metadata": "" +} diff --git a/MIT_CSAIL/controller/etc/keystone/default_catalog.templates b/MIT_CSAIL/controller/etc/keystone/default_catalog.templates new file mode 100644 index 0000000..a69b7f0 --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/default_catalog.templates @@ -0,0 +1,27 @@ +# config for templated.Catalog, using camelCase because I don't want to do +# translations for keystone compat +catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 +catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.name = Identity Service + +# fake compute service for now to help novaclient tests work +catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.name = Compute Service + +catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.name = Volume Service + +catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin +catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.name = EC2 Service + +catalog.RegionOne.image.publicURL = http://localhost:9292/v1 +catalog.RegionOne.image.adminURL = http://localhost:9292/v1 +catalog.RegionOne.image.internalURL = http://localhost:9292/v1 +catalog.RegionOne.image.name = Image Service diff --git a/MIT_CSAIL/controller/etc/keystone/keystone-paste.ini b/MIT_CSAIL/controller/etc/keystone/keystone-paste.ini new file mode 100644 index 0000000..46f994c --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/keystone-paste.ini @@ -0,0 +1,121 @@ +# Keystone PasteDeploy configuration file. 
+ +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:build_auth_context] +paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:xml_body_v2] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory + +[filter:xml_body_v3] +paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:user_crud_extension] +paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:ec2_extension_v3] +paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory + +[filter:federation_extension] +paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory + +[filter:oauth1_extension] +paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[filter:endpoint_filter_extension] +paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory + +[filter:endpoint_policy_extension] +paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory + +[filter:simple_cert_extension] +paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory + +[filter:revoke_extension] +paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory + +[filter:url_normalize] +paste.filter_factory = keystone.middleware:NormalizingFilter.factory + +[filter:sizelimit] +paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory + +[filter:stats_monitoring] +paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory + +[filter:stats_reporting] +paste.filter_factory = keystone.contrib.stats:StatsExtension.factory + +[filter:access_log] +paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:service_v3] +paste.app_factory = keystone.service:v3_app_factory + +[app:admin_service] +paste.app_factory = keystone.service:admin_app_factory + +[pipeline:public_api] +# The last item in this pipeline must be public_service or an equivalent +# application. It cannot be a filter. +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service + +[pipeline:admin_api] +# The last item in this pipeline must be admin_service or an equivalent +# application. It cannot be a filter. +pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service + +[pipeline:api_v3] +# The last item in this pipeline must be service_v3 or an equivalent +# application. It cannot be a filter. 
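+# Reading left to right, a /v3 request passes through size limiting, URL +# normalization, auth context building, token handling, body parsing and the +# listed extensions before reaching the service_v3 app; the response unwinds +# back through the same filters in reverse order.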
+pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3 + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = sizelimit url_normalize xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = sizelimit url_normalize xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/MIT_CSAIL/controller/etc/keystone/keystone-ssl.conf b/MIT_CSAIL/controller/etc/keystone/keystone-ssl.conf new file mode 100644 index 0000000..15e7c0f --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/keystone-ssl.conf @@ -0,0 +1,385 @@ +[DEFAULT] +workers = 48 +# A "shared secret" between keystone and other openstack services +# admin_token = ADMIN +admin_token = + +# The IP address of the network interface to listen on +# bind_host = 0.0.0.0 +bind_host = 0.0.0.0 + +# The port number which the public service listens on +# public_port = 5000 +public_port = 5000 + +# The port number which the admin service listens on +# admin_port = 35357 +admin_port = 35357 + +# The base endpoint URLs for keystone that are advertised to clients +# (NOTE: this does NOT affect how keystone listens for connections) +# public_endpoint = http://localhost:%(public_port)d/ +# admin_endpoint = http://localhost:%(admin_port)d/ + +# The number of worker processes to serve the public and admin WSGI +# applications respectively. +public_workers = 24 +admin_workers = 24 + + +# The port number which the OpenStack Compute service listens on +# compute_port = 8774 +compute_port = 8774 + +# Path to your policy definition containing identity actions +# policy_file = policy.json + +# Rule to check if no matching policy definition is found +# FIXME(dolph): This should really be defined as [policy] default_rule +# policy_default_rule = admin_required + +# Role for migrating membership relationships +# During a SQL upgrade, the following values will be used to create a new role +# that will replace records in the user_tenant_membership table with explicit +# role grants. After migration, the member_role_id will be used in the API +# add_user_to_project, and member_role_name will be ignored. +# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab +# member_role_name = _member_ + +# === Logging Options === +# Print debugging output +# (includes plaintext request logging, potentially including passwords) +debug = False +#debug = True + +# debug = True +# Print more verbose output +verbose = False +#verbose = True + +# Name of log file to output to. If not set, logging will go to stdout. +log_file = keystone.log + +# The directory to keep log files in (will be prepended to --logfile) +log_dir = /var/log/keystone + +# Use syslog for logging. +# use_syslog = False +use_syslog = False + +# syslog facility to receive log lines +# syslog_log_facility = LOG_USER + +# If this option is specified, the logging configuration file specified is +# used and overrides any other logging options specified. Please see the +# Python logging module documentation for details on logging configuration +# files.
+# log_config = logging.conf + +# A logging.Formatter log message format string which may use any of the +# available logging.LogRecord attributes. +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s + +# Format string for %(asctime)s in log records. +# log_date_format = %Y-%m-%d %H:%M:%S + +# onready allows you to send a notification when the process is ready to serve +# For example, to have it notify using systemd, one could set shell command: +# onready = systemd-notify --ready +# or a module with notify() method: +# onready = keystone.common.systemd + +[sql] +# The SQLAlchemy connection string used to connect to the database +# connection = sqlite:////var/lib/keystone/keystone.db +connection = mysql://keystone_admin:@/keystone +# the timeout before idle sql connections are reaped +# idle_timeout = 200 +idle_timeout = 200 + +[memcache] +servers = localhost:11211 +# max_compare_and_set_retry = 16 + +[identity] +driver = keystone.identity.backends.sql.Identity + +# This references the domain to use for all Identity API v2 requests (which are +# not aware of domains). A domain with this ID will be created for you by +# keystone-manage db_sync in migration 008. The domain referenced by this ID +# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. +# There is nothing special about this domain, other than the fact that it must +# exist in order to maintain support for your v2 clients. +# default_domain_id = default + +[trust] +driver = keystone.trust.backends.sql.Trust + +# delegation and impersonation features can be optionally disabled +# enabled = True + +[catalog] +# dynamic, sql-based backend (supports API/CLI-based management commands) +driver = keystone.catalog.backends.sql.Catalog + +# static, file-based backend (does *NOT* support any management commands) +# driver = keystone.catalog.backends.templated.TemplatedCatalog + +# template_file = default_catalog.templates + +[token] +#driver = keystone.token.backends.sql.Token +driver = keystone.token.backends.memcache.Token + +# Amount of time a token should remain valid (in seconds) +#expiration = 86400 +# shorter expiration keeps bloat down and performance up +# 10min pre patch. 1hr post patch. +expiration = 3600 + +#provider=keystone.token.providers.pki.Provider +provider=keystone.token.providers.uuid.Provider + +# Token specific caching toggle. This has no effect unless the global caching +# option is set to True +# caching = True +# no point in caching tokens that are only stored in memcache +caching = False +# Token specific cache time-to-live (TTL) in seconds. +# cache_time = +# Revocation-List specific cache time-to-live (TTL) in seconds. +# revocation_cache_time = 3600 + + +[cache] +# Global cache functionality toggle. +enabled = False +# enabled = True +# Prefix for building the configuration dictionary for the cache region. This +# should not need to be changed unless there is another dogpile.cache region +# with the same configuration name +# config_prefix = cache.keystone + +# Default TTL, in seconds, for any cached item in the dogpile.cache region. +# This applies to any cached method that doesn't have an explicit cache +# expiration time defined for it. +# expiration_time = 600 + +# Dogpile.cache backend module. It is recommended that Memcache +# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production +# deployments. Small workloads (single process) like devstack can use the +# dogpile.cache.memory backend.
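+# As a sketch, a memcached-backed region could instead be configured as below; +# the URL is illustrative and would normally point at the same servers listed +# under [memcache]: +# +# backend = dogpile.cache.memcache +# backend_argument = url:127.0.0.1:11211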
+# backend = keystone.common.cache.noop +#backend = dogpile.cache.memcache +backend = dogpile.cache.pylibmc +# Arguments supplied to the backend module. Specify this option once per +# argument to be passed to the dogpile.cache backend. +# Example format: : +# backend_argument = +backend_argument = url:127.0.0.1 + +# Proxy Classes to import that will affect the way the dogpile.cache backend +# functions. See the dogpile.cache documentation on changing-backend-behavior. +# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2 +# proxies = + +# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This +# is toggle-able for debugging purposes, it is highly recommended to always +# leave this set to True. +# use_key_mangler = True + +# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) +# This is only really useful if you need to see the specific cache-backend +# get/set/delete calls with the keys/values. Typically this should be left +# set to False. +# debug_cache_backend = False + + +[policy] +driver = keystone.policy.backends.sql.Policy + +[ec2] +driver = keystone.contrib.ec2.backends.sql.Ec2 + +[ssl] +#enable = True +#certfile = /etc/keystone/ssl/certs/keystone.pem +#keyfile = /etc/keystone/ssl/private/keystonekey.pem +#ca_certs = /etc/keystone/ssl/certs/ca.pem +#cert_required = True + +[signing] +#token_format = PKI +#certfile = /etc/keystone/ssl/certs/signing_cert.pem +#keyfile = /etc/keystone/ssl/private/signing_key.pem +#ca_certs = /etc/keystone/ssl/certs/ca.pem +#key_size = 1024 +#valid_days = 3650 +#ca_password = None + +[ldap] +# url = ldap://localhost +# user = dc=Manager,dc=example,dc=com +# password = None +# suffix = cn=example,cn=com +# use_dumb_member = False +# allow_subtree_delete = False +# dumb_member = cn=dumb,dc=example,dc=com + +# Maximum results per page; a value of zero ('0') disables paging (default) +# page_size = 0 + +# The LDAP dereferencing option for queries. This can be either 'never', +# 'searching', 'always', 'finding' or 'default'. The 'default' option falls +# back to using default dereferencing configured by your ldap.conf. 
+# alias_dereferencing = default + +# The LDAP scope for queries, this can be either 'one' +# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree) +# query_scope = one + +# user_tree_dn = ou=Users,dc=example,dc=com +# user_filter = +# user_objectclass = inetOrgPerson +# user_domain_id_attribute = businessCategory +# user_id_attribute = cn +# user_name_attribute = sn +# user_mail_attribute = email +# user_pass_attribute = userPassword +# user_enabled_attribute = enabled +# user_enabled_mask = 0 +# user_enabled_default = True +# user_attribute_ignore = tenant_id,tenants +# user_allow_create = True +# user_allow_update = True +# user_allow_delete = True +# user_enabled_emulation = False +# user_enabled_emulation_dn = + +# tenant_tree_dn = ou=Groups,dc=example,dc=com +# tenant_filter = +# tenant_objectclass = groupOfNames +# tenant_domain_id_attribute = businessCategory +# tenant_id_attribute = cn +# tenant_member_attribute = member +# tenant_name_attribute = ou +# tenant_desc_attribute = desc +# tenant_enabled_attribute = enabled +# tenant_attribute_ignore = +# tenant_allow_create = True +# tenant_allow_update = True +# tenant_allow_delete = True +# tenant_enabled_emulation = False +# tenant_enabled_emulation_dn = + +# role_tree_dn = ou=Roles,dc=example,dc=com +# role_filter = +# role_objectclass = organizationalRole +# role_id_attribute = cn +# role_name_attribute = ou +# role_member_attribute = roleOccupant +# role_attribute_ignore = +# role_allow_create = True +# role_allow_update = True +# role_allow_delete = True + +# group_tree_dn = +# group_filter = +# group_objectclass = groupOfNames +# group_id_attribute = cn +# group_name_attribute = ou +# group_member_attribute = member +# group_desc_attribute = desc +# group_attribute_ignore = +# group_allow_create = True +# group_allow_update = True +# group_allow_delete = True + +[auth] +methods = password,token +password = keystone.auth.plugins.password.Password +token = keystone.auth.plugins.token.Token + +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:user_crud_extension] +paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[filter:url_normalize] +paste.filter_factory = keystone.middleware:NormalizingFilter.factory + +[filter:sizelimit] +paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory + +[filter:stats_monitoring] +paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory + +[filter:stats_reporting] +paste.filter_factory = keystone.contrib.stats:StatsExtension.factory + +[filter:access_log] +paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:service_v3] +paste.app_factory = keystone.service:v3_app_factory + +[app:admin_service] +paste.app_factory = 
keystone.service:admin_app_factory + +[pipeline:public_api] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service + +[pipeline:admin_api] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service + +[pipeline:api_v3] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3 + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/MIT_CSAIL/controller/etc/keystone/keystone.conf b/MIT_CSAIL/controller/etc/keystone/keystone.conf new file mode 100644 index 0000000..a815225 --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/keystone.conf @@ -0,0 +1,386 @@ +[DEFAULT] +workers = 48 +# A "shared secret" between keystone and other openstack services +# admin_token = ADMIN +admin_token = + +# The IP address of the network interface to listen on +# bind_host = 0.0.0.0 +bind_host = 0.0.0.0 + +# The port number which the public service listens on +# public_port = 5000 +public_port = 5000 + +# The port number which the admin service listens on +# admin_port = 35357 +admin_port = 35357 + +# The base endpoint URLs for keystone that are advertised to clients +# (NOTE: this does NOT affect how keystone listens for connections) +# public_endpoint = http://localhost:%(public_port)d/ +# admin_endpoint = http://localhost:%(admin_port)d/ + +# The number of worker processes to serve the public and admin WSGI +# applications respectively. +public_workers = 24 +admin_workers = 24 + + +# The port number which the OpenStack Compute service listens on +# compute_port = 8774 +compute_port = 8774 + +# Path to your policy definition containing identity actions +# policy_file = policy.json + +# Rule to check if no matching policy definition is found +# FIXME(dolph): This should really be defined as [policy] default_rule +# policy_default_rule = admin_required + +# Role for migrating membership relationships +# During a SQL upgrade, the following values will be used to create a new role +# that will replace records in the user_tenant_membership table with explicit +# role grants. After migration, the member_role_id will be used in the API +# add_user_to_project, and member_role_name will be ignored. +# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab +# member_role_name = _member_ + +# === Logging Options === +# Print debugging output +# (includes plaintext request logging, potentially including passwords) +debug = False +#debug = True + +# debug = True +# Print more verbose output +verbose = False +#verbose = True + +# Name of log file to output to. If not set, logging will go to stdout.
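+# With the values below, messages end up in /var/log/keystone/keystone.log, +# since log_dir is prepended to log_file.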
+log_file = keystone.log + +# The directory to keep log files in (will be prepended to --logfile) +log_dir = /var/log/keystone + +# Use syslog for logging. +# use_syslog = False +use_syslog = False + +# syslog facility to receive log lines +# syslog_log_facility = LOG_USER + +# If this option is specified, the logging configuration file specified is +# used and overrides any other logging options specified. Please see the +# Python logging module documentation for details on logging configuration +# files. +# log_config = logging.conf + +# A logging.Formatter log message format string which may use any of the +# available logging.LogRecord attributes. +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s + +# Format string for %(asctime)s in log records. +# log_date_format = %Y-%m-%d %H:%M:%S + +# onready allows you to send a notification when the process is ready to serve +# For example, to have it notify using systemd, one could set shell command: +# onready = systemd-notify --ready +# or a module with notify() method: +# onready = keystone.common.systemd + +[database] +# The SQLAlchemy connection string used to connect to the database +# connection = sqlite:////var/lib/keystone/keystone.db +connection = mysql://keystone_admin:@localhost/keystone +[sql] +# the timeout before idle sql connections are reaped +# idle_timeout = 200 +idle_timeout = 200 + +[memcache] +servers = localhost:11211 +# max_compare_and_set_retry = 16 + +[identity] +driver = keystone.identity.backends.sql.Identity + +# This references the domain to use for all Identity API v2 requests (which are +# not aware of domains). A domain with this ID will be created for you by +# keystone-manage db_sync in migration 008. The domain referenced by this ID +# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. +# There is nothing special about this domain, other than the fact that it must +# exist in order to maintain support for your v2 clients. +# default_domain_id = default + +[trust] +driver = keystone.trust.backends.sql.Trust + +# delegation and impersonation features can be optionally disabled +# enabled = True + +[catalog] +# dynamic, sql-based backend (supports API/CLI-based management commands) +driver = keystone.catalog.backends.sql.Catalog + +# static, file-based backend (does *NOT* support any management commands) +# driver = keystone.catalog.backends.templated.TemplatedCatalog + +# template_file = default_catalog.templates + +[token] +#driver = keystone.token.backends.sql.Token +driver = keystone.token.persistence.backends.memcache.Token + +# Amount of time a token should remain valid (in seconds) +#expiration = 86400 +# shorter expiration keeps bloat down and performance up +# icehouse 1hr max. juno 2hr fine. does 10 work...? +expiration = 36000 + +#provider=keystone.token.providers.pki.Provider +provider=keystone.token.providers.uuid.Provider + +# Token specific caching toggle. This has no effect unless the global caching +# option is set to True +# caching = True +# no point in caching tokens that are only stored in memcache +caching = False +# Token specific cache time-to-live (TTL) in seconds. +# cache_time = +# Revocation-List specific cache time-to-live (TTL) in seconds. +# revocation_cache_time = 3600 + + +[cache] +# Global cache functionality toggle. +enabled = False +# enabled = True +# Prefix for building the configuration dictionary for the cache region.
This +# should not need to be changed unless there is another dogpile.cache region +# with the same configuration name +# config_prefix = cache.keystone + +# Default TTL, in seconds, for any cached item in the dogpile.cache region. +# This applies to any cached method that doesn't have an explicit cache +# expiration time defined for it. +# expiration_time = 600 + +# Dogpile.cache backend module. It is recommended that Memcache +# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production +# deployments. Small workloads (single process) like devstack can use the +# dogpile.cache.memory backend. +# backend = keystone.common.cache.noop +#backend = dogpile.cache.memcache +backend = dogpile.cache.pylibmc +# Arguments supplied to the backend module. Specify this option once per +# argument to be passed to the dogpile.cache backend. +# Example format: : +# backend_argument = +backend_argument = url:127.0.0.1 + +# Proxy Classes to import that will affect the way the dogpile.cache backend +# functions. See the dogpile.cache documentation on changing-backend-behavior. +# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2 +# proxies = + +# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This +# is toggle-able for debugging purposes, it is highly recommended to always +# leave this set to True. +# use_key_mangler = True + +# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls) +# This is only really useful if you need to see the specific cache-backend +# get/set/delete calls with the keys/values. Typically this should be left +# set to False. +# debug_cache_backend = False + + +[policy] +driver = keystone.policy.backends.sql.Policy + +[ec2] +driver = keystone.contrib.ec2.backends.sql.Ec2 + +[ssl] +#enable = True +#certfile = /etc/keystone/ssl/certs/keystone.pem +#keyfile = /etc/keystone/ssl/private/keystonekey.pem +#ca_certs = /etc/keystone/ssl/certs/ca.pem +#cert_required = True + +[signing] +#token_format = PKI +#certfile = /etc/keystone/ssl/certs/signing_cert.pem +#keyfile = /etc/keystone/ssl/private/signing_key.pem +#ca_certs = /etc/keystone/ssl/certs/ca.pem +#key_size = 1024 +#valid_days = 3650 +#ca_password = None + +[ldap] +# url = ldap://localhost +# user = dc=Manager,dc=example,dc=com +# password = None +# suffix = cn=example,cn=com +# use_dumb_member = False +# allow_subtree_delete = False +# dumb_member = cn=dumb,dc=example,dc=com + +# Maximum results per page; a value of zero ('0') disables paging (default) +# page_size = 0 + +# The LDAP dereferencing option for queries. This can be either 'never', +# 'searching', 'always', 'finding' or 'default'. The 'default' option falls +# back to using default dereferencing configured by your ldap.conf. 
+# alias_dereferencing = default + +# The LDAP scope for queries, this can be either 'one' +# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree) +# query_scope = one + +# user_tree_dn = ou=Users,dc=example,dc=com +# user_filter = +# user_objectclass = inetOrgPerson +# user_domain_id_attribute = businessCategory +# user_id_attribute = cn +# user_name_attribute = sn +# user_mail_attribute = email +# user_pass_attribute = userPassword +# user_enabled_attribute = enabled +# user_enabled_mask = 0 +# user_enabled_default = True +# user_attribute_ignore = tenant_id,tenants +# user_allow_create = True +# user_allow_update = True +# user_allow_delete = True +# user_enabled_emulation = False +# user_enabled_emulation_dn = + +# tenant_tree_dn = ou=Groups,dc=example,dc=com +# tenant_filter = +# tenant_objectclass = groupOfNames +# tenant_domain_id_attribute = businessCategory +# tenant_id_attribute = cn +# tenant_member_attribute = member +# tenant_name_attribute = ou +# tenant_desc_attribute = desc +# tenant_enabled_attribute = enabled +# tenant_attribute_ignore = +# tenant_allow_create = True +# tenant_allow_update = True +# tenant_allow_delete = True +# tenant_enabled_emulation = False +# tenant_enabled_emulation_dn = + +# role_tree_dn = ou=Roles,dc=example,dc=com +# role_filter = +# role_objectclass = organizationalRole +# role_id_attribute = cn +# role_name_attribute = ou +# role_member_attribute = roleOccupant +# role_attribute_ignore = +# role_allow_create = True +# role_allow_update = True +# role_allow_delete = True + +# group_tree_dn = +# group_filter = +# group_objectclass = groupOfNames +# group_id_attribute = cn +# group_name_attribute = ou +# group_member_attribute = member +# group_desc_attribute = desc +# group_attribute_ignore = +# group_allow_create = True +# group_allow_update = True +# group_allow_delete = True + +[auth] +methods = password,token +password = keystone.auth.plugins.password.Password +token = keystone.auth.plugins.token.Token + +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:user_crud_extension] +paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[filter:url_normalize] +paste.filter_factory = keystone.middleware:NormalizingFilter.factory + +[filter:sizelimit] +paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory + +[filter:stats_monitoring] +paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory + +[filter:stats_reporting] +paste.filter_factory = keystone.contrib.stats:StatsExtension.factory + +[filter:access_log] +paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:service_v3] +paste.app_factory = keystone.service:v3_app_factory + +[app:admin_service] +paste.app_factory = 
keystone.service:admin_app_factory + +[pipeline:public_api] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service + +[pipeline:admin_api] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service + +[pipeline:api_v3] +pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3 + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api +/v3 = api_v3 +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/v3 = api_v3 +/ = admin_version_api diff --git a/MIT_CSAIL/controller/etc/keystone/keystone.conf.back b/MIT_CSAIL/controller/etc/keystone/keystone.conf.back new file mode 100644 index 0000000..1ad118b --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/keystone.conf.back @@ -0,0 +1,89 @@ +[DEFAULT] +bind_host = 0.0.0.0 +public_port = 5000 +admin_port = 35357 +admin_token = +compute_port = 3000 +verbose = false +debug = false +log_file = /var/log/keystone/keystone.log +use_syslog = False + +[sql] +connection = mysql://keystone_admin:@127.0.0.1/keystone +idle_timeout = 300 +min_pool_size = 5 +max_pool_size = 10 +pool_timeout = 200 +[identity] +driver = keystone.identity.backends.sql.Identity +[catalog] +driver=keystone.catalog.backends.sql.Catalog + +[token] +driver = keystone.token.backends.kvs.Token +expiration = 86400 + +[policy] +driver = keystone.policy.backends.rules.Policy + +[ec2] +driver = keystone.contrib.ec2.backends.sql.Ec2 + +[filter:debug] +paste.filter_factory = keystone.common.wsgi:Debug.factory + +[filter:token_auth] +paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory + +[filter:admin_token_auth] +paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory + +[filter:xml_body] +paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory + +[filter:json_body] +paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory + +[filter:crud_extension] +paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory + +[filter:ec2_extension] +paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory + +[filter:s3_extension] +paste.filter_factory = keystone.contrib.s3:S3Extension.factory + +[app:public_service] +paste.app_factory = keystone.service:public_app_factory + +[app:admin_service] +paste.app_factory = keystone.service:admin_app_factory + +[pipeline:public_api] +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service + +[pipeline:admin_api] +pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service + +[app:public_version_service] +paste.app_factory = keystone.service:public_version_app_factory + +[app:admin_version_service] +paste.app_factory = 
keystone.service:admin_version_app_factory + +[pipeline:public_version_api] +pipeline = xml_body public_version_service + +[pipeline:admin_version_api] +pipeline = xml_body admin_version_service + +[composite:main] +use = egg:Paste#urlmap +/v2.0 = public_api +/ = public_version_api + +[composite:admin] +use = egg:Paste#urlmap +/v2.0 = admin_api +/ = admin_version_api diff --git a/MIT_CSAIL/controller/etc/keystone/logging.conf b/MIT_CSAIL/controller/etc/keystone/logging.conf new file mode 100644 index 0000000..7a538ae --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/logging.conf @@ -0,0 +1,39 @@ +[loggers] +keys=root + +[formatters] +keys=normal,normal_with_name,debug + +[handlers] +keys=production,file,devel + +[logger_root] +level=WARNING +handlers=file + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal_with_name +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=normal_with_name +args=('/var/log/keystone/keystone.log', 'a') + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + +[formatter_normal] +format=%(asctime)s %(levelname)s %(message)s + +[formatter_normal_with_name] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/MIT_CSAIL/controller/etc/keystone/logging.conf.sample b/MIT_CSAIL/controller/etc/keystone/logging.conf.sample new file mode 100644 index 0000000..d87d3a2 --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/logging.conf.sample @@ -0,0 +1,39 @@ +[loggers] +keys=root + +[formatters] +keys=normal,normal_with_name,debug + +[handlers] +keys=production,file,devel + +[logger_root] +level=WARNING +handlers=file + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal_with_name +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=FileHandler +level=DEBUG +formatter=normal_with_name +args=('keystone.log', 'a') + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + +[formatter_normal] +format=%(asctime)s %(levelname)s %(message)s + +[formatter_normal_with_name] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/MIT_CSAIL/controller/etc/keystone/policy.json b/MIT_CSAIL/controller/etc/keystone/policy.json new file mode 100644 index 0000000..af65205 --- /dev/null +++ b/MIT_CSAIL/controller/etc/keystone/policy.json @@ -0,0 +1,171 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": 
"rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + + "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_owner", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_required", + "identity:validate_token": "rule:service_or_admin", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:get_trust": "rule:admin_or_owner", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:check_role_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": 
"rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + "identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + "identity:list_endpoints_for_policy": "rule:admin_required" +} diff --git a/MIT_CSAIL/controller/etc/neutron/api-paste.ini b/MIT_CSAIL/controller/etc/neutron/api-paste.ini new file mode 100644 index 
0000000..810ac51 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/api-paste.ini @@ -0,0 +1,32 @@ +[composite:neutron] +use = egg:Paste#urlmap +/: neutronversions +/v2.0: neutronapi_v2_0 + +[composite:neutronapi_v2_0] +use = call:neutron.auth:pipeline_factory +noauth = extensions neutronapiapp_v2_0 +keystone = authtoken keystonecontext extensions neutronapiapp_v2_0 + +[filter:keystonecontext] +paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +admin_user=neutron +auth_port=35357 +admin_password= +auth_protocol=http +auth_version = v2.0 +auth_uri=http://:5000/ +admin_tenant_name=services +auth_host= + +[filter:extensions] +paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory + +[app:neutronversions] +paste.app_factory = neutron.api.versions:Versions.factory + +[app:neutronapiapp_v2_0] +paste.app_factory = neutron.api.v2.router:APIRouter.factory diff --git a/MIT_CSAIL/controller/etc/neutron/dhcp_agent.ini b/MIT_CSAIL/controller/etc/neutron/dhcp_agent.ini new file mode 100644 index 0000000..136faee --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/dhcp_agent.ini @@ -0,0 +1,80 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +debug = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is the number of +# seconds between attempts. +# resync_interval = 5 +resync_interval = 30 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and +# an iproute2 package that supports namespaces). +# use_namespaces = True +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet gateway_ip is None. The guest instance must +# be configured to request host routes via DHCP (Option 121). +# enable_isolated_metadata = False +enable_isolated_metadata = True + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# requests. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True +# enable_metadata_network = False +enable_metadata_network = True +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +dnsmasq_config_file = /etc/neutron/dnsmasq.conf + +# Use another DNS server before any in /etc/resolv.conf. +# dnsmasq_dns_server = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location of the DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy +root_helper=sudo neutron-rootwrap /etc/neutron/rootwrap.conf +state_path=/var/lib/neutron diff --git a/MIT_CSAIL/controller/etc/neutron/dnsmasq.conf b/MIT_CSAIL/controller/etc/neutron/dnsmasq.conf new file mode 100644 index 0000000..fc16f33 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/dnsmasq.conf @@ -0,0 +1,3 @@ +domain=csail.mit.edu +dhcp-boot=pxelinux.0,.csail.mit.edu, +dhcp-option=26,9000 diff --git a/MIT_CSAIL/controller/etc/neutron/fwaas_driver.ini b/MIT_CSAIL/controller/etc/neutron/fwaas_driver.ini new file mode 100644 index 0000000..41f761a --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/fwaas_driver.ini @@ -0,0 +1,3 @@ +[fwaas] +#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver +#enabled = True diff --git a/MIT_CSAIL/controller/etc/neutron/l3_agent.ini b/MIT_CSAIL/controller/etc/neutron/l3_agent.ini new file mode 100644 index 0000000..55153f7 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/l3_agent.ini @@ -0,0 +1,75 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +debug = False + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that support the L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and +# an iproute2 package that supports namespaces). +# use_namespaces = True +use_namespaces = True + +# If use_namespaces is set to False, then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# Each L3 agent can be associated with at most one external network. This +# value should be set to the UUID of that external network. If empty, +# the agent will enforce that only a single external network exists and +# use that external network id +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured.
This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +# handle_internal_only_routers = True +handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# an empty value for the linux bridge +# external_network_bridge = br-ex +external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 +metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it to 0 or below +# to disable this feature. +# send_arp_for_ha = 3 +send_arp_for_ha = 3 + +# seconds between re-syncing routers' data, if needed +# periodic_interval = 40 +periodic_interval = 40 + +# seconds to wait after starting the agent before the first sync of +# routers' data +# periodic_fuzzy_delay = 5 +periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True +enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy diff --git a/MIT_CSAIL/controller/etc/neutron/lbaas_agent.ini b/MIT_CSAIL/controller/etc/neutron/lbaas_agent.ini new file mode 100644 index 0000000..570b865 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/lbaas_agent.ini @@ -0,0 +1,31 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output). +# debug = False +debug = True +# The LBaaS agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is the number of +# seconds between attempts. +periodic_interval = 10 + +# LBaaS requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent requires a driver to manage the load balancer. HAProxy is the +# open-source version. +device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver + +# The user group +# user_group = nogroup diff --git a/MIT_CSAIL/controller/etc/neutron/metadata_agent.ini b/MIT_CSAIL/controller/etc/neutron/metadata_agent.ini new file mode 100644 index 0000000..93d9eca --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/metadata_agent.ini @@ -0,0 +1,44 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +#debug = True +debug = False +metadata_workers = 12 +# The Neutron user information for accessing the Neutron API. +auth_url = http://:35357/v2.0 +auth_region = RegionOne +admin_tenant_name = services +admin_user = neutron +admin_password = + +# cache requests in memory for 5sec; this was a juno back port & +# leaving it disabled (default for icehouse) kills performance (and +# possibly the service under load) +# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for +# +# The problem: +# https://bugs.launchpad.net/cloud-archive/+bug/1361357 +# +# the cause https://review.openstack.org/#/c/95491/ +cache_url = memory://?default_ttl=20 + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +# nova_metadata_ip = 127.0.0.1 +nova_metadata_ip = 127.0.0.1 + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 +nova_metadata_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +# metadata_proxy_shared_secret = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy +metadata_proxy_shared_secret= diff --git a/MIT_CSAIL/controller/etc/neutron/neutron.conf b/MIT_CSAIL/controller/etc/neutron/neutron.conf new file mode 100644 index 0000000..53af277 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/neutron.conf @@ -0,0 +1,412 @@ +[DEFAULT] +# Default log level is INFO +# verbose and debug have the same result. +# One of them will set DEBUG log level output +debug = False +#debug = True +verbose = False +#verbose = True + +notify_nova_on_port_status_changes = True +notify_nova_on_port_data_changes = True +nova_url = http://.csail.mit.edu:8774/v2 +nova_admin_username = nova +# note: ID, not name; this is (services) +nova_admin_tenant_id = +nova_admin_password = +nova_admin_auth_url = http://.csail.mit.edu:35357/v2.0 + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not use_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server +# bind_host = 0.0.0.0 +bind_host = 0.0.0.0 + +# Port to bind the API server to +# bind_port = 9696 +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# Neutron plugin provider module +core_plugin = ml2 + +# Advanced service modules +# service_plugins = +service_plugins = router,lbaas + +# Paste configuration file +# api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone' (default), 'noauth'. +# auth_strategy = keystone +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated.
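+# For instance, with the 4-octet form shown below (fa:16:3e:4f:00:00), +# generated addresses would keep the first four octets and randomize only the +# last two.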
+# 3 octet +# base_mac = fa:16:3e:00:00:00 +base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 +mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 +# dhcp_lease_duration = 1200 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +# allow_overlapping_ips = False +allow_overlapping_ips = False +# Ensure that configured gateway is on subnet +force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = neutron.openstack.common.rpc.impl_kombu +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron +control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled)' +# kombu_ssl_ca_certs = +# IP address of the RabbitMQ installation +# rabbit_host = localhost +rabbit_host = +# Password of the RabbitMQ server +# rabbit_password = guest +rabbit_password = +# Port where RabbitMQ server is running/listening +# rabbit_port = 5672 +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +#rabbit_hosts = :5672 +# User ID used for RabbitMQ connections +# rabbit_userid = guest +rabbit_userid = +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all).You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false +rabbit_ha_queues = False + +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are create, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. DHCP agents needs it. +# notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +# default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +# notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down. +# agent_down_time = 5 +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to the first DHCP agent which sends a get_active_networks message to +# the neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to the first L3 agent which sends a sync_routers message to the neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 +dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 16 +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== +log_dir=/var/log/neutron + +[quotas] +# resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# default number of resources allowed per tenant; a negative value means unlimited +# default_quota = -1 + +# number of networks allowed per tenant; a negative value means unlimited +# quota_network = 10 + +# number of subnets allowed per tenant; a negative value means unlimited +# quota_subnet = 10 + +# number of ports allowed per tenant; a negative value means unlimited +# quota_port = 50 +quota_port=-1 + +# number of security groups allowed per tenant; a negative value means unlimited +# quota_security_group = 10 + +# number of security group rules allowed per tenant; a negative value means unlimited +# quota_security_group_rule = 100 + +# default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server, should be less than +# agent_down_time +# report_interval = 4 +report_interval = 30 + + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_host = +auth_port = 35357 +auth_protocol = http +auth_version = v2.0 +admin_tenant_name = services +admin_user = neutron +admin_password = +signing_dir = $state_path/keystone-signing +auth_uri=http://:5000/v2.0 + +[database] +# This line MUST be changed to actually run the plugin.
+# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +connection = mysql://neutron:@/neutron + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times - in the event connectivity is lost; +# setting it to -1 implies an infinite retry count +# max_retries = 10 +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +# retry_interval = 10 +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle sql connections are reaped +# idle_timeout = 3600 +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in the form: +# service_provider=<service_type>:<name>:<driver>[:default] +# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN +# The combination of <service_type> and <name> must be unique; <driver> must also be unique +# This is a multiline option; example for the default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of a non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default + diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini new file mode 100644 index 0000000..0c480f6 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini @@ -0,0 +1,88 @@ +[ovs] +network_vlan_ranges=trunk:2112:2114 +local_ip= +enable_tunneling=True +integration_bridge=br-int +tunnel_id_ranges=1:1000 +tunnel_bridge=br-tun +tenant_network_type=gre +bridge_mappings=trunk:eth1-br + +[agent] +tunnel_types=gre +l2_population=false +polling_interval=30 +veth_mtu=9134 + +[securitygroup] +enable_security_group=true +firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + + +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan +type_drivers=vlan,gre + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan +tenant_network_types=gre + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace.
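As an aside, the "entrypoints" these comments refer to are setuptools entry points that ML2 resolves through the stevedore library. A minimal sketch of that lookup, illustrative only and assuming stevedore (and neutron, so the entry points actually exist) is installed:

    from stevedore import named

    # Resolve configured driver names against entry points registered
    # under the neutron.ml2.mechanism_drivers namespace.
    manager = named.NamedExtensionManager(
        namespace='neutron.ml2.mechanism_drivers',
        names=['openvswitch'],
        invoke_on_load=False,
    )
    print([ext.name for ext in manager])  # ['openvswitch'] when neutron is installed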
+# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade +mechanism_drivers = openvswitch + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. +# +# flat_networks = +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 +network_vlan_ranges=trunk:2112:2114 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +# tunnel_id_ranges = +tunnel_id_ranges=1:1000 + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +#[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist new file mode 100644 index 0000000..4fb1a4a --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf.ini.dpkg-dist @@ -0,0 +1,71 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade + +# (ListOpt) Ordered list of extension driver entrypoints +# to be loaded from the neutron.ml2.extension_drivers namespace. +# extension_drivers = +# Example: extension_drivers = anewextensiondriver + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names.
+# +# flat_networks = +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of [::] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +# tunnel_id_ranges = + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True + +# Use ipset to speed-up the iptables security groups. Enabling ipset support +# requires that ipset is installed on L2 agent node. +# enable_ipset = True diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_arista.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_arista.ini new file mode 100644 index 0000000..abaf5bc --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_arista.ini @@ -0,0 +1,100 @@ +# Defines configuration options specific for Arista ML2 Mechanism driver + +[ml2_arista] +# (StrOpt) EOS IP address. This is required field. If not set, all +# communications to Arista EOS will fail +# +# eapi_host = +# Example: eapi_host = 192.168.0.1 +# +# (StrOpt) EOS command API username. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_username = +# Example: arista_eapi_username = admin +# +# (StrOpt) EOS command API password. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# eapi_password = +# Example: eapi_password = my_password +# +# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs +# ("node1.domain.com") or as short names ("node1"). This is +# optional. If not set, a value of "True" is assumed. +# +# use_fqdn = +# Example: use_fqdn = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed. +# +# sync_interval = +# Example: sync_interval = 60 +# +# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. +# This is useful when multiple OpenStack/Neutron controllers are +# managing the same Arista HW clusters. Note that this name must +# match with the region name registered (or known) to keystone +# service. Authentication with Keysotne is performed by EOS. +# This is optional. If not set, a value of "RegionOne" is assumed. +# +# region_name = +# Example: region_name = RegionOne + + +[l3_arista] + +# (StrOpt) primary host IP address. This is required field. If not set, all +# communications to Arista EOS will fail. This is the host where +# primary router is created. +# +# primary_l3_host = +# Example: primary_l3_host = 192.168.10.10 +# +# (StrOpt) Primary host username. 
This is required field. +# if not set, all communications to Arista EOS will fail. +# +# primary_l3_host_username = +# Example: arista_primary_l3_username = admin +# +# (StrOpt) Primary host password. This is required field. +# if not set, all communications to Arista EOS will fail. +# +# primary_l3_host_password = +# Example: primary_l3_password = my_password +# +# (StrOpt) IP address of the second Arista switch paired as +# MLAG (Multi-chassis Link Aggregation) with the first. +# This is optional field, however, if mlag_config flag is set, +# then this is a required field. If not set, all +# communications to Arista EOS will fail. If mlag_config is set +# to False, then this field is ignored +# +# seconadary_l3_host = +# Example: seconadary_l3_host = 192.168.10.20 +# +# (BoolOpt) Defines if Arista switches are configured in MLAG mode +# If yes, all L3 configuration is pushed to both switches +# automatically. If this flag is set, ensure that secondary_l3_host +# is set to the second switch's IP. +# This flag is Optional. If not set, a value of "False" is assumed. +# +# mlag_config = +# Example: mlag_config = True +# +# (BoolOpt) Defines if the router is created in default VRF or a +# a specific VRF. This is optional. +# If not set, a value of "False" is assumed. +# +# Example: use_vrf = True +# +# (IntOpt) Sync interval in seconds between Neutron plugin and EOS. +# This field defines how often the synchronization is performed. +# This is an optional field. If not set, a value of 180 seconds +# is assumed. +# +# l3_sync_interval = +# Example: l3_sync_interval = 60 diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_brocade.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_brocade.ini new file mode 100644 index 0000000..6757411 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_brocade.ini @@ -0,0 +1,15 @@ +[ml2_brocade] +# username = +# password = +# address = +# ostype = NOS +# osversion = autodetect | n.n.n +# physical_networks = physnet1,physnet2 +# +# Example: +# username = admin +# password = password +# address = 10.24.84.38 +# ostype = NOS +# osversion = 4.1.1 +# physical_networks = physnet1,physnet2 diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_cisco.ini new file mode 100644 index 0000000..1b69100 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_cisco.ini @@ -0,0 +1,118 @@ +[ml2_cisco] + +# (StrOpt) A short prefix to prepend to the VLAN number when creating a +# VLAN interface. For example, if an interface is being created for +# VLAN 2001 it will be named 'q-2001' using the default prefix. +# +# vlan_name_prefix = q- +# Example: vlan_name_prefix = vnet- + +# (BoolOpt) A flag to enable round robin scheduling of routers for SVI. +# svi_round_robin = False + +# +# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch. +# This string value must be present in the ml2_conf.ini network_vlan_ranges +# variable. +# +# managed_physical_network = +# Example: managed_physical_network = physnet1 + +# Cisco Nexus Switch configurations. +# Each switch to be managed by Openstack Neutron must be configured here. +# +# Cisco Nexus Switch Format. +# [ml2_mech_cisco_nexus:] +# = (1) +# ssh_port= (2) +# username= (3) +# password= (4) +# +# (1) For each host connected to a port on the switch, specify the hostname +# and the Nexus physical port (interface) it is connected to. 
+# Valid intf_type's are 'ethernet' and 'port-channel'. +# The default setting for is 'ethernet' and need not be +# added to this setting. +# (2) The TCP port for connecting via SSH to manage the switch. This is +# port number 22 unless the switch has been configured otherwise. +# (3) The username for logging into the switch to manage it. +# (4) The password for logging into the switch to manage it. +# +# Example: +# [ml2_mech_cisco_nexus:1.1.1.1] +# compute1=1/1 +# compute2=ethernet:1/2 +# compute3=port-channel:1 +# ssh_port=22 +# username=admin +# password=mySecretPassword + +[ml2_cisco_apic] + +# Hostname:port list of APIC controllers +# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80 + +# Username for the APIC controller +# apic_username = user + +# Password for the APIC controller +# apic_password = password + +# Whether use SSl for connecting to the APIC controller or not +# apic_use_ssl = True + +# How to map names to APIC: use_uuid or use_name +# apic_name_mapping = use_name + +# Names for APIC objects used by Neutron +# Note: When deploying multiple clouds against one APIC, +# these names must be unique between the clouds. +# apic_vmm_domain = openstack +# apic_vlan_ns_name = openstack_ns +# apic_node_profile = openstack_profile +# apic_entity_profile = openstack_entity +# apic_function_profile = openstack_function +# apic_app_profile_name = openstack_app +# Agent timers for State reporting and topology discovery +# apic_sync_interval = 30 +# apic_agent_report_interval = 30 +# apic_agent_poll_interval = 2 + +# Specify your network topology. +# This section indicates how your compute nodes are connected to the fabric's +# switches and ports. The format is as follows: +# +# [apic_switch:] +# , = +# +# You can have multiple sections, one for each switch in your fabric that is +# participating in Openstack. e.g. +# +# [apic_switch:17] +# ubuntu,ubuntu1 = 1/10 +# ubuntu2,ubuntu3 = 1/11 +# +# [apic_switch:18] +# ubuntu5,ubuntu6 = 1/1 +# ubuntu7,ubuntu8 = 1/2 + +# Describe external connectivity. +# In this section you can specify the external network configuration in order +# for the plugin to be able to teach the fabric how to route the internal +# traffic to the outside world. The external connectivity configuration +# format is as follows: +# +# [apic_external_network:] +# switch = +# port = +# encap = +# cidr_exposed = +# gateway_ip = +# +# An example follows: +# [apic_external_network:network_ext] +# switch=203 +# port=1/34 +# encap=vlan-100 +# cidr_exposed=10.10.40.2/16 +# gateway_ip=10.10.40.1 diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini new file mode 100644 index 0000000..6ee4a4e --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini @@ -0,0 +1,52 @@ +# Defines Configuration options for FSL SDN OS Mechanism Driver +# Cloud Resource Discovery (CRD) authorization credentials +[ml2_fslsdn] +#(StrOpt) User name for authentication to CRD. +# e.g.: user12 +# +# crd_user_name = + +#(StrOpt) Password for authentication to CRD. +# e.g.: secret +# +# crd_password = + +#(StrOpt) Tenant name for CRD service. +# e.g.: service +# +# crd_tenant_name = + +#(StrOpt) CRD auth URL. +# e.g.: http://127.0.0.1:5000/v2.0/ +# +# crd_auth_url = + +#(StrOpt) URL for connecting to CRD Service. 
+# e.g.: http://127.0.0.1:9797 +# +# crd_url= + +#(IntOpt) Timeout value for connecting to CRD service +# in seconds, e.g.: 30 +# +# crd_url_timeout= + +#(StrOpt) Region name for connecting to CRD in +# admin context, e.g.: RegionOne +# +# crd_region_name= + +#(BoolOpt)If set, ignore any SSL validation issues (boolean value) +# e.g.: False +# +# crd_api_insecure= + +#(StrOpt)Authorization strategy for connecting to CRD in admin +# context, e.g.: keystone +# +# crd_auth_strategy= + +#(StrOpt)Location of CA certificates file to use for CRD client +# requests. +# +# crd_ca_certificates_file= diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini new file mode 100644 index 0000000..46139ae --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_mlnx.ini @@ -0,0 +1,4 @@ +[eswitch] +# (StrOpt) Type of Network Interface to allocate for VM: +# mlnx_direct or hostdev according to libvirt terminology +# vnic_type = mlnx_direct diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ncs.ini new file mode 100644 index 0000000..dbbfcbd --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ncs.ini @@ -0,0 +1,28 @@ +# Defines configuration options specific to the Tail-f NCS Mechanism Driver + +[ml2_ncs] +# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack +# subtree. +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://ncs/api/running/services/openstack + +# (StrOpt) Username for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to NCS. +# This is an optional parameter. If unspecified then no authentication is used. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = +# Example: timeout = 15 + diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_odl.ini new file mode 100644 index 0000000..9e88c1b --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_odl.ini @@ -0,0 +1,30 @@ +# Configuration for the OpenDaylight MechanismDriver + +[ml2_odl] +# (StrOpt) OpenDaylight REST URL +# If this is not set then no HTTP requests will be made. +# +# url = +# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron + +# (StrOpt) Username for HTTP basic authentication to ODL. +# +# username = +# Example: username = admin + +# (StrOpt) Password for HTTP basic authentication to ODL. +# +# password = +# Example: password = admin + +# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. +# This is an optional parameter, default value is 10 seconds. +# +# timeout = 10 +# Example: timeout = 15 + +# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. +# This is an optional parameter, default value is 30 minutes. 
+# +# session_timeout = 30 +# Example: session_timeout = 60 diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ofa.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ofa.ini new file mode 100644 index 0000000..4a94b98 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_ofa.ini @@ -0,0 +1,13 @@ +# Defines configuration options specific to the OpenFlow Agent Mechanism Driver + +[ovs] +# Please refer to configuration options to the OpenvSwitch + +[agent] +# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath. +# This is an optional parameter, default value is 60 seconds. +# +# get_datapath_retry_times = +# Example: get_datapath_retry_times = 30 + +# Please refer to configuration options to the OpenvSwitch else the above. diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_sriov.ini b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_sriov.ini new file mode 100644 index 0000000..9566f54 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/ml2/ml2_conf_sriov.ini @@ -0,0 +1,31 @@ +# Defines configuration options for SRIOV NIC Switch MechanismDriver +# and Agent + +[ml2_sriov] +# (ListOpt) Comma-separated list of +# supported Vendor PCI Devices, in format vendor_id:product_id +# +# supported_pci_vendor_devs = 15b3:1004, 8086:10c9 +# Example: supported_pci_vendor_devs = 15b3:1004 +# +# (BoolOpt) Requires running SRIOV neutron agent for port binding +# agent_required = True + +[sriov_nic] +# (ListOpt) Comma-separated list of : +# tuples mapping physical network names to the agent's node-specific +# physical network device interfaces of SR-IOV physical function to be used +# for VLAN networks. All physical networks listed in network_vlan_ranges on +# the server should have mappings to appropriate interfaces on each agent. +# +# physical_device_mappings = +# Example: physical_device_mappings = physnet1:eth1 +# +# (ListOpt) Comma-separated list of : +# tuples, mapping network_device to the agent's node-specific list of virtual +# functions that should not be used for virtual networking. +# vfs_to_exclude is a semicolon-separated list of virtual +# functions to exclude from network_device. The network_device in the +# mapping should appear in the physical_device_mappings list. +# exclude_devices = +# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3 diff --git a/MIT_CSAIL/controller/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/MIT_CSAIL/controller/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini new file mode 100644 index 0000000..fd6c736 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini @@ -0,0 +1,169 @@ +[ovs] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or change this to +# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for +# tenant networks to provide connectivity between hosts. Set to 'none' +# to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = gre +# Example: tenant_network_type = vxlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. 
Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre, vxlan and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +# (BoolOpt) Set to True in the server and the agents to enable support +# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and +# GRE or VXLAN tunneling. +# +# WARNING: This option will be deprecated in the Icehouse release, at which +# point setting tunnel_type below will be required to enable +# tunneling. +# +# enable_tunneling = False + +# (StrOpt) The type of tunnel network, if any, supported by the plugin. If +# this is set, it will cause tunneling to be enabled. If this is not set and +# the option enable_tunneling is set, this will default to 'gre'. +# +# tunnel_type = +# Example: tunnel_type = gre +# Example: tunnel_type = vxlan + +# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples +# enumerating ranges of GRE or VXLAN tunnel IDs that are available for +# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'. +# +# tunnel_id_ranges = +# Example: tunnel_id_ranges = 1:1000 + +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# +# integration_bridge = br-int + +# Only used for the agent if tunnel_id_ranges (above) is not empty for +# the server. In most cases, the default value should be fine. +# +# tunnel_bridge = br-tun + +# Peer patch port in integration bridge for tunnel bridge +# int_peer_patch_port = patch-tun + +# Peer patch port in tunnel bridge for integration bridge +# tun_peer_patch_port = patch-int + +# Uncomment this line for the agent if tunnel_id_ranges (above) is not +# empty for the server. Set local-ip to be the local IP address of +# this hypervisor. +# +# local_ip = + +# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples +# mapping physical network names to the agent's node-specific OVS +# bridge names to be used for flat and VLAN networks. The length of +# bridge names should be no more than 11 characters. Each bridge must +# exist, and should have a physical network interface configured as a +# port. All physical networks listed in network_vlan_ranges on the +# server should have mappings to appropriate bridges on each agent. +# +# bridge_mappings = +# Example: bridge_mappings = physnet1:br-eth1 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (ListOpt) The types of tenant network tunnels supported by the agent. +# Setting this will enable tunneling support in the agent. This can be set to +# either 'gre' or 'vxlan'. If this is unset, it will default to [] and +# disable tunneling support in the agent. When running the agent with the OVS +# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section. +# When running the agent with ML2, you can specify as many values here as +# your compute hosts support. +# +# tunnel_types = +# Example: tunnel_types = gre +# Example: tunnel_types = vxlan +# Example: tunnel_types = vxlan, gre + +# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By +# default, this will make use of the Open vSwitch default value of '4789' if +# not specified.
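Several options in these files (network_vlan_ranges, tunnel_id_ranges, vni_ranges, bridge_mappings) share the same colon-separated tuple syntax. A throwaway sketch (not Neutron's actual parser) of how such values decompose:

    def parse_ranges(value):
        # "1:1000"          -> [(1, 1000)]
        # "trunk:2112:2114" -> [("trunk", 2112, 2114)]
        entries = []
        for tup in value.split(","):
            parts = tup.strip().split(":")
            if len(parts) == 2:
                entries.append((int(parts[0]), int(parts[1])))
            else:
                entries.append((parts[0], int(parts[1]), int(parts[2])))
        return entries

    print(parse_ranges("1:1000"))           # tunnel_id_ranges above
    print(parse_ranges("trunk:2112:2114"))  # network_vlan_ranges above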
+# +# vxlan_udp_port = +# Example: vxlan_udp_port = 8472 + +# (IntOpt) This is the MTU size of veth interfaces. +# Do not change unless you have a good reason to. +# The default MTU size of veth interfaces is 1500. +# veth_mtu = +# Example: veth_mtu = 1504 + +# (BoolOpt) Flag to enable l2-population extension. This option should only be +# used in conjunction with ml2 plugin and l2population mechanism driver. It'll +# enable plugin to populate remote ports macs and IPs (using fdb_add/remove +# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to +# optimize tunnel management. +# +# l2_population = False + +[securitygroup] +# Firewall driver for realizing neutron security group function. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# 1. With VLANs on eth1. +# [database] +# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron +# [OVS] +# network_vlan_ranges = default:2000:3999 +# tunnel_id_ranges = +# integration_bridge = br-int +# bridge_mappings = default:br-eth1 +# [AGENT] +# Add the following setting, if you want to log to a file +# +# 2. With tunneling. +# [database] +# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron +# [OVS] +# network_vlan_ranges = +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 + +[OVS] +network_vlan_ranges=trunk:2112:2114 +local_ip= +enable_tunneling=True +integration_bridge=br-int +tunnel_id_ranges=1:1000 +tunnel_bridge=br-tun +tenant_network_type=gre +bridge_mappings=trunk:eth1-br + +[AGENT] +polling_interval=30 +veth_mtu=9134 + +[SECURITYGROUP] +firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver diff --git a/MIT_CSAIL/controller/etc/neutron/policy.json b/MIT_CSAIL/controller/etc/neutron/policy.json new file mode 100644 index 0000000..89805d4 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/policy.json @@ -0,0 +1,140 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", + "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", + "admin_only": "rule:context_is_admin", + "regular_user": "", + "shared": "field:networks:shared=True", + "shared_firewalls": "field:firewalls:shared=True", + "external": "field:networks:router:external=True", + "default": "rule:admin_or_owner", + + "subnets:private:read": "rule:admin_or_owner", + "subnets:private:write": "rule:admin_or_owner", + "subnets:shared:read": "rule:regular_user", + "subnets:shared:write": "rule:admin_only", + + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + + "create_network": "", + "get_network": "rule:admin_or_owner or rule:shared or rule:external", + "get_network:router:external": "rule:regular_user", + "get_network:segments": "rule:admin_only", + "get_network:provider:network_type": "rule:admin_only", + "get_network:provider:physical_network": "rule:admin_only", + "get_network:provider:segmentation_id": "rule:admin_only", + "get_network:queue_id": "rule:admin_only", + "create_network:shared": "rule:admin_only", + 
"create_network:router:external": "rule:admin_only", + "create_network:segments": "rule:admin_only", + "create_network:provider:network_type": "rule:admin_only", + "create_network:provider:physical_network": "rule:admin_only", + "create_network:provider:segmentation_id": "rule:admin_only", + "update_network": "rule:admin_or_owner", + "update_network:segments": "rule:admin_only", + "update_network:shared": "rule:admin_only", + "update_network:provider:network_type": "rule:admin_only", + "update_network:provider:physical_network": "rule:admin_only", + "update_network:provider:segmentation_id": "rule:admin_only", + "delete_network": "rule:admin_or_owner", + + "create_port": "", + "create_port:mac_address": "", + "create_port:fixed_ips": "", + "create_port:port_security_enabled": "rule:admin_or_network_owner", + "create_port:binding:host_id": "rule:admin_only", + "create_port:binding:profile": "rule:admin_only", + "create_port:binding:vnic_type": "rule:admin_or_owner", + "create_port:mac_learning_enabled": "rule:admin_or_network_owner", + "get_port": "rule:admin_or_owner", + "get_port:queue_id": "rule:admin_only", + "get_port:binding:vif_type": "rule:admin_only", + "get_port:binding:vif_details": "rule:admin_only", + "get_port:binding:host_id": "rule:admin_only", + "get_port:binding:profile": "rule:admin_only", + "get_port:binding:vnic_type": "rule:admin_or_owner", + "update_port": "rule:admin_or_owner", + "update_port:fixed_ips": "rule:admin_or_network_owner", + "update_port:port_security_enabled": "rule:admin_or_network_owner", + "update_port:binding:host_id": "rule:admin_only", + "update_port:binding:profile": "rule:admin_only", + "update_port:binding:vnic_type": "rule:admin_or_owner", + "update_port:mac_learning_enabled": "rule:admin_or_network_owner", + "delete_port": "rule:admin_or_owner", + + "create_router:external_gateway_info:enable_snat": "rule:admin_only", + "update_router:external_gateway_info:enable_snat": "rule:admin_only", + + "create_firewall": "", + "get_firewall": "rule:admin_or_owner", + "create_firewall:shared": "rule:admin_only", + "get_firewall:shared": "rule:admin_only", + "update_firewall": "rule:admin_or_owner", + "delete_firewall": "rule:admin_or_owner", + + "create_firewall_policy": "", + "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_policy:shared": "rule:admin_or_owner", + "update_firewall_policy": "rule:admin_or_owner", + "delete_firewall_policy": "rule:admin_or_owner", + + "create_firewall_rule": "", + "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", + "create_firewall_rule:shared": "rule:admin_or_owner", + "get_firewall_rule:shared": "rule:admin_or_owner", + "update_firewall_rule": "rule:admin_or_owner", + "delete_firewall_rule": "rule:admin_or_owner", + + "create_qos_queue": "rule:admin_only", + "get_qos_queue": "rule:admin_only", + + "update_agent": "rule:admin_only", + "delete_agent": "rule:admin_only", + "get_agent": "rule:admin_only", + + "create_dhcp-network": "rule:admin_only", + "delete_dhcp-network": "rule:admin_only", + "get_dhcp-networks": "rule:admin_only", + "create_l3-router": "rule:admin_only", + "delete_l3-router": "rule:admin_only", + "get_l3-routers": "rule:admin_only", + "get_dhcp-agents": "rule:admin_only", + "get_l3-agents": "rule:admin_only", + "get_loadbalancer-agent": "rule:admin_only", + "get_loadbalancer-pools": "rule:admin_only", + + "create_router": "rule:regular_user", + "get_router": "rule:admin_or_owner", + "update_router:add_router_interface": "rule:admin_or_owner", + 
"update_router:remove_router_interface": "rule:admin_or_owner", + "delete_router": "rule:admin_or_owner", + + "create_floatingip": "rule:regular_user", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + + "create_network_profile": "rule:admin_only", + "update_network_profile": "rule:admin_only", + "delete_network_profile": "rule:admin_only", + "get_network_profiles": "", + "get_network_profile": "", + "update_policy_profiles": "rule:admin_only", + "get_policy_profiles": "", + "get_policy_profile": "", + + "create_metering_label": "rule:admin_only", + "delete_metering_label": "rule:admin_only", + "get_metering_label": "rule:admin_only", + + "create_metering_label_rule": "rule:admin_only", + "delete_metering_label_rule": "rule:admin_only", + "get_metering_label_rule": "rule:admin_only", + + "get_service_provider": "rule:regular_user", + "get_lsn": "rule:admin_only", + "create_lsn": "rule:admin_only" +} diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.conf b/MIT_CSAIL/controller/etc/neutron/rootwrap.conf new file mode 100644 index 0000000..dee1dd9 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.conf @@ -0,0 +1,34 @@ +# Configuration for neutron-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR + +[xenapi] +# XenAPI configuration is only required by the L2 agent if it is to +# target a XenServer/XCP compute host's dom0. 
+xenapi_connection_url= +xenapi_connection_username=root +xenapi_connection_password= diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/debug.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/debug.filters new file mode 100644 index 0000000..b61d960 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/debug.filters @@ -0,0 +1,14 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# This is needed because we should ping +# from inside a namespace which requires root +ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ +ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/dhcp.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/dhcp.filters new file mode 100644 index 0000000..7c11d70 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/dhcp.filters @@ -0,0 +1,36 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# dhcp-agent +dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID= +# dhcp-agent uses kill as well, that's handled by the generic KillFilter +# it looks like these are the only signals needed, per +# neutron/agent/linux/dhcp.py +kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP +kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +ivs-ctl: CommandFilter, ivs-ctl, root +mm-ctl: CommandFilter, mm-ctl, root +dhcp_release: CommandFilter, dhcp_release, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. +metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 +kill_metadata6: KillFilter, root, python2.6, -9 + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/ipset-firewall.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/ipset-firewall.filters new file mode 100644 index 0000000..52c6637 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/ipset-firewall.filters @@ -0,0 +1,12 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] +# neutron/agent/linux/iptables_firewall.py +# "ipset", "-A", ... 
+ipset: CommandFilter, ipset, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/iptables-firewall.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/iptables-firewall.filters new file mode 100644 index 0000000..b8a6ab5 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/iptables-firewall.filters @@ -0,0 +1,21 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# neutron/agent/linux/iptables_manager.py +# "iptables-save", ... +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# neutron/agent/linux/iptables_manager.py +# "iptables", "-A", ... +iptables: CommandFilter, iptables, root +ip6tables: CommandFilter, ip6tables, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/l3.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/l3.filters new file mode 100644 index 0000000..d9e4a13 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/l3.filters @@ -0,0 +1,49 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# arping +arping: CommandFilter, arping, root + +# l3_agent +sysctl: CommandFilter, sysctl, root +route: CommandFilter, route, root +radvd: CommandFilter, radvd, root + +# metadata proxy +metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root +# If installed from source (say, by devstack), the prefix will be +# /usr/local instead of /usr/bin. 
+metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root +# RHEL invocation of the metadata proxy will report /usr/bin/python +kill_metadata: KillFilter, root, python, -9 +kill_metadata7: KillFilter, root, python2.7, -9 +kill_metadata6: KillFilter, root, python2.6, -9 +kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP +kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root + +# ovs_lib (if OVSInterfaceDriver is used) +ovs-vsctl: CommandFilter, ovs-vsctl, root + +# iptables_manager +iptables-save: CommandFilter, iptables-save, root +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-save: CommandFilter, ip6tables-save, root +ip6tables-restore: CommandFilter, ip6tables-restore, root + +# Keepalived +keepalived: CommandFilter, keepalived, root +kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9 + +# l3 agent to delete floatingip's conntrack state +conntrack: CommandFilter, conntrack, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/lbaas-haproxy.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/lbaas-haproxy.filters new file mode 100644 index 0000000..b4e1ecb --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/lbaas-haproxy.filters @@ -0,0 +1,26 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# haproxy +haproxy: CommandFilter, haproxy, root + +# lbaas-agent uses kill as well, that's handled by the generic KillFilter +kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP + +ovs-vsctl: CommandFilter, ovs-vsctl, root +mm-ctl: CommandFilter, mm-ctl, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +route: CommandFilter, route, root + +# arping +arping: CommandFilter, arping, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/openvswitch-plugin.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/openvswitch-plugin.filters new file mode 100644 index 0000000..b63a83b --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/openvswitch-plugin.filters @@ -0,0 +1,22 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +# openvswitch-agent +# unclear whether both variants are necessary, but I'm transliterating +# from the old mechanism +ovs-vsctl: CommandFilter, ovs-vsctl, root +ovs-ofctl: CommandFilter, ovs-ofctl, root +kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 +ovsdb-client: CommandFilter, ovsdb-client, root +xe: CommandFilter, xe, root + +# ip_lib +ip: IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root diff --git a/MIT_CSAIL/controller/etc/neutron/rootwrap.d/vpnaas.filters b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/vpnaas.filters new file mode 100644 index 0000000..7848136 --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/rootwrap.d/vpnaas.filters @@ -0,0 +1,13 @@ +# neutron-rootwrap command filters for nodes on which neutron is +# expected to control network +# +# This file should be owned by (and only-writeable by) the root user + +# format seems to be +# cmd-name: filter-name, raw-command, user, args + +[Filters] + +ip: 
IpFilter, ip, root +ip_exec: IpNetnsExecFilter, ip, root +openswan: CommandFilter, ipsec, root diff --git a/MIT_CSAIL/controller/etc/neutron/vpn_agent.ini b/MIT_CSAIL/controller/etc/neutron/vpn_agent.ini new file mode 100644 index 0000000..c3089df --- /dev/null +++ b/MIT_CSAIL/controller/etc/neutron/vpn_agent.ini @@ -0,0 +1,14 @@ +[DEFAULT] +# VPN-Agent configuration file +# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also + +[vpnagent] +# vpn device drivers which vpn agent will use +# If we want to use multiple drivers, we need to define this option multiple times. +# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver +# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver +# vpn_device_driver=another_driver + +[ipsec] +# Status check interval +# ipsec_status_check_interval=60 diff --git a/MIT_CSAIL/controller/etc/nova/api-paste.ini b/MIT_CSAIL/controller/etc/nova/api-paste.ini new file mode 100644 index 0000000..01e8ab0 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/api-paste.ini @@ -0,0 +1,107 @@ +############ +# Metadata # +############ +[composite:metadata] +use = egg:Paste#urlmap +/: meta + +[pipeline:meta] +pipeline = ec2faultwrap logrequest metaapp + +[app:metaapp] +paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory + +####### +# EC2 # +####### + +[composite:ec2] +use = egg:Paste#urlmap +/services/Cloud: ec2cloud + +[composite:ec2cloud] +use = call:nova.api.auth:pipeline_factory +noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor +keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor + +[filter:ec2faultwrap] +paste.filter_factory = nova.api.ec2:FaultWrapper.factory + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory + +[filter:ec2keystoneauth] +paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory + +[filter:ec2noauth] +paste.filter_factory = nova.api.ec2:NoAuth.factory + +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:authorizer] +paste.filter_factory = nova.api.ec2:Authorizer.factory + +[filter:validator] +paste.filter_factory = nova.api.ec2:Validator.factory + +[app:ec2executor] +paste.app_factory = nova.api.ec2:Executor.factory + +############# +# Openstack # +############# + +[composite:osapi_compute] +use = call:nova.api.openstack.urlmap:urlmap_factory +/: oscomputeversions +/v1.1: openstack_compute_api_v2 +/v2: openstack_compute_api_v2 + +[composite:openstack_compute_api_v2] +use = call:nova.api.auth:pipeline_factory +noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 +keystone = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 +keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory + +#[filter:ratelimit] +#paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory + +[app:osapi_compute_app_v2] +paste.app_factory = nova.api.openstack.compute:APIRouter.factory + +[pipeline:oscomputeversions] +pipeline = faultwrap oscomputeversionapp + 
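The composite and pipeline stanzas above are consumed by PasteDeploy: each name before the final app is a filter factory, and each filter wraps the application to its right, so a request passes through the filters left to right. A minimal sketch of that composition rule (illustrative, not PasteDeploy itself):

    def compose_pipeline(app, filters):
        # "pipeline = f1 f2 app" means f1(f2(app)).
        for make_filter in reversed(filters):
            app = make_filter(app)
        return app

    # Toy stand-ins for the paste.filter_factory callables above.
    def tag(name):
        return lambda app: (lambda req: name + " -> " + app(req))

    handler = lambda req: "osapi_compute_app_v2(" + req + ")"
    app = compose_pipeline(handler, [tag("faultwrap"), tag("sizelimit")])
    print(app("GET /v2"))  # faultwrap -> sizelimit -> osapi_compute_app_v2(GET /v2)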
+[app:oscomputeversionapp] +paste.app_factory = nova.api.openstack.compute.versions:Versions.factory + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +#icehouse removed#signing_dirname = /tmp/keystone-signing-nova +# signing_dir is configurable, but the default behavior of the authtoken +# middleware should be sufficient. It will create a temporary directory +# in the home directory for the user the nova process is running as. +#signing_dir = /var/lib/nova/keystone-signing +# Workaround for https://bugs.launchpad.net/nova/+bug/1154809 +auth_version = v2.0 diff --git a/MIT_CSAIL/controller/etc/nova/logging.conf b/MIT_CSAIL/controller/etc/nova/logging.conf new file mode 100644 index 0000000..5482a04 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/logging.conf @@ -0,0 +1,81 @@ +[loggers] +keys = root, nova + +[handlers] +keys = stderr, stdout, watchedfile, syslog, null + +[formatters] +keys = context, default + +[logger_root] +level = WARNING +handlers = null + +[logger_nova] +level = INFO +handlers = stderr +qualname = nova + +[logger_amqp] +level = WARNING +handlers = stderr +qualname = amqp + +[logger_amqplib] +level = WARNING +handlers = stderr +qualname = amqplib + +[logger_sqlalchemy] +level = WARNING +handlers = stderr +qualname = sqlalchemy +# "level = INFO" logs SQL queries. +# "level = DEBUG" logs SQL queries and results. +# "level = WARNING" logs neither. (Recommended for production systems.) + +[logger_boto] +level = WARNING +handlers = stderr +qualname = boto + +[logger_suds] +level = INFO +handlers = stderr +qualname = suds + +[logger_eventletwsgi] +level = WARNING +handlers = stderr +qualname = eventlet.wsgi.server + +[handler_stderr] +class = StreamHandler +args = (sys.stderr,) +formatter = context + +[handler_stdout] +class = StreamHandler +args = (sys.stdout,) +formatter = context + +[handler_watchedfile] +class = handlers.WatchedFileHandler +args = ('nova.log',) +formatter = context + +[handler_syslog] +class = handlers.SysLogHandler +args = ('/dev/log', handlers.SysLogHandler.LOG_USER) +formatter = context + +[handler_null] +class = nova.openstack.common.log.NullHandler +formatter = default +args = () + +[formatter_context] +class = nova.openstack.common.log.ContextFormatter + +[formatter_default] +format = %(message)s diff --git a/MIT_CSAIL/controller/etc/nova/nova.conf b/MIT_CSAIL/controller/etc/nova/nova.conf new file mode 100644 index 0000000..2c925d1 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/nova.conf @@ -0,0 +1,103 @@ +# HEADER: This file was autogenerated at Thu Oct 04 14:53:15 -0400 2012 +# HEADER: by puppet. While it can still be managed manually, it +# HEADER: is definitely not recommended.
+[DEFAULT] +debug=False +verbose=True +force_config_drive=false +network_api_class=nova.network.neutronv2.api.API +state_path=/var/lib/nova +public_interface=eth0 +image_service=nova.image.glance.GlanceImageService +flat_injected=false +use_deprecated_auth=false +service_down_time=60 +rabbit_port=5672 +logdir=/var/log/nova +novncproxy_port=6080 +rabbit_virtual_host=/ +sql_connection=mysql://nova:@/nova +dhcp_domain=novalocal +rabbit_password= +lock_path=/var/lock/nova +rabbit_userid= +rabbit_host= +rootwrap_config=/etc/nova/rootwrap.conf +novncproxy_host=0.0.0.0 +force_dhcp_release=true +auth_strategy=keystone +ec2_private_dns_show_ip=true +enabled_apis=ec2,osapi_compute,metadata +memcached_servers=127.0.0.1 +volume_api_class=nova.volume.cinder.API +quota_fixed_ips=36864 +rabbit_hosts=:5672 +sql_idle_timeout=3600 +rabbit_ha_queues=False +security_group_api=neutron +firewall_driver=nova.virt.firewall.NoopFirewallDriver +osapi_compute_listen=0.0.0.0 +ec2_listen=0.0.0.0 +use_forwarded_for=False +osapi_volume_listen=0.0.0.0 +metadata_listen=0.0.0.0 +rpc_backend=rabbit +use_syslog=False +api_paste_config=/etc/nova/api-paste.ini + +scheduler_available_filters=nova.scheduler.filters.all_filters +scheduler_default_filters=ComputeFilter,AggregateInstanceExtraSpecsFilter,AggregateCoreFilter,RamFilter,DiskFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,RetryFilter +scheduler_max_attempts=5 +#least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn +#compute_fill_first_cost_fn_weight=-1.0 +scheduler_weight_classes=nova.scheduler.weights.all_weighers +scheduler_host_subset_size=1 +ram_weight_multiplier=1 +# note these are overridden in host aggregates: +cpu_allocation_ratio=4.0 +ram_allocation_ratio=1.5 + +[metrics] +# overall weight multiplier equivalent to the 1GB-of-memory weigher +#weight_multiplier=1048576.0 +weight_multiplier=1 +# basically turn it off.... +#weight_multiplier=0 +weight_setting=cpu.idle.percent=1, cpu.iowait.percent=-10.0 +required=false +weight_of_unavailable=-100.0 + +[neutron] +extension_sync_interval=600 +default_tenant_id=default +admin_auth_url=https://.csail.mit.edu:5001/v2.0 +region_name=RegionOne +# url_timeout=30 +url_timeout=120 +admin_password= +auth_strategy=keystone +ovs_bridge=br-int +admin_tenant_name=services +url=http://.csail.mit.edu:9696 +metadata_proxy_shared_secret= +service_metadata_proxy=True +admin_username=neutron + +[glance] +api_servers=.csail.mit.edu:9292 + +[database] +idle_timeout=3600 +connection=mysql://nova:@/nova + +[keystone_authtoken] +auth_port=35357 +admin_password= +admin_user=nova +auth_protocol=http +auth_host= +admin_tenant_name=services +auth_uri=http://:5000/ + +[conductor] +#use_local=true diff --git a/MIT_CSAIL/controller/etc/nova/nova.conf.pre-quantum b/MIT_CSAIL/controller/etc/nova/nova.conf.pre-quantum new file mode 100644 index 0000000..93c31c8 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/nova.conf.pre-quantum @@ -0,0 +1,50 @@ +# HEADER: This file was autogenerated at Thu Oct 04 14:53:15 -0400 2012 +# HEADER: by puppet. While it can still be managed manually, it +# HEADER: is definitely not recommended.
+[DEFAULT] +debug=false +multi_host=True +state_path=/var/lib/nova +public_interface=eth0 +dhcpbridge=/usr/bin/nova-dhcpbridge +fixed_range=10.0.0.0/16 +image_service=nova.image.glance.GlanceImageService +dhcpbridge_flagfile=/etc/nova/nova.conf +flat_injected=false +use_deprecated_auth=false +glance_api_servers=:9292 +service_down_time=60 +rabbit_port=5672 +verbose=false +logdir=/var/log/nova +novncproxy_port=6080 +rabbit_virtual_host=/ +sql_connection=mysql://nova:@/nova +dhcp_domain=novalocal +rabbit_password= +lock_path=/var/lock/nova +rabbit_userid= +rabbit_host= +rootwrap_config=/etc/nova/rootwrap.conf +floating_range=/19 +novncproxy_host=0.0.0.0 +force_dhcp_release=true +flat_interface=eth1 +auth_strategy=keystone +flat_network_bridge=br100 +network_manager=nova.network.manager.FlatDHCPManager +api_paste_config=/etc/nova/api-paste.ini +scheduler_available_filters=nova.scheduler.filters.all_filters +scheduler_default_filters=ComputeFilter,AggregateInstanceExtraSpecsFilter,AggregateCoreFilter,RamFilter,DiskFilter,RetryFilter +scheduler_max_attempts=30 +#least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn +#compute_fill_first_cost_fn_weight=-1.0 +scheduler_weight_classes=nova.scheduler.weights.all_weighers +ram_weight_multiplier=1.0 +osapi_compute_workers=2 +cpu_allocation_ratio=4.0 +ram_allocation_ratio=1.5 +ec2_private_dns_show_ip=true +enabled_apis=ec2,osapi_compute,metadata +volume_api_class=nova.volume.cinder.API +quota_fixed_ips=36864 diff --git a/MIT_CSAIL/controller/etc/nova/policy.json b/MIT_CSAIL/controller/etc/nova/policy.json new file mode 100644 index 0000000..cdb8014 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/policy.json @@ -0,0 +1,348 @@ +{ + "context_is_admin": "role:admin", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "compute:create": "", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + "compute:create:forced_host": "is_admin:True", + "compute:get_all": "", + "compute:get_all_tenants": "", + "compute:start": "rule:admin_or_owner", + "compute:stop": "rule:admin_or_owner", + "compute:unlock_override": "rule:admin_api", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "admin_api": "is_admin:True", + "compute:v3:servers:start": "rule:admin_or_owner", + "compute:v3:servers:stop": "rule:admin_or_owner", + "compute_extension:v3:os-access-ips:discoverable": "", + "compute_extension:v3:os-access-ips": "", + "compute_extension:accounts": "rule:admin_api", + "compute_extension:admin_actions": "rule:admin_api", + "compute_extension:admin_actions:pause": "rule:admin_or_owner", + "compute_extension:admin_actions:unpause": "rule:admin_or_owner", + "compute_extension:admin_actions:suspend": "rule:admin_or_owner", + "compute_extension:admin_actions:resume": "rule:admin_or_owner", + "compute_extension:admin_actions:lock": "rule:admin_or_owner", + "compute_extension:admin_actions:unlock": "rule:admin_or_owner", + "compute_extension:admin_actions:resetNetwork": "rule:admin_api", + "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", + "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", + "compute_extension:admin_actions:migrateLive": "rule:admin_api", + "compute_extension:admin_actions:resetState": "rule:admin_api", + 
"compute_extension:admin_actions:migrate": "rule:admin_api", + "compute_extension:v3:os-admin-actions": "rule:admin_api", + "compute_extension:v3:os-admin-actions:discoverable": "", + "compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api", + "compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api", + "compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api", + "compute_extension:v3:os-admin-password": "", + "compute_extension:v3:os-admin-password:discoverable": "", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:v3:os-aggregates:discoverable": "", + "compute_extension:v3:os-aggregates:index": "rule:admin_api", + "compute_extension:v3:os-aggregates:create": "rule:admin_api", + "compute_extension:v3:os-aggregates:show": "rule:admin_api", + "compute_extension:v3:os-aggregates:update": "rule:admin_api", + "compute_extension:v3:os-aggregates:delete": "rule:admin_api", + "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", + "compute_extension:agents": "rule:admin_api", + "compute_extension:v3:os-agents": "rule:admin_api", + "compute_extension:v3:os-agents:discoverable": "", + "compute_extension:attach_interfaces": "", + "compute_extension:v3:os-attach-interfaces": "", + "compute_extension:v3:os-attach-interfaces:discoverable": "", + "compute_extension:baremetal_nodes": "rule:admin_api", + "compute_extension:v3:os-block-device-mapping-v1:discoverable": "", + "compute_extension:cells": "rule:admin_api", + "compute_extension:cells:create": "rule:admin_api", + "compute_extension:cells:delete": "rule:admin_api", + "compute_extension:cells:update": "rule:admin_api", + "compute_extension:cells:sync_instances": "rule:admin_api", + "compute_extension:v3:os-cells": "rule:admin_api", + "compute_extension:v3:os-cells:create": "rule:admin_api", + "compute_extension:v3:os-cells:delete": "rule:admin_api", + "compute_extension:v3:os-cells:update": "rule:admin_api", + "compute_extension:v3:os-cells:sync_instances": "rule:admin_api", + "compute_extension:v3:os-cells:discoverable": "", + "compute_extension:certificates": "", + "compute_extension:v3:os-certificates:create": "", + "compute_extension:v3:os-certificates:show": "", + "compute_extension:v3:os-certificates:discoverable": "", + "compute_extension:cloudpipe": "rule:admin_api", + "compute_extension:cloudpipe_update": "rule:admin_api", + "compute_extension:console_output": "", + "compute_extension:v3:consoles:discoverable": "", + "compute_extension:v3:os-console-output:discoverable": "", + "compute_extension:v3:os-console-output": "", + "compute_extension:consoles": "", + "compute_extension:v3:os-remote-consoles": "", + "compute_extension:v3:os-remote-consoles:discoverable": "", + "compute_extension:createserverext": "", + "compute_extension:v3:os-create-backup:discoverable": "", + "compute_extension:v3:os-create-backup": "rule:admin_or_owner", + "compute_extension:deferred_delete": "", + "compute_extension:v3:os-deferred-delete": "", + "compute_extension:v3:os-deferred-delete:discoverable": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate": "rule:admin_api", + "compute_extension:v3:os-evacuate:discoverable": "", + "compute_extension:extended_server_attributes": "rule:admin_api", + "compute_extension:v3:os-extended-server-attributes": "rule:admin_api", + 
"compute_extension:v3:os-extended-server-attributes:discoverable": "", + "compute_extension:extended_status": "", + "compute_extension:v3:os-extended-status": "", + "compute_extension:v3:os-extended-status:discoverable": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:v3:os-extended-availability-zone": "", + "compute_extension:v3:os-extended-availability-zone:discoverable": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:v3:extension_info:discoverable": "", + "compute_extension:extended_volumes": "", + "compute_extension:v3:os-extended-volumes": "", + "compute_extension:v3:os-extended-volumes:swap": "", + "compute_extension:v3:os-extended-volumes:discoverable": "", + "compute_extension:v3:os-extended-volumes:attach": "", + "compute_extension:v3:os-extended-volumes:detach": "", + "compute_extension:fixed_ips": "rule:admin_api", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:v3:os-flavor-access": "", + "compute_extension:v3:os-flavor-access:discoverable": "", + "compute_extension:v3:os-flavor-access:remove_tenant_access": "rule:admin_api", + "compute_extension:v3:os-flavor-access:add_tenant_access": "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:v3:os-flavor-rxtx": "", + "compute_extension:v3:os-flavor-rxtx:discoverable": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + "compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "rule:admin_api", + "compute_extension:flavorextraspecs:update": "rule:admin_api", + "compute_extension:flavorextraspecs:delete": "rule:admin_api", + "compute_extension:v3:flavors:discoverable": "", + "compute_extension:v3:flavor-extra-specs:discoverable": "", + "compute_extension:v3:flavor-extra-specs:index": "", + "compute_extension:v3:flavor-extra-specs:show": "", + "compute_extension:v3:flavor-extra-specs:create": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:update": "rule:admin_api", + "compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api", + "compute_extension:flavormanage": "rule:admin_api", + "compute_extension:v3:flavor-manage:discoverable": "", + "compute_extension:v3:flavor-manage": "rule:admin_api", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + "compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "rule:admin_api", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "rule:admin_api", + "compute_extension:hide_server_addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses": "is_admin:False", + "compute_extension:v3:os-hide-server-addresses:discoverable": "", + "compute_extension:hosts": "rule:admin_api", + "compute_extension:v3:os-hosts": "rule:admin_api", + "compute_extension:v3:os-hosts:discoverable": "", + "compute_extension:hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors": "rule:admin_api", + "compute_extension:v3:os-hypervisors:discoverable": "", + "compute_extension:image_size": "", + "compute_extension:v3:images:discoverable": "", + "compute_extension:v3:image-size": "", + 
"compute_extension:v3:image-size:discoverable": "", + "compute_extension:instance_actions": "", + "compute_extension:v3:os-instance-actions": "", + "compute_extension:v3:os-instance-actions:discoverable": "", + "compute_extension:instance_actions:events": "rule:admin_api", + "compute_extension:v3:os-instance-actions:events": "rule:admin_api", + "compute_extension:instance_usage_audit_log": "rule:admin_api", + "compute_extension:v3:ips:discoverable": "", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + "compute_extension:v3:os-keypairs:discoverable": "", + "compute_extension:v3:os-keypairs": "", + "compute_extension:v3:os-keypairs:index": "", + "compute_extension:v3:os-keypairs:show": "", + "compute_extension:v3:os-keypairs:create": "", + "compute_extension:v3:os-keypairs:delete": "", + "compute_extension:v3:limits:discoverable": "", + "compute_extension:v3:os-lock-server:discoverable": "", + "compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner", + "compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner", + "compute_extension:v3:os-migrate-server:discoverable": "", + "compute_extension:v3:os-migrate-server:migrate": "rule:admin_api", + "compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api", + "compute_extension:multinic": "", + "compute_extension:v3:os-multinic": "", + "compute_extension:v3:os-multinic:discoverable": "", + "compute_extension:networks": "rule:admin_api", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "rule:admin_api", + "compute_extension:v3:os-pause-server:discoverable": "", + "compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner", + "compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner", + "compute_extension:v3:os-pci:pci_servers": "", + "compute_extension:v3:os-pci:discoverable": "", + "compute_extension:v3:os-pci:index": "rule:admin_api", + "compute_extension:v3:os-pci:detail": "rule:admin_api", + "compute_extension:v3:os-pci:show": "rule:admin_api", + "compute_extension:quotas:show": "", + "compute_extension:quotas:update": "rule:admin_api", + "compute_extension:quotas:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:discoverable": "", + "compute_extension:v3:os-quota-sets:show": "", + "compute_extension:v3:os-quota-sets:update": "rule:admin_api", + "compute_extension:v3:os-quota-sets:delete": "rule:admin_api", + "compute_extension:v3:os-quota-sets:detail": "rule:admin_api", + "compute_extension:quota_classes": "", + "compute_extension:rescue": "", + "compute_extension:v3:os-rescue": "", + "compute_extension:v3:os-rescue:discoverable": "", + "compute_extension:v3:os-scheduler-hints:discoverable": "", + "compute_extension:security_group_default_rules": "rule:admin_api", + "compute_extension:security_groups": "", + "compute_extension:v3:os-security-groups": "", + "compute_extension:v3:os-security-groups:discoverable": "", + "compute_extension:server_diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics": "rule:admin_api", + "compute_extension:v3:os-server-diagnostics:discoverable": "", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:v3:os-server-password": "", + "compute_extension:v3:os-server-password:discoverable": "", + "compute_extension:server_usage": "", + "compute_extension:v3:os-server-usage": "", + 
"compute_extension:v3:os-server-usage:discoverable": "", + "compute_extension:v3:os-server-groups": "", + "compute_extension:v3:os-server-groups:discoverable": "", + "compute_extension:services": "rule:admin_api", + "compute_extension:v3:os-services": "rule:admin_api", + "compute_extension:v3:os-services:discoverable": "", + "compute_extension:v3:server-metadata:discoverable": "", + "compute_extension:v3:servers:discoverable": "", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "rule:admin_api", + "compute_extension:v3:os-shelve:shelve": "", + "compute_extension:v3:os-shelve:shelve:discoverable": "", + "compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api", + "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", + "compute_extension::v3:os-simple-tenant-usage:discoverable": "", + "compute_extension::v3:os-simple-tenant-usage:show": "rule:admin_or_owner", + "compute_extension::v3:os-simple-tenant-usage:list": "rule:admin_api", + "compute_extension:v3:os-suspend-server:discoverable": "", + "compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner", + "compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner", + "compute_extension:simple_tenant_usage:list": "rule:admin_api", + "compute_extension:unshelve": "", + "compute_extension:v3:os-shelve:unshelve": "", + "compute_extension:users": "rule:admin_api", + "compute_extension:v3:os-user-data:discoverable": "", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + "compute_extension:volume_attachments:delete": "", + "compute_extension:v3:os-volumes": "", + "compute_extension:v3:os-volumes:discoverable": "", + "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:v3:os-availability-zone:list": "", + "compute_extension:v3:os-availability-zone:discoverable": "", + "compute_extension:availability_zone:detail": "rule:admin_api", + "compute_extension:v3:os-availability-zone:detail": "rule:admin_api", + "compute_extension:used_limits_for_admin": "rule:admin_api", + "compute_extension:v3:os-used-limits": "rule:admin_api", + "compute_extension:v3:os-used-limits:discoverable": "", + "compute_extension:migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:index": "rule:admin_api", + "compute_extension:v3:os-migrations:discoverable": "", + "compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api", + "compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api", + "compute_extension:console_auth_tokens": "rule:admin_api", + "compute_extension:v3:os-console-auth-tokens": "rule:admin_api", + "compute_extension:os-server-external-events:create": "rule:admin_api", + "compute_extension:v3:os-server-external-events:create": "rule:admin_api", + + "volume:create": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + + + "volume_extension:types_manage": "rule:admin_api", + "volume_extension:types_extra_specs": "rule:admin_api", + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": 
"rule:admin_api", + + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + "network:get_backdoor_port": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": "", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:deallocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "", + "network:attach_external_network": "rule:admin_api" +} diff --git a/MIT_CSAIL/controller/etc/nova/rootwrap.conf b/MIT_CSAIL/controller/etc/nova/rootwrap.conf new file mode 100644 index 0000000..aa466c5 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for nova-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, local0, local1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR diff --git a/MIT_CSAIL/controller/etc/nova/rootwrap.d/api-metadata.filters b/MIT_CSAIL/controller/etc/nova/rootwrap.d/api-metadata.filters new file mode 100644 index 0000000..1aa6f83 --- /dev/null +++ b/MIT_CSAIL/controller/etc/nova/rootwrap.d/api-metadata.filters @@ -0,0 +1,13 @@ +# nova-rootwrap command filters for api-metadata nodes +# This is needed on nova-api hosts running with "metadata" in enabled_apis +# or when running nova-api-metadata +# This file should be owned by (and only-writeable by) the root user + +[Filters] +# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... 
+iptables-save: CommandFilter, iptables-save, root +ip6tables-save: CommandFilter, ip6tables-save, root + +# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) +iptables-restore: CommandFilter, iptables-restore, root +ip6tables-restore: CommandFilter, ip6tables-restore, root diff --git a/MIT_CSAIL/controller/etc/openstack-dashboard/local_settings.py b/MIT_CSAIL/controller/etc/openstack-dashboard/local_settings.py new file mode 100644 index 0000000..c589d9d --- /dev/null +++ b/MIT_CSAIL/controller/etc/openstack-dashboard/local_settings.py @@ -0,0 +1,550 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = False +TEMPLATE_DEBUG = DEBUG + +# Required for Django 1.5. +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', ] + +ALLOWED_HOSTS = [] + + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# Default session timeout is 30min; this sets it to 1hr +SESSION_TIMEOUT = 3600 + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, the identity service APIs have inconsistent +# use of the decimal point, so valid options would be "2.0" or "3". +OPENSTACK_API_VERSIONS = { + "identity": 3, + "volume": 2 +} + +# Set this to True if running on multi-domain model. When this is enabled, it +# will require the user to enter the Domain name in addition to username for login. +# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. +# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set Console type: +# valid options would be "AUTO", "VNC" or "SPICE" +# CONSOLE_TYPE = "AUTO" + +# Default OpenStack Dashboard configuration. +HORIZON_CONFIG = { + 'customization_module': 'csail.overrides', + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for the login form if so desired. +# HORIZON_CONFIG["password_autocomplete"] = "off" + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +# from horizon.utils import secret_key +# SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) +SECRET_KEY = + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHES to something like +# CACHES = { +# 'default': { +# 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', +# 'LOCATION' : '127.0.0.1:11211', +# } +#} + +CACHES = { + 'default': { + +# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache' + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211', + + } +} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = ".csail.mit.edu" +OPENSTACK_KEYSTONE_URL = "https://%s:5001/v3" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True +} + +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': True, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currenly available are load +# balancer service, security groups, quotas, VPN service. 
+OPENSTACK_NEUTRON_NETWORK = { + 'enable_lb': True, + 'enable_firewall': False, + 'enable_quotas': True, + 'enable_security_group': True, + 'enable_vpn': False, + # The profile_support option is used to detect if an external router can be + # configured via the dashboard. When using specific plugins the + # profile_support can be turned on if needed. + 'profile_support': None, + #'profile_support': 'cisco', +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. +# OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', ''), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('iso', _('ISO - Optical Disk Image')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI')), +# ('vhd', _('VHD')), +# ('vmdk', _('VMDK')) +# ] +# } + +# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type") +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 1000 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# If you have external monitoring links, eg: + + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. Provide a callback method here +# (and/or a flag for reverse sort) for the sorted() method if you'd +# like a different behaviour. For more info, see +# http://docs.python.org/2/library/functions.html#sorted +# CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': my_awesome_callback_method, +# 'reverse': False, +# } + + +# custom sort bits are pretty much undocumented; attrs you can get are +# basically what 'nova flavor-show' will list -Jon + +# sort by ram/vcpu then by vcpus; multiply public types by 10 so private instance types are at top +CREATE_INSTANCE_FLAVOR_SORT = { + 'key': lambda flavor: getattr(flavor,'ram')/getattr(flavor,'vcpus')+getattr(flavor,'vcpus') if not getattr(flavor,'os-flavor-access:is_public') else 10*getattr(flavor,'ram')/getattr(flavor,'vcpus')+getattr(flavor,'vcpus'), + 'reverse': False, + } + + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. 
The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") + +# Map of local copy of service policy files + +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +#} + + +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +# TROVE_ADD_USER_PERMS = [] +# TROVE_ADD_DATABASE_PERMS = [] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'formatters': { + 'verbose': { + 'format': '%(asctime)s %(process)d %(levelname)s %(name)s ' + '%(message)s' + }, + }, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + 'file': { + 'level': 'DEBUG', + 'class': 'logging.FileHandler', + 'filename': '/var/log/horizon/horizon.log', + 'formatter': 'verbose', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. + 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_dashboard': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'ceilometerclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'troveclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, 
+ }, + 'nose.plugins.manager': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + # 'handlers': ['console'], + 'handlers': ['file'], + # 'level': 'DEBUG', + 'level': 'DEBUG', + 'propagate': False, + }, + } +} + +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': 'ALL TCP', + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': 'ALL UDP', + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': 'ALL ICMP', + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +FLAVOR_EXTRA_KEYS = { + 'flavor_keys': [ + ('quota:read_bytes_sec', _('Quota: Read bytes')), + ('quota:write_bytes_sec', _('Quota: Write bytes')), + ('quota:cpu_quota', _('Quota: CPU')), + ('quota:cpu_period', _('Quota: CPU period')), + ('quota:inbound_average', _('Quota: Inbound average')), + ('quota:outbound_average', _('Quota: Outbound average')), + ] +} + +LOGIN_URL = '/horizon/auth/login/' +LOGOUT_URL = '/horizon/auth/logout/' +LOGIN_REDIRECT_URL = '/horizon' + +# The Ubuntu package includes pre-compressed JS and compiled CSS to allow +# offline compression by default. To enable online compression, install +# the node-less package and enable the following option.
+COMPRESS_OFFLINE = True diff --git a/MIT_CSAIL/controller/usr/local/lib/python2.7/dist-packages/horizon_csail-0.1-py2.7.egg/csail/overrides.py b/MIT_CSAIL/controller/usr/local/lib/python2.7/dist-packages/horizon_csail-0.1-py2.7.egg/csail/overrides.py new file mode 100644 index 0000000..9788200 --- /dev/null +++ b/MIT_CSAIL/controller/usr/local/lib/python2.7/dist-packages/horizon_csail-0.1-py2.7.egg/csail/overrides.py @@ -0,0 +1,131 @@ +import logging + +from django.utils.text import normalize_newlines # noqa +from django.utils.translation import ugettext_lazy as _ +from django.views.decorators.debug import sensitive_variables # noqa + +from horizon import forms +from horizon import exceptions + +from openstack_dashboard import api +from openstack_dashboard.dashboards.project.instances import views +from openstack_dashboard.dashboards.project.instances.workflows import \ create_instance + + +LOG = logging.getLogger(__name__) + + +class CSAIL_SetInstanceDetailsAction(create_instance.SetInstanceDetailsAction): + class Meta: + name = _("Details") + help_text_template = ("project/instances/" + "_launch_details_help.html") + + def __init__(self, request, context, *args, **kwargs): + super(CSAIL_SetInstanceDetailsAction, self).__init__( + request, context, *args, **kwargs) + fixed_ip = forms.CharField( + widget=forms.TextInput, label=_("eth0 Fixed IP"), max_length=15, + required=False, help_text=_("Fixed IP you registered in WebDNS")) + self.fields.insert(4, 'fixed_ip', fixed_ip) + + +class CSAIL_SetInstanceDetails(create_instance.SetInstanceDetails): + action_class = CSAIL_SetInstanceDetailsAction + contributes = ("source_type", "source_id", + "availability_zone", "name", "count", "flavor", + "device_name", # Can be None for an image. + "delete_on_terminate", "fixed_ip") + + +class CSAIL_LaunchInstance(create_instance.LaunchInstance): + def __init__(self, *args, **kwargs): + steps = list(self.default_steps) + replace_index = None + for i, step in enumerate(steps): + if step.__name__ == 'SetInstanceDetails': + replace_index = i + break + if replace_index is not None: + steps[replace_index] = CSAIL_SetInstanceDetails + self.default_steps = tuple(steps) + return super(CSAIL_LaunchInstance, self).__init__(*args, **kwargs) + + @sensitive_variables('context') + def handle(self, request, context): + custom_script = context.get('customization_script', '') + dev_mapping_1 = None + dev_mapping_2 = None + image_id = '' + # Determine volume mapping options + source_type = context.get('source_type', None) + if source_type in ['image_id', 'instance_snapshot_id']: + image_id = context['source_id'] + elif source_type in ['volume_id', 'volume_snapshot_id']: + dev_mapping_1 = { + context['device_name']: '%s::%s' % + (context['source_id'], + int(bool(context['delete_on_terminate'])))} + elif source_type == 'volume_image_id': + dev_mapping_2 = [ + {'device_name': str(context['device_name']), + 'source_type': 'image', + 'destination_type': 'volume', + 'delete_on_termination': + int(bool(context['delete_on_terminate'])), + 'uuid': context['source_id'], + 'boot_index': '0', + 'volume_size': context['volume_size'] + } + ] + netids = context.get('network_id', None) + fixed_ip = context.get('fixed_ip', None) + if netids: + nics = [{"net-id": netid, "v4-fixed-ip": fixed_ip} + for netid in netids] + else: + nics = None + + avail_zone = context.get('availability_zone', None) + + # Create port with Network Name and Port Profile + # for use with the plugin supporting port profiles.
+ # neutron port-create --n1kv:profile + # for net_id in context['network_id']: + ## HACK for now use first network + if api.neutron.is_port_profiles_supported(): + net_id = context['network_id'][0] + LOG.debug("Horizon->Create Port with %(netid)s %(profile_id)s", + {'netid': net_id, 'profile_id': context['profile_id']}) + try: + port = api.neutron.port_create(request, net_id, + policy_profile_id= + context['profile_id']) + except Exception: + msg = (_('Port not created for profile-id (%s).') % + context['profile_id']) + exceptions.handle(request, msg) + if port and port.id: + nics = [{"port-id": port.id}] + + try: + api.nova.server_create(request, + context['name'], + image_id, + context['flavor'], + context['keypair_id'], + normalize_newlines(custom_script), + context['security_group_ids'], + block_device_mapping=dev_mapping_1, + block_device_mapping_v2=dev_mapping_2, + nics=nics, + availability_zone=avail_zone, + instance_count=int(context['count']), + admin_pass=context['admin_pass']) + return True + except Exception: + exceptions.handle(request) + return False + +views.LaunchInstanceView.workflow_class = CSAIL_LaunchInstance
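
For reference: the CREATE_INSTANCE_FLAVOR_SORT key in local_settings.py above is, as its inline comment notes, essentially undocumented. Below is a minimal stand-alone sketch of the same sort key, using a made-up FakeFlavor stand-in for a novaclient flavor object, so the resulting order can be checked without a running Horizon; flavor names and sizes are illustrative only, not values from this deployment.

# Sketch only -- not part of the config files above. FakeFlavor and the
# sample flavors are made up; flavor_sort_key mirrors the lambda in
# CREATE_INSTANCE_FLAVOR_SORT.

class FakeFlavor(object):
    """Minimal stand-in for a novaclient flavor object."""
    def __init__(self, name, ram, vcpus, is_public):
        self.name = name
        self.ram = ram        # MB
        self.vcpus = vcpus
        # real flavor objects expose this as the 'os-flavor-access:is_public' attr
        setattr(self, 'os-flavor-access:is_public', is_public)

def flavor_sort_key(flavor):
    # public flavors get their ram/vcpus term scaled by 10, so private
    # (project-specific) flavors sort to the top of the launch menu
    ram = getattr(flavor, 'ram')
    vcpus = getattr(flavor, 'vcpus')
    if getattr(flavor, 'os-flavor-access:is_public'):
        return 10 * ram / vcpus + vcpus
    return ram / vcpus + vcpus

flavors = [
    FakeFlavor('public.medium', ram=4096, vcpus=2, is_public=True),
    FakeFlavor('private.large', ram=8192, vcpus=4, is_public=False),
    FakeFlavor('public.tiny', ram=512, vcpus=1, is_public=True),
]
for f in sorted(flavors, key=flavor_sort_key):
    print(f.name, flavor_sort_key(f))
# order: private.large, public.tiny, public.medium
# (keys 2052, 5121, 20482 under the deployment's Python 2 integer division)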