Renaming files with savanna words in their names

* Removed etc/savanna directory:
  - all files moved to etc/sahara
  - updated etc/sahara/compute.topology and etc/sahara/swift.topology in configs
* Renamed savannautils.py in the HDP plugin
* Renamed savanna_api.py, savanna_db.py, and savanna_subprocess.py
  and updated setup.cfg accordingly
* Modified the alembic README since the savanna-db-manage entry point changed

Co-Authored-By: Trevor McKay <tmckay@redhat.com>

Change-Id: I67e7abdc9bc68adad401a475095d07a6cef76542
Partial-Implements: blueprint savanna-renaming-service
Alexander Ignatov 2014-03-19 19:56:27 +04:00
parent b28c7703a3
commit 1e7ed6e29d
18 changed files with 56 additions and 63 deletions

.gitignore

@@ -32,9 +32,6 @@ doc/build
 nosetests.xml
 pylint-report.txt
 etc/local.cfg
-etc/savanna/*.conf
-etc/savanna/*.topology
-etc/savanna.conf
 etc/sahara/*.conf
 etc/sahara/*.topology
 etc/sahara.conf


@@ -282,13 +282,13 @@
 # File format: compute1 /rack1
 # compute2 /rack2 compute3 /rack2 (string
 # value)
-#compute_topology_file=etc/savanna/compute.topology
+#compute_topology_file=etc/sahara/compute.topology
 # File with Swift topology. It should contain
 # mapping between Swift nodes and racks. File
 # format: node1 /rack1 node2
 # /rack2 node3 /rack2 (string value)
-#swift_topology_file=etc/savanna/swift.topology
+#swift_topology_file=etc/sahara/swift.topology
 #
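
For reference, the format described in the option comments above means a compute.topology file would contain one "node /rack" pair per line, e.g. (hypothetical hosts):

```
compute1 /rack1
compute2 /rack2
compute3 /rack2
```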


@@ -66,7 +66,7 @@
 # compute2 /rack2
 # compute3 /rack2
 # (string value)
-#compute_topology_file=etc/savanna/compute.topology
+#compute_topology_file=etc/sahara/compute.topology
 # File with Swift topology. It should contains mapping
 # between Swift nodes and racks. File format:
@@ -74,7 +74,7 @@
 # node2 /rack2
 # node3 /rack2
 # (string value)
-#swift_topology_file=etc/savanna/swift.topology
+#swift_topology_file=etc/sahara/swift.topology


@@ -1,7 +1,7 @@
 [DEFAULT]
 #
-# Options defined in savanna.config
+# Options defined in sahara.config
 #
 # Hostname or IP address that will be used to listen on.
@@ -41,7 +41,7 @@
 #
-# Options defined in savanna.main
+# Options defined in sahara.main
 #
 # Protocol used to access OpenStack Identity service. (string
@@ -76,15 +76,15 @@
 #
-# Options defined in savanna.db.base
+# Options defined in sahara.db.base
 #
 # Driver to use for database access. (string value)
-#db_driver=savanna.db
+#db_driver=sahara.db
 #
-# Options defined in savanna.openstack.common.db.sqlalchemy.session
+# Options defined in sahara.openstack.common.db.sqlalchemy.session
 #
 # The file name to use with SQLite (string value)
@@ -95,7 +95,7 @@
 #
-# Options defined in savanna.openstack.common.eventlet_backdoor
+# Options defined in sahara.openstack.common.eventlet_backdoor
 #
 # Enable eventlet backdoor. Acceptable values are 0, <port>,
@@ -110,7 +110,7 @@
 #
-# Options defined in savanna.openstack.common.lockutils
+# Options defined in sahara.openstack.common.lockutils
 #
 # Whether to disable inter-process locks (boolean value)
@@ -121,7 +121,7 @@
 #
-# Options defined in savanna.openstack.common.log
+# Options defined in sahara.openstack.common.log
 #
 # Print debugging output (set logging level to DEBUG instead
@@ -213,7 +213,7 @@
 #
-# Options defined in savanna.openstack.common.periodic_task
+# Options defined in sahara.openstack.common.periodic_task
 #
 # Some periodic tasks can be run in a separate process. Should
@@ -222,7 +222,7 @@
 #
-# Options defined in savanna.plugins.base
+# Options defined in sahara.plugins.base
 #
 # List of plugins to be loaded. Savanna preserves the order of
@@ -231,7 +231,7 @@
 #
-# Options defined in savanna.service.edp.job_manager
+# Options defined in sahara.service.edp.job_manager
 #
 # Postfix for storing jobs in hdfs. Will be added to
@@ -240,7 +240,7 @@
 #
-# Options defined in savanna.service.periodic
+# Options defined in sahara.service.periodic
 #
 # Enable periodic tasks. (boolean value)
@@ -262,7 +262,7 @@
 #
-# Options defined in savanna.topology.topology_helper
+# Options defined in sahara.topology.topology_helper
 #
 # Enables data locality for hadoop cluster.
@@ -282,17 +282,17 @@
 # File format: compute1 /rack1
 # compute2 /rack2 compute3 /rack2 (string
 # value)
-#compute_topology_file=etc/savanna/compute.topology
+#compute_topology_file=etc/sahara/compute.topology
 # File with Swift topology. It should contain
 # mapping between Swift nodes and racks. File
 # format: node1 /rack1 node2
 # /rack2 node3 /rack2 (string value)
-#swift_topology_file=etc/savanna/swift.topology
+#swift_topology_file=etc/sahara/swift.topology
 #
-# Options defined in savanna.utils.openstack.keystone
+# Options defined in sahara.utils.openstack.keystone
 #
 # Enables Savanna to use Keystone API v3. If that flag is
@@ -302,7 +302,7 @@
 #
-# Options defined in savanna.utils.remote
+# Options defined in sahara.utils.remote
 #
 # A server to which guest agent running on a VM should connect
@@ -323,7 +323,7 @@
 [conductor]
 #
-# Options defined in savanna.conductor.api
+# Options defined in sahara.conductor.api
 #
 # Perform savanna-conductor operations locally. (boolean
@@ -334,7 +334,7 @@
 [database]
 #
-# Options defined in savanna.openstack.common.db.api
+# Options defined in sahara.openstack.common.db.api
 #
 # The backend to use for db (string value)
@@ -343,7 +343,7 @@
 #
-# Options defined in savanna.openstack.common.db.sqlalchemy.session
+# Options defined in sahara.openstack.common.db.sqlalchemy.session
 #
 # The SQLAlchemy connection string used to connect to the
@@ -351,7 +351,7 @@
 # Deprecated group/name - [DEFAULT]/sql_connection
 # Deprecated group/name - [DATABASE]/sql_connection
 # Deprecated group/name - [sql]/connection
-#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
+#connection=sqlite:////sahara/openstack/common/db/$sqlite_db
 # The SQLAlchemy connection string used to connect to the
 # slave database (string value)
@@ -413,7 +413,7 @@
 [ssl]
 #
-# Options defined in savanna.openstack.common.sslutils
+# Options defined in sahara.openstack.common.sslutils
 #
 # CA certificate file to use to verify connecting clients
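
The sample above is generated from option definitions across the renamed sahara modules. A minimal sketch of loading it with oslo.config (assumed usage, not code from this commit; the modern package name is oslo_config, while this era imported oslo.config):

```
# Sketch: register one of the options listed above and load the
# renamed sample configuration.
from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opt(cfg.StrOpt('db_driver', default='sahara.db'))
CONF(['--config-file', '/etc/sahara/sahara.conf'], project='sahara')
print(CONF.db_driver)
```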


@@ -15,71 +15,71 @@ under the License.
 -->
 The migrations in the alembic/versions contain the changes needed to migrate
-from older Savanna releases to newer versions. A migration occurs by executing
+from older Sahara releases to newer versions. A migration occurs by executing
 a script that details the changes needed to upgrade/downgrade the database. The
 migration scripts are ordered so that multiple scripts can run sequentially to
-update the database. The scripts are executed by Savanna's migration wrapper
-which uses the Alembic library to manage the migration. Savanna supports
+update the database. The scripts are executed by Sahara's migration wrapper
+which uses the Alembic library to manage the migration. Sahara supports
 migration from Icehouse or later.
 If you are a deployer or developer and want to migrate from Icehouse to later
 release you must first add version tracking to the database:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf stamp icehouse
+$ sahara-db-manage --config-file /path/to/sahara.conf stamp icehouse
 ```
 You can upgrade to the latest database version via:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf upgrade head
+$ sahara-db-manage --config-file /path/to/sahara.conf upgrade head
 ```
 To check the current database version:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf current
+$ sahara-db-manage --config-file /path/to/sahara.conf current
 ```
 To create a script to run the migration offline:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf upgrade head --sql
+$ sahara-db-manage --config-file /path/to/sahara.conf upgrade head --sql
 ```
 To run the offline migration between specific migration versions:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf upgrade \
+$ sahara-db-manage --config-file /path/to/sahara.conf upgrade \
   <start version>:<end version> --sql
 ```
 Upgrade the database incrementally:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf upgrade --delta \
+$ sahara-db-manage --config-file /path/to/sahara.conf upgrade --delta \
   <# of revs>
 ```
 Downgrade the database by a certain number of revisions:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf downgrade --delta \
+$ sahara-db-manage --config-file /path/to/sahara.conf downgrade --delta \
   <# of revs>
 ```
 Create new revision:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf revision \
+$ sahara-db-manage --config-file /path/to/sahara.conf revision \
   -m "description of revision" --autogenerate
 ```
 Create a blank file:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf revision \
+$ sahara-db-manage --config-file /path/to/sahara.conf revision \
   -m "description of revision"
 ```
 To verify that the timeline does branch, you can run this command:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf check_migration
+$ sahara-db-manage --config-file /path/to/sahara.conf check_migration
 ```
 If the migration path does branch, you can find the branch point via:
 ```
-$ savanna-db-manage --config-file /path/to/savanna.conf history
+$ sahara-db-manage --config-file /path/to/sahara.conf history
 ```
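
For context, the `revision` commands above generate standard Alembic revision files; a minimal sketch of the shape (file name, revision ids, and the schema change are all hypothetical):

```
# alembic/versions/xxxx_description_of_revision.py (hypothetical)
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic
revision = 'xxxx'           # placeholder revision id
down_revision = 'icehouse'  # placeholder parent revision

def upgrade():
    # illustrative change only; real scripts are autogenerated or hand-written
    op.add_column('clusters', sa.Column('example_col', sa.String(255)))

def downgrade():
    op.drop_column('clusters', 'example_col')
```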


@@ -22,7 +22,7 @@ from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins.hdp import hadoopserver as h
-from sahara.plugins.hdp import savannautils as utils
+from sahara.plugins.hdp import saharautils as utils
 from sahara.plugins.hdp.versions import versionhandlerfactory as vhf
 from sahara.plugins import provisioning as p
 from sahara.topology import topology_helper as th


@@ -16,7 +16,7 @@
 import re
 from sahara.openstack.common import log as logging
-from sahara.plugins.hdp import savannautils
+from sahara.plugins.hdp import saharautils
 from sahara.utils import files as f
@@ -47,7 +47,7 @@ class HadoopServer:
         # all nodes must run Ambari agent
         self._setup_and_start_ambari_agent(ambari_info.host.internal_ip)
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def install_rpms(self, r):
         LOG.info(
             "{0}: Installing rpm's ...".format(self.instance.hostname()))
@@ -58,7 +58,7 @@ class HadoopServer:
         r.execute_command(rpm_cmd, run_as_root=True)
         r.execute_command('yum -y install epel-release', run_as_root=True)
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def install_swift_integration(self, r):
         LOG.info(
             "{0}: Installing swift integration ..."
@@ -67,7 +67,7 @@ class HadoopServer:
         rpm_cmd = 'rpm -Uvh ' + HADOOP_SWIFT_RPM
         r.execute_command(rpm_cmd, run_as_root=True)
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def configure_topology(self, topology_str, r):
         r.write_file_to(
             '/etc/hadoop/conf/topology.sh',
@@ -78,7 +78,7 @@ class HadoopServer:
         )
         r.write_file_to('/etc/hadoop/conf/topology.data', topology_str)
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def _setup_and_start_ambari_server(self, port, jdk_path, r):
         LOG.info('{0}: Installing ambari-server ...'.format(
             self.instance.hostname()))
@@ -104,7 +104,7 @@ class HadoopServer:
             'ambari-server start > /dev/null 2>&1', run_as_root=True
         )
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def _configure_ambari_server_api_port(self, port, r):
         # do nothing if port is not specified or is default
         if port is None or port == 8080:
@@ -119,7 +119,7 @@ class HadoopServer:
         # write the file back
         r.write_file_to(ambari_config_file, data, run_as_root=True)
-    @savannautils.inject_remote('r')
+    @saharautils.inject_remote('r')
     def _setup_and_start_ambari_agent(self, ambari_server_ip, r):
         LOG.info('{0}: Installing Ambari Agent ...'.format(
             self.instance.hostname()))
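
The `saharautils.inject_remote('r')` decorator is only renamed in this commit and its body is not shown; a plausible sketch of the mechanism, assuming instances expose a `remote()` context manager (the names and details are assumptions, not code from this diff):

```
import functools

def inject_remote(param_name):
    """Hypothetical sketch: open a remote to self.instance and pass it
    to the wrapped method under the given parameter name."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # assumption: instance.remote() yields an object exposing
            # execute_command / write_file_to, as used in the methods above
            with self.instance.remote() as r:
                kwargs[param_name] = r
                return func(self, *args, **kwargs)
        return wrapper
    return decorator
```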


@@ -15,7 +15,7 @@
 from sahara import context
 from sahara.service.edp.binary_retrievers import internal_swift as i_swift
-from sahara.service.edp.binary_retrievers import savanna_db as db
+from sahara.service.edp.binary_retrievers import sahara_db as db
 from sahara.swift import utils as su
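
The imports above imply a retriever chosen by job binary URL scheme; a hedged sketch of that dispatch (the module body is not shown in this diff, and the prefix constant is an assumption):

```
# Hypothetical dispatcher shape implied by the imports above.
def get_raw_data(context, job_binary):
    if job_binary.url.startswith('internal-db://'):
        return db.get_raw_data(context, job_binary)
    if job_binary.url.startswith(su.SWIFT_INTERNAL_PREFIX):  # assumed constant
        return i_swift.get_raw_data(context, job_binary)
    raise ValueError('Unknown job binary URL scheme: %s' % job_binary.url)
```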


@@ -46,7 +46,7 @@ opts = [
                 help="""Enables four-level topology for data locality.
 Works only if corresponding plugin supports such mode."""),
     cfg.StrOpt('compute_topology_file',
-               default='etc/savanna/compute.topology',
+               default='etc/sahara/compute.topology',
                help="""File with nova compute topology.
 It should contain mapping between nova computes and racks.
 File format:
@@ -54,7 +54,7 @@ opts = [
 compute2 /rack2
 compute3 /rack2"""),
     cfg.StrOpt('swift_topology_file',
-               default='etc/savanna/swift.topology',
+               default='etc/sahara/swift.topology',
                help="""File with Swift topology.
 It should contain mapping between Swift nodes and racks.
 File format:
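
A hypothetical helper (not part of this commit) that parses the "node /rack" format these options point at:

```
def parse_topology(path):
    """Parse lines like 'compute1 /rack1' into {'compute1': '/rack1'}."""
    mapping = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            node, rack = line.split()
            mapping[node] = rack
    return mapping

# e.g. parse_topology('etc/sahara/compute.topology')
```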


@@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
 def _get_sub_executable():
-    return '%s/_savanna-subprocess' % os.path.dirname(sys.argv[0])
+    return '%s/_sahara-subprocess' % os.path.dirname(sys.argv[0])
 def start_subprocess():
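
For illustration (paths assumed), the helper resolves the subprocess entry point that setup.cfg installs next to whichever script launched the service:

```
import os

argv0 = '/usr/local/bin/sahara-api'  # hypothetical sys.argv[0]
print('%s/_sahara-subprocess' % os.path.dirname(argv0))
# -> /usr/local/bin/_sahara-subprocess
```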


@@ -25,17 +25,13 @@ packages =
     sahara
 data_files =
-    share/savanna = etc/savanna/*
+    share/sahara = etc/sahara/*
 [entry_points]
 console_scripts =
-    savanna-api = sahara.cli.savanna_api:main
-    savanna-db-manage = sahara.db.migration.cli:main
-    _savanna-subprocess = sahara.cli.savanna_subprocess:main
-    # TODO(slukjanov): remove this code (temp to migrate to the new name)
-    sahara-api = sahara.cli.savanna_api:main
+    sahara-api = sahara.cli.sahara_api:main
     sahara-db-manage = sahara.db.migration.cli:main
+    _sahara-subprocess = sahara.cli.sahara_subprocess:main
 savanna.cluster.plugins =
     vanilla = sahara.plugins.vanilla.plugin:VanillaProvider
@@ -59,7 +55,7 @@ source-dir = doc/source
 [extract_messages]
 keywords = _ gettext ngettext l_ lazy_gettext
 mapping_file = babel.cfg
-output_file = sahara/locale/savanna.pot
+output_file = sahara/locale/sahara.pot
 [compile_catalog]
 directory = sahara/locale
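
As a sketch of why the entry point rename matters: each console_scripts line maps a command name to "module:callable", so `sahara-api` must now resolve to `sahara.cli.sahara_api:main`. A minimal illustration of that resolution using the modern importlib.metadata API (environment assumed; the 2014 toolchain used pkg_resources):

```
from importlib.metadata import entry_points

# find the renamed console script among installed entry points
api = next(ep for ep in entry_points(group='console_scripts')
           if ep.name == 'sahara-api')
main = api.load()  # imports sahara.cli.sahara_api and returns its main
main()
```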