Flake8 errors fix

Fix for flake8 errors (mostly in comments) added.

Change-Id: I868172b521809eb2aafe8e80102ab4e96d0f6f0e
This commit is contained in:
Artem Roma 2014-03-27 14:54:07 +02:00
parent b54c48a50e
commit 94386ff7fe
22 changed files with 47 additions and 61 deletions

View File

@ -68,7 +68,7 @@ class Facts:
return None
def _init_parser(self):
#Custom YAML constructs for ruby objects for puppet files parsing
# Custom YAML constructs for ruby objects for puppet files parsing
def _construct_ruby_object(loader, suffix, node):
return loader.construct_yaml_map(node)

View File

@ -77,10 +77,10 @@ class TestsAdapter(logging.LoggerAdapter):
if self.regexp.search(binary_name) and 'self' in frame[0].f_locals:
return frame[0].f_locals.get('self').id()
elif frame[3] == '_run_cleanups':
#NOTE(myamazaki): method calling addCleanup
# NOTE(myamazaki): method calling addCleanup
return frame[0].f_locals.get('self').case.id()
elif frame[3] in ['setUpClass', 'tearDownClass']:
#NOTE(myamazaki): setUpClass or tearDownClass
# NOTE(myamazaki): setUpClass or tearDownClass
return "%s.%s.%s" % (frame[0].f_locals['cls'].__module__,
frame[0].f_locals['cls'].__name__,
frame[3])

View File

@ -439,7 +439,7 @@ class FileConfig(object):
class ConfigGroup(object):
# USE SLOTS
# USE SLOTS
def __init__(self, opts):
self.parse_opts(opts)

View File

@ -281,7 +281,7 @@ class HeatSmokeTests(heatmanager.HeatBaseTest):
else:
template = self._customize_template(template)
#create stack
# create stack
fail_msg = "Stack was not created properly."
stack = self.verify(20, self._create_stack, 6,
fail_msg, "stack creation",

View File

@ -73,7 +73,7 @@ class VolumesTest(nmanager.SmokeChecksTest):
msg_s1 = 'Volume was not created.'
#Create volume
# Create volume
volume = self.verify(120, self._create_volume, 1,
msg_s1,
"volume creation",

View File

@ -103,7 +103,7 @@ class TestImageAction(nmanager.SmokeChecksTest):
image=image_id,
flavor=flavor_id)
self.set_resource(name, server)
#self.addCleanup(self.compute_client.servers.delete, server)
# self.addCleanup(self.compute_client.servers.delete, server)
self.verify_response_body_content(
name, server.name,
msg="Please refer to OpenStack logs for more details.")

View File

@ -97,7 +97,7 @@ class TestUserTenantRole(nmanager.SmokeChecksTest):
'Step 8 failed: {msg}'.format(msg=msg_s7))
try:
#Auth in horizon with non-admin user
# Auth in horizon with non-admin user
client = requests.session()
if self.config.compute.deployment_os == 'Ubuntu':
url = self.config.identity.ubuntu_url

View File

@ -57,16 +57,16 @@ def main():
return nailgun_hooks.after_initialization_environment_hook()
with engine.contexted_session(pecan.conf.dbpath) as session:
#performing cleaning of expired data (if any) in db
# performing cleaning of expired data (if any) in db
mixins.clean_db(session)
#discover testsets and their tests
# discover testsets and their tests
CORE_PATH = pecan.conf.debug_tests if \
pecan.conf.get('debug_tests') else 'fuel_health'
nose_discovery.discovery(path=CORE_PATH, session=session)
#cache needed data from test repository
# cache needed data from test repository
mixins.cache_test_repository(session)
host, port = pecan.conf.server.host, pecan.conf.server.port

View File

@ -81,8 +81,8 @@ def discovery_check(session, cluster):
)
)
#flush data to db, cuz _add_cluster_testing_pattern
#is dependent on it
# flush data to db, because _add_cluster_testing_pattern
# depends on it
session.flush()
_add_cluster_testing_pattern(session, cluster_data)
@ -120,17 +120,17 @@ def _get_cluster_depl_tags(cluster_id):
release_data = REQ_SES.get(release_url).json()
#info about deployment type and operating system
# info about deployment type and operating system
mode = 'ha' if 'ha' in response['mode'].lower() else response['mode']
deployment_tags.add(mode)
deployment_tags.add(release_data.get(
'operating_system', 'failed to get os'))
#networks manager
# networks manager
network_type = response.get('net_provider', 'nova_network')
deployment_tags.add(network_type)
#info about murano/savanna clients installation
# info about murano/savanna clients installation
request_url += '/' + 'attributes'
response = REQ_SES.get(request_url).json()

View File

@ -99,7 +99,7 @@ class NoseDriver(object):
return False
def _clean_up(self, session, test_run_id, cluster_id, cleanup):
#need for performing proper cleaning up for current cluster
# needed to perform proper cleanup for the current cluster
cluster_deployment_info = \
session.query(models.ClusterState.deployment_tags)\
.filter_by(id=cluster_id)\

View File

@ -57,7 +57,7 @@ class DiscoveryPlugin(plugins.Plugin):
self.session.merge(test_set)
self.test_sets[test_set.id] = test_set
#flush test_sets data into db
# flush test_sets data into db
self.session.commit()
except Exception as e:
LOG.error(
@ -89,7 +89,7 @@ class DiscoveryPlugin(plugins.Plugin):
test_obj = models.Test(**data)
self.session.merge(test_obj)
#flush tests data into db
# flush tests data into db
self.session.commit()
except Exception as e:
LOG.error(

View File

@ -77,9 +77,9 @@ def get_description(test_obj):
deployment_tags_pattern
)
#if deployment tags is empty or absent
#_process_docstring returns None so we
#must check this and prevent
# if deployment tags is empty or absent
# _process_docstring returns None so we
# must check this and prevent
if deployment_tags:
deployment_tags = [
tag.strip().lower() for tag in deployment_tags.split(',')

View File

@ -30,5 +30,5 @@ def do_apply_migrations():
'fuel_plugin.ostf_adapter.storage:migrations')
alembic_conf.set_main_option('sqlalchemy.url', conf.dbpath)
#apply initial migration
# apply initial migration
command.upgrade(alembic_conf, 'head')

View File

@ -15,12 +15,8 @@ import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('test_runs', sa.Column('pid', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('test_runs', 'pid')
### end Alembic commands ###

View File

@ -18,7 +18,6 @@ from fuel_plugin.ostf_adapter.storage import fields
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(
'cluster_state',
sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
@ -93,14 +92,11 @@ def upgrade():
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('tests')
op.drop_table('test_runs')
op.drop_table('cluster_testing_pattern')
op.drop_table('test_sets')
op.drop_table('cluster_state')
### end Alembic commands ###

View File

@ -16,16 +16,12 @@ from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('test_sets', sa.Column('exclusive_testsets',
postgresql.ARRAY(
sa.String(length=128)
),
nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('test_sets', 'exclusive_testsets')
### end Alembic commands ###

View File

@ -88,8 +88,8 @@ class TestSet(BASE):
deployment_tags = sa.Column(ARRAY(sa.String(64)))
test_runs_ordering_priority = sa.Column(sa.Integer)
#list of test sets that cannot be executed simultaneously
#with current test set
# list of test sets that cannot be executed simultaneously
# with current test set
exclusive_testsets = sa.Column(ARRAY(sa.String(128)))
tests = relationship(
@ -366,7 +366,7 @@ class TestRun(BASE):
plugin.run(test_run, test_set, dbpath)
#flush test_run data to db
# flush test_run data to db
session.flush()
return test_run.frontend

View File

@ -37,7 +37,7 @@ class CustomTransactionalHook(hooks.TransactionHook):
self.session.rollback()
def clear():
#not all GET controllers doesn't write to db
# not all GET controllers avoid writing to db
self.session.commit()
self.session.remove()

View File

@ -11,5 +11,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -115,7 +115,7 @@ class AdapterTests(BaseAdapterTest):
testsets = ["general_test", "stopped_test"]
cluster_id = 1
#make sure we have data about test_sets in db
# make sure we have data about test_sets in db
self.adapter.testsets(cluster_id)
for testset in testsets:
self.client.start_testrun(testset, cluster_id)
@ -159,8 +159,8 @@ class AdapterTests(BaseAdapterTest):
testset = "stopped_test"
cluster_id = 1
#make sure we have all needed data in db
#for this test case
# make sure we have all needed data in db
# for this test case
self.adapter.testsets(cluster_id)
self.client.start_testrun(testset, cluster_id)
@ -242,7 +242,7 @@ class AdapterTests(BaseAdapterTest):
]
cluster_id = 1
#make sure that we have all needed data in db
# make sure that we have all needed data in db
self.adapter.testsets(cluster_id)
resp = self.client.start_testrun_tests(testset, tests, cluster_id)
@ -325,7 +325,7 @@ class AdapterTests(BaseAdapterTest):
]
cluster_id = 1
#make sure we have all needed data in db
# make sure we have all needed data in db
self.adapter.testsets(cluster_id)
self.client.run_testset_with_timeout(testset, cluster_id, 10)
@ -414,7 +414,7 @@ class AdapterTests(BaseAdapterTest):
]
cluster_id = 1
#make sure we have all needed data in db
# make sure we have all needed data in db
self.adapter.testsets(cluster_id)
self.client.run_with_timeout(testset, tests, cluster_id, 70)
@ -497,7 +497,7 @@ class AdapterTests(BaseAdapterTest):
]
cluster_id = 1
#make sure that we have all needen data in db
# make sure that we have all needed data in db
self.adapter.testsets(cluster_id)
self.client.start_testrun(testset, cluster_id)
@ -512,7 +512,7 @@ class AdapterTests(BaseAdapterTest):
testset = 'test_with_error'
cluster_id = 4
#make sure we have all needed data in db
# make sure we have all needed data in db
self.adapter.testsets(cluster_id)
self.client.start_testrun(testset, cluster_id)

View File

@ -61,7 +61,7 @@ class BaseWSGITest(unittest2.TestCase):
}
def setUp(self):
#orm session wrapping
# orm session wrapping
self.connection = self.engine.connect()
self.trans = self.connection.begin()
@ -72,14 +72,14 @@ class BaseWSGITest(unittest2.TestCase):
test_sets = self.session.query(models.TestSet).all()
#need this if start unit tests in conjuction with integration
# need this if unit tests are run in conjunction with integration tests
if not test_sets:
discovery(path=TEST_PATH, session=self.session)
mixins.cache_test_repository(self.session)
#mocking
#request mocking
# mocking
# request mocking
self.request_mock = MagicMock()
self.request_patcher = patch(
@ -88,7 +88,7 @@ class BaseWSGITest(unittest2.TestCase):
)
self.request_patcher.start()
#pecan conf mocking
# pecan conf mocking
self.pecan_conf_mock = MagicMock()
self.pecan_conf_mock.nailgun.host = '127.0.0.1'
self.pecan_conf_mock.nailgun.port = 8888
@ -99,17 +99,17 @@ class BaseWSGITest(unittest2.TestCase):
)
self.pecan_conf_patcher.start()
#engine.get_session mocking
# engine.get_session mocking
self.request_mock.session = self.session
def tearDown(self):
#rollback changes to database
#made by tests
# rollback changes to database
# made by tests
self.trans.rollback()
self.session.close()
self.connection.close()
#end of test_case patching
# end of test_case patching
self.request_patcher.stop()
self.pecan_conf_patcher.stop()

View File

@ -168,7 +168,7 @@ class TestTestRunsPutController(TestTestRunsController):
.filter_by(test_run_id=int(self.test_run['id']))\
.update({'status': 'running'})
#flush data which test is depend on into db
# flush data which test is depend on into db
self.session.commit()
self.request_mock.body = json.dumps(
@ -260,8 +260,8 @@ class TestClusterRedeployment(base.BaseWSGITest):
]]
}
#patch request_to_nailgun function in orded to emulate
#redeployment of cluster
# patch request_to_nailgun function in order to emulate
# redeployment of cluster
cluster_data = set(
['multinode', 'ubuntu', 'nova_network']
)