Bump hacking

hacking 3.0.x is too old.

Also remove the note about pip's behavior, which was already fixed in
recent versions.

Change-Id: I65d350943649c3346ed5741631c01724ddd256ef
Author: Takashi Kajinami — 2024-02-19 02:23:30 +09:00
Parent: b68c105696
Commit: 44cd95684b
13 changed files with 31 additions and 42 deletions

View File

@ -254,8 +254,8 @@ class DefaultEngine(base.Engine):
db_api.update_action_execution_heartbeat(exec_id) db_api.update_action_execution_heartbeat(exec_id)
except exceptions.DBEntityNotFoundError: except exceptions.DBEntityNotFoundError:
LOG.debug( LOG.debug(
"Action execution heartbeat update failed. {}" "Action execution heartbeat update failed. {}",
.format(exec_id), exec_id,
exc_info=True exc_info=True
) )
# Ignore this error and continue with the # Ignore this error and continue with the

View File

@ -72,7 +72,7 @@ class KombuRPCListener(ConsumerMixin):
:param message: the plain amqp kombu.message with additional :param message: the plain amqp kombu.message with additional
information information
""" """
LOG.debug("Got response: {0}".format(response)) LOG.debug("Got response: {}", response)
try: try:
message.ack() message.ack()
@ -97,7 +97,7 @@ class KombuRPCListener(ConsumerMixin):
else: else:
LOG.debug( LOG.debug(
"Got a response, but seems like no process is waiting for " "Got a response, but seems like no process is waiting for "
"it [correlation_id={0}]".format(correlation_id) "it [correlation_id={}]", correlation_id
) )
def get_result(self, correlation_id, timeout): def get_result(self, correlation_id, timeout):

View File

@ -147,8 +147,8 @@ class KombuRPCServer(rpc_base.RPCServer, kombu_base.Base):
self.stop() self.stop()
LOG.info( LOG.info(
"Server with id='{}' stopped." "Server with id='{}' stopped.",
.format(self.server_id) self.server_id
) )
return return

View File

@ -51,14 +51,12 @@ def handle_expired_actions():
CONF.action_heartbeat.batch_size CONF.action_heartbeat.batch_size
) )
LOG.debug("Found {} running and expired actions.".format( LOG.debug("Found {} running and expired actions.", len(action_exs))
len(action_exs))
)
if action_exs: if action_exs:
LOG.info( LOG.info(
"Actions executions to transit to error, because " "Actions executions to transit to error, because "
"heartbeat wasn't received: {}".format(action_exs) "heartbeat wasn't received: {}", action_exs
) )
for action_ex in action_exs: for action_ex in action_exs:
@ -131,7 +129,7 @@ def start():
LOG.debug( LOG.debug(
"First run of action heartbeat checker, wait before " "First run of action heartbeat checker, wait before "
"checking to make sure executors have time to send " "checking to make sure executors have time to send "
"heartbeats. ({} seconds)".format(wait_time) "heartbeats. ({} seconds)", wait_time
) )
global _stopped global _stopped

View File

@ -56,8 +56,8 @@ def pause_running_executions(skip_tx=False):
db_api.get_workflow_executions(state=states.RUNNING, db_api.get_workflow_executions(state=states.RUNNING,
insecure=True)] insecure=True)]
LOG.info("Number of find workflow executions is {}" LOG.info("Number of find workflow executions is {}",
.format(len(execution_ids))) len(execution_ids))
if skip_tx: if skip_tx:
sched = sched_base.get_system_scheduler() sched = sched_base.get_system_scheduler()
@ -112,7 +112,7 @@ def _pause_execution(wf_ex_id, project_id, skip_tx=False):
if states.is_running(wf_ex.state): if states.is_running(wf_ex.state):
workflow_handler.pause_workflow(wf_ex) workflow_handler.pause_workflow(wf_ex)
LOG.info('Execution {} was paused'.format(wf_ex_id)) LOG.info('Execution {} was paused', wf_ex_id)
def await_pause_executions(skip_tx=False): def await_pause_executions(skip_tx=False):
@ -139,9 +139,9 @@ def await_pause_executions(skip_tx=False):
db_api.update_maintenance_status(PAUSED) db_api.update_maintenance_status(PAUSED)
return return
LOG.info('The following tasks have RUNNING state: {}'.format([ LOG.info('The following tasks have RUNNING state: {}', [
task.id for task in tasks task.id for task in tasks
])) ])
sched = sched_base.get_system_scheduler() sched = sched_base.get_system_scheduler()
job = sched_base.SchedulerJob( job = sched_base.SchedulerJob(
@ -166,9 +166,9 @@ def await_pause_executions(skip_tx=False):
if not tasks: if not tasks:
return True return True
LOG.info('The following tasks have RUNNING state: {}'.format([ LOG.info('The following tasks have RUNNING state: {}', [
task.id for task in tasks task.id for task in tasks
])) ])
eventlet.sleep(1) eventlet.sleep(1)
@ -284,6 +284,4 @@ def _resume_execution(wf_ex_id, skip_tx=False):
workflow_handler.resume_workflow(wf_ex) workflow_handler.resume_workflow(wf_ex)
LOG.info('The following execution was resumed: {}'.format([ LOG.info('The following execution was resumed: {}', [wf_ex.id])
wf_ex.id
]))

View File

@ -110,7 +110,7 @@ class TestCronTriggerController(base.APITest):
resp = self.app.get('/v2/cron_triggers/my_cron_trigger') resp = self.app.get('/v2/cron_triggers/my_cron_trigger')
self.assertEqual(200, resp.status_int) self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json) self.assertIn('project_id', resp.json)
@mock.patch.object(db_api, "get_cron_trigger", MOCK_NOT_FOUND) @mock.patch.object(db_api, "get_cron_trigger", MOCK_NOT_FOUND)
def test_get_not_found(self): def test_get_not_found(self):

View File

@ -224,7 +224,7 @@ class TestExecutionsController(base.APITest):
resp = self.app.get('/v2/executions/123', expect_errors=True) resp = self.app.get('/v2/executions/123', expect_errors=True)
self.assertEqual(200, resp.status_int) self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json) self.assertIn('project_id', resp.json)
@mock.patch.object( @mock.patch.object(
db_api, db_api,

View File

@ -242,7 +242,7 @@ class TestTasksController(base.APITest):
resp = self.app.get('/v2/tasks/123') resp = self.app.get('/v2/tasks/123')
self.assertEqual(200, resp.status_int) self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json) self.assertIn('project_id', resp.json)
@mock.patch.object(db_api, 'get_task_executions', MOCK_EMPTY) @mock.patch.object(db_api, 'get_task_executions', MOCK_EMPTY)
def test_get_all_empty(self): def test_get_all_empty(self):

View File

@ -213,7 +213,7 @@ class TestWorkbooksController(base.APITest):
resp = self.app.get('/v2/workbooks/123') resp = self.app.get('/v2/workbooks/123')
self.assertEqual(200, resp.status_int) self.assertEqual(200, resp.status_int)
self.assertTrue('project_id' in resp.json) self.assertIn('project_id', resp.json)
@mock.patch.object(workbooks, "update_workbook_v2", MOCK_UPDATED_WORKBOOK) @mock.patch.object(workbooks, "update_workbook_v2", MOCK_UPDATED_WORKBOOK)
def test_put(self): def test_put(self):

View File

@ -232,7 +232,7 @@ class WorkflowController(object):
@abc.abstractmethod @abc.abstractmethod
def evaluate_workflow_final_context(self): def evaluate_workflow_final_context(self):
"""Evaluates final workflow context assuming that workflow has finished. """Evaluates final workflow context after workflow has finished.
:return: Final workflow context. :return: Final workflow context.
""" """

View File

@ -1,7 +1,3 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
alembic>=0.9.6 # MIT alembic>=0.9.6 # MIT
croniter>=0.3.4 # MIT License croniter>=0.3.4 # MIT License
cachetools>=2.0.0 # MIT License cachetools>=2.0.0 # MIT License

View File

@ -1,7 +1,4 @@
# The order of packages is significant, because pip processes them in the order hacking>=6.1.0,<6.2.0 # Apache-2.0
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=3.0.1,<3.1.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0
doc8>=0.8.1 # Apache-2.0 doc8>=0.8.1 # Apache-2.0