Remove the hold command

This makes no sense in the zuulv3 world.

Change-Id: Id939ca174b490482007c32611ef8bbba9db4c7ca
David Shrewsbury 2018-01-31 15:22:15 -05:00
parent 29ae581004
commit 2e0e655cd0
3 changed files with 1 addition and 65 deletions


@@ -132,23 +132,6 @@ the image is not being built correctly. If you know the image will be built
correctly you can simply delete the built image and remove it from all clouds,
which will cause it to be rebuilt using ``nodepool dib-image-delete``.

Instance Management
~~~~~~~~~~~~~~~~~~~

With working images in providers you should see Nodepool launching instances
in these providers using the images it built. You may find that you need to
debug a particular job failure manually. An easy way to do this is to
``nodepool hold`` an instance, then log in to the instance and perform any
necessary debugging steps. Note that this doesn't stop the job running there;
it only prevents Nodepool from automatically deleting this instance once the
job is complete.

In some circumstances, like manually holding an instance as above or wanting
to force a job restart, you may want to delete a running instance. You can
issue a ``nodepool delete`` to force Nodepool to do this.

Complete command help info is below.

Command Line Tools
------------------
@@ -193,11 +176,6 @@ list
.. program-output:: nodepool list --help
   :nostderr:

hold
^^^^

.. program-output:: nodepool hold --help
   :nostderr:

delete
^^^^^^

.. program-output:: nodepool delete --help
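
For reference, the workflow described in the documentation removed above could be scripted against the pre-removal CLI roughly as follows. This is only an illustrative sketch of the old interface, not part of this change; the node id and hold reason are placeholders, and the command names and flags are the ones defined by the argparse code removed below.

# Illustrative sketch only: drives the pre-removal CLI described in the
# removed documentation above. The node id and reason are placeholders.
import subprocess

NODE_ID = "0000000001"  # hypothetical id, normally taken from `nodepool list`

# Hold the node so Nodepool will not delete it once the job completes
# (the job itself keeps running on the instance).
subprocess.run(
    ["nodepool", "hold", NODE_ID, "--reason", "debugging a job failure"],
    check=True)

# ... log in to the held instance and debug ...

# When finished, release the instance by deleting it.
subprocess.run(["nodepool", "delete", NODE_ID], check=True)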


@@ -74,15 +74,6 @@ class NodePoolCmd(NodepoolApp):
        cmd_alien_image_list.add_argument('provider', help='provider name',
                                          nargs='?')

        cmd_hold = subparsers.add_parser(
            'hold',
            help='place a node in the HOLD state')
        cmd_hold.set_defaults(func=self.hold)
        cmd_hold.add_argument('id', help='node id')
        cmd_hold.add_argument('--reason',
                              help='Reason this node is held',
                              required=True)

        cmd_delete = subparsers.add_parser(
            'delete',
            help='place a node in the DELETE state')
@@ -234,20 +225,6 @@ class NodePoolCmd(NodepoolApp):
        print(t)

    def hold(self):
        node = self.zk.getNode(self.args.id)
        if not node:
            print("Node id %s not found" % self.args.id)
            return
        node.state = zk.HOLD
        node.comment = self.args.reason
        print("Waiting for lock...")
        self.zk.lockNode(node, blocking=True)
        self.zk.storeNode(node)
        self.zk.unlockNode(node)
        self.list(node_id=self.args.id)

    def delete(self):
        node = self.zk.getNode(self.args.id)
        if not node:
@@ -382,7 +359,7 @@ class NodePoolCmd(NodepoolApp):
        if self.args.command in ('image-build', 'dib-image-list',
                                 'image-list', 'dib-image-delete',
                                 'image-delete', 'alien-image-list',
                                 'list', 'hold', 'delete',
                                 'list', 'delete',
                                 'request-list', 'info', 'erase'):
            self.zk = zk.ZooKeeper()
            self.zk.connect(list(config.zookeeper_servers.values()))
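
For context, the removed hold() amounts to the lock/modify/store pattern of the zk wrapper used throughout this file. Below is a minimal standalone sketch of that pattern, assuming an already-connected nodepool.zk.ZooKeeper client; the hold_node helper name and the try/finally are illustrative additions, not part of the original code.

# Minimal sketch of the lock/modify/store pattern the removed hold() relied on.
# `zk_client` is assumed to be an already-connected nodepool.zk.ZooKeeper
# instance; `hold_node` is a hypothetical helper, not part of nodepool.
from nodepool import zk


def hold_node(zk_client, node_id, reason):
    node = zk_client.getNode(node_id)
    if not node:
        raise ValueError("Node id %s not found" % node_id)
    node.state = zk.HOLD      # mark the node so it will not be reaped
    node.comment = reason     # record why it is being held
    zk_client.lockNode(node, blocking=True)  # take the node lock before writing
    try:
        zk_client.storeNode(node)            # persist the state change
    finally:
        zk_client.unlockNode(node)           # always release the lock
    return node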


@@ -201,25 +201,6 @@ class TestNodepoolCMD(tests.DBTestCase):
        self.assert_listed(
            configfile, ['dib-image-list'], 0, 'fake-image-0000000001', 0)

    def test_hold(self):
        configfile = self.setup_config('node.yaml')
        pool = self.useNodepool(configfile, watermark_sleep=1)
        self.useBuilder(configfile)
        pool.start()
        self.waitForImage('fake-provider', 'fake-image')
        nodes = self.waitForNodes('fake-label')
        node_id = nodes[0].id
        # Assert one node exists and it is node 1 in a ready state.
        self.assert_listed(configfile, ['list'], 0, node_id, 1)
        self.assert_nodes_listed(configfile, 1, zk.READY)
        # Hold node 0000000000
        self.patch_argv(
            '-c', configfile, 'hold', node_id, '--reason', 'testing')
        nodepoolcmd.main()
        # Assert the state changed to HOLD
        self.assert_listed(configfile, ['list'], 0, node_id, 1)
        self.assert_nodes_listed(configfile, 1, 'hold')

    def test_delete(self):
        configfile = self.setup_config('node.yaml')
        pool = self.useNodepool(configfile, watermark_sleep=1)