Add QuotaSupport to drivers that don't have it

This adds QuotaSupport to all the drivers that don't have it, and
also updates their tests so there is at least one test which exercises
the new tenant quota feature.

Since this is expected to work across all drivers/providers/etc, we
should start including at least rudimentary quota support in every
driver.

Change-Id: I891ade226ba588ecdda835b143b7897bb4425bd8
Author: James E. Blair  2022-01-25 10:52:35 -08:00
parent  ff8bbdf8f8
commit  9bcc046ffc
19 changed files with 97 additions and 4 deletions
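
The pattern repeated across the diffs below is a small three-method contract that each driver picks up by inheriting the QuotaSupport mixin alongside Provider. Condensed into one sketch (the PlaceholderProvider name is illustrative, not part of nodepool):

    import math

    from nodepool.driver import Provider
    from nodepool.driver.utils import QuotaInformation, QuotaSupport


    class PlaceholderProvider(Provider, QuotaSupport):
        def getProviderLimits(self):
            # Rudimentary support: report unlimited provider-side quota
            # until the driver queries its cloud API for real limits.
            return QuotaInformation(cores=math.inf, instances=math.inf,
                                    ram=math.inf, default=math.inf)

        def quotaNeededByLabel(self, ntype, pool):
            # Assume every node of any label costs one instance.
            return QuotaInformation(cores=0, instances=1, ram=0, default=1)

        def unmanagedQuotaUsed(self):
            # Assume nothing outside nodepool consumes quota.
            return QuotaInformation()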


@@ -8,6 +8,8 @@ AWS EC2 Driver
 Selecting the aws driver adds the following options to the :attr:`providers`
 section of the configuration.
 
+.. note:: Quota support is not implemented.
+
 .. attr-overview::
    :prefix: providers.[aws]
    :maxdepth: 3


@@ -13,12 +13,15 @@
 # under the License.
 
 import logging
+import math
 
 import boto3
 import botocore.exceptions
 
 import nodepool.exceptions
 from nodepool.driver import Provider
 from nodepool.driver.utils import NodeDeleter
+from nodepool.driver.utils import QuotaInformation, QuotaSupport
 from nodepool.driver.aws.handler import AwsNodeRequestHandler
@@ -43,7 +46,7 @@ class AwsInstance:
         return getattr(self, name, default)
 
-class AwsProvider(Provider):
+class AwsProvider(Provider, QuotaSupport):
     log = logging.getLogger("nodepool.driver.aws.AwsProvider")
 
     def __init__(self, provider, *args):
@@ -242,3 +245,19 @@ class AwsProvider(Provider):
         instances = self.ec2.create_instances(**args)
         return self.ec2.Instance(instances[0].id)
+
+    def getProviderLimits(self):
+        # TODO: query the api to get real limits
+        return QuotaInformation(
+            cores=math.inf,
+            instances=math.inf,
+            ram=math.inf,
+            default=math.inf)
+
+    def quotaNeededByLabel(self, ntype, pool):
+        # TODO: return real quota information about a label
+        return QuotaInformation(cores=0, instances=1, ram=0, default=1)
+
+    def unmanagedQuotaUsed(self):
+        # TODO: return real quota information about quota
+        return QuotaInformation()
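
Because the limits above are all math.inf, provider-side quota checks can never block a request; only the new tenant limits bind. A toy calculation (plain Python, not nodepool code) shows why:

    import math

    def headroom(limit, used, needed):
        # Capacity left after accounting for an in-flight request.
        return limit - used - needed

    print(headroom(math.inf, used=10_000, needed=1))  # inf: never blocks
    print(headroom(1024, used=1020, needed=8))        # -4: would block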


@@ -14,6 +14,7 @@
 import base64
 import logging
+import math
 import urllib3
 import time
 
@@ -22,13 +23,14 @@
 from openshift.dynamic import DynamicClient
 
 from nodepool import exceptions
 from nodepool.driver import Provider
 from nodepool.driver.utils import NodeDeleter
+from nodepool.driver.utils import QuotaInformation, QuotaSupport
 from nodepool.driver.openshift import handler
 from nodepool.driver.utils_k8s import get_client
 
 urllib3.disable_warnings()
 
-class OpenshiftProvider(Provider):
+class OpenshiftProvider(Provider, QuotaSupport):
     log = logging.getLogger("nodepool.driver.openshift.OpenshiftProvider")
 
     def __init__(self, provider, *args):
@@ -257,3 +259,19 @@ class OpenshiftProvider(Provider):
     def getRequestHandler(self, poolworker, request):
         return handler.OpenshiftNodeRequestHandler(poolworker, request)
+
+    def getProviderLimits(self):
+        # TODO: query the api to get real limits
+        return QuotaInformation(
+            cores=math.inf,
+            instances=math.inf,
+            ram=math.inf,
+            default=math.inf)
+
+    def quotaNeededByLabel(self, ntype, pool):
+        # TODO: return real quota information about a label
+        return QuotaInformation(cores=1, instances=1, ram=1, default=1)
+
+    def unmanagedQuotaUsed(self):
+        # TODO: return real quota information about quota
+        return QuotaInformation()
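
Unlike the AWS placeholder's zeros, the per-label cost here is one core and one unit of RAM, so each pod presumably counts toward a tenant's max-cores ceiling. A standalone sketch of that accounting (assumed flow, not nodepool internals):

    def fits_tenant(used_cores, label_cores, max_cores):
        # Would one more node of this label stay under the tenant cap?
        return used_cores + label_cores <= max_cores

    print(fits_tenant(1023, label_cores=1, max_cores=1024))  # True
    print(fits_tenant(1024, label_cores=1, max_cores=1024))  # False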


@@ -14,6 +14,7 @@
 import itertools
 import logging
+import math
 import threading
 
 from concurrent.futures.thread import ThreadPoolExecutor
 
@@ -24,6 +25,7 @@
 from nodepool import nodeutils
 from nodepool import zk
 from nodepool.driver import Provider
 from nodepool.driver.utils import NodeDeleter
+from nodepool.driver.utils import QuotaInformation, QuotaSupport
 from nodepool.driver.static.handler import StaticNodeRequestHandler
@@ -42,7 +44,7 @@ def nodeTuple(node):
     return Node(node.hostname, node.username, node.connection_port)
 
-class StaticNodeProvider(Provider):
+class StaticNodeProvider(Provider, QuotaSupport):
     log = logging.getLogger("nodepool.driver.static."
                             "StaticNodeProvider")
@@ -493,3 +495,16 @@
         except Exception:
             self.log.exception("Cannot re-register deleted node %s:",
                                node_tuple)
+
+    def getProviderLimits(self):
+        return QuotaInformation(
+            cores=math.inf,
+            instances=math.inf,
+            ram=math.inf,
+            default=math.inf)
+
+    def quotaNeededByLabel(self, ntype, pool):
+        return QuotaInformation(cores=0, instances=1, ram=0, default=1)
+
+    def unmanagedQuotaUsed(self):
+        return QuotaInformation()
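
Static nodes already exist, so only the instance count is meaningful: cores and RAM cost nothing and the provider limit is unbounded. A toy aggregation of the three values above (assumed flow, not nodepool's exact arithmetic):

    import math

    limits = {'cores': math.inf, 'instances': math.inf, 'ram': math.inf}
    unmanaged = {'cores': 0, 'instances': 0, 'ram': 0}  # unmanagedQuotaUsed()
    per_node = {'cores': 0, 'instances': 1, 'ram': 0}   # quotaNeededByLabel()

    available = {k: limits[k] - unmanaged[k] - per_node[k] for k in limits}
    print(available)  # every dimension stays infinite: never blocks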


@@ -3,6 +3,10 @@ zookeeper-servers:
     port: null
     chroot: null
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: ubuntu1404
   - name: ubuntu1404-bad-ami-name
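
This stanza, repeated in the fixtures below, caps requests tagged tenant-1 at 1024 cores. A hedged sketch of the check it implies (illustrative Python, not nodepool's implementation):

    TENANT_LIMITS = {'tenant-1': {'max-cores': 1024}}

    def tenant_has_room(tenant, used_cores, needed_cores):
        limit = TENANT_LIMITS.get(tenant, {}).get('max-cores', float('inf'))
        return used_cores + needed_cores <= limit

    print(tenant_has_room('tenant-1', 1000, 24))  # True: exactly at the cap
    print(tenant_has_room('tenant-1', 1000, 25))  # False: over the cap
    print(tenant_has_room('tenant-2', 10**6, 1))  # True: no limit configured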


@@ -12,6 +12,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: bionic
     min-ready: 0


@@ -3,6 +3,10 @@ zookeeper-servers:
     port: null
     chroot: null
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: debian-stretch-f1-micro


@@ -8,6 +8,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: pod-fedora
   - name: kubernetes-namespace


@@ -12,6 +12,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: backing-label
     min-ready: 0


@@ -8,6 +8,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: pod-fedora
   - name: openshift-project


@@ -8,6 +8,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: pod-fedora


@@ -8,6 +8,10 @@ zookeeper-tls:
   cert: {zookeeper_cert}
   key: {zookeeper_key}
 
+tenant-resource-limits:
+  - tenant-name: tenant-1
+    max-cores: 1024
+
 labels:
   - name: fake-label
   - name: other-label


@@ -133,6 +133,7 @@ class TestDriverAws(tests.DBTestCase):
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append(label)
 
         with patch('nodepool.driver.aws.handler.nodescan') as nodescan:
             nodescan.return_value = 'MOCK KEY'
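
Each driver test gains the same one-line change, here and in the tests below: setting tenant_name routes the request through the new tenant quota accounting, and the fixtures' 1024-core cap is high enough that the request is still fulfilled. The pattern in sketch form (names follow the surrounding test code):

    req = zk.NodeRequest()
    req.state = zk.REQUESTED
    req.tenant_name = 'tenant-1'  # counted against tenant-resource-limits
    req.node_types.append(label)
    self.zk.storeNodeRequest(req)
    req = self.waitForNodeRequest(req, zk.FULFILLED)  # still under the cap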


@@ -70,6 +70,7 @@ class TestDriverAzure(tests.DBTestCase):
         pool.start()
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append('bionic')
         self.zk.storeNodeRequest(req)


@@ -280,6 +280,7 @@ class TestDriverGce(tests.DBTestCase):
             nodescan.return_value = 'MOCK KEY'
             req = zk.NodeRequest()
             req.state = zk.REQUESTED
+            req.tenant_name = 'tenant-1'
             req.node_types.append(label)
             self.zk.storeNodeRequest(req)


@@ -107,6 +107,7 @@ class TestDriverKubernetes(tests.DBTestCase):
         pool.start()
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append('pod-fedora')
         self.zk.storeNodeRequest(req)


@@ -35,6 +35,7 @@ class TestDriverMetastatic(tests.DBTestCase):
     def _requestNode(self):
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append('user-label')
         self.zk.storeNodeRequest(req)


@@ -138,6 +138,7 @@ class TestDriverOpenshift(tests.DBTestCase):
         pool.start()
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append('pod-fedora')
         self.zk.storeNodeRequest(req)


@@ -109,6 +109,7 @@ class TestDriverStatic(tests.DBTestCase):
         req = zk.NodeRequest()
         req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
         req.node_types.append('fake-label')
         self.zk.storeNodeRequest(req)
 
         req = self.waitForNodeRequest(req, zk.FULFILLED)
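
The fixtures set max-cores well above what any single test consumes, so these tests exercise the accounting path rather than exhaustion. As a toy model (hypothetical, not nodepool's implementation), per-tenant accounting reduces to a ledger like this:

    class TenantLedger:
        # Hypothetical per-tenant core ledger, for illustration only.
        def __init__(self, max_cores):
            self.max_cores = max_cores
            self.used = 0

        def try_acquire(self, cores):
            if self.used + cores > self.max_cores:
                return False  # over quota: the request must wait
            self.used += cores
            return True

        def release(self, cores):
            self.used -= cores

    ledger = TenantLedger(max_cores=1024)
    assert ledger.try_acquire(1024)
    assert not ledger.try_acquire(1)  # quota exhausted
    ledger.release(1024)
    assert ledger.try_acquire(1)      # freed capacity is reusable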