Merge "Add OpenStack volume quota"

Zuul 2023-02-24 20:12:41 +00:00 committed by Gerrit Code Review
commit 13007b6825
15 changed files with 421 additions and 25 deletions


@ -588,4 +588,18 @@ Options
:default: infinity
:type: int
The maximum number of main memory (RAM) a tenant can allocate.
The maximum amount of main memory (RAM) a tenant can allocate.
.. attr:: max-volumes
:default: infinity
:type: int
The maximum number of volumes a tenant can allocate. Currently
only used by the OpenStack driver.
.. attr:: max-volume-gb
:default: infinity
:type: int
The maximum total size in gigabytes of volumes a tenant can
allocate. Currently only used by the OpenStack driver.


@ -460,6 +460,22 @@ Selecting the OpenStack driver adds the following options to the
the amount of ram allocated by nodepool. If not defined,
nodepool can use as much ram as the tenant allows.
.. attr:: max-volumes
:type: int
Maximum number of volumes usable from this pool. This can be
used to limit the number of volumes allocated by nodepool. If
not defined, nodepool can use as many volumes as the tenant
allows.
.. attr:: max-volume-gb
:type: int
Maximum total size in gigabytes of volumes usable from this
pool. This can be used to limit the volume storage allocated
by nodepool. If not defined, nodepool can use as much space
as the tenant allows.
.. attr:: ignore-provider-quota
:type: bool
:default: False


@ -261,6 +261,11 @@ class Config(ConfigValue):
limits['cores'] = resource_limit.pop('max-cores', math.inf)
limits['instances'] = resource_limit.pop('max-servers', math.inf)
limits['ram'] = resource_limit.pop('max-ram', math.inf)
if 'max-volumes' in resource_limit:
limits['volumes'] = resource_limit.pop('max-volumes', math.inf)
if 'max-volume-gb' in resource_limit:
limits['volume-gb'] = resource_limit.pop(
'max-volume-gb', math.inf)
for k, v in resource_limit.items():
limits[k] = v
self.tenant_resource_limits[tenant_name] = limits
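
For illustration, a tenant entry like the ones in the new test fixtures reduces to the following limits dict under the parsing above; unspecified resources fall back to math.inf, and the volume keys are only added when present (a standalone sketch with hypothetical values):

import math

# Hypothetical tenant-resource-limits entry, with tenant-name already
# stripped as in the surrounding loader:
resource_limit = {'max-volumes': 2, 'max-volume-gb': 20}

limits = {
    'cores': resource_limit.pop('max-cores', math.inf),
    'instances': resource_limit.pop('max-servers', math.inf),
    'ram': resource_limit.pop('max-ram', math.inf),
}
if 'max-volumes' in resource_limit:
    limits['volumes'] = resource_limit.pop('max-volumes', math.inf)
if 'max-volume-gb' in resource_limit:
    limits['volume-gb'] = resource_limit.pop('max-volume-gb', math.inf)

assert limits == {'cores': math.inf, 'instances': math.inf, 'ram': math.inf,
                  'volumes': 2, 'volume-gb': 20}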


@ -77,6 +77,10 @@ class FakeOpenStackCloud(object):
def _get_quota():
return 100, 20, 1000000
@staticmethod
def _get_volume_quota():
return 100, 1000000
def __init__(self, images=None, networks=None):
self.pause_creates = False
self._image_list = images
@ -117,10 +121,13 @@ class FakeOpenStackCloud(object):
device_owner=None),
]
self._floating_ip_list = []
self._volume_list = []
def _update_quota(self):
self.max_cores, self.max_instances, self.max_ram = FakeOpenStackCloud.\
_get_quota()
self.max_volumes, self.max_volume_gb = FakeOpenStackCloud.\
_get_volume_quota()
def _get(self, name_or_id, instance_list):
self.log.debug("Get %s in %s" % (name_or_id, repr(instance_list)))
@ -211,6 +218,7 @@ class FakeOpenStackCloud(object):
over_quota=over_quota,
flavor=kw.get('flavor'),
event=threading.Event(),
volumes=[],
_kw=kw)
instance_list.append(s)
t = threading.Thread(target=self._finish,
@ -325,6 +333,18 @@ class FakeOpenStackCloud(object):
total_ram_used=8192 * len(self._server_list)
)
def get_volume_limits(self):
self._update_quota()
return Dummy(
'limits',
absolute={
'maxTotalVolumes': self.max_volumes,
'maxTotalVolumeGigabytes': self.max_volume_gb,
})
def list_volumes(self):
return self._volume_list
def list_ports(self, filters=None):
if filters and filters.get('status') == 'DOWN':
return self._down_ports


@ -19,11 +19,13 @@
from concurrent.futures import ThreadPoolExecutor
import functools
import logging
import math
import time
import operator
import cachetools.func
import openstack
from keystoneauth1.exceptions.catalog import EndpointNotFound
from nodepool.driver.utils import QuotaInformation
from nodepool.driver import statemachine
@ -34,6 +36,36 @@ from nodepool import version
CACHE_TTL = 10
def quota_from_flavor(flavor, label=None, volumes=None):
args = dict(instances=1,
cores=flavor.vcpus,
ram=flavor.ram)
if label and label.boot_from_volume:
args['volumes'] = 1
args['volume-gb'] = label.volume_size
elif volumes:
args['volumes'] = len(volumes)
args['volume-gb'] = sum([v.size for v in volumes])
return QuotaInformation(**args)
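
A brief usage sketch of the helper above, using stand-in flavor, label, and volume objects (these names and the import path are assumptions for illustration, not part of the change):

from collections import namedtuple

from nodepool.driver.openstack.adapter import quota_from_flavor  # assumed path

Flavor = namedtuple('Flavor', 'vcpus ram')
Label = namedtuple('Label', 'boot_from_volume volume_size')
Volume = namedtuple('Volume', 'size')

flavor = Flavor(vcpus=4, ram=8192)

# Boot-from-volume label: one volume of label.volume_size is counted.
bfv = quota_from_flavor(
    flavor, label=Label(boot_from_volume=True, volume_size=10))
# -> instances=1, cores=4, ram=8192, volumes=1, volume-gb=10

# Existing server: the volumes actually attached to it are counted instead.
running = quota_from_flavor(flavor, volumes=[Volume(40), Volume(80)])
# -> instances=1, cores=4, ram=8192, volumes=2, volume-gb=120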
def quota_from_limits(compute, volume):
def bound_value(value):
if value == -1:
return math.inf
return value
args = dict(
instances=bound_value(compute.max_total_instances),
cores=bound_value(compute.max_total_cores),
ram=bound_value(compute.max_total_ram_size))
if volume is not None:
args['volumes'] = bound_value(volume['absolute']['maxTotalVolumes'])
args['volume-gb'] = bound_value(
volume['absolute']['maxTotalVolumeGigabytes'])
return QuotaInformation(**args)
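
The limit translation can be sketched the same way: -1 from the cloud means "unlimited" and becomes math.inf, and when the block storage endpoint is absent the caller passes volume=None so only compute limits are recorded (illustrative values; SimpleNamespace stands in for the compute limits object):

from types import SimpleNamespace

from nodepool.driver.openstack.adapter import quota_from_limits  # assumed path

compute = SimpleNamespace(max_total_instances=-1,  # -1 means unlimited
                          max_total_cores=96,
                          max_total_ram_size=409600)
volume = {'absolute': {'maxTotalVolumes': 10,
                       'maxTotalVolumeGigabytes': 1000}}

quota_from_limits(compute, volume)
# -> instances=inf, cores=96, ram=409600, volumes=10, volume-gb=1000

quota_from_limits(compute, None)  # e.g. after EndpointNotFound
# -> instances=inf, cores=96, ram=409600; no volume keys added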
class OpenStackInstance(statemachine.Instance):
def __init__(self, provider, server, quota):
super().__init__()
@ -192,7 +224,7 @@ class OpenStackCreateStateMachine(statemachine.StateMachine):
self.flavor = self.adapter._findFlavor(
flavor_name=self.label.flavor_name,
min_ram=self.label.min_ram)
self.quota = QuotaInformation.construct_from_flavor(self.flavor)
self.quota = quota_from_flavor(self.flavor, label=self.label)
self.external_id = None
def _handleServerFault(self):
@ -406,20 +438,32 @@ class OpenStackAdapter(statemachine.Adapter):
self._deleteServer(resource.id)
def listInstances(self):
volumes = {}
for volume in self._listVolumes():
volumes[volume.id] = volume
for server in self._listServers():
if server.status.lower() == 'deleted':
continue
flavor = self._getFlavorFromServer(server)
quota = QuotaInformation.construct_from_flavor(flavor)
server_volumes = []
for vattach in server.volumes:
volume = volumes.get(vattach.id)
if volume:
server_volumes.append(volume)
quota = quota_from_flavor(flavor, volumes=server_volumes)
yield OpenStackInstance(self.provider, server, quota)
def getQuotaLimits(self):
limits = self._client.get_compute_limits()
return QuotaInformation.construct_from_limits(limits)
compute = self._client.get_compute_limits()
try:
volume = self._client.get_volume_limits()
except EndpointNotFound:
volume = None
return quota_from_limits(compute, volume)
def getQuotaForLabel(self, label):
flavor = self._findFlavor(label.flavor_name, label.min_ram)
return QuotaInformation.construct_from_flavor(flavor)
return quota_from_flavor(flavor, label=label)
def getAZs(self):
azs = self._listAZs()
@ -629,6 +673,13 @@ class OpenStackAdapter(statemachine.Adapter):
def _listServers(self):
return self._client.list_servers(bare=True)
@cachetools.func.ttl_cache(maxsize=1, ttl=CACHE_TTL)
def _listVolumes(self):
try:
return self._client.list_volumes()
except EndpointNotFound:
return []
@cachetools.func.ttl_cache(maxsize=1, ttl=CACHE_TTL)
def _listFloatingIps(self):
return self._client.list_floating_ips()


@ -110,6 +110,8 @@ class ProviderPool(ConfigPool):
self.name = pool_config['name']
self.max_cores = pool_config.get('max-cores', math.inf)
self.max_ram = pool_config.get('max-ram', math.inf)
self.max_volumes = pool_config.get('max-volumes', math.inf)
self.max_volume_gb = pool_config.get('max-volume-gb', math.inf)
self.ignore_provider_quota = pool_config.get('ignore-provider-quota',
False)
self.azs = pool_config.get('availability-zones')
@ -326,6 +328,8 @@ class OpenStackProviderConfig(ProviderConfig):
'ignore-provider-quota': bool,
'max-cores': int,
'max-ram': int,
'max-volumes': int,
'max-volume-gb': int,
'labels': [pool_label],
'availability-zones': [str],
'security-groups': [str]


@ -408,6 +408,10 @@ class StateMachineHandler(NodeRequestHandler):
ram=getattr(self.pool, 'max_ram', None),
default=math.inf,
)
if getattr(self.pool, 'max_volumes', None):
args['volumes'] = self.pool.max_volumes
if getattr(self.pool, 'max_volume_gb', None):
args['volume-gb'] = self.pool.max_volume_gb
args.update(getattr(self.pool, 'max_resources', {}))
pool_quota = QuotaInformation(**args)
pool_quota.subtract(needed_quota)
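
As a rough sketch of this check (illustrative values; QuotaInformation and its non_negative() helper are used as elsewhere in nodepool), a pool that only sets max-servers, max-volumes and max-volume-gb leaves every other resource at infinity, and a single boot-from-volume node fits comfortably:

import math

from nodepool.driver.utils import QuotaInformation

# Pool limits: max-servers: 20, max-volumes: 2, max-volume-gb: 20.
args = dict(cores=None, instances=20, ram=None, default=math.inf)
args['volumes'] = 2
args['volume-gb'] = 20
pool_quota = QuotaInformation(**args)

# Quota needed for one boot-from-volume node (cf. quota_from_flavor above).
needed = QuotaInformation(instances=1, cores=4, ram=8192,
                          **{'volumes': 1, 'volume-gb': 10})

pool_quota.subtract(needed)
assert pool_quota.non_negative()  # the request fits within the pool limits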
@ -445,6 +449,10 @@ class StateMachineHandler(NodeRequestHandler):
ram=getattr(self.pool, 'max_ram', None),
default=math.inf,
)
if getattr(self.pool, 'max_volumes', None):
args['volumes'] = self.pool.max_volumes
if getattr(self.pool, 'max_volume_gb', None):
args['volume-gb'] = self.pool.max_volume_gb
args.update(getattr(self.pool, 'max_resources', {}))
pool_quota = QuotaInformation(**args)
pool_quota.subtract(


@ -207,24 +207,6 @@ class QuotaInformation:
self.quota['compute'][k] = v
self.default = default
@staticmethod
def construct_from_flavor(flavor):
return QuotaInformation(instances=1,
cores=flavor.vcpus,
ram=flavor.ram)
@staticmethod
def construct_from_limits(limits):
def bound_value(value):
if value == -1:
return math.inf
return value
return QuotaInformation(
instances=bound_value(limits.max_total_instances),
cores=bound_value(limits.max_total_cores),
ram=bound_value(limits.max_total_ram_size))
def _get_default(self, value, default):
return value if value is not None else default
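
These constructors can move into the OpenStack driver because QuotaInformation itself is generic: as shown above, any extra keyword argument is stored as an additional 'compute' resource, so driver-specific keys such as 'volume-gb' need no support here (a small sketch, assuming the import path used elsewhere in this change):

from nodepool.driver.utils import QuotaInformation

q = QuotaInformation(instances=1, cores=4, ram=8192,
                     **{'volumes': 1, 'volume-gb': 10})
# q.quota['compute'] now holds:
# {'cores': 4, 'instances': 1, 'ram': 8192, 'volumes': 1, 'volume-gb': 10}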


@ -0,0 +1,48 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
zookeeper-tls:
ca: {zookeeper_ca}
cert: {zookeeper_cert}
key: {zookeeper_key}
labels:
- name: fake-label
min-ready: 0
providers:
- name: fake-provider
cloud: fake
driver: fake
region-name: fake-region
rate: 0.0001
diskimages:
- name: fake-image
pools:
- name: main
max-servers: 20
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
boot-from-volume: true
volume-size: 10
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
dib-cmd: nodepool/tests/fake-image-create
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -0,0 +1,48 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
zookeeper-tls:
ca: {zookeeper_ca}
cert: {zookeeper_cert}
key: {zookeeper_key}
labels:
- name: fake-label
min-ready: 0
providers:
- name: fake-provider
cloud: fake
driver: fake
region-name: fake-region
rate: 0.0001
diskimages:
- name: fake-image
pools:
- name: main
max-volume-gb: 20
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
boot-from-volume: true
volume-size: 10
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
dib-cmd: nodepool/tests/fake-image-create
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -0,0 +1,48 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
zookeeper-tls:
ca: {zookeeper_ca}
cert: {zookeeper_cert}
key: {zookeeper_key}
labels:
- name: fake-label
min-ready: 0
providers:
- name: fake-provider
cloud: fake
driver: fake
region-name: fake-region
rate: 0.0001
diskimages:
- name: fake-image
pools:
- name: main
max-volumes: 2
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
boot-from-volume: true
volume-size: 10
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
dib-cmd: nodepool/tests/fake-image-create
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -0,0 +1,52 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
zookeeper-tls:
ca: {zookeeper_ca}
cert: {zookeeper_cert}
key: {zookeeper_key}
tenant-resource-limits:
- tenant-name: tenant-1
max-volume-gb: 20
labels:
- name: fake-label
min-ready: 0
providers:
- name: fake-provider
cloud: fake
driver: fake
region-name: fake-region
rate: 0.0001
diskimages:
- name: fake-image
pools:
- name: main
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
boot-from-volume: true
volume-size: 10
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
dib-cmd: nodepool/tests/fake-image-create
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -0,0 +1,52 @@
elements-dir: .
images-dir: '{images_dir}'
build-log-dir: '{build_log_dir}'
zookeeper-servers:
- host: {zookeeper_host}
port: {zookeeper_port}
chroot: {zookeeper_chroot}
zookeeper-tls:
ca: {zookeeper_ca}
cert: {zookeeper_cert}
key: {zookeeper_key}
tenant-resource-limits:
- tenant-name: tenant-1
max-volumes: 2
labels:
- name: fake-label
min-ready: 0
providers:
- name: fake-provider
cloud: fake
driver: fake
region-name: fake-region
rate: 0.0001
diskimages:
- name: fake-image
pools:
- name: main
labels:
- name: fake-label
diskimage: fake-image
min-ram: 8192
boot-from-volume: true
volume-size: 10
diskimages:
- name: fake-image
elements:
- fedora
- vm
release: 21
dib-cmd: nodepool/tests/fake-image-create
env-vars:
TMPDIR: /opt/dib_tmp
DIB_IMAGE_CACHE: /opt/dib_cache
DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2


@ -179,7 +179,9 @@ class TestLauncher(tests.DBTestCase):
config,
max_cores=100,
max_instances=20,
max_ram=1000000):
max_ram=1000000,
max_volumes=100,
max_volume_gb=1000000):
'''
Successful node launch should have unlocked nodes in READY state
and assigned to the request. This should be run with a quota that
@ -189,10 +191,18 @@ class TestLauncher(tests.DBTestCase):
# patch the cloud with requested quota
def fake_get_quota():
return (max_cores, max_instances, max_ram)
def fake_get_volume_quota():
return (max_volumes, max_volume_gb)
self.useFixture(fixtures.MockPatchObject(
fakeadapter.FakeAdapter.fake_cloud, '_get_quota',
fake_get_quota
))
self.useFixture(fixtures.MockPatchObject(
fakeadapter.FakeAdapter.fake_cloud, '_get_volume_quota',
fake_get_volume_quota
))
configfile = self.setup_config(config)
self.useBuilder(configfile)
@ -290,6 +300,14 @@ class TestLauncher(tests.DBTestCase):
self._test_node_assignment_at_quota(
config='node_quota_pool_ram.yaml')
def test_node_assignment_at_pool_quota_volumes(self):
self._test_node_assignment_at_quota(
config='node_quota_pool_volumes.yaml')
def test_node_assignment_at_pool_quota_volume_gb(self):
self._test_node_assignment_at_quota(
config='node_quota_pool_volume_gb.yaml')
def _test_node_assignment_at_tenant_quota(self, config):
configfile = self.setup_config(config)
self.useBuilder(configfile)
@ -386,6 +404,18 @@ class TestLauncher(tests.DBTestCase):
self.assertReportedStat('nodepool.tenant_limits.tenant-1.ram',
value='16384', kind='g')
def test_node_assignment_at_tenant_quota_volumes(self):
self._test_node_assignment_at_tenant_quota(
'node_quota_tenant_volumes.yaml')
self.assertReportedStat('nodepool.tenant_limits.tenant-1.volumes',
value='2', kind='g')
def test_node_assignment_at_tenant_quota_volume_gb(self):
self._test_node_assignment_at_tenant_quota(
'node_quota_tenant_volume_gb.yaml')
self.assertReportedStat('nodepool.tenant_limits.tenant-1.volume-gb',
value='20', kind='g')
def test_node_assignment_at_tenant_quota_min_ready(self):
self._test_node_assignment_at_tenant_quota(
'node_quota_tenant_min_ready.yaml')
@ -412,6 +442,16 @@ class TestLauncher(tests.DBTestCase):
max_instances=math.inf,
max_ram=2 * 8192)
def test_node_assignment_at_cloud_volumes_quota(self):
self._test_node_assignment_at_quota(
config='node_quota_cloud_volumes.yaml',
max_volumes=2)
def test_node_assignment_at_cloud_volume_gb_quota(self):
self._test_node_assignment_at_quota(
config='node_quota_cloud_volumes.yaml',
max_volume_gb=20)
def test_decline_at_quota(self):
'''test that a provider at quota continues to decline requests'''


@ -0,0 +1,8 @@
---
features:
- |
The OpenStack driver now supports volume quota. It will
automatically register the limits from the cloud and ensure that
labels that utilize boot-from-volume stay under the limit. Limits
can also be specified at the pool and tenant level in Nodepool's
configuration.