diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 1373b4e6b..94effeda9 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -328,6 +328,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: debian-stretch diskimage: debian-stretch min-ram: 512 @@ -336,6 +342,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: fedora-29 diskimage: fedora-29 min-ram: 1024 @@ -344,6 +356,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: ubuntu-bionic diskimage: ubuntu-bionic min-ram: 512 @@ -352,6 +370,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: ubuntu-trusty diskimage: ubuntu-trusty min-ram: 512 @@ -360,6 +384,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: ubuntu-xenial diskimage: ubuntu-xenial min-ram: 512 @@ -368,6 +398,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: opensuse-423 diskimage: opensuse-423 min-ram: 512 @@ -376,6 +412,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + 
testpassed + path: /etc/testfile_nodepool_userdata - name: opensuse-150 diskimage: opensuse-150 min-ram: 512 @@ -384,6 +426,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: opensuse-tumbleweed diskimage: opensuse-tumbleweed min-ram: 512 @@ -392,6 +440,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata - name: gentoo-17-0-systemd diskimage: gentoo-17-0-systemd min-ram: 512 @@ -400,6 +454,12 @@ providers: key-name: $NODEPOOL_KEY_NAME instance-properties: nodepool_devstack: testing + userdata: | + #cloud-config + write_files: + - content: | + testpassed + path: /etc/testfile_nodepool_userdata diskimages: - name: centos-7 diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 4e772fa1e..34dcdd890 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -921,6 +921,15 @@ Selecting the OpenStack driver adds the following options to the ``meta-data`` on the active server (e.g. within ``config-drive:openstack/latest/meta_data.json``) + .. attr:: userdata + :type: str + :default: None + + A string of userdata for a node. Example usage is to install + cloud-init package on image which will apply the userdata. 
+ Additional info about options in cloud-config: + https://cloudinit.readthedocs.io/en/latest/topics/examples.html + Static Driver ------------- diff --git a/nodepool/driver/openstack/config.py b/nodepool/driver/openstack/config.py index 2f259978d..958609657 100644 --- a/nodepool/driver/openstack/config.py +++ b/nodepool/driver/openstack/config.py @@ -87,6 +87,7 @@ class ProviderLabel(ConfigValue): self.boot_from_volume = False self.volume_size = None self.instance_properties = None + self.userdata = None # The ProviderPool object that owns this label. self.pool = None @@ -103,7 +104,8 @@ class ProviderLabel(ConfigValue): other.console_log == self.console_log and other.boot_from_volume == self.boot_from_volume and other.volume_size == self.volume_size and - other.instance_properties == self.instance_properties) + other.instance_properties == self.instance_properties and + other.userdata == self.userdata) return False def __repr__(self): @@ -203,6 +205,7 @@ class ProviderPool(ConfigPool): pl.volume_size = label.get('volume-size', 50) pl.instance_properties = label.get('instance-properties', None) + pl.userdata = label.get('userdata', None) top_label = full_config.labels[pl.name] top_label.pools.append(self) @@ -355,6 +358,7 @@ class OpenStackProviderConfig(ProviderConfig): 'boot-from-volume': bool, 'volume-size': int, 'instance-properties': dict, + 'userdata': str, } label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True) diff --git a/nodepool/driver/openstack/handler.py b/nodepool/driver/openstack/handler.py index b72de912d..16f426adb 100644 --- a/nodepool/driver/openstack/handler.py +++ b/nodepool/driver/openstack/handler.py @@ -139,7 +139,8 @@ class OpenStackNodeLauncher(NodeLauncher): security_groups=self.pool.security_groups, boot_from_volume=self.label.boot_from_volume, volume_size=self.label.volume_size, - instance_properties=self.label.instance_properties) + instance_properties=self.label.instance_properties, + userdata=self.label.userdata) except 
openstack.cloud.exc.OpenStackCloudCreateException as e: if e.resource_id: self.node.external_id = e.resource_id diff --git a/nodepool/driver/openstack/provider.py b/nodepool/driver/openstack/provider.py index b9a1462d8..3c07c13dc 100755 --- a/nodepool/driver/openstack/provider.py +++ b/nodepool/driver/openstack/provider.py @@ -280,7 +280,7 @@ class OpenStackProvider(Provider): nodepool_image_name=None, networks=None, security_groups=None, boot_from_volume=False, volume_size=50, - instance_properties=None): + instance_properties=None, userdata=None): if not networks: networks = [] if not isinstance(image, dict): @@ -303,6 +303,8 @@ class OpenStackProvider(Provider): create_args['availability_zone'] = az if security_groups: create_args['security_groups'] = security_groups + if userdata: + create_args['userdata'] = userdata nics = [] for network in networks: net_id = self.findNetwork(network)['id'] diff --git a/nodepool/tests/fixtures/config_validate/good.yaml b/nodepool/tests/fixtures/config_validate/good.yaml index 0990261e9..9aba1340b 100644 --- a/nodepool/tests/fixtures/config_validate/good.yaml +++ b/nodepool/tests/fixtures/config_validate/good.yaml @@ -56,6 +56,12 @@ providers: instance-properties: a_key: a_value b_key: b_value + userdata: | + #cloud-config + password: password + chpasswd: { expire: False } + ssh_pwauth: True + hostname: test - name: cloud2 driver: openstack diff --git a/tools/check_devstack_plugin.sh b/tools/check_devstack_plugin.sh index 6f7a05d3c..db7c80833 100755 --- a/tools/check_devstack_plugin.sh +++ b/tools/check_devstack_plugin.sh @@ -61,6 +61,36 @@ function sshintonode { fi } +function showserver { + name=$1 + state='ready' + + node_id=`$NODEPOOL list | grep $name | grep $state | cut -d '|' -f5 | tr -d ' '` + EXPECTED=$(mktemp) + RESULT=$(mktemp) + source /opt/stack/devstack/openrc admin admin + + nova show $node_id | grep -Eo "user_data[ ]+.*|[ ]*$" | awk {'print $3'} |\ + base64 --decode > $RESULT + cat <<EOF >$EXPECTED +#cloud-config 
+write_files: +- content: | + testpassed + path: /etc/testfile_nodepool_userdata +EOF + diff $EXPECTED $RESULT + if [[ $? -ne 0 ]]; then + echo "*** Failed to find userdata on server!" + FAILURE_REASON="Failed to find userdata on server for $node" + echo "Expected userdata:" + cat $EXPECTED + echo "Found userdata:" + cat $RESULT + RETURN=1 + fi +} + function checknm { name=$1 state='ready' @@ -116,6 +146,8 @@ if [ ${NODEPOOL_PAUSE_CENTOS_7_DIB,,} = 'false' ]; then sshintonode centos-7 # networkmanager check checknm centos-7 + # userdata check + showserver centos-7 fi if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then @@ -125,6 +157,8 @@ if [ ${NODEPOOL_PAUSE_DEBIAN_STRETCH_DIB,,} = 'false' ]; then waitfornode debian-stretch # check ssh for root user sshintonode debian-stretch + # userdata check + showserver debian-stretch fi if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then @@ -136,6 +170,8 @@ if [ ${NODEPOOL_PAUSE_FEDORA_29_DIB,,} = 'false' ]; then sshintonode fedora-29 # networkmanager check checknm fedora-29 + # userdata check + showserver fedora-29 fi if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then @@ -145,6 +181,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_BIONIC_DIB,,} = 'false' ]; then waitfornode ubuntu-bionic # check ssh for root user sshintonode ubuntu-bionic + # userdata check + showserver ubuntu-bionic fi if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then @@ -154,6 +192,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_TRUSTY_DIB,,} = 'false' ]; then waitfornode ubuntu-trusty # check ssh for root user sshintonode ubuntu-trusty + # userdata check + showserver ubuntu-trusty fi if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then @@ -163,6 +203,8 @@ if [ ${NODEPOOL_PAUSE_UBUNTU_XENIAL_DIB,,} = 'false' ]; then waitfornode ubuntu-xenial # check ssh for root user sshintonode ubuntu-xenial + # userdata check + showserver ubuntu-xenial fi if [ ${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then @@ -172,6 +214,8 @@ if [ 
${NODEPOOL_PAUSE_OPENSUSE_423_DIB,,} = 'false' ]; then waitfornode opensuse-423 # check ssh for root user sshintonode opensuse-423 + # userdata check + showserver opensuse-423 fi if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then # check that image built @@ -180,6 +224,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_150_DIB,,} = 'false' ]; then waitfornode opensuse-150 # check ssh for root user sshintonode opensuse-150 + # userdata check + showserver opensuse-150 fi if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then # check that image built @@ -188,6 +234,8 @@ if [ ${NODEPOOL_PAUSE_OPENSUSE_TUMBLEWEED_DIB,,} = 'false' ]; then waitfornode opensuse-tumbleweed # check ssh for root user sshintonode opensuse-tumbleweed + # userdata check + showserver opensuse-tumbleweed fi if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then # check that image built @@ -196,6 +244,8 @@ if [ ${NODEPOOL_PAUSE_GENTOO_17_0_SYSTEMD_DIB,,} = 'false' ]; then waitfornode gentoo-17-0-systemd # check ssh for root user sshintonode gentoo-17-0-systemd + # userdata check + showserver gentoo-17-0-systemd fi set -o errexit