From aa1d4d224674f44d9cd882eddb2537907adf5382 Mon Sep 17 00:00:00 2001 From: Lingxian Kong Date: Tue, 7 Apr 2020 10:52:16 +1200 Subject: [PATCH] Datastore containerization Significant changes: * Using docker image to install datastore. * Datastore image is common to different datastores. * Using backup docker image to do backup and restore. * Support MariaDB replication * Set most of the functional jobs as non-voting as nested virtualization is not supported in CI. Change-Id: Ia9c97a63a961eebc336b70d28dc77638144c1834 --- .zuul.yaml | 36 +- api-ref/source/parameters.yaml | 10 +- ...instance-patch-detach-replica-request.json | 3 +- backup/Dockerfile | 41 + .../experimental => backup}/__init__.py | 0 .../drivers}/__init__.py | 0 backup/drivers/base.py | 207 + backup/drivers/innobackupex.py | 137 + backup/drivers/mariabackup.py | 87 + backup/drivers/mysql_base.py | 139 + backup/main.py | 149 + backup/requirements.txt | 6 + .../cassandra => backup/storage}/__init__.py | 0 backup/storage/base.py | 48 + backup/storage/swift.py | 294 ++ devstack/plugin.sh | 23 +- devstack/settings | 16 +- doc/source/admin/building_guest_images.rst | 93 +- doc/source/admin/run_trove_in_production.rst | 2 +- etc/trove/cloudinit/README | 3 - etc/trove/conf.d/README | 4 - etc/trove/conf.d/guest_info.conf | 1 - etc/trove/trove-guestagent.conf.sample | 166 - etc/trove/trove-workbook.yaml | 19 - etc/trove/trove.conf.sample | 311 -- integration/scripts/conf.json.example | 12 - .../files/elements/apt-conf-dir/README.rst | 16 - .../extra-data.d/99-use-host-apt-confd | 21 - .../files/elements/guest-agent/element-deps | 1 + .../99-reliable-apt-key-importing.bash | 34 - .../31-guest-agent-install | 51 + .../75-guest-agent-install | 45 - .../guest-agent-dev.service | 31 + .../guest-agent.service | 7 +- .../guest-agent/package-installs.yaml | 54 +- ...-systemd => 31-enable-guest-agent-systemd} | 0 .../files/elements/no-resolvconf/README.rst | 8 - .../finalise.d/99-disable-resolv-conf | 19 - .../files/elements/ubuntu-docker/element-deps | 1 + .../ubuntu-docker/install.d/21-docker | 19 + .../99-reliable-apt-key-importing.bash | 34 - .../ubuntu-guest/extra-data.d/11-ssh-key-dev | 17 + .../ubuntu-guest/extra-data.d/15-trove-dep | 28 - .../ubuntu-guest/extra-data.d/62-ssh-key | 27 - .../ubuntu-guest/install.d/05-base-apps | 10 - .../install.d/11-user} | 2 +- .../ubuntu-guest/install.d/12-ssh-key-dev | 22 + .../ubuntu-guest/install.d/15-trove-dep | 37 - .../elements/ubuntu-guest/install.d/50-user | 18 - .../ubuntu-guest/install.d/62-ssh-key | 25 - .../elements/ubuntu-guest/install.d/98-ssh | 8 - .../ubuntu-guest/install.d/99-clean-apt | 11 - .../{05-ipforwarding => 11-ipforwarding} | 0 .../post-install.d/{10-ntp => 12-ntp} | 0 ...e-guest-sudoers => 13-trove-guest-sudoers} | 0 .../pre-install.d/04-baseline-tools | 7 - .../pre-install.d/11-baseline-tools | 7 + .../files/elements/ubuntu-mariadb/README.md | 3 - .../pre-install.d/20-apparmor-mysql-local | 11 - .../pre-install.d/20-apparmor-mysql-local | 12 - .../ubuntu-postgresql/install.d/30-postgresql | 25 - .../pre-install.d/10-postgresql-repo | 14 - .../elements/ubuntu-xenial-guest/element-deps | 1 - .../extra-data.d/20-guest-systemd | 21 - .../ubuntu-xenial-guest/install.d/20-etc | 8 - .../install.d/21-use-ubuntu-certificates | 12 - .../post-install.d/91-hwe-kernel | 29 - .../pre-install.d/01-trim-pkgs | 90 - .../ubuntu-xenial-mariadb/element-deps | 1 - .../install.d/30-mariadb | 39 - .../elements/ubuntu-xenial-mysql/element-deps | 1 - .../ubuntu-xenial-mysql/install.d/30-mysql | 39 - 
.../ubuntu-xenial-postgresql/element-deps | 1 - integration/scripts/functions_qemu | 85 +- .../local.conf.d/ceilometer_cinder.conf.rc | 3 - .../local.conf.d/ceilometer_nova.conf.rc | 3 - .../local.conf.d/ceilometer_services.conf.rc | 3 - integration/scripts/local.conf.d/sample.rc | 42 - .../local.conf.d/trove_services.conf.rc | 24 - integration/scripts/local.conf.d/use_kvm.rc | 4 - .../scripts/local.conf.d/use_uuid_token.rc | 3 - .../scripts/local.conf.d/using_vagrant.rc | 9 - integration/scripts/local.conf.rc | 37 - integration/scripts/trovestack | 158 +- lower-constraints.txt | 1 + playbooks/image-build/run.yaml | 3 +- requirements.txt | 1 + roles/trove-devstack/defaults/main.yml | 2 +- tools/trove-pylint.config | 6 + tox.ini | 6 +- trove/backup/models.py | 2 +- trove/cmd/guest.py | 16 +- trove/common/cfg.py | 234 +- trove/common/exception.py | 18 +- trove/common/schemas/atom-link.rng | 141 - trove/common/schemas/atom.rng | 597 --- trove/common/schemas/v1.1/limits.rng | 28 - trove/common/utils.py | 10 +- trove/configuration/service.py | 6 +- trove/guestagent/api.py | 12 +- trove/guestagent/backup/__init__.py | 45 - trove/guestagent/backup/backupagent.py | 178 - trove/guestagent/common/configuration.py | 23 +- trove/guestagent/common/guestagent_utils.py | 24 + trove/guestagent/common/operating_system.py | 105 +- .../experimental/cassandra/manager.py | 368 -- .../experimental/cassandra/service.py | 1314 ------ .../experimental/couchbase/manager.py | 122 - .../experimental/couchbase/service.py | 268 -- .../experimental/couchbase/system.py | 50 - .../datastore/experimental/couchdb/manager.py | 167 - .../datastore/experimental/couchdb/service.py | 584 --- .../datastore/experimental/couchdb/system.py | 74 - .../datastore/experimental/db2/__init__.py | 0 .../datastore/experimental/db2/manager.py | 157 - .../datastore/experimental/db2/service.py | 626 --- .../datastore/experimental/db2/system.py | 89 - .../experimental/mariadb/__init__.py | 0 .../datastore/experimental/mariadb/manager.py | 29 - .../datastore/experimental/mariadb/service.py | 109 - .../experimental/mongodb/__init__.py | 0 .../datastore/experimental/mongodb/manager.py | 268 -- .../datastore/experimental/mongodb/service.py | 843 ---- .../datastore/experimental/mongodb/system.py | 46 - .../experimental/percona/__init__.py | 0 .../datastore/experimental/percona/manager.py | 36 - .../datastore/experimental/percona/service.py | 84 - .../experimental/postgresql/__init__.py | 0 .../experimental/postgresql/manager.py | 344 -- .../experimental/postgresql/pgsql_query.py | 177 - .../experimental/postgresql/service.py | 1058 ----- .../datastore/experimental/pxc/__init__.py | 0 .../datastore/experimental/pxc/manager.py | 27 - .../datastore/experimental/pxc/service.py | 56 - .../datastore/experimental/redis/__init__.py | 0 .../datastore/experimental/redis/manager.py | 345 -- .../datastore/experimental/redis/service.py | 561 --- .../datastore/experimental/redis/system.py | 37 - .../experimental/vertica/__init__.py | 0 .../datastore/experimental/vertica/manager.py | 161 - .../datastore/experimental/vertica/service.py | 618 --- .../datastore/experimental/vertica/system.py | 125 - .../datastore/galera_common/__init__.py | 0 .../datastore/galera_common/manager.py | 80 - .../datastore/galera_common/service.py | 93 - trove/guestagent/datastore/manager.py | 283 +- .../couchbase => mariadb}/__init__.py | 0 trove/guestagent/datastore/mariadb/manager.py | 26 + trove/guestagent/datastore/mariadb/service.py | 88 + trove/guestagent/datastore/mysql/manager.py 
| 38 +- trove/guestagent/datastore/mysql/service.py | 90 +- .../datastore/mysql_common/manager.py | 616 ++- .../datastore/mysql_common/service.py | 969 ++--- trove/guestagent/datastore/service.py | 90 +- .../datastore/technical-preview/__init__.py | 0 trove/guestagent/dbaas.py | 45 +- .../guestagent/strategies/backup/__init__.py | 25 - trove/guestagent/strategies/backup/base.py | 145 - .../backup/experimental/__init__.py | 0 .../backup/experimental/cassandra_impl.py | 117 - .../backup/experimental/couchbase_impl.py | 108 - .../backup/experimental/couchdb_impl.py | 35 - .../backup/experimental/db2_impl.py | 176 - .../backup/experimental/mariadb_impl.py | 112 - .../backup/experimental/mongo_impl.py | 106 - .../backup/experimental/postgresql_impl.py | 257 -- .../backup/experimental/redis_impl.py | 39 - .../strategies/backup/mysql_impl.py | 150 - .../guestagent/strategies/replication/base.py | 4 +- .../replication/experimental/__init__.py | 0 .../experimental/postgresql_impl.py | 306 -- .../replication/experimental/redis_sync.py | 91 - .../{experimental => }/mariadb_gtid.py | 58 +- .../strategies/replication/mysql_base.py | 99 +- .../strategies/replication/mysql_binlog.py | 79 - .../strategies/replication/mysql_gtid.py | 63 +- .../guestagent/strategies/restore/__init__.py | 24 - trove/guestagent/strategies/restore/base.py | 113 - .../restore/experimental/__init__.py | 0 .../restore/experimental/cassandra_impl.py | 69 - .../restore/experimental/couchbase_impl.py | 199 - .../restore/experimental/couchdb_impl.py | 41 - .../restore/experimental/db2_impl.py | 90 - .../restore/experimental/mariadb_impl.py | 160 - .../restore/experimental/mongo_impl.py | 51 - .../restore/experimental/postgresql_impl.py | 202 - .../restore/experimental/redis_impl.py | 74 - .../strategies/restore/mysql_impl.py | 364 -- .../couchdb => utils}/__init__.py | 0 trove/guestagent/utils/docker.py | 152 + trove/guestagent/utils/mysql.py | 85 + trove/instance/models.py | 28 +- trove/instance/service.py | 1 - trove/taskmanager/api.py | 6 +- trove/taskmanager/manager.py | 129 +- trove/taskmanager/models.py | 284 +- trove/templates/mariadb/config.template | 1 - .../templates/mariadb/replica.config.template | 3 +- .../mariadb/replica_source.config.template | 2 + trove/templates/mysql/config.template | 1 - trove/tests/api/backups.py | 4 +- trove/tests/api/configurations.py | 6 +- trove/tests/api/instances.py | 1 + trove/tests/api/instances_actions.py | 31 +- trove/tests/api/instances_delete.py | 10 + trove/tests/api/instances_resize.py | 31 +- trove/tests/api/mgmt/datastore_versions.py | 3 + trove/tests/api/replication.py | 49 +- trove/tests/fakes/guestagent.py | 2 +- trove/tests/scenario/groups/backup_group.py | 92 +- .../tests/scenario/runners/backup_runners.py | 4 +- .../scenario/runners/replication_runners.py | 4 +- trove/tests/scenario/runners/test_runners.py | 2 +- .../unittests/backup/test_backupagent.py | 580 --- trove/tests/unittests/backup/test_storage.py | 364 -- trove/tests/unittests/guestagent/__init__.py | 0 .../test_agent_heartbeats_models.py | 235 -- trove/tests/unittests/guestagent/test_api.py | 507 --- .../unittests/guestagent/test_backups.py | 983 ----- .../guestagent/test_cassandra_manager.py | 812 ---- .../guestagent/test_configuration.py | 460 -- .../guestagent/test_couchbase_manager.py | 173 - .../guestagent/test_couchdb_manager.py | 316 -- .../guestagent/test_datastore_manager.py | 24 - .../tests/unittests/guestagent/test_dbaas.py | 3721 ----------------- .../guestagent/test_galera_cluster_api.py | 151 - 
.../guestagent/test_galera_manager.py | 123 - .../guestagent/test_guestagent_utils.py | 177 - .../unittests/guestagent/test_manager.py | 556 --- .../guestagent/test_mariadb_manager.py | 65 - .../tests/unittests/guestagent/test_models.py | 69 - .../guestagent/test_mysql_manager.py | 718 ---- .../guestagent/test_operating_system.py | 1300 ------ trove/tests/unittests/guestagent/test_pkg.py | 560 --- .../tests/unittests/guestagent/test_query.py | 420 -- .../guestagent/test_redis_manager.py | 380 -- .../unittests/guestagent/test_service.py | 31 - .../tests/unittests/guestagent/test_volume.py | 314 -- trove/tests/unittests/taskmanager/test_api.py | 2 +- .../unittests/taskmanager/test_manager.py | 27 +- .../unittests/taskmanager/test_models.py | 43 +- trove/tests/util/server_connection.py | 2 +- 242 files changed, 3364 insertions(+), 29701 deletions(-) create mode 100644 backup/Dockerfile rename {trove/common/strategies/storage/experimental => backup}/__init__.py (100%) rename {trove/guestagent/datastore/experimental => backup/drivers}/__init__.py (100%) create mode 100644 backup/drivers/base.py create mode 100644 backup/drivers/innobackupex.py create mode 100644 backup/drivers/mariabackup.py create mode 100644 backup/drivers/mysql_base.py create mode 100644 backup/main.py create mode 100644 backup/requirements.txt rename {trove/guestagent/datastore/experimental/cassandra => backup/storage}/__init__.py (100%) create mode 100644 backup/storage/base.py create mode 100644 backup/storage/swift.py delete mode 100644 etc/trove/cloudinit/README delete mode 100644 etc/trove/conf.d/README delete mode 100644 etc/trove/conf.d/guest_info.conf delete mode 100644 etc/trove/trove-guestagent.conf.sample delete mode 100644 etc/trove/trove-workbook.yaml delete mode 100644 etc/trove/trove.conf.sample delete mode 100644 integration/scripts/conf.json.example delete mode 100644 integration/scripts/files/elements/apt-conf-dir/README.rst delete mode 100755 integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd delete mode 100644 integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash create mode 100755 integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/31-guest-agent-install delete mode 100755 integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install create mode 100644 integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent-dev.service rename integration/scripts/files/elements/guest-agent/post-install.d/{11-enable-guest-agent-systemd => 31-enable-guest-agent-systemd} (100%) delete mode 100644 integration/scripts/files/elements/no-resolvconf/README.rst delete mode 100755 integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf create mode 100644 integration/scripts/files/elements/ubuntu-docker/element-deps create mode 100755 integration/scripts/files/elements/ubuntu-docker/install.d/21-docker delete mode 100644 integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash create mode 100755 integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev delete mode 100755 integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep delete mode 100755 integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps rename 
integration/scripts/files/elements/{guest-agent/install.d/50-user => ubuntu-guest/install.d/11-user} (99%) create mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/50-user delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh delete mode 100755 integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt rename integration/scripts/files/elements/ubuntu-guest/post-install.d/{05-ipforwarding => 11-ipforwarding} (100%) rename integration/scripts/files/elements/ubuntu-guest/post-install.d/{10-ntp => 12-ntp} (100%) rename integration/scripts/files/elements/ubuntu-guest/post-install.d/{62-trove-guest-sudoers => 13-trove-guest-sudoers} (100%) delete mode 100755 integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools create mode 100755 integration/scripts/files/elements/ubuntu-guest/pre-install.d/11-baseline-tools delete mode 100644 integration/scripts/files/elements/ubuntu-mariadb/README.md delete mode 100755 integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local delete mode 100755 integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local delete mode 100755 integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql delete mode 100755 integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo delete mode 100644 integration/scripts/files/elements/ubuntu-xenial-guest/element-deps delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs delete mode 100644 integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb delete mode 100644 integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps delete mode 100755 integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql delete mode 100644 integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps delete mode 100644 integration/scripts/local.conf.d/ceilometer_cinder.conf.rc delete mode 100644 integration/scripts/local.conf.d/ceilometer_nova.conf.rc delete mode 100644 integration/scripts/local.conf.d/ceilometer_services.conf.rc delete mode 100644 integration/scripts/local.conf.d/sample.rc delete mode 100644 integration/scripts/local.conf.d/trove_services.conf.rc delete mode 100644 integration/scripts/local.conf.d/use_kvm.rc delete mode 100644 integration/scripts/local.conf.d/use_uuid_token.rc delete mode 100644 integration/scripts/local.conf.d/using_vagrant.rc delete mode 100644 integration/scripts/local.conf.rc delete mode 100644 trove/common/schemas/atom-link.rng delete mode 100644 trove/common/schemas/atom.rng delete mode 100644 trove/common/schemas/v1.1/limits.rng delete mode 100644 
trove/guestagent/backup/__init__.py delete mode 100644 trove/guestagent/backup/backupagent.py delete mode 100644 trove/guestagent/datastore/experimental/cassandra/manager.py delete mode 100644 trove/guestagent/datastore/experimental/cassandra/service.py delete mode 100644 trove/guestagent/datastore/experimental/couchbase/manager.py delete mode 100644 trove/guestagent/datastore/experimental/couchbase/service.py delete mode 100644 trove/guestagent/datastore/experimental/couchbase/system.py delete mode 100644 trove/guestagent/datastore/experimental/couchdb/manager.py delete mode 100644 trove/guestagent/datastore/experimental/couchdb/service.py delete mode 100644 trove/guestagent/datastore/experimental/couchdb/system.py delete mode 100644 trove/guestagent/datastore/experimental/db2/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/db2/manager.py delete mode 100644 trove/guestagent/datastore/experimental/db2/service.py delete mode 100644 trove/guestagent/datastore/experimental/db2/system.py delete mode 100644 trove/guestagent/datastore/experimental/mariadb/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/mariadb/manager.py delete mode 100644 trove/guestagent/datastore/experimental/mariadb/service.py delete mode 100644 trove/guestagent/datastore/experimental/mongodb/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/mongodb/manager.py delete mode 100644 trove/guestagent/datastore/experimental/mongodb/service.py delete mode 100644 trove/guestagent/datastore/experimental/mongodb/system.py delete mode 100644 trove/guestagent/datastore/experimental/percona/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/percona/manager.py delete mode 100644 trove/guestagent/datastore/experimental/percona/service.py delete mode 100644 trove/guestagent/datastore/experimental/postgresql/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/postgresql/manager.py delete mode 100644 trove/guestagent/datastore/experimental/postgresql/pgsql_query.py delete mode 100644 trove/guestagent/datastore/experimental/postgresql/service.py delete mode 100644 trove/guestagent/datastore/experimental/pxc/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/pxc/manager.py delete mode 100644 trove/guestagent/datastore/experimental/pxc/service.py delete mode 100644 trove/guestagent/datastore/experimental/redis/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/redis/manager.py delete mode 100644 trove/guestagent/datastore/experimental/redis/service.py delete mode 100644 trove/guestagent/datastore/experimental/redis/system.py delete mode 100644 trove/guestagent/datastore/experimental/vertica/__init__.py delete mode 100644 trove/guestagent/datastore/experimental/vertica/manager.py delete mode 100644 trove/guestagent/datastore/experimental/vertica/service.py delete mode 100644 trove/guestagent/datastore/experimental/vertica/system.py delete mode 100644 trove/guestagent/datastore/galera_common/__init__.py delete mode 100644 trove/guestagent/datastore/galera_common/manager.py delete mode 100644 trove/guestagent/datastore/galera_common/service.py rename trove/guestagent/datastore/{experimental/couchbase => mariadb}/__init__.py (100%) create mode 100644 trove/guestagent/datastore/mariadb/manager.py create mode 100644 trove/guestagent/datastore/mariadb/service.py delete mode 100644 trove/guestagent/datastore/technical-preview/__init__.py delete mode 100644 trove/guestagent/strategies/backup/__init__.py delete 
mode 100644 trove/guestagent/strategies/backup/base.py delete mode 100644 trove/guestagent/strategies/backup/experimental/__init__.py delete mode 100644 trove/guestagent/strategies/backup/experimental/cassandra_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/couchbase_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/couchdb_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/db2_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/mariadb_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/mongo_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/postgresql_impl.py delete mode 100644 trove/guestagent/strategies/backup/experimental/redis_impl.py delete mode 100644 trove/guestagent/strategies/backup/mysql_impl.py delete mode 100644 trove/guestagent/strategies/replication/experimental/__init__.py delete mode 100644 trove/guestagent/strategies/replication/experimental/postgresql_impl.py delete mode 100644 trove/guestagent/strategies/replication/experimental/redis_sync.py rename trove/guestagent/strategies/replication/{experimental => }/mariadb_gtid.py (51%) delete mode 100644 trove/guestagent/strategies/replication/mysql_binlog.py delete mode 100644 trove/guestagent/strategies/restore/__init__.py delete mode 100644 trove/guestagent/strategies/restore/base.py delete mode 100644 trove/guestagent/strategies/restore/experimental/__init__.py delete mode 100644 trove/guestagent/strategies/restore/experimental/cassandra_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/couchbase_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/couchdb_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/db2_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/mariadb_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/mongo_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/postgresql_impl.py delete mode 100644 trove/guestagent/strategies/restore/experimental/redis_impl.py delete mode 100644 trove/guestagent/strategies/restore/mysql_impl.py rename trove/guestagent/{datastore/experimental/couchdb => utils}/__init__.py (100%) create mode 100644 trove/guestagent/utils/docker.py create mode 100644 trove/guestagent/utils/mysql.py delete mode 100644 trove/tests/unittests/backup/test_backupagent.py delete mode 100644 trove/tests/unittests/backup/test_storage.py delete mode 100644 trove/tests/unittests/guestagent/__init__.py delete mode 100644 trove/tests/unittests/guestagent/test_agent_heartbeats_models.py delete mode 100644 trove/tests/unittests/guestagent/test_api.py delete mode 100644 trove/tests/unittests/guestagent/test_backups.py delete mode 100644 trove/tests/unittests/guestagent/test_cassandra_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_configuration.py delete mode 100644 trove/tests/unittests/guestagent/test_couchbase_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_couchdb_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_datastore_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_dbaas.py delete mode 100644 trove/tests/unittests/guestagent/test_galera_cluster_api.py delete mode 100644 trove/tests/unittests/guestagent/test_galera_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_guestagent_utils.py delete mode 100644 
trove/tests/unittests/guestagent/test_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_mariadb_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_models.py delete mode 100644 trove/tests/unittests/guestagent/test_mysql_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_operating_system.py delete mode 100644 trove/tests/unittests/guestagent/test_pkg.py delete mode 100644 trove/tests/unittests/guestagent/test_query.py delete mode 100644 trove/tests/unittests/guestagent/test_redis_manager.py delete mode 100644 trove/tests/unittests/guestagent/test_service.py delete mode 100644 trove/tests/unittests/guestagent/test_volume.py diff --git a/.zuul.yaml b/.zuul.yaml index 43f777fceb..2d1b19bc4b 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -14,15 +14,14 @@ - openstack-tox-pylint - trove-tox-bandit-baseline: voting: false - - trove-tempest: + - trove-tempest + - trove-functional-mysql: voting: false - - trove-functional-mysql - - trove-scenario-mysql-single - - trove-scenario-mysql-multi - - trove-scenario-mariadb-single - - trove-scenario-postgresql-single: + - trove-scenario-mysql-single: voting: false - - trove-scenario-postgresql-multi: + - trove-scenario-mysql-multi: + voting: false + - trove-scenario-mariadb-single: voting: false - trove-scenario-mariadb-multi: voting: false @@ -34,9 +33,12 @@ queue: trove jobs: - openstack-tox-pylint - - trove-functional-mysql - - trove-scenario-mysql-single - - trove-scenario-mysql-multi + - trove-functional-mysql: + voting: false + - trove-scenario-mysql-single: + voting: false + - trove-scenario-mysql-multi: + voting: false experimental: jobs: - trove-grenade @@ -145,7 +147,7 @@ trove_resize_time_out: 1800 trove_test_datastore: 'mysql' trove_test_group: 'mysql' - trove_test_datastore_version: '5.7' + trove_test_datastore_version: '5.7.29' - job: name: trove-functional-mysql-nondev @@ -153,11 +155,11 @@ vars: devstack_localrc: TROVE_RESIZE_TIME_OUT: 1800 - TROVE_NON_DEV_IMAGE_URL_MYSQL: https://tarballs.opendev.org/openstack/trove/images/trove-master-mysql-ubuntu-xenial.qcow2 + TROVE_NON_DEV_IMAGE_URL: https://tarballs.opendev.org/openstack/trove/images/trove-master-mysql-ubuntu-xenial.qcow2 trove_resize_time_out: 1800 trove_test_datastore: 'mysql' trove_test_group: 'mysql' - trove_test_datastore_version: '5.7' + trove_test_datastore_version: '5.7.29' - job: name: trove-grenade @@ -212,7 +214,7 @@ vars: trove_test_datastore: mariadb trove_test_group: mariadb-supported-single - trove_test_datastore_version: 10.4 + trove_test_datastore_version: 10.4.12 devstack_localrc: TROVE_ENABLE_IMAGE_BUILD: false @@ -222,7 +224,7 @@ vars: trove_test_datastore: mariadb trove_test_group: mariadb-supported-multi - trove_test_datastore_version: 10.4 + trove_test_datastore_version: 10.4.12 devstack_localrc: TROVE_ENABLE_IMAGE_BUILD: false @@ -232,7 +234,7 @@ vars: trove_test_datastore: mysql trove_test_group: mysql-supported-single - trove_test_datastore_version: 5.7 + trove_test_datastore_version: 5.7.29 - job: name: trove-scenario-mysql-multi @@ -240,7 +242,7 @@ vars: trove_test_datastore: mysql trove_test_group: mysql-supported-multi - trove_test_datastore_version: 5.7 + trove_test_datastore_version: 5.7.29 - job: name: trove-scenario-percona-multi diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index c6279686b4..b78359231a 100755 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -688,10 +688,9 @@ replica_count: type: integer replica_of: description: | - ID or name of 
an existing instance to replicate - from. + ID or name of an existing instance to replicate from. in: body - required: false + required: true type: string restore_point: description: | @@ -735,9 +734,10 @@ shard_id: type: string slave_of: description: | - To detach a replica, set ``slave_of`` to null. + To detach a replica, set ``slave_of`` to null. Deprecated in favor of + ``replica_of``. in: body - required: true + required: false type: string tenant_id: description: | diff --git a/api-ref/source/samples/instance-patch-detach-replica-request.json b/api-ref/source/samples/instance-patch-detach-replica-request.json index f61219c8a1..b45d75c7f4 100644 --- a/api-ref/source/samples/instance-patch-detach-replica-request.json +++ b/api-ref/source/samples/instance-patch-detach-replica-request.json @@ -1,6 +1,5 @@ { "instance": { - "replica_of": null, - "slave_of": null + "replica_of": null } } diff --git a/backup/Dockerfile b/backup/Dockerfile new file mode 100644 index 0000000000..c260e50b82 --- /dev/null +++ b/backup/Dockerfile @@ -0,0 +1,41 @@ +FROM ubuntu:18.04 +LABEL maintainer="anlin.kong@gmail.com" + +ARG APTOPTS="-y -qq --no-install-recommends --allow-unauthenticated" +ARG PERCONA_XTRABACKUP_VERSION=24 +ENV DEBIAN_FRONTEND noninteractive +ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 + +RUN apt-get update \ + && apt-get install $APTOPTS gnupg2 lsb-release apt-utils apt-transport-https ca-certificates software-properties-common curl \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install percona-xtrabackup for mysql +RUN curl -sSL https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb -o percona-release.deb \ + && dpkg -i percona-release.deb \ + && percona-release enable-only tools release \ + && apt-get update \ + && apt-get install $APTOPTS percona-xtrabackup-${PERCONA_XTRABACKUP_VERSION} \ + && apt-get clean + +# Install mariabackup for mariadb +RUN apt-key adv --fetch-keys 'https://mariadb.org/mariadb_release_signing_key.asc' \ + && add-apt-repository "deb [arch=amd64] http://mirror2.hs-esslingen.de/mariadb/repo/10.4/ubuntu $(lsb_release -cs) main" \ + && apt-get update \ + && apt-get install $APTOPTS mariadb-backup \ + && apt-get clean + +RUN apt-get update \ + && apt-get install $APTOPTS build-essential python3-setuptools python3-all python3-all-dev python3-pip libffi-dev libssl-dev libxml2-dev libxslt1-dev libyaml-dev \ + && apt-get clean + +COPY . /opt/trove/backup +WORKDIR /opt/trove/backup + +RUN pip3 --no-cache-dir install -U -r requirements.txt + +RUN curl -sSL https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64 -o /usr/local/bin/dumb-init \ + && chmod +x /usr/local/bin/dumb-init + +ENTRYPOINT ["dumb-init", "--single-child", "--"] diff --git a/trove/common/strategies/storage/experimental/__init__.py b/backup/__init__.py similarity index 100% rename from trove/common/strategies/storage/experimental/__init__.py rename to backup/__init__.py diff --git a/trove/guestagent/datastore/experimental/__init__.py b/backup/drivers/__init__.py similarity index 100% rename from trove/guestagent/datastore/experimental/__init__.py rename to backup/drivers/__init__.py diff --git a/backup/drivers/base.py b/backup/drivers/base.py new file mode 100644 index 0000000000..033553bcf9 --- /dev/null +++ b/backup/drivers/base.py @@ -0,0 +1,207 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import signal +import subprocess + +from oslo_config import cfg +from oslo_log import log as logging + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class BaseRunner(object): + """Base class for Backup Strategy implementations.""" + + # Subclass should provide the commands. + cmd = None + restore_cmd = None + prepare_cmd = None + + encrypt_key = CONF.backup_encryption_key + default_data_dir = '/var/lib/mysql/data' + + def __init__(self, *args, **kwargs): + self.process = None + self.pid = None + self.base_filename = kwargs.get('filename') + self.storage = kwargs.pop('storage', None) + self.location = kwargs.pop('location', '') + self.checksum = kwargs.pop('checksum', '') + + if 'restore_location' not in kwargs: + kwargs['restore_location'] = self.default_data_dir + self.restore_location = kwargs['restore_location'] + + self.command = self.cmd % kwargs + self.restore_command = (self.decrypt_cmd + + self.unzip_cmd + + (self.restore_cmd % kwargs)) + self.prepare_command = self.prepare_cmd % kwargs + + @property + def filename(self): + """Subclasses may overwrite this to declare a format (.tar).""" + return self.base_filename + + @property + def manifest(self): + """Target file name.""" + return "%s%s%s" % (self.filename, + self.zip_manifest, + self.encrypt_manifest) + + @property + def zip_cmd(self): + return ' | gzip' + + @property + def unzip_cmd(self): + return 'gzip -d -c | ' + + @property + def zip_manifest(self): + return '.gz' + + @property + def encrypt_cmd(self): + return (' | openssl enc -aes-256-cbc -md sha512 -pbkdf2 -iter 10000 ' + '-salt -pass pass:%s' % + self.encrypt_key) if self.encrypt_key else '' + + @property + def decrypt_cmd(self): + if self.encrypt_key: + return ('openssl enc -d -aes-256-cbc -md sha512 -pbkdf2 -iter ' + '10000 -salt -pass pass:%s | ' + % self.encrypt_key) + else: + return '' + + @property + def encrypt_manifest(self): + return '.enc' if self.encrypt_key else '' + + def _run(self): + LOG.info("Running backup cmd: %s", self.command) + self.process = subprocess.Popen(self.command, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid) + self.pid = self.process.pid + + def __enter__(self): + """Start up the process.""" + self.pre_backup() + self._run() + return self + + def __exit__(self, exc_type, exc_value, traceback): + """Clean up everything.""" + if getattr(self, 'process', None): + try: + # Send a sigterm to the session leader, so that all + # child processes are killed and cleaned up on terminate + os.killpg(self.process.pid, signal.SIGTERM) + self.process.terminate() + except OSError: + pass + + if exc_type is not None: + return False + + try: + err = self.process.stderr.read() + if err: + raise Exception(err) + except OSError: + pass + + if not self.check_process(): + raise Exception() + + self.post_backup() + + return True + + def read(self, chunk_size): + return self.process.stdout.read(chunk_size) + + def get_metadata(self): + """Hook for subclasses to get metadata from the backup.""" + return {} + + def check_process(self): + """Hook for subclasses to check process for errors.""" 
+ return True + + def check_restore_process(self): + """Hook for subclasses to check the restore process for errors.""" + return True + + def pre_backup(self): + """Hook for subclasses to run commands before backup.""" + pass + + def post_backup(self): + """Hook for subclasses to run commands after backup.""" + pass + + def pre_restore(self): + """Hook that is called before the restore command.""" + pass + + def post_restore(self): + """Hook that is called after the restore command.""" + pass + + def unpack(self, location, checksum, command): + stream = self.storage.load(location, checksum) + + LOG.info('Running restore from stream, command: %s', command) + self.process = subprocess.Popen(command, shell=True, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE) + content_length = 0 + for chunk in stream: + self.process.stdin.write(chunk) + content_length += len(chunk) + self.process.stdin.close() + + try: + err = self.process.stderr.read() + if err: + raise Exception(err) + except OSError: + pass + + if not self.check_restore_process(): + raise Exception() + + return content_length + + def run_restore(self): + return self.unpack(self.location, self.checksum, self.restore_command) + + def restore(self): + """Restore backup to data directory. + + :returns Restored data size. + """ + self.pre_restore() + content_length = self.run_restore() + self.post_restore() + return content_length diff --git a/backup/drivers/innobackupex.py b/backup/drivers/innobackupex.py new file mode 100644 index 0000000000..e077d49714 --- /dev/null +++ b/backup/drivers/innobackupex.py @@ -0,0 +1,137 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re + +from oslo_concurrency import processutils +from oslo_config import cfg +from oslo_log import log as logging + +from backup.drivers import mysql_base + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class InnoBackupEx(mysql_base.MySQLBaseRunner): + """Implementation of Backup and Restore for InnoBackupEx.""" + backup_log = '/tmp/innobackupex.log' + prepare_log = '/tmp/prepare.log' + restore_cmd = ('xbstream -x -C %(restore_location)s --parallel=2' + ' 2>/tmp/xbstream_extract.log') + prepare_cmd = ('innobackupex' + ' --defaults-file=%(restore_location)s/backup-my.cnf' + ' --ibbackup=xtrabackup' + ' --apply-log' + ' %(restore_location)s' + ' 2>' + prepare_log) + + @property + def cmd(self): + cmd = ('innobackupex' + ' --stream=xbstream' + ' --parallel=2 ' + + self.user_and_pass + ' %s' % self.default_data_dir + + ' 2>' + self.backup_log + ) + return cmd + self.zip_cmd + self.encrypt_cmd + + def check_restore_process(self): + """Check whether xbstream restore is successful.""" + LOG.info('Checking return code of xbstream restore process.') + return_code = self.process.wait() + if return_code != 0: + LOG.error('xbstream exited with %s', return_code) + return False + + with open('/tmp/xbstream_extract.log', 'r') as xbstream_log: + for line in xbstream_log: + # Ignore empty lines + if not line.strip(): + continue + + LOG.error('xbstream restore failed with: %s', + line.rstrip('\n')) + return False + + return True + + def post_restore(self): + """Hook that is called after the restore command.""" + LOG.info("Running innobackupex prepare: %s.", self.prepare_command) + processutils.execute(self.prepare_command, shell=True) + + LOG.info("Checking innobackupex prepare log") + with open(self.prepare_log, 'r') as prepare_log: + output = prepare_log.read() + if not output: + msg = "innobackupex prepare log file empty" + raise Exception(msg) + + last_line = output.splitlines()[-1].strip() + if not re.search('completed OK!', last_line): + msg = "innobackupex prepare did not complete successfully" + raise Exception(msg) + + +class InnoBackupExIncremental(InnoBackupEx): + """InnoBackupEx incremental backup.""" + + incremental_prep = ('innobackupex' + ' --defaults-file=%(restore_location)s/backup-my.cnf' + ' --ibbackup=xtrabackup' + ' --apply-log' + ' --redo-only' + ' %(restore_location)s' + ' %(incremental_args)s' + ' 2>/tmp/innoprepare.log') + + def __init__(self, *args, **kwargs): + if not kwargs.get('lsn'): + raise AttributeError('lsn attribute missing') + self.parent_location = kwargs.pop('parent_location', '') + self.parent_checksum = kwargs.pop('parent_checksum', '') + self.restore_content_length = 0 + + super(InnoBackupExIncremental, self).__init__(*args, **kwargs) + + @property + def cmd(self): + cmd = ('innobackupex' + ' --stream=xbstream' + ' --incremental' + ' --incremental-lsn=%(lsn)s ' + + self.user_and_pass + ' %s' % self.default_data_dir + + ' 2>' + self.backup_log) + return cmd + self.zip_cmd + self.encrypt_cmd + + def get_metadata(self): + _meta = super(InnoBackupExIncremental, self).get_metadata() + + _meta.update({ + 'parent_location': self.parent_location, + 'parent_checksum': self.parent_checksum, + }) + return _meta + + def run_restore(self): + """Run incremental restore. + + First grab all parents and prepare them with '--redo-only'. 
After + all backups are restored the super class InnoBackupEx post_restore + method is called to do the final prepare with '--apply-log' + """ + LOG.debug('Running incremental restore') + self.incremental_restore(self.location, self.checksum) + return self.restore_content_length diff --git a/backup/drivers/mariabackup.py b/backup/drivers/mariabackup.py new file mode 100644 index 0000000000..e10cca30b9 --- /dev/null +++ b/backup/drivers/mariabackup.py @@ -0,0 +1,87 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +from backup.drivers import mysql_base + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class MariaBackup(mysql_base.MySQLBaseRunner): + """Implementation of Backup and Restore using mariabackup.""" + backup_log = '/tmp/mariabackup.log' + restore_log = '/tmp/mbstream_extract.log' + restore_cmd = ('mbstream -x -C %(restore_location)s 2>' + restore_log) + prepare_cmd = '' + + @property + def cmd(self): + cmd = ('mariabackup --backup --stream=xbstream ' + + self.user_and_pass + ' 2>' + self.backup_log) + return cmd + self.zip_cmd + self.encrypt_cmd + + def check_restore_process(self): + LOG.debug('Checking return code of mbstream restore process.') + return_code = self.process.wait() + if return_code != 0: + LOG.error('mbstream exited with %s', return_code) + return False + + return True + + +class MariaBackupIncremental(MariaBackup): + """Incremental backup and restore using mariabackup.""" + incremental_prep = ('mariabackup --prepare ' + '--target-dir=%(restore_location)s ' + '%(incremental_args)s ' + '2>/tmp/innoprepare.log') + + def __init__(self, *args, **kwargs): + if not kwargs.get('lsn'): + raise AttributeError('lsn attribute missing') + self.parent_location = kwargs.pop('parent_location', '') + self.parent_checksum = kwargs.pop('parent_checksum', '') + self.restore_content_length = 0 + + super(MariaBackupIncremental, self).__init__(*args, **kwargs) + + @property + def cmd(self): + cmd = ( + 'mariabackup --backup --stream=xbstream' + ' --incremental-lsn=%(lsn)s ' + + self.user_and_pass + + ' 2>' + + self.backup_log + ) + return cmd + self.zip_cmd + self.encrypt_cmd + + def get_metadata(self): + meta = super(MariaBackupIncremental, self).get_metadata() + + meta.update({ + 'parent_location': self.parent_location, + 'parent_checksum': self.parent_checksum, + }) + return meta + + def run_restore(self): + """Run incremental restore.""" + LOG.debug('Running incremental restore') + self.incremental_restore(self.location, self.checksum) + return self.restore_content_length diff --git a/backup/drivers/mysql_base.py b/backup/drivers/mysql_base.py new file mode 100644 index 0000000000..2450daf03c --- /dev/null +++ b/backup/drivers/mysql_base.py @@ -0,0 +1,139 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import re +import shutil + +from oslo_concurrency import processutils +from oslo_config import cfg +from oslo_log import log as logging + +from backup.drivers import base + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class MySQLBaseRunner(base.BaseRunner): + def __init__(self, *args, **kwargs): + super(MySQLBaseRunner, self).__init__(*args, **kwargs) + + @property + def user_and_pass(self): + return ('--user=%(user)s --password=%(password)s --host=%(host)s' % + {'user': CONF.db_user, + 'password': CONF.db_password, + 'host': CONF.db_host}) + + @property + def filename(self): + return '%s.xbstream' % self.base_filename + + def check_process(self): + """Check the backup output for 'completed OK!'.""" + LOG.debug('Checking backup process output.') + with open(self.backup_log, 'r') as backup_log: + output = backup_log.read() + if not output: + LOG.error("Backup log file %s empty.", self.backup_log) + return False + + last_line = output.splitlines()[-1].strip() + if not re.search('completed OK!', last_line): + LOG.error("Backup did not complete successfully.") + return False + + return True + + def get_metadata(self): + LOG.debug('Getting metadata for backup %s', self.base_filename) + meta = {} + lsn = re.compile(r"The latest check point \(for incremental\): " + r"'(\d+)'") + with open(self.backup_log, 'r') as backup_log: + output = backup_log.read() + match = lsn.search(output) + if match: + meta = {'lsn': match.group(1)} + + LOG.info("Updated metadata for backup %s: %s", self.base_filename, + meta) + + return meta + + def incremental_restore_cmd(self, incremental_dir): + """Return a command for a restore with an incremental location.""" + args = {'restore_location': incremental_dir} + return (self.decrypt_cmd + self.unzip_cmd + self.restore_cmd % args) + + def incremental_prepare_cmd(self, incremental_dir): + if incremental_dir is not None: + incremental_arg = '--incremental-dir=%s' % incremental_dir + else: + incremental_arg = '' + + args = { + 'restore_location': self.restore_location, + 'incremental_args': incremental_arg, + } + + return self.incremental_prep % args + + def incremental_prepare(self, incremental_dir): + prepare_cmd = self.incremental_prepare_cmd(incremental_dir) + + LOG.info("Running restore prepare command: %s.", prepare_cmd) + processutils.execute(prepare_cmd, shell=True) + + def incremental_restore(self, location, checksum): + """Recursively apply backups from all parents. + + If we are the parent, then we restore to the restore_location and + we apply the logs to the restore_location only. + + Otherwise, if we are an incremental, we restore to a subfolder to + prevent stomping on the full restore data. Then we run apply log + with the '--incremental-dir' flag. + + :param location: The source backup location. + :param checksum: Checksum of the source backup for validation.
+ """ + metadata = self.storage.load_metadata(location, checksum) + incremental_dir = None + + if 'parent_location' in metadata: + LOG.info("Restoring parent: %(parent_location)s" + " checksum: %(parent_checksum)s.", metadata) + + parent_location = metadata['parent_location'] + parent_checksum = metadata['parent_checksum'] + # Restore parents recursively so backup are applied sequentially + self.incremental_restore(parent_location, parent_checksum) + # for *this* backup set the incremental_dir + # just use the checksum for the incremental path as it is + # sufficiently unique /var/lib/mysql/ + incremental_dir = os.path.join('/var/lib/mysql', checksum) + os.makedirs(incremental_dir) + command = self.incremental_restore_cmd(incremental_dir) + else: + # The parent (full backup) use the same command from InnobackupEx + # super class and do not set an incremental_dir. + command = self.restore_command + + self.restore_content_length += self.unpack(location, checksum, command) + self.incremental_prepare(incremental_dir) + + # Delete after restoring this part of backup + if incremental_dir: + shutil.rmtree(incremental_dir) diff --git a/backup/main.py b/backup/main.py new file mode 100644 index 0000000000..8e24478e61 --- /dev/null +++ b/backup/main.py @@ -0,0 +1,149 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import importutils +import sys + +topdir = os.path.normpath( + os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) +sys.path.insert(0, topdir) + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + +cli_opts = [ + cfg.StrOpt('backup-id'), + cfg.StrOpt( + 'storage-driver', + default='swift', + choices=['swift'] + ), + cfg.StrOpt( + 'driver', + default='innobackupex', + choices=['innobackupex', 'xtrabackup', 'mariabackup'] + ), + cfg.BoolOpt('backup'), + cfg.StrOpt('backup-encryption-key'), + cfg.StrOpt('db-user'), + cfg.StrOpt('db-password'), + cfg.StrOpt('db-host'), + cfg.StrOpt('os-token'), + cfg.StrOpt('os-auth-url'), + cfg.StrOpt('os-tenant-id'), + cfg.StrOpt('swift-container', default='database_backups'), + cfg.DictOpt('swift-extra-metadata'), + cfg.StrOpt('restore-from'), + cfg.StrOpt('restore-checksum'), + cfg.BoolOpt('incremental'), + cfg.StrOpt('parent-location'), + cfg.StrOpt( + 'parent-checksum', + help='It is up to the storage driver to decide to validate the ' + 'checksum or not. 
' + ), +] + +driver_mapping = { + 'innobackupex': 'backup.drivers.innobackupex.InnoBackupEx', + 'innobackupex_inc': 'backup.drivers.innobackupex.InnoBackupExIncremental', + 'mariabackup': 'backup.drivers.mariabackup.MariaBackup', + 'mariabackup_inc': 'backup.drivers.mariabackup.MariaBackupIncremental', +} +storage_mapping = { + 'swift': 'backup.storage.swift.SwiftStorage', +} + + +def stream_backup_to_storage(runner_cls, storage): + parent_metadata = {} + + if CONF.incremental: + if not CONF.parent_location: + LOG.error('--parent-location should be provided for incremental ' + 'backup') + exit(1) + + parent_metadata = storage.load_metadata(CONF.parent_location, + CONF.parent_checksum) + parent_metadata.update( + { + 'parent_location': CONF.parent_location, + 'parent_checksum': CONF.parent_checksum + } + ) + + try: + with runner_cls(filename=CONF.backup_id, **parent_metadata) as bkup: + checksum, location = storage.save( + bkup, + metadata=CONF.swift_extra_metadata + ) + LOG.info('Backup successfully, checksum: %s, location: %s', + checksum, location) + except Exception as err: + LOG.exception('Failed to call stream_backup_to_storage, error: %s', + err) + + +def stream_restore_from_storage(runner_cls, storage): + lsn = "" + if storage.is_incremental_backup(CONF.restore_from): + lsn = storage.get_backup_lsn(CONF.restore_from) + + try: + runner = runner_cls(storage=storage, location=CONF.restore_from, + checksum=CONF.restore_checksum, lsn=lsn) + restore_size = runner.restore() + LOG.info('Restore successfully, restore_size: %s', restore_size) + except Exception as err: + LOG.exception('Failed to call stream_restore_from_storage, error: %s', + err) + + +def main(): + CONF.register_cli_opts(cli_opts) + logging.register_options(CONF) + CONF(sys.argv[1:], project='trove-backup') + logging.setup(CONF, 'trove-backup') + + runner_cls = importutils.import_class(driver_mapping[CONF.driver]) + storage = importutils.import_class(storage_mapping[CONF.storage_driver])() + + if CONF.backup: + if CONF.incremental: + runner_cls = importutils.import_class( + driver_mapping['%s_inc' % CONF.driver]) + + LOG.info('Starting backup database to %s, backup ID %s', + CONF.storage_driver, CONF.backup_id) + stream_backup_to_storage(runner_cls, storage) + else: + if storage.is_incremental_backup(CONF.restore_from): + LOG.debug('Restore from incremental backup') + runner_cls = importutils.import_class( + driver_mapping['%s_inc' % CONF.driver]) + + LOG.info('Starting restore database from %s, location: %s', + CONF.storage_driver, CONF.restore_from) + + stream_restore_from_storage(runner_cls, storage) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/backup/requirements.txt b/backup/requirements.txt new file mode 100644 index 0000000000..38358bd3c1 --- /dev/null +++ b/backup/requirements.txt @@ -0,0 +1,6 @@ +oslo.config!=4.3.0,!=4.4.0;python_version>='3.0' # Apache-2.0 +oslo.log;python_version>='3.0' # Apache-2.0 +oslo.utils!=3.39.1,!=3.40.0,!=3.40.1;python_version>='3.0' # Apache-2.0 +oslo.concurrency;python_version>='3.0' # Apache-2.0 +keystoneauth1 # Apache-2.0 +python-swiftclient # Apache-2.0 diff --git a/trove/guestagent/datastore/experimental/cassandra/__init__.py b/backup/storage/__init__.py similarity index 100% rename from trove/guestagent/datastore/experimental/cassandra/__init__.py rename to backup/storage/__init__.py diff --git a/backup/storage/base.py b/backup/storage/base.py new file mode 100644 index 0000000000..a15b1ccc70 --- /dev/null +++ b/backup/storage/base.py @@ -0,0 +1,48 @@ +# Copyright 
2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc + + +class Storage(object, metaclass=abc.ABCMeta): + """Base class for Storage driver implementation.""" + + @abc.abstractmethod + def save(self, stream, metadata=None, **kwargs): + """Persist information from the stream. + + Should return the new backup checksum and location. + """ + + @abc.abstractmethod + def load(self, location, backup_checksum, **kwargs): + """Load a stream from the data location. + + Should return an object that provides a "read" method. + """ + + def load_metadata(self, parent_location, parent_checksum): + """Load metadata for a parent backup. + + It's up to the storage driver to decide how to implement this function. + """ + return {} + + def is_incremental_backup(self, location): + """Check if the location is an incremental backup.""" + return False + + @abc.abstractmethod + def get_backup_lsn(self, location): + """Get the backup LSN.""" diff --git a/backup/storage/swift.py b/backup/storage/swift.py new file mode 100644 index 0000000000..3930e68a3e --- /dev/null +++ b/backup/storage/swift.py @@ -0,0 +1,294 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import hashlib +import json + +from keystoneauth1 import session +from keystoneauth1.identity import v3 +from oslo_config import cfg +from oslo_log import log as logging +import swiftclient + +from backup.storage import base + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +def _get_user_keystone_session(auth_url, token, tenant_id): + auth = v3.Token( + auth_url=auth_url, token=token, + project_domain_name="Default", + project_id=tenant_id + ) + return session.Session(auth=auth, verify=False) + + +def _get_service_client(auth_url, token, tenant_id): + sess = _get_user_keystone_session(auth_url, token, tenant_id) + return swiftclient.Connection(session=sess) + + +def _set_attr(original): + """Return a swift friendly header key.""" + key = original.replace('_', '-') + return 'X-Object-Meta-%s' % key + + +def _get_attr(original): + """Get a friendly name from an object header key.""" + key = original.replace('-', '_') + key = key.replace('x_object_meta_', '') + return key + + +class StreamReader(object): + """Wrap the stream from the backup process and chunk it into segments.""" + + def __init__(self, stream, container, filename, max_file_size): + self.stream = stream + self.container = container + self.filename = filename + self.max_file_size = max_file_size + self.segment_length = 0 + self.process = None + self.file_number = 0 + self.end_of_file = False + self.end_of_segment = False + self.segment_checksum = hashlib.md5() + + @property + def base_filename(self): + """Filename with extensions removed.""" + return self.filename.split('.')[0] + + @property + def segment(self): + return '%s_%08d' % (self.base_filename, self.file_number) + + @property + def first_segment(self): + return '%s_%08d' % (self.base_filename, 0) + + @property + def segment_path(self): + return '%s/%s' % (self.container, self.segment) + + def read(self, chunk_size=2 ** 16): + if self.end_of_segment: + self.segment_length = 0 + self.segment_checksum = hashlib.md5() + self.end_of_segment = False + + # Upload to a new file if we are starting or too large + if self.segment_length > (self.max_file_size - chunk_size): + self.file_number += 1 + self.end_of_segment = True + return '' + + chunk = self.stream.read(chunk_size) + if not chunk: + self.end_of_file = True + return '' + + self.segment_checksum.update(chunk) + self.segment_length += len(chunk) + return chunk + + +class SwiftStorage(base.Storage): + def __init__(self): + self.client = _get_service_client(CONF.os_auth_url, CONF.os_token, + CONF.os_tenant_id) + + def save(self, stream, metadata=None, container='database_backups'): + """Persist data from the stream to swift. + + * Read data from stream, upload to swift + * Update the new object metadata; the stream provides a method to + get metadata. + + :returns: the new object checksum and full swift URL.
+ """ + filename = stream.manifest + LOG.info('Saving %(filename)s to %(container)s in swift.', + {'filename': filename, 'container': container}) + + # Create the container if it doesn't already exist + LOG.debug('Ensuring container %s', container) + self.client.put_container(container) + + # Swift Checksum is the checksum of the concatenated segment checksums + swift_checksum = hashlib.md5() + # Wrap the output of the backup process to segment it for swift + stream_reader = StreamReader(stream, container, filename, + 2 * (1024 ** 3)) + + url = self.client.url + # Full location where the backup manifest is stored + location = "%s/%s/%s" % (url, container, filename) + LOG.info('Uploading to %s', location) + + # Information about each segment upload job + segment_results = [] + + # Read from the stream and write to the container in swift + while not stream_reader.end_of_file: + LOG.debug('Uploading segment %s.', stream_reader.segment) + path = stream_reader.segment_path + etag = self.client.put_object(container, + stream_reader.segment, + stream_reader) + + segment_checksum = stream_reader.segment_checksum.hexdigest() + + # Check each segment MD5 hash against swift etag + if etag != segment_checksum: + msg = ('Failed to upload data segment to swift. ETAG: %(tag)s ' + 'Segment MD5: %(checksum)s.' % + {'tag': etag, 'checksum': segment_checksum}) + raise Exception(msg) + + segment_results.append({ + 'path': path, + 'etag': etag, + 'size_bytes': stream_reader.segment_length + }) + + swift_checksum.update(segment_checksum.encode()) + + # All segments uploaded. + num_segments = len(segment_results) + LOG.debug('File uploaded in %s segments.', num_segments) + + # An SLO will be generated if the backup was more than one segment in + # length. + large_object = num_segments > 1 + + # Meta data is stored as headers + if metadata is None: + metadata = {} + metadata.update(stream.get_metadata()) + headers = {} + for key, value in metadata.items(): + headers[_set_attr(key)] = value + + LOG.debug('Metadata headers: %s', headers) + if large_object: + manifest_data = json.dumps(segment_results) + LOG.info('Creating the SLO manifest file, manifest content: %s', + manifest_data) + # The etag returned from the manifest PUT is the checksum of the + # manifest object (which is empty); this is not the checksum we + # want. + self.client.put_object(container, + filename, + manifest_data, + query_string='multipart-manifest=put') + + # Validation checksum is the Swift Checksum + final_swift_checksum = swift_checksum.hexdigest() + else: + LOG.info('Moving segment %(segment)s to %(filename)s.', + {'segment': stream_reader.first_segment, + 'filename': filename}) + segment_result = segment_results[0] + # Just rename it via a special put copy. + headers['X-Copy-From'] = segment_result['path'] + self.client.put_object(container, + filename, '', + headers=headers) + + # Delete the old segment file that was copied + LOG.debug('Deleting the old segment file %s.', + stream_reader.first_segment) + self.client.delete_object(container, + stream_reader.first_segment) + + final_swift_checksum = segment_result['etag'] + + # Validate the object by comparing checksums + resp = self.client.head_object(container, filename) + # swift returns etag in double quotes + # e.g. '"dc3b0827f276d8d78312992cc60c2c3f"' + etag = resp['etag'].strip('"') + + # Raise an error and mark backup as failed + if etag != final_swift_checksum: + msg = ('Failed to upload data to swift. 
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 4de219e46b..8bbcbac766 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -362,7 +362,7 @@ function create_subnet_v6 {
     if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
         subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
     fi
-    if [ -n $SUBNETPOOL_V6_ID ]; then
+    if [[ -n $SUBNETPOOL_V6_ID ]]; then
         subnet_params+="--subnet-pool $SUBNETPOOL_V6_ID "
     else
         subnet_params+="--subnet-range $FIXED_RANGE_V6 $ipv6_modes} "
@@ -447,27 +447,26 @@ function create_guest_image {
         return 0
     fi

-    image_name="trove-datastore-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
-    image_url_var="TROVE_NON_DEV_IMAGE_URL_${TROVE_DATASTORE_TYPE^^}"
-    image_url=`eval echo '$'"$image_url_var"`
+    image_name="trove-guest-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}"

     mkdir -p $HOME/images
     image_file=$HOME/images/${image_name}.qcow2
-    if [[ -n ${image_url} ]]; then
-        echo "Downloading guest image from ${image_url}"
-        curl -sSL ${image_url} -o ${image_file}
+    if [[ -n ${TROVE_NON_DEV_IMAGE_URL} ]]; then
+        echo "Downloading guest image from ${TROVE_NON_DEV_IMAGE_URL}"
+        curl -sSL ${TROVE_NON_DEV_IMAGE_URL} -o ${image_file}
     else
         echo "Starting to create guest image"
-        TROVE_BRANCH=${TROVE_BRANCH} $DEST/trove/integration/scripts/trovestack \
+        $DEST/trove/integration/scripts/trovestack \
             build-image \
-            ${TROVE_DATASTORE_TYPE} \
             ${TROVE_IMAGE_OS} \
             ${TROVE_IMAGE_OS_RELEASE} \
-            true
+            true \
+            ${TROVE_IMAGE_OS} \
+            ${image_file}
     fi

-    if [ ! -f ${image_file} ]; then
+    if [[ ! -f ${image_file} ]]; then
         echo "Image file was not found at ${image_file}"
         exit 1
     fi
@@ -485,7 +484,7 @@ function create_guest_image {
     $TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION

     echo "Add parameter validation rules if available"
-    if [ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]; then
+    if [[ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]]; then
        $TROVE_MANAGE db_load_datastore_config_parameters "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" \
            $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json
     fi
diff --git a/devstack/settings b/devstack/settings
index 44b71bb6dc..541d0f7d57 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -29,15 +29,9 @@ TROVE_LOCAL_API_PASTE_INI=${TROVE_LOCAL_API_PASTE_INI:-${TROVE_LOCAL_CONF_DIR}/a
 TROVE_LOCAL_POLICY_JSON=${TROVE_LOCAL_POLICY_JSON:-${TROVE_LOCAL_CONF_DIR}/policy.json}

 TROVE_IMAGE_OS=${TROVE_IMAGE_OS:-"ubuntu"}
-TROVE_IMAGE_OS_RELEASE=${TROVE_IMAGE_OS_RELEASE:-"xenial"}
+TROVE_IMAGE_OS_RELEASE=${TROVE_IMAGE_OS_RELEASE:-"bionic"}

 TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"}
-if [[ "$DISTRO" == "xenial" || "$DISTRO" == "bionic" ]]; then
-    TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.7"}
-    TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.7"}
-else
-    TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"}
-    TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"}
-fi
+TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.7.29"}

 # Configuration values listed here for reference
 TROVE_MAX_ACCEPTED_VOLUME_SIZE=${TROVE_MAX_ACCEPTED_VOLUME_SIZE}
@@ -46,8 +40,8 @@ TROVE_MAX_VOLUMES_PER_TENANT=${TROVE_MAX_VOLUMES_PER_TENANT}
 TROVE_AGENT_CALL_LOW_TIMEOUT=${TROVE_AGENT_CALL_LOW_TIMEOUT}
 TROVE_AGENT_CALL_HIGH_TIMEOUT=${TROVE_AGENT_CALL_HIGH_TIMEOUT:-1200}
 TROVE_RESIZE_TIME_OUT=${TROVE_RESIZE_TIME_OUT}
-TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-900}
-TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME}
+TROVE_USAGE_TIMEOUT=${TROVE_USAGE_TIMEOUT:-1800}
+TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME:-600}
 TROVE_COMMAND_PROCESS_TIMEOUT=${TROVE_COMMAND_PROCESS_TIMEOUT:-60}

 # Set up the host gateway
@@ -90,4 +84,4 @@ CELLSV2_SETUP=singleconductor

 # Enable or disable the Trove guest image build during devstack installation.
 TROVE_ENABLE_IMAGE_BUILD=${TROVE_ENABLE_IMAGE_BUILD:-"true"}
-TROVE_NON_DEV_IMAGE_URL_MYSQL=${TROVE_NON_DEV_IMAGE_URL_MYSQL:-""}
+TROVE_NON_DEV_IMAGE_URL=${TROVE_NON_DEV_IMAGE_URL:-""}
diff --git a/doc/source/admin/building_guest_images.rst b/doc/source/admin/building_guest_images.rst
index abac6bec38..358bc94758 100644
--- a/doc/source/admin/building_guest_images.rst
+++ b/doc/source/admin/building_guest_images.rst
@@ -26,54 +26,42 @@ stored in Glance. This document shows you the steps to build the guest images.
    periodically built and published in
    http://tarballs.openstack.org/trove/images/ in Trove upstream CI.

-   Additionally, if you install Trove in devstack environment, a MySQL image
+   Additionally, if you install Trove in a devstack environment, the guest image
    is created and registered in Glance automatically, unless it's disabled by
    setting ``TROVE_ENABLE_IMAGE_BUILD=false`` in devstack local.conf file.

 High Level Overview of a Trove Guest Instance
 =============================================

-At the most basic level, a Trove Guest Instance is a Nova instance
-launched by Trove in response to a create command. For most of this
-document, we will confine ourselves to single instance databases; in
-other words, without the additional complexity of replication or
-mirroring. Guest instances and Guest images for replicated and
-mirrored database instances will be addressed specifically in later
-sections of this document.
+At the most basic level, a Trove Guest Instance is a Nova instance launched by
+Trove in response to a create command. This section describes the various
+components of a Trove Guest Instance.

-This section describes the various components of a Trove Guest
-Instance.
+----------------
+Operating System
+----------------

------------------------------
-Operating System and Database
------------------------------
+Ubuntu is the only officially supported operating system; the functional
+tests run against Ubuntu-based guest images.

-A Trove Guest Instance contains at least a functioning Operating
-System and the database software that the instance wishes to provide
-(as a Service). For example, if your chosen operating system is Ubuntu
-and you wish to deliver MySQL version 5.7, then your guest instance is
-a Nova instance running the Ubuntu operating system and will have
-MySQL version 5.7 installed on it.
+------
+Docker
+------
+
+Since the Victoria release, all datastore services are installed as docker
+containers inside the Trove instance, so docker must be installed when
+building the guest image.

 -----------------
 Trove Guest Agent
 -----------------

-Trove supports multiple databases, some of them are relational (RDBMS)
-and some are non-relational (NoSQL). In order to provide a common
-management interface to all of these, the Trove Guest Instance has on
-it a 'Guest Agent'. The Trove Guest Agent is a component of the
-Trove system that is specific to the database running on that Guest
-Instance.
+The guest agent runs inside the Nova instances that are used to run the
+database engines. The agent listens on the messaging bus for its topic and is
+responsible for actually translating and executing the commands that are sent
+to it by the task manager component for the particular datastore.

-The purpose of the Trove Guest Agent is to implement the Trove Guest
-Agent API for the specific database. This includes such things as the
-implementation of the database 'start' and 'stop' commands. The Trove
-Guest Agent API is the common API used by Trove to communicate with
-any guest database, and the Guest Agent is the implementation of that
-API for the specific database.
-
-The Trove Guest Agent runs inside the Trove Guest Instance.
+The Trove guest agent also manages the datastore docker container.

 ------------------------------------------
 Injected Configuration for the Guest Agent
 ------------------------------------------
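
(Illustration only, not part of this patch: the kind of call the guest agent
makes when it manages a datastore container, sketched with the docker Python
SDK. The image tag, container name, mount and password are hypothetical, not
the agent's actual values.)

    import docker

    client = docker.from_env()
    client.containers.run(
        'mariadb:10.4',          # hypothetical datastore image
        name='database',         # hypothetical container name
        detach=True,
        network_mode='host',
        restart_policy={'Name': 'always'},
        volumes={'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}},
        environment={'MYSQL_ROOT_PASSWORD': 'notsecret'},  # hypothetical
    )
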
@@ -104,44 +92,45 @@ services(e.g. the message queue).
 Building Guest Images
 =====================

+Since the Victoria release, a single trove guest image can be used for
+different datastores; it is no longer necessary to maintain a separate image
+for each datastore.
+
 -----------------------------
 Build images using trovestack
 -----------------------------

 ``trovestack`` is the recommended tooling provided by Trove community to build
-the guest images. Before running ``trovestack`` command, go to the scripts
-folder:
+the guest images. Before running the ``trovestack`` command:

 .. code-block:: console

     git clone https://opendev.org/openstack/trove
     cd trove/integration/scripts

-The trove guest agent image could be created by running the following command:
+The trove guest image can be created by running the following command:

 .. code-block:: console

     $ ./trovestack build-image \
-        ${datastore_type} \
         ${guest_os} \
         ${guest_os_release} \
         ${dev_mode} \
         ${guest_username} \
-        ${imagepath}
+        ${output_image_path}

-* Currently, only ``guest_os=ubuntu`` and ``guest_os_release=xenial`` are fully
+* Currently, only ``guest_os=ubuntu`` and ``guest_os_release=bionic`` are fully
   tested and supported.

 * Default input values:

   .. code-block:: ini

-     datastore_type=mysql
      guest_os=ubuntu
-     guest_os_release=xenial
+     guest_os_release=bionic
      dev_mode=true
      guest_username=ubuntu
-     imagepath=$HOME/images/trove-${guest_os}-${guest_os_release}-${datastore_type}
+     output_image_path=$HOME/images/trove-guest-${guest_os}-${guest_os_release}-dev

 * ``dev_mode=true`` is mainly for testing purposes for trove developers and it's
   necessary to build the image on the trove controller host, because the host
@@ -159,31 +148,27 @@ The trove guest agent image could be created by running the following command:
   * ``HOST_SCP_USERNAME``: Only used in dev mode, this is the user name used by
     guest agent to connect to the controller host, e.g. in devstack environment,
     it should be the ``stack`` user.
-  * ``GUEST_WORKING_DIR``: The place to save the guest image, default value is
-    ``$HOME/images``.
-  * ``TROVE_BRANCH``: Only used in dev mode. The branch name of Trove code
-    repository, by default it's master, use other branches as needed such as
-    stable/train.

-For example, in order to build a MySQL image for Ubuntu Xenial operating
+For example, in order to build a guest image for the Ubuntu Bionic operating
 system in development mode:

 .. code-block:: console

-    $ ./trovestack build-image mysql ubuntu xenial true
+    $ ./trovestack build-image ubuntu bionic true ubuntu

 Once the image build is finished, the cloud administrator needs to register
 the image in Glance and register a new datastore or version in Trove using
-``trove-manage`` command, e.g. after building an image for MySQL 5.7.1:
+``trove-manage`` command, e.g. after building an image for MySQL 5.7.29:

 .. code-block:: console

-    $ openstack image create ubuntu-mysql-5.7.1-dev \
-      --public \
+    $ openstack image create trove-guest-ubuntu-bionic \
+      --private \
       --disk-format qcow2 \
       --container-format bare \
-      --file ~/images/ubuntu-xenial-mysql.qcow2
-    $ trove-manage datastore_version_update mysql 5.7.1 mysql $image_id "" 1
+      --file ~/images/trove-guest-ubuntu-bionic-dev.qcow2
+    $ trove-manage datastore_version_update mysql 5.7.29 mysql $image_id "" 1
+    $ trove-manage db_load_datastore_config_parameters mysql 5.7.29 ${trove_repo_dir}/trove/templates/mysql/validation-rules.json

 If you encounter any error or need help with the image creation, please ask
 for help in the ``#openstack-trove`` IRC channel or by sending emails to
diff --git a/doc/source/admin/run_trove_in_production.rst b/doc/source/admin/run_trove_in_production.rst
index 2b751fd843..c5c1a6e762 100644
--- a/doc/source/admin/run_trove_in_production.rst
+++ b/doc/source/admin/run_trove_in_production.rst
@@ -322,7 +322,7 @@ Command examples:
     # Use 5.7.29 as the default datastore version for 'mysql'
     trove-manage datastore_update mysql 5.7.29
     # Register configuration parameters for 5.7.29 version of datastore 'mysql'
-    trove-manage db_load_datastore_config_parameters mysql 5.7.29 $workdir/trove/trove/templates/mysql/validation-rules.json
+    trove-manage db_load_datastore_config_parameters mysql 5.7.29 ${trove_repo_dir}/trove/templates/mysql/validation-rules.json

 Quota Management
diff --git a/etc/trove/cloudinit/README b/etc/trove/cloudinit/README
deleted file mode 100644
index f9e9aa8d68..0000000000
--- a/etc/trove/cloudinit/README
+++ /dev/null
@@ -1,3 +0,0 @@
-These cloudinit scripts will used as userdata on instance create
-File names should match pattern: service_type.cloudinit
-For example: mysql.cloudinit
diff --git a/etc/trove/conf.d/README b/etc/trove/conf.d/README
deleted file mode 100644
index d27a4ff9c7..0000000000
--- a/etc/trove/conf.d/README
+++ /dev/null
@@ -1,4 +0,0 @@
-These conf files are read and used by the guest to provide extra
-information to the guest. The first example of this is the
-guest_info.conf which will have the uuid of the instance so that
-the guest can report back things to the infra.
diff --git a/etc/trove/conf.d/guest_info.conf b/etc/trove/conf.d/guest_info.conf
deleted file mode 100644
index 6a1f77c271..0000000000
--- a/etc/trove/conf.d/guest_info.conf
+++ /dev/null
@@ -1 +0,0 @@
-# Guest-specific information injected by the taskmanager
diff --git a/etc/trove/trove-guestagent.conf.sample b/etc/trove/trove-guestagent.conf.sample
deleted file mode 100644
index 8e49d4bbbb..0000000000
--- a/etc/trove/trove-guestagent.conf.sample
+++ /dev/null
@@ -1,166 +0,0 @@
-[DEFAULT]
-
-
-#=========== RPC Configuration ======================
-
-# URL representing the messaging driver to use and its full configuration.
-# If not set, we fall back to the 'rpc_backend' option and driver specific
-# configuration.
-#transport_url=
-
-# The messaging driver to use. Options include rabbit, qpid and zmq.
-# Default is rabbit. (string value)
-#rpc_backend=rabbit
-
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the 'transport_url option.
-control_exchange = trove
-
-# ========== Configuration options for Swift ==========
-
-# The swift_url can be specified directly or fetched from Keystone catalog.
-
-# To fetch from Keystone, comment out swift_url, and uncomment the others.
-# swift_url = http://10.0.0.1:8080/v1/AUTH_
-# Region name of this node. Default value is None.
-# os_region_name = RegionOne -# Service type to use when searching catalog. -# swift_service_type = object-store - - -# ========== Datastore Manager Configurations ========== - -# Datastore manager implementations. -# Format: list of 'datastore-type:datastore.manager.implementation.module' -# datastore_registry_ext = mysql:trove.guestagent.datastore.mysql.manager.Manager, percona:trove.guestagent.datastore.mysql.manager.Manager - - -# ========== Default Users / DBs Configuration ========== - -# Permissions to grant "root" user by default -root_grant = ALL -root_grant_option = True -# root_grant = ALTER ROUTINE, CREATE, ALTER, CREATE ROUTINE, CREATE TEMPORARY TABLES, CREATE VIEW, CREATE USER, DELETE, DROP, EVENT, EXECUTE, INDEX, INSERT, LOCK TABLES, PROCESS, REFERENCES, SELECT, SHOW DATABASES, SHOW VIEW, TRIGGER, UPDATE, USAGE -# root_grant_option = False - -# Default password Length for root password -# default_password_length = 36 - - -# ========== Default Storage Options for backup ========== - -# Default configuration for storage strategy and storage options -# for backups - -# For storage to Swift, use the following as defaults: -# storage_strategy = SwiftStorage -# storage_namespace = trove.common.strategies.storage.swift - -# Default config options for storing backups to swift -# backup_swift_container = database_backups -# backup_use_gzip_compression = True -# backup_use_openssl_encryption = True -# backup_aes_cbc_key = "default_aes_cbc_key" -# backup_use_snet = False -# backup_chunk_size = 65536 -# backup_segment_max_size = 2147483648 - - -# ========== Sample Logging Configuration ========== - -# Show debugging output in logs (sets DEBUG log level output) -# debug = True - -# Directory and path for log files -log_dir = /var/log/trove/ -log_file = logfile.txt -log_config_append = /etc/trove/trove-logging-guestagent.conf - -[profiler] -# If False fully disable profiling feature. -#enabled = False -# If False doesn't trace SQL requests. -#trace_sqlalchemy = True - -[oslo_messaging_notifications] - -# -# From oslo.messaging -# - -# The Driver(s) to handle sending notifications. Possible -# values are messaging, messagingv2, routing, log, test, noop -# (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for -# notifications. If not set, we fall back to the same -# configuration used for RPC. (string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - -# The maximum number of attempts to re-send a notification -# message which failed to be delivered due to a recoverable -# error. 
0 - No retry, -1 - indefinite (integer value) -#retry = -1 - -# ========== Datastore Specific Configuration Options ========== - -[mysql] -# For mysql, the following are the defaults for backup, and restore: -# backup_strategy = InnoBackupEx -# backup_namespace = trove.guestagent.strategies.backup.mysql_impl -# restore_namespace = trove.guestagent.strategies.restore.mysql_impl -# Default configuration for mysql replication -# replication_strategy = MysqlBinlogReplication -# replication_namespace = trove.guestagent.strategies.replication.mysql_binlog -# replication_user = slave_user -# replication_password = slave_password - -# Users to ignore for user create/list/delete operations -# ignore_users = os_admin - -# Databases to ignore for db create/list/delete operations -# ignore_dbs = mysql, information_schema, performance_schema - -[vertica] -# For vertica, following are the defaults needed: -# mount_point = /var/lib/vertica -# readahead_size = 2048 -# guestagent_strategy = trove.common.strategies.cluster.experimental.vertica.guestagent.VerticaGuestAgentStrategy - -[redis] -# For redis, the following are the defaults for backup, and restore: -# backup_strategy = RedisBackup -# backup_namespace = trove.guestagent.strategies.backup.experimental.redis_impl -# restore_namespace = trove.guestagent.strategies.restore.experimental.redis_impl - -[percona] -backup_namespace = trove.guestagent.strategies.backup.mysql_impl -restore_namespace = trove.guestagent.strategies.restore.mysql_impl - -[couchbase] -backup_namespace = trove.guestagent.strategies.backup.experimental.couchbase_impl -restore_namespace = trove.guestagent.strategies.restore.experimental.couchbase_impl - -[cassandra] -backup_namespace = trove.guestagent.strategies.backup.experimental.cassandra_impl -restore_namespace = trove.guestagent.strategies.restore.experimental.cassandra_impl - -[db2] -# For db2, the following are the defaults for backup, and restore: -# backup_strategy = DB2OfflineBackup -# backup_namespace = trove.guestagent.strategies.backup.experimental.db2_impl -# restore_namespace = trove.guestagent.strategies.restore.experimental.db2_impl - -[couchdb] -#For CouchDB, the following are the defaults for backup and restore: -# backup_strategy = CouchDBBackup -# backup_namespace = trove.guestagent.strategies.backup.experimental.couchdb_impl -# restore_namespace = trove.guestagent.strategies.restore.experimental.couchdb_impl diff --git a/etc/trove/trove-workbook.yaml b/etc/trove/trove-workbook.yaml deleted file mode 100644 index d549713352..0000000000 --- a/etc/trove/trove-workbook.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -version: '2.0' - -name: trove - -description: Trove Workflows - -workflows: - - backup_create: - input: [instance, name, description, incremental] - output: - status: <% $.message %> - - tasks: - backup_create: - action: trove.backups_create instance=<% $.instance %> name=<% $.name %> description=<% $.description %> incremental=<% $.incremental %> - publish: - message: <% 'Backup complete' %> diff --git a/etc/trove/trove.conf.sample b/etc/trove/trove.conf.sample deleted file mode 100644 index e4fdd98cba..0000000000 --- a/etc/trove/trove.conf.sample +++ /dev/null @@ -1,311 +0,0 @@ -[DEFAULT] -# Show debugging output in logs (sets DEBUG log level output) -debug = True - -# Address to bind the API server -bind_host = 0.0.0.0 - -# Port the bind the API server to -bind_port = 8779 - -# Number of workers for the API service. The default will -# be the number of CPUs available. 
(integer value) -#trove_api_workers=None - -#===================== RPC Configuration ================================= - -# URL representing the messaging driver to use and its full configuration. -# If not set, we fall back to the 'rpc_backend' option and driver specific -# configuration. -#transport_url= - -# The messaging driver to use. Options include rabbit, qpid and zmq. -# Default is rabbit. (string value) -#rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the 'transport_url option. -control_exchange = trove - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large tokens -# (typically those generated by the Keystone v3 API with big service -# catalogs) -# max_header_line = 16384 - -#DB Api Implementation -db_api_implementation = "trove.db.sqlalchemy.api" - -# Configuration options for talking to nova via the novaclient. -trove_auth_url = http://0.0.0.0/identity/v2.0 -#nova_compute_url = http://localhost:8774/v2 -#cinder_url = http://localhost:8776/v1 -#swift_url = http://localhost:8080/v1/AUTH_ -#neutron_url = http://localhost:9696/ - -# nova_compute_url, cinder_url, swift_url, and can all be fetched -# from Keystone. To fetch from Keystone, comment out nova_compute_url, -# cinder_url, swift_url, and and optionally uncomment the lines below. - -# Region name of this node. Used when searching catalog. Default value is None. -#os_region_name = RegionOne -# Service type to use when searching catalog. -#nova_compute_service_type = compute -# Service type to use when searching catalog. -#cinder_service_type = volumev2 -# Service type to use when searching catalog. -#swift_service_type = object-store -# Service type to use when searching catalog. -#neutron_service_type = network - -#ip_regex = ^(15.|123.) -#black_list_regex = ^10.0.0. - -# Config options for enabling volume service -trove_volume_support = True -block_device_mapping = vdb -device_path = /dev/vdb -# Maximum volume size for an instance -max_accepted_volume_size = 10 -max_instances_per_tenant = 5 -# Maximum volume capacity (in GB) spanning across all trove volumes per tenant -max_volumes_per_tenant = 100 -max_backups_per_tenant = 5 -volume_time_out=30 - -# Config options for rate limits -http_get_rate = 200 -http_post_rate = 200 -http_put_rate = 200 -http_delete_rate = 200 -http_mgmt_post_rate = 200 - -# Trove DNS -trove_dns_support = False -dns_account_id = 123456 -dns_auth_url = http://127.0.0.1/identity/v2.0 -dns_username = user -dns_passkey = password -dns_ttl = 3600 -dns_domain_name = 'trove.com.' -dns_domain_id = 11111111-1111-1111-1111-111111111111 -dns_driver = trove.dns.designate.driver.DesignateDriver -dns_instance_entry_factory = trove.dns.designate.driver.DesignateInstanceEntryFactory -dns_endpoint_url = http://127.0.0.1/v1/ -dns_service_type = dns - -# Neutron -network_driver = trove.network.nova.NovaNetwork -management_networks = - - -# Taskmanager queue name -taskmanager_queue = taskmanager - -# Auth -admin_roles = admin - -# Guest related conf -agent_heartbeat_time = 10 -agent_call_low_timeout = 5 -agent_call_high_timeout = 150 - -# Reboot time out for instances -reboot_time_out = 60 - -# Trove api-paste file name -api_paste_config = api-paste.ini - - -# ============ Notification System configuration =========================== - -# Sets the notification driver used by oslo.messaging. Options include -# messaging, messagingv2, log and routing. 
Default is 'noop' -# notification_driver=noop - -# Topics used for OpenStack notifications, list value. Default is 'notifications'. -# notification_topics=notifications - -# ============ Logging information ============================= -#log_dir = /integration/report -#log_file = trove-api.log - - -[database] - -# SQLAlchemy connection string for the reference implementation -# registry server. Any valid SQLAlchemy connection string is fine. -# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine -# connection = sqlite:///trove_test.sqlite -connection = mysql+pymysql://root:e1a2c042c828d3566d0a@localhost/trove -#connection = postgresql://trove:trove@localhost/trove - -# Period in seconds after which SQLAlchemy should reestablish its connection -# to the database. -# -# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop -# idle connections. This can result in 'MySQL Gone Away' exceptions. If you -# notice this, you can lower this value to ensure that SQLAlchemy reconnects -# before MySQL can drop the connection. -idle_timeout = 3600 - - - -# ============ SSL configuration (and enablement) ============================= -# In order to enable SSL for the trove api server, uncomment -# the cert_file and key_file - and of course have those files -# accessible. The existence of those setting and files will -# enable SSL. - -[profiler] -# If False fully disable profiling feature. -#enabled = False -# If False doesn't trace SQL requests. -#trace_sqlalchemy = True - -[ssl] - -#cert_file = /path/to/server.crt -#key_file = /path/to/server.key -#optional: -#ca_file = /path/to/ca_file - -[oslo_messaging_notifications] - -# -# From oslo.messaging -# - -# The Driver(s) to handle sending notifications. Possible -# values are messaging, messagingv2, routing, log, test, noop -# (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for -# notifications. If not set, we fall back to the same -# configuration used for RPC. (string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - -# The maximum number of attempts to re-send a notification -# message which failed to be delivered due to a recoverable -# error. 
0 - No retry, -1 - indefinite (integer value) -#retry = -1 - -[mysql] -root_on_create = False -# Format (single port or port range): A, B-C -# where C greater than B -tcp_ports = 3306 -volume_support = True -device_path = /dev/vdb - -# Users to ignore for user create/list/delete operations -ignore_users = os_admin, root -ignore_dbs = mysql, information_schema, performance_schema - - -[redis] -tcp_ports = 6379, 16379 -volume_support = True -device_path = /dev/vdb - -[cassandra] -tcp_ports = 7000, 7001, 9042, 9160 -volume_support = True -device_path = /dev/vdb - -[couchbase] -tcp_ports = 8091, 8092, 4369, 11209-11211, 21100-21199 -volume_support = True -device_path = /dev/vdb - -[mongodb] -tcp_ports = 2500, 27017, 27019 -volume_support = True -device_path = /dev/vdb -num_config_servers_per_cluster = 1 -num_query_routers_per_cluster = 1 - -[vertica] -tcp_ports = 5433, 5434, 22, 5444, 5450, 4803 -udp_ports = 5433, 4803, 4804, 6453 -volume_support = True -device_path = /dev/vdb -cluster_support = True -cluster_member_count = 3 -api_strategy = trove.common.strategies.cluster.experimental.vertica.api.VerticaAPIStrategy - - -# ============ CORS configuration ============================= - -[cors] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = GET,PUT,POST,DELETE,PATCH - -# Indicate which header field names may be used during the actual request. -# (list value) -#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID - - -[cors.subdomain] - -# -# From oslo.middleware.cors -# - -# Indicate whether this resource may be shared with the domain received in the -# requests "origin" header. (list value) -#allowed_origin = - -# Indicate that the actual request can include user credentials (boolean value) -#allow_credentials = true - -# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple -# Headers. (list value) -#expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID - -# Maximum cache age of CORS preflight requests. (integer value) -#max_age = 3600 - -# Indicate which methods can be used during the actual request. (list value) -#allow_methods = GET,PUT,POST,DELETE,PATCH - -# Indicate which header field names may be used during the actual request. -# (list value) -#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID - -[oslo_middleware] - -# -# From oslo.middleware -# - -# Whether the application is behind a proxy or not. This determines if the -# middleware should parse the headers or not. 
(boolean value) -#enable_proxy_headers_parsing = false diff --git a/integration/scripts/conf.json.example b/integration/scripts/conf.json.example deleted file mode 100644 index ee6bc7bf0a..0000000000 --- a/integration/scripts/conf.json.example +++ /dev/null @@ -1,12 +0,0 @@ -{ - "devstack":null, - "glance":null, - "horizon":null, - "keystone":null, - "nova":null, - "python_openstackclient":null, - "python_novaclient":null, - "trove":null, - "python_troveclient":null, - "tempest":null -} diff --git a/integration/scripts/files/elements/apt-conf-dir/README.rst b/integration/scripts/files/elements/apt-conf-dir/README.rst deleted file mode 100644 index c94e00ea33..0000000000 --- a/integration/scripts/files/elements/apt-conf-dir/README.rst +++ /dev/null @@ -1,16 +0,0 @@ -============ -apt-conf-dir -============ - -This element overrides the default apt.conf.d directory for APT based systems. - -Environment Variables ---------------------- - -DIB_APT_CONF_DIR: - :Required: No - :Default: None - :Description: To override `DIB_APT_CONF_DIR`, set it to the path to your - apt.conf.d. The new apt.conf.d will take effect at build time - and run time. - :Example: ``DIB_APT_CONF_DIR=/etc/apt/apt.conf`` diff --git a/integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd b/integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd deleted file mode 100755 index e286d684e1..0000000000 --- a/integration/scripts/files/elements/apt-conf-dir/extra-data.d/99-use-host-apt-confd +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Override the default /etc/apt/apt.conf.d directory with $DIB_APT_CONF_DIR - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o pipefail - -# exit directly if DIB_APT_CONF_DIR is not defined properly -if [ -z "${DIB_APT_CONF_DIR:-}" ] ; then - echo "DIB_APT_CONF_DIR is not set - no apt.conf.d will be copied in" - exit 0 -elif [ ! -d "$DIB_APT_CONF_DIR" ] ; then - echo "$DIB_APT_CONF_DIR is not a valid apt.conf.d directory." - echo "You should assign a proper apt.conf.d directory in DIB_APT_CONF_DIR" - exit 1 -fi - -# copy the apt.conf to cloudimg -sudo cp -L -f -R $DIB_APT_CONF_DIR $TMP_MOUNT_PATH/etc/apt diff --git a/integration/scripts/files/elements/guest-agent/element-deps b/integration/scripts/files/elements/guest-agent/element-deps index 6dcd66dc2b..ef30983797 100644 --- a/integration/scripts/files/elements/guest-agent/element-deps +++ b/integration/scripts/files/elements/guest-agent/element-deps @@ -4,3 +4,4 @@ pkg-map source-repositories svc-map pip-and-virtualenv +ubuntu-docker diff --git a/integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash b/integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash deleted file mode 100644 index 9622b29257..0000000000 --- a/integration/scripts/files/elements/guest-agent/environment.d/99-reliable-apt-key-importing.bash +++ /dev/null @@ -1,34 +0,0 @@ -# sometimes the primary key server is unavailable and we should try an -# alternate. see -# https://bugs.launchpad.net/percona-server/+bug/907789. Disable -# shell errexit so we can interrogate the exit code and take action -# based on the exit code. We will reenable it later. -# -# NOTE(zhaochao): we still have this problem from time to time, so it's -# better use more reliable keyservers and just retry on that(for now, 3 -# tries should be fine). 
-# According to: -# [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver -# [2] https://sks-keyservers.net/overview-of-pools.php -# we'll just the primary suggested pool: pool.sks-keyservers.net. -function get_key_robust() { - KEY=$1 - set +e - - tries=1 - while [ $tries -le 3 ]; do - if [ $tries -eq 3 ]; then - set -e - fi - - echo "Importing the key, try: $tries" - # Behind a firewall should use the port 80 instead of the default port 11371 - apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys ${KEY} && break - - tries=$((tries+1)) - done - - set -e -} - -export -f get_key_robust diff --git a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/31-guest-agent-install b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/31-guest-agent-install new file mode 100755 index 0000000000..1eb66ffd1f --- /dev/null +++ b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/31-guest-agent-install @@ -0,0 +1,51 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +DEV_MODE=${DEV_MODE:-"true"} +SCRIPTDIR=$(dirname $0) +GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} +GUEST_VENV=/opt/guest-agent-venv + +for folder in "/var/lib/trove" "/etc/trove" "/etc/trove/certs" "/etc/trove/conf.d" "/var/log/trove" "/opt/trove-guestagent"; do + mkdir -p ${folder} + chown -R ${GUEST_USERNAME}:root ${folder} +done + +install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.logrotate /etc/logrotate.d/guest-agent + +# Create a virtual environment (with dependencies installed) for guest agent service +${DIB_PYTHON} -m virtualenv ${GUEST_VENV} +${GUEST_VENV}/bin/pip install pip --upgrade +${GUEST_VENV}/bin/pip install -U -c /opt/upper-constraints.txt /opt/guest-agent +chown -R ${GUEST_USERNAME}:root ${GUEST_VENV} + +if [[ ${DEV_MODE} == "true" ]]; then + [[ -n "${HOST_SCP_USERNAME}" ]] || die "HOST_SCP_USERNAME needs to be set to the trovestack host user" + [[ -n "${ESCAPED_PATH_TROVE}" ]] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host" + + sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${SCRIPTDIR}/guest-agent-dev.service > /etc/systemd/system/guest-agent.service +else + # Link the trove-guestagent out to /usr/local/bin where the startup scripts look for + ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true + + case "$DIB_INIT_SYSTEM" in + systemd) + sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/guest-agent.service > /etc/systemd/system/guest-agent.service + ;; + upstart) + install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.conf /etc/init/guest-agent.conf + ;; + sysv) + install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.init /etc/init.d/guest-agent.init + ;; + *) + echo "Unsupported init system" + exit 1 + ;; + esac +fi diff --git a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install deleted file mode 100755 index 87a11958e2..0000000000 --- a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/75-guest-agent-install +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then - set -x -fi -set -eu -set -o 
pipefail - -SCRIPTDIR=$(dirname $0) -GUEST_VENV=/opt/guest-agent-venv -GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} - -# Create a virtual environment for guest agent -${DIB_PYTHON} -m virtualenv ${GUEST_VENV} -${GUEST_VENV}/bin/pip install pip --upgrade -${GUEST_VENV}/bin/pip install -U -c /opt/upper-constraints.txt /opt/guest-agent -chown -R ${GUEST_USERNAME}:root ${GUEST_VENV} - -# Link the trove-guestagent out to /usr/local/bin where the startup scripts look for -ln -s ${GUEST_VENV}/bin/trove-guestagent /usr/local/bin/guest-agent || true - -for folder in "/var/lib/trove" "/etc/trove" "/etc/trove/certs" "/etc/trove/conf.d" "/var/log/trove"; do - mkdir -p ${folder} - chown -R ${GUEST_USERNAME}:root ${folder} -done - -install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.logrotate /etc/logrotate.d/guest-agent - -case "$DIB_INIT_SYSTEM" in - systemd) - mkdir -p /usr/lib/systemd/system - touch /usr/lib/systemd/system/guest-agent.service - sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g" ${SCRIPTDIR}/guest-agent.service > /usr/lib/systemd/system/guest-agent.service - ;; - upstart) - install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.conf /etc/init/guest-agent.conf - ;; - sysv) - install -D -g root -o ${GUEST_USERNAME} -m 0644 ${SCRIPTDIR}/guest-agent.init /etc/init.d/guest-agent.init - ;; - *) - echo "Unsupported init system" - exit 1 - ;; -esac diff --git a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent-dev.service b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent-dev.service new file mode 100644 index 0000000000..261ac81184 --- /dev/null +++ b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent-dev.service @@ -0,0 +1,31 @@ +[Unit] +Description=OpenStack Trove Guest Agent Service for Development +After=syslog.target network.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=simple +User=GUEST_USERNAME +Group=GUEST_USERNAME + +# This script is only for testing purpose for dev_mode=true, the controller +# IP address should be defined in /etc/trove/controller.conf, e.g. 
+# CONTROLLER=192.168.32.151 +EnvironmentFile=/etc/trove/controller.conf + +# If ~/trove-installed does not exist, copy the trove source from +# the user's development environment, then touch the sentinel file +ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -az --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed" + +ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /var/log/trove/ /home/GUEST_USERNAME/trove" + +# Take care of the changes in requirements.txt +ExecStartPre=/bin/bash -c "sudo /opt/guest-agent-venv/bin/pip install -r /home/GUEST_USERNAME/trove/requirements.txt -c /opt/upper-constraints.txt" + +# Start guest-agent.service in virtual environment +ExecStart=/bin/bash -c "/opt/guest-agent-venv/bin/python /home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d" + +TimeoutSec=300 +Restart=on-failure \ No newline at end of file diff --git a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service index 788bebc92f..a468de5434 100644 --- a/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service +++ b/integration/scripts/files/elements/guest-agent/install.d/guest-agent-source-install/guest-agent.service @@ -1,15 +1,16 @@ [Unit] -Description=OpenStack Trove Guest Agent +Description=OpenStack Trove Guest Agent Service After=network.target syslog.service Wants=syslog.service [Service] User=GUEST_USERNAME Group=GUEST_USERNAME -ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove/conf.d" -ExecStart=/usr/local/bin/guest-agent --config-dir=/etc/trove/conf.d KillMode=mixed Restart=always +ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /var/log/trove/" +ExecStart=/usr/local/bin/guest-agent --config-dir=/etc/trove/conf.d + [Install] WantedBy=multi-user.target diff --git a/integration/scripts/files/elements/guest-agent/package-installs.yaml b/integration/scripts/files/elements/guest-agent/package-installs.yaml index 582d48bc3a..37e7daa4b3 100644 --- a/integration/scripts/files/elements/guest-agent/package-installs.yaml +++ b/integration/scripts/files/elements/guest-agent/package-installs.yaml @@ -1,53 +1,15 @@ guest-agent: installtype: package build-essential: - installtype: source +python3-all: +python3-all-dev: +python3-pip: +python3-sqlalchemy: +libxml2-dev: +libxslt1-dev: libffi-dev: - installtype: source libssl-dev: - installtype: source -python-dev: - installtype: source - -acl: -acpid: -apparmor: -apparmor-utils: -apt-transport-https: -at: -bash-completion: -cloud-guest-utils: -cloud-init: -cron: -curl: -dbus: -dkms: -dmeventd: -ethtool: -gawk: -ifenslave: -ifupdown: -iptables: -iputils-tracepath: -irqbalance: -isc-dhcp-client: -less: -logrotate: -lsof: -net-tools: -netbase: -netcat-openbsd: -open-vm-tools: - arch: i386, amd64 +libyaml-dev: openssh-client: openssh-server: -pollinate: -psmisc: -rsyslog: -socat: -tcpdump: -ubuntu-cloudimage-keyring: -ureadahead: -uuid-runtime: -vim-tiny: -vlan: +rsync: diff --git a/integration/scripts/files/elements/guest-agent/post-install.d/11-enable-guest-agent-systemd 
b/integration/scripts/files/elements/guest-agent/post-install.d/31-enable-guest-agent-systemd similarity index 100% rename from integration/scripts/files/elements/guest-agent/post-install.d/11-enable-guest-agent-systemd rename to integration/scripts/files/elements/guest-agent/post-install.d/31-enable-guest-agent-systemd diff --git a/integration/scripts/files/elements/no-resolvconf/README.rst b/integration/scripts/files/elements/no-resolvconf/README.rst deleted file mode 100644 index 8a3dfc7d4f..0000000000 --- a/integration/scripts/files/elements/no-resolvconf/README.rst +++ /dev/null @@ -1,8 +0,0 @@ -This element clears out /etc/resolv.conf and prevents dhclient from populating -it with data from DHCP. This means that DNS resolution will not work from the -guest. This is OK because all outbound connections from the guest will -be based using raw IP addresses. - -In addition we remove dns from the nsswitch.conf hosts setting. - -This means that the guest never waits for DNS timeouts to occur. diff --git a/integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf b/integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf deleted file mode 100755 index 5bf5fded3d..0000000000 --- a/integration/scripts/files/elements/no-resolvconf/finalise.d/99-disable-resolv-conf +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -echo "" > /etc/resolv.conf -echo "" > /etc/resolv.conf.ORIG -if [ -d /etc/dhcp/dhclient-enter-hooks.d ]; then - # Debian/Ubuntu - echo "#!/bin/sh -make_resolv_conf() { : ; }" > /etc/dhcp/dhclient-enter-hooks.d/noresolvconf - chmod +x /etc/dhcp/dhclient-enter-hooks.d/noresolvconf - rm -f /etc/dhcp/dhclient-enter-hooks.d/resolvconf -else - # RHEL/CentOS/Fedora - echo "#!/bin/sh -make_resolv_conf() { : ; }" > /etc/dhclient-enter-hooks - chmod +x /etc/dhclient-enter-hooks -fi - -if [ -e /etc/nsswitch.conf ]; then - sed -i -e "/hosts:/ s/dns//g" /etc/nsswitch.conf -fi diff --git a/integration/scripts/files/elements/ubuntu-docker/element-deps b/integration/scripts/files/elements/ubuntu-docker/element-deps new file mode 100644 index 0000000000..10e545838f --- /dev/null +++ b/integration/scripts/files/elements/ubuntu-docker/element-deps @@ -0,0 +1 @@ +ubuntu-guest \ No newline at end of file diff --git a/integration/scripts/files/elements/ubuntu-docker/install.d/21-docker b/integration/scripts/files/elements/ubuntu-docker/install.d/21-docker new file mode 100755 index 0000000000..44041384a4 --- /dev/null +++ b/integration/scripts/files/elements/ubuntu-docker/install.d/21-docker @@ -0,0 +1,19 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +GUEST_USERNAME=${GUEST_USERNAME:-"ubuntu"} + +echo "Installing docker" +export DEBIAN_FRONTEND=noninteractive +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - +add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${DIB_RELEASE} stable" +apt-get update +apt-get install -y -qq docker-ce >/dev/null + +echo "Adding ${GUEST_USERNAME} user to docker group" +usermod -aG docker ${GUEST_USERNAME} diff --git a/integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash b/integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash deleted file mode 100644 index 9622b29257..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/environment.d/99-reliable-apt-key-importing.bash +++ /dev/null @@ -1,34 +0,0 @@ -# sometimes the primary key server 
is unavailable and we should try an -# alternate. see -# https://bugs.launchpad.net/percona-server/+bug/907789. Disable -# shell errexit so we can interrogate the exit code and take action -# based on the exit code. We will reenable it later. -# -# NOTE(zhaochao): we still have this problem from time to time, so it's -# better use more reliable keyservers and just retry on that(for now, 3 -# tries should be fine). -# According to: -# [1] https://www.gnupg.org/faq/gnupg-faq.html#new_user_default_keyserver -# [2] https://sks-keyservers.net/overview-of-pools.php -# we'll just the primary suggested pool: pool.sks-keyservers.net. -function get_key_robust() { - KEY=$1 - set +e - - tries=1 - while [ $tries -le 3 ]; do - if [ $tries -eq 3 ]; then - set -e - fi - - echo "Importing the key, try: $tries" - # Behind a firewall should use the port 80 instead of the default port 11371 - apt-key adv --keyserver hkp://pool.sks-keyservers.net:80 --recv-keys ${KEY} && break - - tries=$((tries+1)) - done - - set -e -} - -export -f get_key_robust diff --git a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev b/integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev new file mode 100755 index 0000000000..498169c326 --- /dev/null +++ b/integration/scripts/files/elements/ubuntu-guest/extra-data.d/11-ssh-key-dev @@ -0,0 +1,17 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +source $_LIB/die + +[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" + +# Guest agent needs to ssh into the controller to download code in dev mode. +if [[ ${DEV_MODE} == "true" && -e ${SSH_DIR}/id_rsa ]]; then + sudo -Hiu ${HOST_SCP_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa + sudo -Hiu ${HOST_SCP_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub +fi diff --git a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep b/integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep deleted file mode 100755 index 5a0c1ecdcd..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/15-trove-dep +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER -# PURPOSE: Setup the requirements file for use by 15-reddwarf-dep - -source $_LIB/die - -TROVE_BRANCH=${TROVE_BRANCH:-'master'} -REQUIREMENTS_FILE=${TROVESTACK_SCRIPTS}/../../requirements.txt - -[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" -[ -e ${REQUIREMENTS_FILE} ] || die "Requirements not found" -[ -n "$HOST_USERNAME" ] || die "HOST_USERNAME not set" - -sudo -Hiu ${HOST_USERNAME} dd if=${REQUIREMENTS_FILE} of=${TMP_HOOKS_PATH}/requirements.txt - -UC_FILE=upper-constraints.txt -UC_DIR=$(pwd) -UC_BRANCH=${TROVE_BRANCH##stable/} - -curl -L -o "${UC_DIR}/${UC_FILE}" "https://releases.openstack.org/constraints/upper/${UC_BRANCH}" -if [ -f "${UC_DIR}/${UC_FILE}" ]; then - sudo -Hiu ${HOST_USERNAME} dd if="${UC_DIR}/${UC_FILE}" of=${TMP_HOOKS_PATH}/${UC_FILE} - rm -f "${UC_DIR}/${UC_FILE}" -fi diff --git a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key b/integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key deleted file mode 100755 index 894950dbef..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/62-ssh-key +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER -# PURPOSE: creates the SSH 
key on the host if it doesn't exist. Then this copies the keys over to a staging area where -# they will be duplicated in the guest VM. -# This process allows the host to log into the guest but more importantly the guest phones home to get the trove -# source - -source $_LIB/die - -[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" -[ -n "${HOST_USERNAME}" ] || die "HOST_USERNAME needs to be set to the user for the current user on the host" - -if [ `whoami` = "root" ]; then - die "This should not be run as root" -fi - -# Guest agent needs to ssh into the controller to download code in dev mode. -if [ -e ${SSH_DIR}/id_rsa ]; then - sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa - sudo -Hiu ${HOST_USERNAME} dd if=${SSH_DIR}/id_rsa.pub of=${TMP_HOOKS_PATH}/id_rsa.pub -else - die "SSH keys must exist" -fi diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps b/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps deleted file mode 100755 index 3a8cacfb6d..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: Install basic services and applications - -set -e -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive -apt-get --allow-unauthenticated -y install ntp apparmor-utils diff --git a/integration/scripts/files/elements/guest-agent/install.d/50-user b/integration/scripts/files/elements/ubuntu-guest/install.d/11-user similarity index 99% rename from integration/scripts/files/elements/guest-agent/install.d/50-user rename to integration/scripts/files/elements/ubuntu-guest/install.d/11-user index 8a2b145fe5..456073f703 100755 --- a/integration/scripts/files/elements/guest-agent/install.d/50-user +++ b/integration/scripts/files/elements/ubuntu-guest/install.d/11-user @@ -19,4 +19,4 @@ if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then ${GUEST_USERNAME} ${GUEST_USERNAME} _EOF_ -fi +fi \ No newline at end of file diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev b/integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev new file mode 100755 index 0000000000..8be7882a40 --- /dev/null +++ b/integration/scripts/files/elements/ubuntu-guest/install.d/12-ssh-key-dev @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ ${DIB_DEBUG_TRACE:-0} -gt 0 ]; then + set -x +fi +set -eu +set -o pipefail + +GUEST_SSH_DIR="/home/${GUEST_USERNAME}/.ssh" +TMP_HOOKS_DIR="/tmp/in_target.d" + +if [ ! 
-e ${GUEST_SSH_DIR} ]; then
+    # this method worked more reliably in VMware Fusion than using sudo -Hiu ${GUEST_USERNAME}
+    mkdir ${GUEST_SSH_DIR}
+    chown -R ${GUEST_USERNAME}:${GUEST_USERNAME} ${GUEST_SSH_DIR}
+fi
+
+if [[ ${DEV_MODE} == "true" && -e "${TMP_HOOKS_DIR}/id_rsa" ]]; then
+    sudo -Hiu ${GUEST_USERNAME} dd of=${GUEST_SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub
+    sudo -Hiu ${GUEST_USERNAME} dd of=${GUEST_SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa
+    sudo -Hiu ${GUEST_USERNAME} chmod 600 ${GUEST_SSH_DIR}/id_rsa
+fi
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep b/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
deleted file mode 100755
index 25b4845928..0000000000
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# CONTEXT: GUEST during CONSTRUCTION as ROOT
-# PURPOSE: Install trove guest python dependencies - see trovestack functions_qemu
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-apt-get --allow-unauthenticated -y install \
-    libxml2-dev libxslt1-dev libffi-dev libssl-dev libyaml-dev \
-    python3-pip python3-sqlalchemy python3-setuptools
-
-# Install python 3.7, some python lib (e.g. oslo.concurrency>4.0.0) requries
-# Python 3.7
-add-apt-repository --yes ppa:deadsnakes/ppa
-apt update
-apt install -y python3.7 python3.7-dev
-
-update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 10
-python3.5 -m pip install pip==9.0.3
-python3.5 -m pip install -U wheel setuptools
-
-TMP_HOOKS_DIR="/tmp/in_target.d"
-
-UPPER_CONSTRAINTS=
-if [ -f ${TMP_HOOKS_DIR}/upper-constraints.txt ]; then
-    UPPER_CONSTRAINTS=" -c ${TMP_HOOKS_DIR}/upper-constraints.txt"
-fi
-
-python3.7 -m pip install pip==9.0.3
-python3.7 -m pip install -U wheel setuptools
-python3.7 -m pip install --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS}
-
-echo "diagnostic pip freeze output follows"
-python3.7 -m pip freeze
-echo "diagnostic pip freeze output above"
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/50-user b/integration/scripts/files/elements/ubuntu-guest/install.d/50-user
deleted file mode 100755
index 99f68966b6..0000000000
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/50-user
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# CONTEXT: GUEST during CONSTRUCTION as ROOT
-# PURPOSE: Add the guest image user that will own the trove agent source...if the user does not already exist
-
-set -e
-set -o xtrace
-
-if ! id -u ${GUEST_USERNAME} >/dev/null 2>&1; then
-    echo "Adding ${GUEST_USERNAME} user"
-    useradd -G sudo -m ${GUEST_USERNAME} -s /bin/bash
-    chown ${GUEST_USERNAME}:${GUEST_USERNAME} /home/${GUEST_USERNAME}
-    passwd ${GUEST_USERNAME} <<_EOF_
-${GUEST_USERNAME}
-${GUEST_USERNAME}
-_EOF_
-fi
-
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key b/integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key
deleted file mode 100755
index 21a54d28ba..0000000000
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/62-ssh-key
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# PURPOSE: take "staged" ssh keys (see extra-data.d/62-ssh-key) and put them in the GUEST_USERS home directory
-# In future, this should be removed and use Nova keypair to inject ssh keys.
-
-set -e
-set -o xtrace
-
-SSH_DIR="/home/${GUEST_USERNAME}/.ssh"
-TMP_HOOKS_DIR="/tmp/in_target.d"
-
-if [ !
-e ${SSH_DIR} ]; then - # this method worked more reliable in vmware fusion over doing sudo -Hiu ${GUEST_USERNAME} - mkdir ${SSH_DIR} - chown ${GUEST_USERNAME}:${GUEST_USERNAME} ${SSH_DIR} -fi - -if [ -e "${TMP_HOOKS_DIR}/id_rsa" ]; then - sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa.pub if=${TMP_HOOKS_DIR}/id_rsa.pub - sudo -Hiu ${GUEST_USERNAME} dd of=${SSH_DIR}/id_rsa if=${TMP_HOOKS_DIR}/id_rsa - sudo -Hiu ${GUEST_USERNAME} chmod 600 ${SSH_DIR}/id_rsa -else - echo "SSH Keys were not staged by host" - exit -1 -fi diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh b/integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh deleted file mode 100755 index 2134c19889..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/install.d/98-ssh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -# Regenerate host keys now. XXX: Really should be a cloud-init task, should get -# that working. - -set -e -set -o xtrace - -dpkg-reconfigure openssh-server diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt b/integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt deleted file mode 100755 index cc348c5cb9..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/install.d/99-clean-apt +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: Delete contents of apt cache on guest (saves image disk space) - -set -e -set -o xtrace - -apt-get clean - - diff --git a/integration/scripts/files/elements/ubuntu-guest/post-install.d/05-ipforwarding b/integration/scripts/files/elements/ubuntu-guest/post-install.d/11-ipforwarding similarity index 100% rename from integration/scripts/files/elements/ubuntu-guest/post-install.d/05-ipforwarding rename to integration/scripts/files/elements/ubuntu-guest/post-install.d/11-ipforwarding diff --git a/integration/scripts/files/elements/ubuntu-guest/post-install.d/10-ntp b/integration/scripts/files/elements/ubuntu-guest/post-install.d/12-ntp similarity index 100% rename from integration/scripts/files/elements/ubuntu-guest/post-install.d/10-ntp rename to integration/scripts/files/elements/ubuntu-guest/post-install.d/12-ntp diff --git a/integration/scripts/files/elements/ubuntu-guest/post-install.d/62-trove-guest-sudoers b/integration/scripts/files/elements/ubuntu-guest/post-install.d/13-trove-guest-sudoers similarity index 100% rename from integration/scripts/files/elements/ubuntu-guest/post-install.d/62-trove-guest-sudoers rename to integration/scripts/files/elements/ubuntu-guest/post-install.d/13-trove-guest-sudoers diff --git a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools deleted file mode 100755 index 7b9221cf99..0000000000 --- a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -# Install baseline packages and tools. 
- -set -e -set -o xtrace - -apt-get --allow-unauthenticated install -y language-pack-en python-software-properties software-properties-common \ No newline at end of file diff --git a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/11-baseline-tools b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/11-baseline-tools new file mode 100755 index 0000000000..12c864d64b --- /dev/null +++ b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/11-baseline-tools @@ -0,0 +1,7 @@ +#!/bin/bash +# Install baseline packages and tools. + +set -e +set -o xtrace + +DEBIAN_FRONTEND=noninteractive apt-get --allow-unauthenticated install -y -qq software-properties-common apt-transport-https ca-certificates ntp >/dev/null \ No newline at end of file diff --git a/integration/scripts/files/elements/ubuntu-mariadb/README.md b/integration/scripts/files/elements/ubuntu-mariadb/README.md deleted file mode 100644 index 757f00b864..0000000000 --- a/integration/scripts/files/elements/ubuntu-mariadb/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Sets up a MariaDB server install in the image. - -TODO: auto-tune settings based on host resources or metadata service. diff --git a/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local b/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local deleted file mode 100755 index a3e1dc7c7d..0000000000 --- a/integration/scripts/files/elements/ubuntu-mariadb/pre-install.d/20-apparmor-mysql-local +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -set -e - -#CONTEXT: chroot on host -#PURPOSE: Allows mysqld to create temporary files when restoring backups - -cat <>/etc/apparmor.d/local/usr.sbin.mysqld - /tmp/ rw, - /tmp/** rwk, -EOF diff --git a/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local b/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local deleted file mode 100755 index 90bd85b10c..0000000000 --- a/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/20-apparmor-mysql-local +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -set -e - -#CONTEXT: chroot on host -#PURPOSE: Allows mysqld to create temporary files when restoring backups - -mkdir -p /etc/apparmor.d/local/ -cat <>/etc/apparmor.d/local/usr.sbin.mysqld - /tmp/ rw, - /tmp/** rwk, -EOF diff --git a/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql b/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql deleted file mode 100755 index cb33bb6c4f..0000000000 --- a/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -set -e -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive - -cat > "/etc/sysctl.d/10-postgresql-performance.conf" << _EOF_ -# See 'http://www.postgresql.org/docs/9.6/static/kernel-resources.html' -# for best practices. -# It is recommended to disable memory overcommit, -# but the Python interpreter may require it on smaller flavors. -# We therefore stick with the heuristic overcommit setting. -vm.overcommit_memory=0 -_EOF_ - -apt-get --allow-unauthenticated -y install libpq-dev postgresql-12 postgresql-server-dev-12 postgresql-client-12 - -pgsql_conf=/etc/postgresql/12/main/postgresql.conf -sed -i "/listen_addresses/c listen_addresses = '*'" ${pgsql_conf} - -systemctl restart postgresql - -# Install the native Python client. 
-pip3 install psycopg2 diff --git a/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo b/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo deleted file mode 100755 index 7f387b2369..0000000000 --- a/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh - -set -e -set -o xtrace - -[ -n "${DIB_RELEASE}" ] || die "RELEASE must be set to a valid Ubuntu release (e.g. trusty)" - -cat < /etc/apt/sources.list.d/postgresql.list -deb http://apt.postgresql.org/pub/repos/apt/ ${DIB_RELEASE}-pgdg main -EOL - -wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - - -apt-get update diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps b/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps deleted file mode 100644 index eaa808e186..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps +++ /dev/null @@ -1 +0,0 @@ -ubuntu-guest diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd b/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd deleted file mode 100755 index 030efc8927..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER -# PURPOSE: stages the bootstrap file and upstart conf file while replacing variables so that guest image is properly -# configured - -source $_LIB/die - -[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set" - -[ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image" -[ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance" -[ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host" -[ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir" -[ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir" - -sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > ${TMP_HOOKS_PATH}/trove-guest.service - diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc deleted file mode 100755 index c36eb31ba5..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: take "staged" trove-guest.conf file and put it in the init directory on guest image - -dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service - -systemctl enable trove-guest.service diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates deleted file mode 100755 index b55a0ea294..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates +++ /dev/null @@ -1,12 +0,0 @@ 
-#!/bin/sh - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: configure trove-guest service to use system store of trusted certificates - -GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d" - -mkdir -v -p ${GUEST_UNIT_DROPINS} -cat < ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf -[Service] -Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs -EOF diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel b/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel deleted file mode 100755 index b60a0a95e0..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/post-install.d/91-hwe-kernel +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -set -e -set -o xtrace - -# The HWE stack must be installed for nested virtualization on ppc64el. This -# environment variable is set automatically by trovestack, but it may also be -# set by the user when manually invoking disk-image-create. - -case "$DIB_USE_HWE_KERNEL" in - true|True|TRUE|yes|Yes|YES) - DIB_USE_HWE_KERNEL=true - ;; - *) - DIB_USE_HWE_KERNEL=false - ;; -esac - -if [ "$DIB_USE_HWE_KERNEL" == "true" ]; then - export DEBIAN_FRONTEND=noninteractive - - PKG_ARCH=$(dpkg --print-architecture) - - case "$PKG_ARCH" in - amd64|arm64|ppc64el|s390x) - apt-get --allow-unauthenticated install -y linux-generic-hwe-16.04 - ;; - esac -fi diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs b/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs deleted file mode 100755 index 125f6c7892..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: Install basic services and applications - -set -e -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive -apt-get -y purge acpid\ - apport\ - apport-symptoms\ - apt-transport-https\ - aptitude\ - at\ - bash-completion\ - bc\ - bind9-host\ - bsdmainutils\ - busybox-static\ - byobu\ - command-not-found\ - command-not-found-data\ - curl\ - dbus\ - dmidecode\ - dosfstools\ - ed\ - fonts-ubuntu-font-family-console\ - friendly-recovery\ - ftp\ - fuse\ - geoip-database\ - groff-base\ - hdparm\ - info\ - install-info\ - iptables\ - iputils-tracepath\ - irqbalance\ - language-selector-common\ - libaccountsservice0\ - libevent-2.0-5\ - libgeoip1\ - libnfnetlink0\ - libpcap0.8\ - libpci3\ - libpipeline1\ - libpolkit-gobject-1-0\ - libsasl2-modules\ - libusb-1.0-0\ - lshw\ - lsof\ - ltrace\ - man-db\ - mlocate\ - mtr-tiny\ - nano\ - ntfs-3g\ - parted\ - patch\ - plymouth-theme-ubuntu-text\ - popularity-contest\ - powermgmt-base\ - ppp\ - screen\ - shared-mime-info\ - strace\ - tcpdump\ - telnet\ - time\ - tmux\ - ubuntu-standard\ - ufw\ - update-manager-core\ - update-notifier-common\ - usbutils\ - uuid-runtime\ - -# The following packages cannot be removed as they cause cloud-init to be -# uninstalled in Ubuntu 14.04 -# gir1.2-glib-2.0 -# libdbus-glib-1-2 -# libgirepository-1.0-1 -# python-chardet -# python-serial -# xz-utils - -apt-get -y autoremove - diff --git a/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps b/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps deleted file mode 100644 index b215f58443..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps +++ /dev/null @@ -1 +0,0 @@ -ubuntu-mariadb \ No newline at end of file diff --git 
a/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb b/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb deleted file mode 100755 index 6d12202c34..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: Install controller base required packages -# Refer to https://mariadb.com/kb/en/library/installing-mariadb-deb-files - -set -e -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive - -# These GPG key IDs are used to fetch keys from a keyserver on Ubuntu & Debian -apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8 -curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | - bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale - -apt-get install -y -qq apt-transport-https ca-certificates gnupg2 - -# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html -wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb -dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb - -# Disable password prompt -debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''" -debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''" - -apt-get update -qq -apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 libmariadb3 mariadb-backup mariadb-common - -cat </etc/mysql/conf.d/no_perf_schema.cnf -[mysqld] -performance_schema = off -EOF - -chown mysql:mysql /etc/mysql/my.cnf -rm -f /etc/init.d/mysql - -systemctl daemon-reload -systemctl enable mariadb \ No newline at end of file diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps b/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps deleted file mode 100644 index bd3447a6ec..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps +++ /dev/null @@ -1 +0,0 @@ -ubuntu-mysql diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql deleted file mode 100755 index e9f2d8c67d..0000000000 --- a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -# CONTEXT: GUEST during CONSTRUCTION as ROOT -# PURPOSE: Install controller base required packages - -set -e -set -o xtrace - -export DEBIAN_FRONTEND=noninteractive - -apt-get --allow-unauthenticated -y install mysql-client mysql-server gnupg2 - -# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html -wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb -dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb -apt-get update - -# Xenial provides mysql 5.7 which requires percona-xtrabackup-24 -PXB_VERSION_OVERRIDE=24 -apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE} - -cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_ -[mysqld] -performance_schema = off -show_compatibility_56 = on -_EOF_ - -mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf -chown mysql:mysql /etc/mysql/my.cnf -cat >/etc/mysql/my.cnf <<_EOF_ -[mysql] -!includedir /etc/mysql/conf.d/ -_EOF_ - -if [ -e /etc/init/mysql.conf ]; then - rm -f /etc/init/mysql.conf -fi - -systemctl enable 
mysql
diff --git a/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps b/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
deleted file mode 100644
index 6a0e1b09c6..0000000000
--- a/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
+++ /dev/null
@@ -1 +0,0 @@
-ubuntu-postgresql
\ No newline at end of file
diff --git a/integration/scripts/functions_qemu b/integration/scripts/functions_qemu
index f43078d9a1..783ae9c69d 100644
--- a/integration/scripts/functions_qemu
+++ b/integration/scripts/functions_qemu
@@ -3,44 +3,36 @@
 # Additional functions that would mostly just pertain to a Ubuntu + Qemu setup
 #
-function build_vm() {
-    exclaim "Actually building the image, this can take up to 15 minutes"
-    rm -rf ~/.cache/image-create
+function build_guest_image() {
+    exclaim "Actually building the image, params: $@"
 
-    local datastore_type=$1
-    local guest_os=$2
-    local guest_release=$3
-    local dev_mode=$4
-    local guest_username=$5
-    local image_output=$6
+    local guest_os=$1
+    local guest_release=$2
+    local dev_mode=$3
+    local guest_username=$4
+    local image_output=$5
     local elementes="base vm"
     local trove_elements_path=${PATH_TROVE}/integration/scripts/files/elements
-    local GUEST_IMAGETYPE=${GUEST_IMAGETYPE:-"qcow2"}
-    local GUEST_IMAGESIZE=${GUEST_IMAGESIZE:-4}
+    local GUEST_IMAGESIZE=${GUEST_IMAGESIZE:-3}
    local GUEST_CACHEDIR=${GUEST_CACHEDIR:-"$HOME/.cache/image-create"}
+    rm -rf ${GUEST_CACHEDIR}
     local working_dir=$(dirname ${image_output})
 
     export GUEST_USERNAME=${guest_username}
+    export HOST_SCP_USERNAME=${HOST_SCP_USERNAME:-$(whoami)}
+    export ESCAPED_PATH_TROVE=$(echo ${PATH_TROVE} | sed 's/\//\\\//g')
+    export DEV_MODE=${dev_mode,,}
 
     # In dev mode, the trove guest agent needs to download trove code from
     # trove-taskmanager host during service initialization.
-    if [[ "${dev_mode,,}" == "true" ]]; then
-        export PATH_TROVE=${PATH_TROVE}
-        export ESCAPED_PATH_TROVE=$(echo ${PATH_TROVE} | sed 's/\//\\\//g')
-        export GUEST_LOGDIR=${GUEST_LOGDIR:-"/var/log/trove/"}
-        export ESCAPED_GUEST_LOGDIR=$(echo ${GUEST_LOGDIR} | sed 's/\//\\\//g')
-        export TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS}
-        export HOST_SCP_USERNAME=${HOST_SCP_USERNAME:-$(whoami)}
-        export HOST_USERNAME=${HOST_SCP_USERNAME}
+    if [[ "${DEV_MODE}" == "true" ]]; then
         export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
-        export DEST=${DEST:-'/opt/stack'}
-        export TROVE_BRANCH=${TROVE_BRANCH:-'master'}
         manage_ssh_keys
     fi
 
     # For system-wide installs, DIB will automatically find the elements, so we only check local path
-    if [ "${DIB_LOCAL_ELEMENTS_PATH}" ]; then
+    if [[ "${DIB_LOCAL_ELEMENTS_PATH}" ]]; then
         export ELEMENTS_PATH=${trove_elements_path}:${DIB_LOCAL_ELEMENTS_PATH}
     else
         export ELEMENTS_PATH=${trove_elements_path}
@@ -50,36 +42,26 @@ function build_vm() {
     export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
 
     # https://cloud-images.ubuntu.com/releases is more stable than the daily
-    # builds(https://cloud-images.ubuntu.com/xenial/current/),
-    # e.g. sometimes SHA256SUMS file is missing in the daily builds
-    declare -A releasemapping=( ["xenial"]="16.04" ["bionic"]="18.04")
+    # builds (https://cloud-images.ubuntu.com/xenial/current/),
+    # e.g. sometimes the SHA256SUMS file is missing from the daily builds site.
+    # Ref: diskimage_builder/elements/ubuntu/root.d/10-cache-ubuntu-tarball
+    declare -A image_file_mapping=( ["xenial"]="ubuntu-16.04-server-cloudimg-amd64-root.tar.gz" ["bionic"]="ubuntu-18.04-server-cloudimg-amd64.squashfs" )
     export DIB_CLOUD_IMAGES="https://cloud-images.ubuntu.com/releases/${DIB_RELEASE}/release/"
-    export BASE_IMAGE_FILE="ubuntu-${releasemapping[${DIB_RELEASE}]}-server-cloudimg-amd64-root.tar.gz"
+    export BASE_IMAGE_FILE=${image_file_mapping[${DIB_RELEASE}]}
 
     TEMP=$(mktemp -d ${working_dir}/diskimage-create.XXXXXXX)
     pushd $TEMP > /dev/null
 
     elementes="$elementes ${guest_os}"
-
-    if [[ "${dev_mode,,}" == "false" ]]; then
-        elementes="$elementes pip-and-virtualenv"
-        elementes="$elementes pip-cache"
-        elementes="$elementes guest-agent"
-    else
-        # Install guest agent dependencies, user, etc.
-        elementes="$elementes ${guest_os}-guest"
-        # Install guest agent service
-        elementes="$elementes ${guest_os}-${guest_release}-guest"
-    fi
-
-    elementes="$elementes ${guest_os}-${datastore_type}"
-    elementes="$elementes ${guest_os}-${guest_release}-${datastore_type}"
+    elementes="$elementes pip-and-virtualenv"
+    elementes="$elementes pip-cache"
+    elementes="$elementes guest-agent"
+    elementes="$elementes ${guest_os}-docker"
 
     # Build the image
     disk-image-create -x \
       -a amd64 \
       -o ${image_output} \
-      -t ${GUEST_IMAGETYPE} \
       --image-size ${GUEST_IMAGESIZE} \
       --image-cache ${GUEST_CACHEDIR} \
       $elementes
@@ -91,25 +73,6 @@ function build_vm() {
 
     exclaim "Image ${image_output} was built successfully."
 }
-
-function build_guest_image() {
-    exclaim "Params for build_guest_image function: $@"
-
-    local datastore_type=${1:-"mysql"}
-    local guest_os=${2:-"ubuntu"}
-    local guest_release=${3:-"xenial"}
-    local dev_mode=${4:-"true"}
-    local guest_username=${5:-"ubuntu"}
-    local output=$6
-
-    VALID_SERVICES='mysql percona mariadb redis cassandra couchbase mongodb postgresql couchdb vertica db2 pxc'
-    if ! [[ " $VALID_SERVICES " =~ " $datastore_type " ]]; then
-        exclaim "You did not pass in a valid datastore type. Valid types are:" $VALID_SERVICES
-        exit 1
-    fi
-
-    build_vm ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} ${output}
-}
-
 function clean_instances() {
     LIST=`virsh -q list|awk '{print $1}'`
     for i in $LIST; do sudo virsh destroy $i; done
@@ -117,6 +80,8 @@
 
 # In dev mode, guest agent needs to ssh into the controller to download code.
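# For orientation, a minimal sketch of the dev-mode key flow, assuming the
# standard DIB hook layout (extra-data.d hooks run on the build host and stage
# files into $TMP_HOOKS_PATH; install.d hooks run inside the chroot and read
# the same files back from /tmp/in_target.d):
#
#   # host side (extra-data.d/11-ssh-key-dev):
#   dd if=${SSH_DIR}/id_rsa of=${TMP_HOOKS_PATH}/id_rsa
#   # image side (install.d/12-ssh-key-dev):
#   dd if=/tmp/in_target.d/id_rsa of=/home/${GUEST_USERNAME}/.ssh/id_rsa
#   chmod 600 /home/${GUEST_USERNAME}/.ssh/id_rsa
#
# so the built guest can ssh back to the controller and fetch the trove source.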
function manage_ssh_keys() { + SSH_DIR=${SSH_DIR:-"$HOME/.ssh"} + if [ -d ${SSH_DIR} ]; then echo "${SSH_DIR} already exists" else diff --git a/integration/scripts/local.conf.d/ceilometer_cinder.conf.rc b/integration/scripts/local.conf.d/ceilometer_cinder.conf.rc deleted file mode 100644 index 9b80b6c690..0000000000 --- a/integration/scripts/local.conf.d/ceilometer_cinder.conf.rc +++ /dev/null @@ -1,3 +0,0 @@ -[[post-config|\$CINDER_CONF]] -[DEFAULT] -notification_driver = messagingv2 diff --git a/integration/scripts/local.conf.d/ceilometer_nova.conf.rc b/integration/scripts/local.conf.d/ceilometer_nova.conf.rc deleted file mode 100644 index fe48b0224b..0000000000 --- a/integration/scripts/local.conf.d/ceilometer_nova.conf.rc +++ /dev/null @@ -1,3 +0,0 @@ -[[post-config|\$NOVA_CONF]] -[DEFAULT] -instance_usage_audit = True diff --git a/integration/scripts/local.conf.d/ceilometer_services.conf.rc b/integration/scripts/local.conf.d/ceilometer_services.conf.rc deleted file mode 100644 index ce33948ef2..0000000000 --- a/integration/scripts/local.conf.d/ceilometer_services.conf.rc +++ /dev/null @@ -1,3 +0,0 @@ -[[post-config|\$CEILOMETER_CONF]] -[notification] -store_events = True diff --git a/integration/scripts/local.conf.d/sample.rc b/integration/scripts/local.conf.d/sample.rc deleted file mode 100644 index 401418d8da..0000000000 --- a/integration/scripts/local.conf.d/sample.rc +++ /dev/null @@ -1,42 +0,0 @@ -# -# Files in this directory are automatically added to the devstack -# local.conf file, between a specific set of tags. -# -# Filenames must end with '.rc' to be recognized; sample.rc is -# ignored. -# -# A '\' is required in front of any devstack variables since all -# .rc files are parsed first (using eval). -# -# Meta section headings must be included in each file, such as: -# [[local|localrc]] -# as the order of inserting the files is not guaranteed. -# -# All files are inherently included by default - to exclude a file, -# add a variable 'FILENAME_IN_UPPERCASE_MINUS_RC=false' in trovestack.rc -# For Example: USING_VAGRANT=false (for the using_vagrant.rc file). -# -# Symbolic links are followed, so additional files can be loaded -# by placing them in an external directory and linking it in -# local.conf.d (this should allow complete flexibility in setting -# up testing options). -# For Example: -# cd /path/to/trove/integration/scripts/local.conf.d -# ln -s $HOME/local.conf.d local.conf.d -# cp /path/to/my_conf.rc $HOME/local.conf.d - - -[[local|localrc]] -# Put regular devstack variables under this meta section heading. -# This section is written out to a file and sourced by devstack, -# so it can contain logic as well. 
- -# The following section types should only contain ini file style -# section headings and name=value pairs -[[post-config|\$TROVE_CONF]] - -[[post-config|\$TROVE_TASKMANAGER_CONF]] - -[[post-config|\$TROVE_CONDUCTOR_CONF]] - -[[post-config|\$TROVE_API_PASTE_INI]] diff --git a/integration/scripts/local.conf.d/trove_services.conf.rc b/integration/scripts/local.conf.d/trove_services.conf.rc deleted file mode 100644 index 6eedc1c30b..0000000000 --- a/integration/scripts/local.conf.d/trove_services.conf.rc +++ /dev/null @@ -1,24 +0,0 @@ -[[post-config|\$TROVE_CONF]] -[profiler] -enabled = $ENABLE_PROFILER -trace_sqlalchemy = $PROFILER_TRACE_SQL - -[[post-config|\$TROVE_TASKMANAGER_CONF]] -[profiler] -enabled = $ENABLE_PROFILER -trace_sqlalchemy = $PROFILER_TRACE_SQL - -[[post-config|\$TROVE_CONDUCTOR_CONF]] -[profiler] -enabled = $ENABLE_PROFILER -trace_sqlalchemy = $PROFILER_TRACE_SQL - -[[post-config|\$TROVE_GUESTAGENT_CONF]] -[profiler] -enabled = $ENABLE_PROFILER -trace_sqlalchemy = $PROFILER_TRACE_SQL - -[[post-config|\$TROVE_API_PASTE_INI]] -[filter:osprofiler] -enabled = $ENABLE_PROFILER -hmac_keys = $PROFILER_HMAC_KEYS diff --git a/integration/scripts/local.conf.d/use_kvm.rc b/integration/scripts/local.conf.d/use_kvm.rc deleted file mode 100644 index 06bc2ebcb2..0000000000 --- a/integration/scripts/local.conf.d/use_kvm.rc +++ /dev/null @@ -1,4 +0,0 @@ -[[local|localrc]] - -# force kvm as the libvirt type. -LIBVIRT_TYPE=kvm diff --git a/integration/scripts/local.conf.d/use_uuid_token.rc b/integration/scripts/local.conf.d/use_uuid_token.rc deleted file mode 100644 index 587a4064c8..0000000000 --- a/integration/scripts/local.conf.d/use_uuid_token.rc +++ /dev/null @@ -1,3 +0,0 @@ -[[local|localrc]] - -KEYSTONE_TOKEN_FORMAT=UUID diff --git a/integration/scripts/local.conf.d/using_vagrant.rc b/integration/scripts/local.conf.d/using_vagrant.rc deleted file mode 100644 index 7333cd466b..0000000000 --- a/integration/scripts/local.conf.d/using_vagrant.rc +++ /dev/null @@ -1,9 +0,0 @@ -[[local|localrc]] - -# This is similar to code found at -# https://github.com/bcwaldon/vagrant_devstack/blob/master/Vagrantfile -# and seems to make instances ping'able in VirtualBox. -FLAT_INTERFACE=eth1 -PUBLIC_INTERFACE=eth1 -FLOATING_RANGE=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.128/28 -HOST_IP=`ip_chunk eth0 1`.`ip_chunk eth0 2`.`ip_chunk eth0 3`.`ip_chunk eth0 4` diff --git a/integration/scripts/local.conf.rc b/integration/scripts/local.conf.rc deleted file mode 100644 index b9a2edabea..0000000000 --- a/integration/scripts/local.conf.rc +++ /dev/null @@ -1,37 +0,0 @@ -$TROVE_PRESENT_TAG -# Set some arguments for devstack. -# -# Note: This file contains autogenerated parts. -# All lines are removed from between the tag/end of tag -# markers (lines with '$MARKER_TOKEN' at beginning and end) and -# are replaced by trovestack. -# Edits to these sections will not persist. -# -# See the '$USER_OPTS_TAG' section -# for ways to insert user args into this file. 
-#
-
-#
-# This section is for things that belong in localrc
-# It comes from $DEFAULT_LOCALRC
-#
-[[local|localrc]]
-
-$LOCALRC_OPTS_TAG
-$LOCALRC_OPTS_TAG_END
-
-#
-# User options here were inserted from the file USER_LOCAL_CONF
-# (defaults to $USERHOME/.$LOCAL_CONF)
-#
-
-$USER_OPTS_TAG
-$USER_OPTS_TAG_END
-
-#
-# Additional options here were inserted by trovestack
-# automatically from files in $LOCAL_CONF_D
-#
-
-$ADD_OPTS_TAG
-$ADD_OPTS_TAG_END
diff --git a/integration/scripts/trovestack b/integration/scripts/trovestack
index 4f6273e652..d41b236f3f 100755
--- a/integration/scripts/trovestack
+++ b/integration/scripts/trovestack
@@ -124,7 +124,7 @@ if is_fedora; then
 else
     PKG_INSTALL_OPTS="DEBIAN_FRONTEND=noninteractive"
     PKG_MGR=apt-get
-    PKG_GET_ARGS="-y --allow-unauthenticated --force-yes"
+    PKG_GET_ARGS="-y --allow-unauthenticated --force-yes -qq"
 fi
 PKG_INSTALL_ARG="install"
 PKG_UPDATE_ARG="update"
@@ -522,57 +522,15 @@ function set_bin_path() {
 
 function cmd_set_datastore() {
     local IMAGEID=$1
-    local DATASTORE_TYPE=$2
-
-    # rd_manage datastore_update
-    rd_manage datastore_update "$DATASTORE_TYPE" ""
-    PACKAGES=${PACKAGES:-""}
-
-    if [ "$DATASTORE_TYPE" == "mysql" ]; then
-        VERSION="5.7"
-    elif [ "$DATASTORE_TYPE" == "percona" ]; then
-        PACKAGES=${PACKAGES:-"percona-server-server-5.6"}
-        VERSION="5.6"
-    elif [ "$DATASTORE_TYPE" == "pxc" ]; then
-        PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"}
-        VERSION="5.6"
-    elif [ "$DATASTORE_TYPE" == "mariadb" ]; then
-        VERSION="10.4"
-    elif [ "$DATASTORE_TYPE" == "mongodb" ]; then
-        PACKAGES=${PACKAGES:-"mongodb-org"}
-        VERSION="3.2"
-    elif [ "$DATASTORE_TYPE" == "redis" ]; then
-        PACKAGES=${PACKAGES:-""}
-        VERSION="3.2.6"
-    elif [ "$DATASTORE_TYPE" == "cassandra" ]; then
-        PACKAGES=${PACKAGES:-"cassandra"}
-        VERSION="2.1.0"
-    elif [ "$DATASTORE_TYPE" == "couchbase" ]; then
-        PACKAGES=${PACKAGES:-"couchbase-server"}
-        VERSION="2.2.0"
-    elif [ "$DATASTORE_TYPE" == "postgresql" ]; then
-        VERSION="9.6"
-    elif [ "$DATASTORE_TYPE" == "couchdb" ]; then
-        PACKAGES=${PACKAGES:-"couchdb"}
-        VERSION="1.6.1"
-    elif [ "$DATASTORE_TYPE" == "vertica" ]; then
-        PACKAGES=${PACKAGES:-"vertica"}
-        VERSION="9.0.1"
-    elif [ "$DATASTORE_TYPE" == "db2" ]; then
-        PACKAGES=${PACKAGES:-""}
-        VERSION="11.1"
-    else
-        echo "Unrecognized datastore type. ($DATASTORE_TYPE)"
-        exit 1
-    fi
+    rd_manage datastore_update "${DATASTORE_TYPE}" ""
 
     # trove-manage datastore_version_update
-    rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1
-    rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION"
+    rd_manage datastore_version_update "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" "${DATASTORE_TYPE}" $IMAGEID "" 1
+    rd_manage datastore_update "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
 
-    if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then
+    if [[ -f "$PATH_TROVE"/trove/templates/${DATASTORE_TYPE}/validation-rules.json ]]; then
         # add the configuration parameters to the database for the kick-start datastore
-        rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json
+        rd_manage db_load_datastore_config_parameters "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" "$PATH_TROVE"/trove/templates/${DATASTORE_TYPE}/validation-rules.json
     fi
 }
 
@@ -627,8 +585,8 @@ function install_test_packages() {
     DATASTORE_TYPE=$1
 
     sudo -H $HTTP_PROXY pip install openstack.nose_plugin proboscis pexpect
-    if [ "$DATASTORE_TYPE" = "couchbase" ]; then
-        if [ "$DISTRO" == "ubuntu" ]; then
+    if [[ "$DATASTORE_TYPE" = "couchbase" ]]; then
+        if [[ "$DISTRO" == "ubuntu" ]]; then
             # Install Couchbase SDK for scenario tests.
             sudo -H $HTTP_PROXY curl http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
             echo "deb http://packages.couchbase.com/ubuntu trusty trusty/main" | sudo tee /etc/apt/sources.list.d/couchbase-csdk.list
@@ -649,12 +607,12 @@ function mod_confs() {
     TROVE_REPORT_DIR=${TROVE_REPORT_DIR:=$TROVESTACK_SCRIPTS/../report/}
 
     EXTRA_CONF=$TROVESTACK_SCRIPTS/conf/test.extra.conf
-    if [ -e $EXTRA_CONF ]; then
+    if [[ -e $EXTRA_CONF ]]; then
         cat $EXTRA_CONF >> $TEST_CONF
     fi
 
     # Append datastore specific configuration file
     DATASTORE_CONF=$TROVESTACK_SCRIPTS/conf/$DATASTORE_TYPE.conf
-    if [ ! -f $DATASTORE_CONF ]; then
+    if [[ ! -f $DATASTORE_CONF ]]; then
         exclaim "Datastore configuration file ${DATASTORE_CONF} not found"
         exit 1
     fi
@@ -695,14 +653,14 @@ function mod_confs() {
         sed -i "/%shared_network_subnet%/d" $TEST_CONF
     fi
 
-    if [ "$DATASTORE_TYPE" = "vertica" ]; then
+    if [[ "$DATASTORE_TYPE" = "vertica" ]]; then
         # Vertica needs more time than mysql for its boot/start/stop operations.
         setup_cluster_configs cluster_member_count 3
-    elif [ "$DATASTORE_TYPE" = "pxc" ]; then
+    elif [[ "$DATASTORE_TYPE" = "pxc" ]]; then
         setup_cluster_configs min_cluster_member_count 2
-    elif [ "$DATASTORE_TYPE" = "cassandra" ]; then
+    elif [[ "$DATASTORE_TYPE" = "cassandra" ]]; then
         setup_cluster_configs cluster_member_count 2
-    elif [ "$DATASTORE_TYPE" = "mongodb" ]; then
+    elif [[ "$DATASTORE_TYPE" = "mongodb" ]]; then
         setup_cluster_configs cluster_member_count 2
         # Decrease the number of required config servers per cluster to save resources.
iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1 @@ -747,7 +705,7 @@ function cmd_test_init() { local DATASTORE_TYPE=$1 local DATASTORE_VERSION=$2 - if [ -z "${DATASTORE_TYPE}" ]; then + if [[ -z "${DATASTORE_TYPE}" ]]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi @@ -768,73 +726,60 @@ function cmd_test_init() { pip3 install -U git+https://opendev.org/openstack/python-troveclient@master#egg=python-troveclient } +# Build trove guest image function cmd_build_image() { exclaim "Params for cmd_build_image function: $@" - local IMAGE_DATASTORE_TYPE=${1:-'mysql'} - local IMAGE_GUEST_OS=${2:-'ubuntu'} - local IMAGE_GUEST_RELEASE=${3:-'xenial'} - local DEV_MODE=${4:-'true'} - local guest_username=${5:-'ubuntu'} - local output=$6 + local image_guest_os=${1:-'ubuntu'} + local image_guest_release=${2:-'bionic'} + local dev_mode=${3:-'true'} + local guest_username=${4:-'ubuntu'} + local output=$5 if [[ -z "$output" ]]; then - image_name="trove-datastore-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}" + image_name="trove-guest-${image_guest_os}-${image_guest_release}" + if [[ ${dev_mode} == "true" ]]; then + image_name="${image_name}-dev" + fi image_folder=$HOME/images - output="${image_folder}/${image_name}" + output="${image_folder}/${image_name}.qcow2" fi + # Always rebuild the image. - sudo rm -f $output - sudo mkdir -p $(dirname $output); sudo chmod 777 -R $(dirname $output) + sudo rm -rf ${output} + sudo mkdir -p $(dirname ${output}); sudo chmod 777 -R $(dirname ${output}) echo "Ensuring we have all packages needed to build image." sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS update - sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS install qemu git kpartx debootstrap + sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS install qemu git kpartx debootstrap squashfs-tools sudo -H $HTTP_PROXY pip install diskimage-builder - exclaim "Use diskimage-builder to actually build the Trove Guest Agent Image." - build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output + build_guest_image ${image_guest_os} ${image_guest_release} ${dev_mode} ${guest_username} ${output} } # Build guest image and upload to Glance, register the datastore and configuration parameters. -# We could skip the image build and upload by: -# 1. MYSQL_IMAGE_ID is passed, or -# 2. There is an image in Glance contains the datastore name function cmd_build_and_upload_image() { - local datastore_type=$1 - local guest_os=${2:-"ubuntu"} - local guest_release=${3:-"xenial"} - local dev_mode=${4:-"true"} - local guest_username=${5:-"ubuntu"} - local output_dir=${6:-"$HOME/images"} + local guest_os=${1:-"ubuntu"} + local guest_release=${2:-"bionic"} + local dev_mode=${3:-"true"} + local guest_username=${4:-"ubuntu"} + local output_dir=${5:-"$HOME/images"} - if [ -z "${datastore_type}" ]; then - exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" - exit 1 - fi + name=trove-guest-${guest_os}-${guest_release} + glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image list --name $name -f value -c ID) + if [[ -z ${glance_imageid} ]]; then + mkdir -p ${output_dir} + output=${output_dir}/${name} + cmd_build_image ${guest_os} ${guest_release} ${dev_mode} ${guest_username} ${output} - image_var="${datastore_type^^}_IMAGE_ID" - glance_imageid=`eval echo '$'"$image_var"` - - if [[ -z $glance_imageid ]]; then - # Find the first image id with the name contains datastore_type. 
- glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}') - - if [[ -z $glance_imageid ]]; then - mkdir -p ${output_dir} - name=trove-datastore-${guest_os}-${guest_release}-${datastore_type} - output=${output_dir}/$name.qcow2 - cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} $output - - glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create $name --public --disk-format qcow2 --container-format bare --file $output --property hw_rng_model='virtio' -c id -f value) - [[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1 - fi + glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create ${name} --public --disk-format qcow2 --container-format bare --file ${output} --property hw_rng_model='virtio' --tag trove -c id -f value) + [[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1 fi exclaim "Using Glance image ID: $glance_imageid" exclaim "Updating Datastores" - cmd_set_datastore "${glance_imageid}" "${datastore_type}" + cmd_set_datastore "${glance_imageid}" } @@ -991,11 +936,11 @@ function cmd_stop() { function cmd_int_tests() { exclaim "Running Trove Integration Tests..." - if [ ! $USAGE_ENDPOINT ]; then + if [[ ! $USAGE_ENDPOINT ]]; then export USAGE_ENDPOINT=trove.tests.util.usage.FakeVerifier fi cd $TROVESTACK_SCRIPTS - if [ $# -lt 1 ]; then + if [[ $# -lt 1 ]]; then args="--group=mysql" else args="$@" @@ -1203,7 +1148,7 @@ function cmd_kick_start() { local DATASTORE_TYPE=$1 local DATASTORE_VERSION=$2 - if [ -z "${DATASTORE_TYPE}" ]; then + if [[ -z "${DATASTORE_TYPE}" ]]; then exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exit 1 fi @@ -1220,10 +1165,13 @@ function cmd_kick_start() { function cmd_gate_tests() { local DATASTORE_TYPE=${1:-'mysql'} local TEST_GROUP=${2:-${DATASTORE_TYPE}} - local DATASTORE_VERSION=${3:-'5.7'} + local DATASTORE_VERSION=${3:-'5.7.29'} local HOST_SCP_USERNAME=${4:-$(whoami)} local GUEST_USERNAME=${5:-'ubuntu'} + export DATASTORE_TYPE=${DATASTORE_TYPE} + export DATASTORE_VERSION=${DATASTORE_VERSION} + exclaim "Running cmd_gate_tests ..." export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/} @@ -1238,7 +1186,7 @@ function cmd_gate_tests() { cd $TROVESTACK_SCRIPTS # Build and upload guest image, register datastore version. 
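# A hedged usage sketch of the reworked flow, assuming the trovestack
# subcommands map onto the cmd_* functions the way the image-build playbook's
# "./trovestack build-image" call suggests; the datastore is no longer a
# build-image argument, and the type/version are read from the environment
# when the image is registered:
#
#   export DATASTORE_TYPE=mysql
#   export DATASTORE_VERSION=5.7.29
#   ./trovestack build-image ubuntu bionic true ubuntu ${HOME}/images/trove-guest-ubuntu-bionic-dev.qcow2
#   ./trovestack gate-tests mysql mysql 5.7.29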
- cmd_build_and_upload_image ${DATASTORE_TYPE} + cmd_build_and_upload_image cmd_kick_start "${DATASTORE_TYPE}" "${DATASTORE_VERSION}" diff --git a/lower-constraints.txt b/lower-constraints.txt index 81b58e1ece..dd8c339ccc 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -25,6 +25,7 @@ deprecation==2.0 diskimage-builder==1.1.2 doc8==0.6.0 docutils==0.14 +docker==4.2.0 dogpile.cache==0.6.5 dulwich==0.19.0 enum34===1.0.4 diff --git a/playbooks/image-build/run.yaml b/playbooks/image-build/run.yaml index 75d57edce5..76763ebd23 100644 --- a/playbooks/image-build/run.yaml +++ b/playbooks/image-build/run.yaml @@ -7,12 +7,11 @@ - name: Build Trove guest image shell: >- ./trovestack build-image \ - {{ datastore_type }} \ {{ guest_os }} \ {{ guest_os_release }} \ {{ dev_mode }} \ {{ guest_username }} \ - {{ ansible_user_dir }}/images/trove-{{ branch }}-{{ datastore_type }}-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }} + {{ ansible_user_dir }}/images/trove-{{ branch }}-guest-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }}.qcow2 args: chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/trove/integration/scripts" tags: diff --git a/requirements.txt b/requirements.txt index c53d8f7ccc..902a0ce14b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -48,3 +48,4 @@ xmltodict>=0.10.1 # MIT cryptography>=2.1.4 # BSD/Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 # Apache-2.0 +docker>=4.2.0 # Apache-2.0 diff --git a/roles/trove-devstack/defaults/main.yml b/roles/trove-devstack/defaults/main.yml index 5280e12571..a0e4f5eed3 100644 --- a/roles/trove-devstack/defaults/main.yml +++ b/roles/trove-devstack/defaults/main.yml @@ -1,5 +1,5 @@ devstack_base_dir: /opt/stack trove_test_datastore: 'mysql' trove_test_group: 'mysql' -trove_test_datastore_version: '5.7' +trove_test_datastore_version: '5.7.29' trove_resize_time_out: '' diff --git a/tools/trove-pylint.config b/tools/trove-pylint.config index 7a773950f8..85571893be 100644 --- a/tools/trove-pylint.config +++ b/tools/trove-pylint.config @@ -1497,6 +1497,12 @@ "Instance of 'FreshInstance' has no 'get_replication_master_snapshot' member", "Manager._create_replication_slave" ], + [ + "trove/taskmanager/manager.py", + "E1136", + "Value 'snapshot' is unsubscriptable", + "Manager._create_replication_slave" + ], [ "trove/taskmanager/manager.py", "E1101", diff --git a/tox.ini b/tox.ini index f808205569..20b5aa43a7 100644 --- a/tox.ini +++ b/tox.ini @@ -53,9 +53,7 @@ ignore-path = .venv,.tox,.git,dist,doc,*egg-info,tools,etc,build,*.po,*.pot,inte [flake8] show-source = True -# H301 is ignored on purpose. -# The rest of the ignores are TODOs. 
-ignore = E402,E731,F601,F821,H301,H404,H405,H501,W503,W504,W605 +ignore = E125,E129,E402,E731,F601,F821,H301,H306,H404,H405,H501,W503,W504,W605 enable-extensions = H203,H106 builtins = _ # add *.yaml for playbooks/trove-devstack-base.yaml, as it will be matched by @@ -68,7 +66,7 @@ import_exceptions = trove.common.i18n [flake8:local-plugins] extension = - T103= checks:check_raised_localized_exceptions + # T103= checks:check_raised_localized_exceptions T104 = checks:check_no_basestring T105 = checks:no_translate_logs N335 = checks:assert_raises_regexp diff --git a/trove/backup/models.py b/trove/backup/models.py index 993d00388a..38c847f633 100644 --- a/trove/backup/models.py +++ b/trove/backup/models.py @@ -268,7 +268,7 @@ class Backup(object): try: cls.delete(context, child.id) except exception.NotFound: - LOG.exception("Backup %s cannot be found.", backup_id) + LOG.warning("Backup %s cannot be found.", backup_id) def _delete_resources(): backup = cls.get_by_id(context, backup_id) diff --git a/trove/cmd/guest.py b/trove/cmd/guest.py index 6e12db5754..ff86e4f30d 100644 --- a/trove/cmd/guest.py +++ b/trove/cmd/guest.py @@ -12,7 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - import sys from oslo_config import cfg as openstack_cfg @@ -23,6 +22,7 @@ from trove.common import cfg from trove.common import debug_utils from trove.common.i18n import _ from trove.guestagent import api as guest_api +from trove.guestagent.common import operating_system CONF = cfg.CONF # The guest_id opt definition must match the one in common/cfg.py @@ -31,9 +31,18 @@ CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None, openstack_cfg.StrOpt('instance_rpc_encr_key', help=('Key (OpenSSL aes_cbc) for ' 'instance RPC encryption.'))]) +LOG = logging.getLogger(__name__) def main(): + log_levels = [ + 'docker=WARN', + ] + default_log_levels = logging.get_default_log_levels() + default_log_levels.extend(log_levels) + logging.set_defaults(default_log_levels=default_log_levels) + logging.register_options(CONF) + cfg.parse_args(sys.argv) logging.setup(CONF, None) debug_utils.setup() @@ -50,6 +59,11 @@ def main(): "was not injected into the guest or not read by guestagent")) raise RuntimeError(msg) + # Create user and group for running docker container. + LOG.info('Creating user and group for database service') + uid = cfg.get_configuration_property('database_service_uid') + operating_system.create_user('database', uid) + # rpc module must be loaded after decision about thread monkeypatching # because if thread module is not monkeypatched we can't use eventlet # executor from oslo_messaging library. 
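With the guest.py change above, the agent now provisions the account that the
containerized database runs under before any RPC handling starts. A rough
shell equivalent of operating_system.create_user('database', uid) is sketched
below (an illustration only: the 'database' name is hard-coded in main(), 1001
is merely the default of the database_service_uid option introduced below, and
the exact groupadd/useradd flags are an assumption):

    # create the group and user the database container is expected to run as
    groupadd --gid 1001 database
    useradd --uid 1001 --gid 1001 database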
diff --git a/trove/common/cfg.py b/trove/common/cfg.py index becfe393a2..e1f233b611 100644 --- a/trove/common/cfg.py +++ b/trove/common/cfg.py @@ -175,10 +175,10 @@ common_opts = [ help="Maximum time (in seconds) to wait for Guest Agent " "'quick' requests (such as retrieving a list of " "users or databases)."), - cfg.IntOpt('agent_call_high_timeout', default=60 * 5, + cfg.IntOpt('agent_call_high_timeout', default=60 * 3, help="Maximum time (in seconds) to wait for Guest Agent 'slow' " "requests (such as restarting the database)."), - cfg.IntOpt('agent_replication_snapshot_timeout', default=36000, + cfg.IntOpt('agent_replication_snapshot_timeout', default=60 * 30, help='Maximum time (in seconds) to wait for taking a Guest ' 'Agent replication snapshot.'), cfg.IntOpt('command_process_timeout', default=30, @@ -186,8 +186,9 @@ common_opts = [ 'commands to complete.'), # The guest_id opt definition must match the one in cmd/guest.py cfg.StrOpt('guest_id', default=None, help="ID of the Guest Instance."), - cfg.IntOpt('state_change_wait_time', default=60 * 10, - help='Maximum time (in seconds) to wait for a state change.'), + cfg.IntOpt('state_change_wait_time', default=180, + help='Maximum time (in seconds) to wait for database state ' + 'change.'), cfg.IntOpt('state_change_poll_time', default=3, help='Interval between state change poll requests (seconds).'), cfg.IntOpt('agent_heartbeat_time', default=10, @@ -293,9 +294,11 @@ common_opts = [ help='The region this service is located.'), cfg.StrOpt('backup_runner', default='trove.guestagent.backup.backup_types.InnoBackupEx', - help='Runner to use for backups.'), + help='Runner to use for backups.', + deprecated_for_removal=True), cfg.DictOpt('backup_runner_options', default={}, - help='Additional options to be passed to the backup runner.'), + help='Additional options to be passed to the backup runner.', + deprecated_for_removal=True), cfg.BoolOpt('verify_swift_checksum_on_restore', default=True, help='Enable verification of Swift checksum before starting ' 'restore. 
Makes sure the checksum of original backup matches ' @@ -304,11 +307,12 @@ common_opts = [ help='Require the replica volume size to be greater than ' 'or equal to the size of the master volume ' 'during replica creation.'), - cfg.StrOpt('storage_strategy', default='SwiftStorage', + cfg.StrOpt('storage_strategy', default='swift', help="Default strategy to store backups."), cfg.StrOpt('storage_namespace', default='trove.common.strategies.storage.swift', - help='Namespace to load the default storage strategy from.'), + help='Namespace to load the default storage strategy from.', + deprecated_for_removal=True), cfg.StrOpt('backup_swift_container', default='database_backups', help='Swift container to put backups in.'), cfg.BoolOpt('backup_use_gzip_compression', default=True, @@ -429,15 +433,12 @@ common_opts = [ cfg.IntOpt('usage_timeout', default=60 * 30, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), - cfg.IntOpt('restore_usage_timeout', default=36000, + cfg.IntOpt('restore_usage_timeout', default=60 * 60, help='Maximum time (in seconds) to wait for a Guest instance ' 'restored from a backup to become active.'), cfg.IntOpt('cluster_usage_timeout', default=36000, help='Maximum time (in seconds) to wait for a cluster to ' 'become active.'), - cfg.IntOpt('timeout_wait_for_service', default=120, - help='Maximum time (in seconds) to wait for a service to ' - 'become alive.'), cfg.StrOpt('module_aes_cbc_key', default='module_aes_cbc_key', help='OpenSSL aes_cbc key for module encryption.'), cfg.ListOpt('module_types', default=['ping', 'new_relic_license'], @@ -466,6 +467,10 @@ common_opts = [ help='Key (OpenSSL aes_cbc) to encrypt instance keys in DB.'), cfg.StrOpt('instance_rpc_encr_key', help='Key (OpenSSL aes_cbc) for instance RPC encryption.'), + cfg.StrOpt('database_service_uid', default='1001', + help='The UID(GID) of database service user.'), + cfg.StrOpt('backup_docker_image', default='openstacktrove/db-backup:1.0.0', + help='The docker image used for backup and restore.'), ] @@ -544,7 +549,7 @@ mysql_opts = [ help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), - cfg.StrOpt('backup_strategy', default='InnoBackupEx', + cfg.StrOpt('backup_strategy', default='innobackupex', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), @@ -564,28 +569,10 @@ mysql_opts = [ cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.mysql_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.mysql_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.DictOpt('backup_incremental_strategy', - default={'InnoBackupEx': 'InnoBackupExIncremental'}, - help='Incremental Backup Runner based on the default ' - 'strategy. 
For strategies that do not implement an ' - 'incremental backup, the runner will use the default full ' - 'backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mysql.'), @@ -611,6 +598,10 @@ mysql_opts = [ help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), + cfg.StrOpt( + 'docker_image', default='mysql', + help='Database docker image.' + ) ] # Percona @@ -653,28 +644,10 @@ percona_opts = [ cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.mysql_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.mysql_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.DictOpt('backup_incremental_strategy', - default={'InnoBackupEx': 'InnoBackupExIncremental'}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental backup, the runner will use the default full ' - 'backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for percona.'), @@ -739,22 +712,10 @@ pxc_opts = [ cfg.IntOpt('usage_timeout', default=450, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.mysql_impl', - help='Namespace to load backup strategies from.'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.mysql_impl', - help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.DictOpt('backup_incremental_strategy', - default={'InnoBackupEx': 'InnoBackupExIncremental'}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental backup, the runner will use the default full ' - 'backup.'), cfg.ListOpt('ignore_users', default=['os_admin', 'root', 'clusterrepuser'], help='Users to exclude when listing users.'), cfg.ListOpt('ignore_dbs', @@ -818,12 +779,6 @@ redis_opts = [ help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. 
For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='RedisSyncReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', @@ -837,18 +792,6 @@ redis_opts = [ help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.StrOpt('backup_namespace', - default="trove.guestagent.strategies.backup.experimental." - "redis_impl", - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default="trove.guestagent.strategies.restore.experimental." - "redis_impl", - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.BoolOpt('cluster_support', default=True, help='Enable clusters to be created and managed.'), cfg.StrOpt('api_strategy', @@ -893,12 +836,6 @@ cassandra_opts = [ help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental strategy based on the default backup ' - 'strategy. For strategies that do not implement incremental ' - 'backups, the runner performs full backup instead.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('backup_strategy', default="NodetoolSnapshot", help='Default strategy to perform backups.', deprecated_name='backup_strategy', @@ -912,18 +849,6 @@ cassandra_opts = [ help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.StrOpt('backup_namespace', - default="trove.guestagent.strategies.backup.experimental." - "cassandra_impl", - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default="trove.guestagent.strategies.restore.experimental." - "cassandra_impl", - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for Cassandra.'), @@ -1002,12 +927,6 @@ couchbase_opts = [ help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/couchbase', @@ -1018,18 +937,6 @@ couchbase_opts = [ 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.experimental.' 
- 'couchbase_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.experimental.' - 'couchbase_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', @@ -1066,12 +973,6 @@ mongodb_opts = [ help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/mongodb', @@ -1109,18 +1010,6 @@ mongodb_opts = [ 'mongodb.guestagent.MongoDbGuestAgentStrategy', help='Class that implements datastore-specific Guest Agent API ' 'logic.'), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.experimental.' - 'mongo_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.experimental.' - 'mongo_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.PortOpt('mongodb_port', default=27017, help='Port for mongod and mongos instances.'), cfg.PortOpt('configsvr_port', default=27019, @@ -1164,11 +1053,6 @@ postgresql_opts = [ help='The TCP port the server listens on.'), cfg.StrOpt('backup_strategy', default='PgBaseBackup', help='Default strategy to perform backups.'), - cfg.DictOpt('backup_incremental_strategy', - default={'PgBaseBackup': 'PgBaseBackupIncremental'}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.'), cfg.StrOpt('replication_strategy', default='PostgresqlReplicationStreaming', help='Default strategy for replication.'), @@ -1188,14 +1072,6 @@ postgresql_opts = [ 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.experimental.' - 'postgresql_impl', - help='Namespace to load backup strategies from.'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.experimental.' 
- 'postgresql_impl', - help='Namespace to load restore strategies from.'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb'), @@ -1248,16 +1124,6 @@ couchdb_opts = [ help='Default strategy to perform backups.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), - cfg.StrOpt('backup_namespace', default='trove.guestagent.strategies' - '.backup.experimental.couchdb_impl', - help='Namespace to load backup strategies from.'), - cfg.StrOpt('restore_namespace', default='trove.guestagent.strategies' - '.restore.experimental.couchdb_impl', - help='Namespace to load restore strategies from.'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.'), cfg.BoolOpt('root_on_create', default=False, help='Enable the automatic creation of the root user for the ' 'service during instance-create. The generated password for ' @@ -1303,10 +1169,6 @@ vertica_opts = [ 'if trove_security_groups_support is True).'), cfg.StrOpt('backup_strategy', default=None, help='Default strategy to perform backups.'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.'), cfg.StrOpt('replication_strategy', default=None, help='Default strategy for replication.'), cfg.StrOpt('mount_point', default='/var/lib/vertica', @@ -1317,9 +1179,11 @@ vertica_opts = [ cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), cfg.StrOpt('backup_namespace', default=None, - help='Namespace to load backup strategies from.'), + help='Namespace to load backup strategies from.', + deprecated_for_removal=True), cfg.StrOpt('restore_namespace', default=None, - help='Namespace to load restore strategies from.'), + help='Namespace to load restore strategies from.', + deprecated_for_removal=True), cfg.IntOpt('readahead_size', default=2048, help='Size(MB) to be set as readahead_size for data volume'), cfg.BoolOpt('cluster_support', default=True, @@ -1387,22 +1251,6 @@ db2_opts = [ 'service during instance-create. The generated password for ' 'the root user is immediately returned in the response of ' "instance-create as the 'password' field."), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.experimental.' - 'db2_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.experimental.' - 'db2_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), - cfg.DictOpt('backup_incremental_strategy', default={}, - help='Incremental Backup Runner based on the default ' - 'strategy. 
For strategies that do not implement an ' - 'incremental, the runner will use the default full backup.'), cfg.ListOpt('ignore_users', default=['PUBLIC', 'DB2INST1']), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', @@ -1432,21 +1280,14 @@ mariadb_opts = [ help='List of UDP ports and/or port ranges to open ' 'in the security group (only applicable ' 'if trove_security_groups_support is True).'), - cfg.StrOpt('backup_namespace', - default='trove.guestagent.strategies.backup.experimental' - '.mariadb_impl', - help='Namespace to load backup strategies from.', - deprecated_name='backup_namespace', - deprecated_group='DEFAULT'), - cfg.StrOpt('backup_strategy', default='MariaBackup', + cfg.StrOpt('backup_strategy', default='mariabackup', help='Default strategy to perform backups.', deprecated_name='backup_strategy', deprecated_group='DEFAULT'), cfg.StrOpt('replication_strategy', default='MariaDBGTIDReplication', help='Default strategy for replication.'), cfg.StrOpt('replication_namespace', - default='trove.guestagent.strategies.replication.experimental' - '.mariadb_gtid', + default='trove.guestagent.strategies.replication.mariadb_gtid', help='Namespace to load replication strategies from.'), cfg.StrOpt('mount_point', default='/var/lib/mysql', help="Filesystem path for mounting " @@ -1459,25 +1300,10 @@ mariadb_opts = [ cfg.IntOpt('usage_timeout', default=400, help='Maximum time (in seconds) to wait for a Guest to become ' 'active.'), - cfg.StrOpt('restore_namespace', - default='trove.guestagent.strategies.restore.experimental' - '.mariadb_impl', - help='Namespace to load restore strategies from.', - deprecated_name='restore_namespace', - deprecated_group='DEFAULT'), cfg.BoolOpt('volume_support', default=True, help='Whether to provision a Cinder volume for datadir.'), cfg.StrOpt('device_path', default='/dev/vdb', help='Device path for volume if volume support is enabled.'), - cfg.DictOpt('backup_incremental_strategy', - default={'MariaBackup': - 'MariaBackupIncremental'}, - help='Incremental Backup Runner based on the default ' - 'strategy. For strategies that do not implement an ' - 'incremental backup, the runner will use the default full ' - 'backup.', - deprecated_name='backup_incremental_strategy', - deprecated_group='DEFAULT'), cfg.StrOpt('root_controller', default='trove.extensions.common.service.DefaultRootController', help='Root controller implementation for mysql.'), @@ -1521,6 +1347,10 @@ mariadb_opts = [ help='Character length of generated passwords.', deprecated_name='default_password_length', deprecated_group='DEFAULT'), + cfg.StrOpt( + 'docker_image', default='mariadb', + help='Database docker image.' 
+    )
 ]

 # RPC version groups

diff --git a/trove/common/exception.py b/trove/common/exception.py
index b7cc4fc8d4..1f25c9693d 100644
--- a/trove/common/exception.py
+++ b/trove/common/exception.py
@@ -307,7 +307,7 @@ class VolumeAttachmentsNotFound(NotFound):
 class VolumeCreationFailure(TroveError):
-    message = _("Failed to create a volume in Nova.")
+    message = _("Failed to create volume.")
 class VolumeSizeNotSpecified(BadRequest):
@@ -341,6 +341,16 @@ class ReplicationSlaveAttachError(TroveError):
     message = _("Exception encountered attaching slave to new replica source.")
+class SlaveOperationNotSupported(TroveError):
+    message = _("The '%(operation)s' operation is not supported for slaves in "
+                "replication.")
+
+
+class UnableToDetermineLastMasterGTID(TroveError):
+    message = _("Unable to determine last GTID executed on master "
+                "(from file %(binlog_file)s).")
+
+
 class TaskManagerError(TroveError):
     message = _("An error occurred communicating with the task manager: "
@@ -688,9 +698,3 @@ class LogAccessForbidden(Forbidden):
 class LogsNotAvailable(Forbidden):
     message = _("Log actions are not supported.")
-
-
-class SlaveOperationNotSupported(TroveError):
-
-    message = _("The '%(operation)s' operation is not supported for slaves in "
-                "replication.")
diff --git a/trove/common/schemas/atom-link.rng b/trove/common/schemas/atom-link.rng
deleted file mode 100644
index edba5eee6c..0000000000
--- a/trove/common/schemas/atom-link.rng
+++ /dev/null
@@ -1,141 +0,0 @@
[141 removed lines of RELAX NG (XML) schema; the markup did not survive text extraction]
diff --git a/trove/common/schemas/atom.rng b/trove/common/schemas/atom.rng
deleted file mode 100644
index c2df4e4101..0000000000
--- a/trove/common/schemas/atom.rng
+++ /dev/null
@@ -1,597 +0,0 @@
[597 removed lines of RELAX NG (XML) schema; the markup did not survive text extraction]
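(Looking back at the trove/common/exception.py hunk above: the relocated SlaveOperationNotSupported and the new UnableToDetermineLastMasterGTID both follow the usual TroveError pattern, where the message template is interpolated from keyword arguments at raise time. A minimal sketch; the raise sites below are invented for illustration and are not part of this patch.)

# Illustrative raise sites; the keyword arguments must match the %(...)s
# placeholders in each message template.
from trove.common import exception

def reject_slave_operation(operation):
    raise exception.SlaveOperationNotSupported(operation=operation)

def gtid_lookup_failed(binlog_file):
    raise exception.UnableToDetermineLastMasterGTID(binlog_file=binlog_file)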
diff --git a/trove/common/schemas/v1.1/limits.rng b/trove/common/schemas/v1.1/limits.rng
deleted file mode 100644
index a66af4b9c4..0000000000
--- a/trove/common/schemas/v1.1/limits.rng
+++ /dev/null
@@ -1,28 +0,0 @@
[28 removed lines of RELAX NG (XML) schema; the markup did not survive text extraction]
diff --git a/trove/common/utils.py b/trove/common/utils.py
index 999c149197..25664877ce 100644
--- a/trove/common/utils.py
+++ b/trove/common/utils.py
@@ -185,7 +185,7 @@ class MethodInspector(object):
 def build_polling_task(retriever, condition=lambda value: value,
-                       sleep_time=1, time_out=0):
+                       sleep_time=1, time_out=0, initial_delay=0):
     """Run a function in a loop with backoff on error.
     The condition function runs based on the retriever function result.
@@ -197,7 +197,8 @@ def build_polling_task(retriever, condition=lambda value: value,
         raise loopingcall.LoopingCallDone(retvalue=obj)
     call = loopingcall.BackOffLoopingCall(f=poll_and_check)
-    return call.start(initial_delay=0, starting_interval=sleep_time,
+    return call.start(initial_delay=initial_delay,
+                      starting_interval=sleep_time,
                       max_interval=30, timeout=time_out)
@@ -210,7 +211,7 @@ def wait_for_task(polling_task):
 def poll_until(retriever, condition=lambda value: value,
-               sleep_time=3, time_out=0):
+               sleep_time=3, time_out=0, initial_delay=0):
     """Retrieves object until it passes condition, then returns it.
     If time_out is passed in, PollTimeOut will be raised once that
@@ -218,7 +219,8 @@
     amount of time is exceeded.
     """
     task = build_polling_task(retriever, condition=condition,
-                              sleep_time=sleep_time, time_out=time_out)
+                              sleep_time=sleep_time, time_out=time_out,
+                              initial_delay=initial_delay)
     return wait_for_task(task)
diff --git a/trove/configuration/service.py b/trove/configuration/service.py
index e9d660bd70..18ec05c3c2 100644
--- a/trove/configuration/service.py
+++ b/trove/configuration/service.py
@@ -218,8 +218,6 @@ class ConfigurationsController(wsgi.Controller):
     def _refresh_on_all_instances(self, context, configuration_id):
         """Refresh a configuration group on all single instances.
""" - LOG.debug("Re-applying configuration group '%s' to all instances.", - configuration_id) single_instances = instances_models.DBInstance.find_all( tenant_id=context.project_id, configuration_id=configuration_id, @@ -228,8 +226,8 @@ class ConfigurationsController(wsgi.Controller): config = models.Configuration(context, configuration_id) for dbinstance in single_instances: - LOG.debug("Re-applying configuration to instance: %s", - dbinstance.id) + LOG.info("Re-applying configuration %s to instance: %s", + configuration_id, dbinstance.id) instance = instances_models.Instance.load(context, dbinstance.id) instance.update_configuration(config) diff --git a/trove/guestagent/api.py b/trove/guestagent/api.py index b199ce985a..27e626e0cb 100644 --- a/trove/guestagent/api.py +++ b/trove/guestagent/api.py @@ -314,7 +314,7 @@ class API(object): device_path='/dev/vdb', mount_point='/mnt/volume', backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, - modules=None): + modules=None, ds_version=None): """Make an asynchronous call to prepare the guest as a database container optionally includes a backup id for restores """ @@ -335,7 +335,8 @@ class API(object): device_path=device_path, mount_point=mount_point, backup_info=backup_info, config_contents=config_contents, root_password=root_password, overrides=overrides, - cluster_config=cluster_config, snapshot=snapshot, modules=modules) + cluster_config=cluster_config, snapshot=snapshot, modules=modules, + ds_version=ds_version) def _create_guest_queue(self): """Call to construct, start and immediately stop rpc server in order @@ -409,15 +410,14 @@ class API(object): self._call("reset_configuration", self.agent_high_timeout, version=version, configuration=configuration) - def stop_db(self, do_not_start_on_reboot=False): + def stop_db(self): """Stop the database server.""" LOG.debug("Sending the call to stop the database process " "on the Guest.") version = self.API_BASE_VERSION - self._call("stop_db", self.agent_high_timeout, - version=version, - do_not_start_on_reboot=do_not_start_on_reboot) + self._call("stop_db", self.agent_low_timeout, + version=version) def upgrade(self, instance_version, location, metadata=None): """Make an asynchronous call to self upgrade the guest agent.""" diff --git a/trove/guestagent/backup/__init__.py b/trove/guestagent/backup/__init__.py deleted file mode 100644 index 810691f61a..0000000000 --- a/trove/guestagent/backup/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Rackspace Hosting -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from trove.guestagent.backup.backupagent import BackupAgent - -AGENT = BackupAgent() - - -def backup(context, backup_info): - """ - Main entry point for starting a backup based on the given backup id. 
This - will create a backup for this DB instance and will then store the backup - in a configured repository (e.g. Swift) - - :param context: the context token which contains the users details - :param backup_id: the id of the persisted backup object - """ - return AGENT.execute_backup(context, backup_info) - - -def restore(context, backup_info, restore_location): - """ - Main entry point for restoring a backup based on the given backup id. This - will transfer backup data to this instance an will carry out the - appropriate restore procedure (eg. mysqldump) - - :param context: the context token which contains the users details - :param backup_id: the id of the persisted backup object - """ - return AGENT.execute_restore(context, backup_info, restore_location) diff --git a/trove/guestagent/backup/backupagent.py b/trove/guestagent/backup/backupagent.py deleted file mode 100644 index 1fd7342a8e..0000000000 --- a/trove/guestagent/backup/backupagent.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging -from oslo_utils import timeutils - -from trove.backup.state import BackupState -from trove.common import cfg -from trove.common.i18n import _ -from trove.common.strategies.storage import get_storage_strategy -from trove.conductor import api as conductor_api -from trove.guestagent.dbaas import get_filesystem_volume_stats -from trove.guestagent.strategies.backup.base import BackupError -from trove.guestagent.strategies.backup.base import UnknownBackupType -from trove.guestagent.strategies.backup import get_backup_strategy -from trove.guestagent.strategies.restore import get_restore_strategy - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -CONFIG_MANAGER = CONF.get('mysql' - if not CONF.datastore_manager - else CONF.datastore_manager) - -STRATEGY = CONFIG_MANAGER.backup_strategy -BACKUP_NAMESPACE = CONFIG_MANAGER.backup_namespace -RESTORE_NAMESPACE = CONFIG_MANAGER.restore_namespace -RUNNER = get_backup_strategy(STRATEGY, BACKUP_NAMESPACE) -EXTRA_OPTS = CONF.backup_runner_options.get(STRATEGY, '') - -# Try to get the incremental strategy or return the default 'backup_strategy' -INCREMENTAL = CONFIG_MANAGER.backup_incremental_strategy.get( - STRATEGY, STRATEGY) - -INCREMENTAL_RUNNER = get_backup_strategy(INCREMENTAL, BACKUP_NAMESPACE) - - -class BackupAgent(object): - def _get_restore_runner(self, backup_type): - """Returns the RestoreRunner associated with this backup type.""" - try: - runner = get_restore_strategy(backup_type, RESTORE_NAMESPACE) - except ImportError: - raise UnknownBackupType(_("Unknown Backup type: %(type)s in " - "namespace %(ns)s") - % {"type": backup_type, - "ns": RESTORE_NAMESPACE}) - return runner - - def stream_backup_to_storage(self, context, backup_info, runner, storage, - parent_metadata={}, extra_opts=EXTRA_OPTS): - backup_id = backup_info['id'] - conductor = conductor_api.API(context) - - # Store the size of the 
filesystem before the backup. - mount_point = CONFIG_MANAGER.mount_point - stats = get_filesystem_volume_stats(mount_point) - backup_state = { - 'backup_id': backup_id, - 'size': stats.get('used', 0.0), - 'state': BackupState.BUILDING, - } - conductor.update_backup(CONF.guest_id, - sent=timeutils.utcnow_ts(microsecond=True), - **backup_state) - LOG.debug("Updated state for %s to %s.", backup_id, backup_state) - - try: - with runner(filename=backup_id, extra_opts=extra_opts, - **parent_metadata) as bkup: - LOG.debug("Starting backup %s.", backup_id) - meta = {} - meta['datastore'] = backup_info['datastore'] - meta['datastore_version'] = backup_info['datastore_version'] - success, note, checksum, location = storage.save( - bkup.manifest, - bkup, - metadata=meta) - - backup_state.update({ - 'checksum': checksum, - 'location': location, - 'note': note, - 'success': success, - 'backup_type': bkup.backup_type, - }) - - LOG.debug("Backup %(backup_id)s completed status: " - "%(success)s.", backup_state) - LOG.debug("Backup %(backup_id)s file swift checksum: " - "%(checksum)s.", backup_state) - LOG.debug("Backup %(backup_id)s location: " - "%(location)s.", backup_state) - - if not success: - raise BackupError(note) - - backup_state.update({'state': BackupState.COMPLETED}) - - return meta - - except Exception: - LOG.exception( - "Error saving backup: %(backup_id)s.", backup_state) - backup_state.update({'state': BackupState.FAILED}) - raise - finally: - LOG.info("Completed backup %(backup_id)s.", backup_state) - conductor.update_backup(CONF.guest_id, - sent=timeutils.utcnow_ts( - microsecond=True), - **backup_state) - LOG.info("Updated state for %s to %s.", backup_id, backup_state) - - def execute_backup(self, context, backup_info, - runner=RUNNER, extra_opts=EXTRA_OPTS, - incremental_runner=INCREMENTAL_RUNNER): - - LOG.info("Running backup %(id)s.", backup_info) - storage = get_storage_strategy( - CONF.storage_strategy, - CONF.storage_namespace)(context) - - # Check if this is an incremental backup and grab the parent metadata - parent_metadata = {} - if backup_info.get('parent'): - runner = incremental_runner - LOG.debug("Using incremental backup runner: %s.", runner.__name__) - parent = backup_info['parent'] - parent_metadata = storage.load_metadata(parent['location'], - parent['checksum']) - # The parent could be another incremental backup so we need to - # reset the location and checksum to *this* parents info - parent_metadata.update({ - 'parent_location': parent['location'], - 'parent_checksum': parent['checksum'] - }) - - self.stream_backup_to_storage(context, backup_info, runner, storage, - parent_metadata, extra_opts) - - def execute_restore(self, context, backup_info, restore_location): - try: - restore_runner = self._get_restore_runner(backup_info['type']) - storage = get_storage_strategy( - CONF.storage_strategy, - CONF.storage_namespace)(context) - - runner = restore_runner(storage, location=backup_info['location'], - checksum=backup_info['checksum'], - restore_location=restore_location) - backup_info['restore_location'] = restore_location - - LOG.info("Restoring instance from backup %(id)s to " - "%(restore_location)s", backup_info) - - content_size = runner.restore() - LOG.info("Restore from backup %(id)s completed successfully " - "to %(restore_location)s", backup_info) - LOG.debug("Restore size: %s", content_size) - except Exception: - LOG.exception("Error restoring backup %(id)s", backup_info) - raise - else: - LOG.debug("Restored backup %(id)s", backup_info) diff --git 
a/trove/guestagent/common/configuration.py b/trove/guestagent/common/configuration.py index 0ee8c1be25..1ce15c0b14 100644 --- a/trove/guestagent/common/configuration.py +++ b/trove/guestagent/common/configuration.py @@ -16,12 +16,16 @@ import abc import os import re + +from oslo_log import log as logging import six from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode +LOG = logging.getLogger(__name__) + class ConfigurationManager(object): """ @@ -76,6 +80,11 @@ class ConfigurationManager(object): implementation changes in the future. :type override_strategy ConfigurationOverrideStrategy """ + base_config_dir = os.path.dirname(base_config_path) + operating_system.create_directory( + base_config_dir, user=owner, group=group, force=True, as_root=True + ) + self._base_config_path = base_config_path self._owner = owner self._group = group @@ -111,9 +120,13 @@ class ConfigurationManager(object): :returns: Configuration file as a Python dict. """ - base_options = operating_system.read_file( - self._base_config_path, codec=self._codec, - as_root=self._requires_root) + try: + base_options = operating_system.read_file( + self._base_config_path, codec=self._codec, + as_root=self._requires_root) + except Exception: + LOG.warning('File %s not found', self._base_config_path) + return None updates = self._override_strategy.parse_updates() guestagent_utils.update_dict(updates, base_options) @@ -124,8 +137,8 @@ class ConfigurationManager(object): """Write given contents to the base configuration file. Remove all existing overrides (both system and user). - :param contents Contents of the configuration file. - :type contents string or dict + :param options Contents of the configuration file. + :type options string or dict """ if isinstance(options, dict): # Serialize a dict of options for writing. diff --git a/trove/guestagent/common/guestagent_utils.py b/trove/guestagent/common/guestagent_utils.py index 6945be7283..c5c6b7e8e8 100644 --- a/trove/guestagent/common/guestagent_utils.py +++ b/trove/guestagent/common/guestagent_utils.py @@ -20,6 +20,7 @@ import re import six from trove.common import pagination +from trove.common import utils def update_dict(updates, target): @@ -140,3 +141,26 @@ def serialize_list(li, limit=None, marker=None, include_marker=False): page, next_name = paginate_list(li, limit=limit, marker=marker, include_marker=include_marker) return [item.serialize() for item in page], next_name + + +def get_filesystem_volume_stats(fs_path): + try: + stats = os.statvfs(fs_path) + except OSError: + raise RuntimeError("Filesystem not found (%s)" % fs_path) + + total = stats.f_blocks * stats.f_bsize + free = stats.f_bfree * stats.f_bsize + # return the size in GB + used_gb = utils.to_gb(total - free) + total_gb = utils.to_gb(total) + + output = { + 'block_size': stats.f_bsize, + 'total_blocks': stats.f_blocks, + 'free_blocks': stats.f_bfree, + 'total': total_gb, + 'free': free, + 'used': used_gb + } + return output diff --git a/trove/guestagent/common/operating_system.py b/trove/guestagent/common/operating_system.py index 6142168f7b..1e53c7464f 100644 --- a/trove/guestagent/common/operating_system.py +++ b/trove/guestagent/common/operating_system.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from functools import reduce import inspect import operator import os @@ -21,13 +22,12 @@ import re import stat import tempfile -from functools import reduce from oslo_concurrency.processutils import UnknownArgumentError from trove.common import exception +from trove.common import utils from trove.common.i18n import _ from trove.common.stream_codecs import IdentityCodec -from trove.common import utils REDHAT = 'redhat' DEBIAN = 'debian' @@ -479,6 +479,41 @@ def service_discovery(service_candidates): return result +def _execute_shell_cmd(cmd, options, *args, **kwargs): + """Execute a given shell command passing it + given options (flags) and arguments. + + Takes optional keyword arguments: + :param as_root: Execute as root. + :type as_root: boolean + + :param timeout: Number of seconds if specified, + default if not. + There is no timeout if set to None. + :type timeout: integer + + :raises: class:`UnknownArgumentError` if passed unknown args. + """ + + exec_args = {} + if kwargs.pop('as_root', False): + exec_args['run_as_root'] = True + exec_args['root_helper'] = 'sudo' + + if 'timeout' in kwargs: + exec_args['timeout'] = kwargs.pop('timeout') + + exec_args['shell'] = kwargs.pop('shell', False) + + if kwargs: + raise UnknownArgumentError(_("Got unknown keyword args: %r") % kwargs) + + cmd_flags = _build_command_options(options) + cmd_args = cmd_flags + list(args) + stdout, stderr = utils.execute_with_timeout(cmd, *cmd_args, **exec_args) + return stdout + + def create_directory(dir_path, user=None, group=None, force=True, **kwargs): """Create a given directory and update its ownership (recursively) to the given user and group if any. @@ -559,6 +594,7 @@ def _create_directory(dir_path, force=True, **kwargs): :param force: No error if existing, make parent directories as needed. :type force: boolean + :param as_root: Run as root user, default: False. """ options = (('p', force),) @@ -802,39 +838,6 @@ def list_files_in_directory(root_dir, recursive=False, pattern=None, if not pattern or re.match(pattern, name)} -def _execute_shell_cmd(cmd, options, *args, **kwargs): - """Execute a given shell command passing it - given options (flags) and arguments. - - Takes optional keyword arguments: - :param as_root: Execute as root. - :type as_root: boolean - - :param timeout: Number of seconds if specified, - default if not. - There is no timeout if set to None. - :type timeout: integer - - :raises: class:`UnknownArgumentError` if passed unknown args. - """ - - exec_args = {} - if kwargs.pop('as_root', False): - exec_args['run_as_root'] = True - exec_args['root_helper'] = 'sudo' - - if 'timeout' in kwargs: - exec_args['timeout'] = kwargs.pop('timeout') - - if kwargs: - raise UnknownArgumentError(_("Got unknown keyword args: %r") % kwargs) - - cmd_flags = _build_command_options(options) - cmd_args = cmd_flags + list(args) - stdout, stderr = utils.execute_with_timeout(cmd, *cmd_args, **exec_args) - return stdout - - def _build_command_options(options): """Build a list of flags from given pairs (option, is_enabled). Each option is prefixed with a single '-'. 
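(A note on the relocated _execute_shell_cmd helper above: it now also accepts a shell keyword, and _build_command_options converts (flag, is_enabled) pairs into single-dash options before the command is handed to utils.execute_with_timeout. A minimal usage sketch follows; the mkdir command, wrapper name, and path handling are illustrative, not part of the patch.)

# Hypothetical caller, mirroring how _create_directory() uses the helper.
def _make_dir_as_root(dir_path):
    # (('p', True),) is rendered as ['-p']; as_root=True routes the call
    # through run_as_root with root_helper='sudo'.
    return _execute_shell_cmd('mkdir', (('p', True),), dir_path, as_root=True)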
@@ -867,3 +870,35 @@ def is_mount(path): def get_current_user(): """Returns name of the current OS user""" return pwd.getpwuid(os.getuid())[0] + + +def create_user(user_name, user_id, group_name=None, group_id=None): + group_name = group_name or user_name + group_id = group_id or user_id + + try: + _execute_shell_cmd('groupadd', [], '--gid', group_id, group_name, + as_root=True) + except exception.ProcessExecutionError as err: + if 'already exists' not in err.stderr: + raise exception.UnprocessableEntity( + 'Failed to add group %s, error: %s' % (group_name, err.stderr) + ) + + try: + _execute_shell_cmd('useradd', [], '--uid', user_id, '--gid', group_id, + '-M', user_name, as_root=True) + except exception.ProcessExecutionError as err: + if 'already exists' not in err.stderr: + raise exception.UnprocessableEntity( + 'Failed to add user %s, error: %s' % (user_name, err.stderr) + ) + + +def remove_dir_contents(folder): + """Remove all the files and sub-directories but keep the folder. + + Use shell=True here because shell=False doesn't support '*' + """ + path = os.path.join(folder, '*') + _execute_shell_cmd(f'rm -rf {path}', [], shell=True, as_root=True) diff --git a/trove/guestagent/datastore/experimental/cassandra/manager.py b/trove/guestagent/datastore/experimental/cassandra/manager.py deleted file mode 100644 index 504d9b3fd2..0000000000 --- a/trove/guestagent/datastore/experimental/cassandra/manager.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright 2013 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
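(Before the deleted Cassandra manager listing resumes, a hedged sketch of how the create_user and remove_dir_contents helpers added in the hunk above might be called from guest-agent code. The 'database' user name and the datadir path are illustrative; database_service_uid is the new [DEFAULT] option introduced earlier in this patch.)

# Illustrative only. Create the unprivileged database service user with the
# UID/GID taken from the database_service_uid option, then clear any stale
# datadir contents while keeping the mount point itself.
from trove.common import cfg
from trove.guestagent.common import operating_system

CONF = cfg.CONF

# groupadd/useradd errors are tolerated when the group/user already exists
# (see the 'already exists' checks in the helper above).
operating_system.create_user('database', CONF.database_service_uid)
operating_system.remove_dir_contents('/var/lib/mysql')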
-# - -import os - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common import instance as trove_instance -from trove.common.notification import EndNotification -from trove.guestagent import backup -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.cassandra import service -from trove.guestagent.datastore import manager -from trove.guestagent import guest_log -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class Manager(manager.Manager): - - GUEST_LOG_DEFS_SYSTEM_LABEL = 'system' - - def __init__(self, manager_name='cassandra'): - super(Manager, self).__init__(manager_name) - self._app = None - self._admin = None - - @property - def status(self): - return self.app.status - - @property - def app(self): - if self._app is None: - self._app = self.build_app() - return self._app - - def build_app(self): - return service.CassandraApp() - - @property - def admin(self): - if self._admin is None: - self._admin = self.app.build_admin() - return self._admin - - @property - def configuration_manager(self): - return self.app.configuration_manager - - def get_datastore_log_defs(self): - system_log_file = self.validate_log_file( - self.app.cassandra_system_log_file, self.app.cassandra_owner) - return { - self.GUEST_LOG_DEFS_SYSTEM_LABEL: { - self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, - self.GUEST_LOG_USER_LABEL: self.app.cassandra_owner, - self.GUEST_LOG_FILE_LABEL: system_log_file - } - } - - def guest_log_enable(self, context, log_name, disable): - if disable: - LOG.debug("Disabling system log.") - self.app.set_logging_level('OFF') - else: - log_level = CONF.get(self.manager_name).get('system_log_level') - LOG.debug("Enabling system log with logging level: %s", log_level) - self.app.set_logging_level(log_level) - - return False - - def restart(self, context): - self.app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - self.app.start_db_with_conf_changes(config_contents) - - def stop_db(self, context, do_not_start_on_reboot=False): - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def reset_configuration(self, context, configuration): - self.app.reset_configuration(configuration) - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - self.app.install_if_needed(packages) - self.app.init_storage_structure(mount_point) - - if config_contents or device_path or backup_info: - - # FIXME(pmalik) Once the cassandra bug - # https://issues.apache.org/jira/browse/CASSANDRA-2356 - # is fixed, this code may have to be revisited. - # - # Cassandra generates system keyspaces on the first start. - # The stored properties include the 'cluster_name', which once - # saved cannot be easily changed without removing the system - # tables. It is crucial that the service does not boot up in - # the middle of the configuration procedure. - # We wait here for the service to come up, stop it properly and - # remove the generated keyspaces before proceeding with - # configuration. If it does not start up within the time limit - # we assume it is not going to and proceed with configuration - # right away. 
- LOG.debug("Waiting for database first boot.") - if (self.app.status.wait_for_real_status_to_change_to( - trove_instance.ServiceStatuses.RUNNING, - CONF.state_change_wait_time, - False)): - LOG.debug("Stopping database prior to initial configuration.") - self.app.stop_db() - self.app._remove_system_tables() - - LOG.debug("Starting initial configuration.") - if config_contents: - LOG.debug("Applying configuration.") - self.app.configuration_manager.save_configuration( - config_contents) - cluster_name = None - if cluster_config: - cluster_name = cluster_config.get('id', None) - self.app.apply_initial_guestagent_configuration( - cluster_name=cluster_name) - - if cluster_config: - self.app.write_cluster_topology( - cluster_config['dc'], cluster_config['rack'], - prefer_local=True) - - if device_path: - LOG.debug("Preparing data volume.") - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - if os.path.exists(mount_point): - # rsync exiting data - LOG.debug("Migrating existing data.") - device.migrate_data(mount_point) - # mount the volume - LOG.debug("Mounting new volume.") - device.mount(mount_point) - - if not cluster_config: - if backup_info: - self._perform_restore(backup_info, context, mount_point) - - LOG.debug("Starting database with configuration changes.") - self.app.start_db(update_db=False) - - if not self.app.has_user_config(): - LOG.debug("Securing superuser access.") - self.app.secure() - self.app.restart() - - self._admin = self.app.build_admin() - - if not cluster_config and self.is_root_enabled(context): - self.status.report_root(context) - - def pre_upgrade(self, context): - data_dir = self.app.cassandra_data_dir - mount_point, _data = os.path.split(data_dir) - save_etc_dir = "%s/etc" % mount_point - home_save = "%s/trove_user" % mount_point - - self.app.status.begin_restart() - self.app.drain() - self.app.stop_db() - - operating_system.copy("%s/." % self.app.cassandra_conf_dir, - save_etc_dir, - preserve=True, as_root=True) - operating_system.copy("%s/." 
% os.path.expanduser('~'), home_save, - preserve=True, as_root=True) - - self.unmount_volume(context, mount_point=mount_point) - - return { - 'mount_point': mount_point, - 'save_etc_dir': save_etc_dir, - 'home_save': home_save - } - - def post_upgrade(self, context, upgrade_info): - self.app.stop_db() - - if 'device' in upgrade_info: - self.mount_volume(context, mount_point=upgrade_info['mount_point'], - device_path=upgrade_info['device'], - write_to_fstab=True) - operating_system.chown(path=upgrade_info['mount_point'], - user=self.app.cassandra_owner, - group=self.app.cassandra_owner, - recursive=True, - as_root=True) - - self._restore_home_directory(upgrade_info['home_save']) - self._restore_directory(upgrade_info['save_etc_dir'], - self.app.cassandra_conf_dir) - - self._reset_app() - self.app.start_db() - self.app.upgrade_sstables() - self.app.status.end_restart() - - def change_passwords(self, context, users): - with EndNotification(context): - self.admin.change_passwords(context, users) - - def update_attributes(self, context, username, hostname, user_attrs): - with EndNotification(context): - self.admin.update_attributes(context, username, hostname, - user_attrs) - - def create_database(self, context, databases): - with EndNotification(context): - self.admin.create_database(context, databases) - - def create_user(self, context, users): - with EndNotification(context): - self.admin.create_user(context, users) - - def delete_database(self, context, database): - with EndNotification(context): - self.admin.delete_database(context, database) - - def delete_user(self, context, user): - with EndNotification(context): - self.admin.delete_user(context, user) - - def get_user(self, context, username, hostname): - return self.admin.get_user(context, username, hostname) - - def grant_access(self, context, username, hostname, databases): - self.admin.grant_access(context, username, hostname, databases) - - def revoke_access(self, context, username, hostname, database): - self.admin.revoke_access(context, username, hostname, database) - - def list_access(self, context, username, hostname): - return self.admin.list_access(context, username, hostname) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - return self.admin.list_databases(context, limit, marker, - include_marker) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - return self.admin.list_users(context, limit, marker, include_marker) - - def enable_root(self, context): - return self.app.enable_root() - - def enable_root_with_password(self, context, root_password=None): - return self.app.enable_root(root_password=root_password) - - def disable_root(self, context): - self.app.enable_root(root_password=None) - - def is_root_enabled(self, context): - return self.app.is_root_enabled() - - def _perform_restore(self, backup_info, context, restore_location): - LOG.info("Restoring database from backup %s.", backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - self.app._apply_post_restore_updates(backup_info) - except Exception as e: - LOG.error(e) - LOG.error("Error performing restore from backup %s.", - backup_info['id']) - self.app.status.set_status(trove_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully.") - - def create_backup(self, context, backup_info): - """ - Entry point for initiating a backup for this instance. - The call currently blocks guestagent until the backup is finished. 
- - :param backup_info: a dictionary containing the db instance id of the - backup task, location, type, and other data. - """ - - with EndNotification(context): - backup.backup(context, backup_info) - - def update_overrides(self, context, overrides, remove=False): - LOG.debug("Updating overrides.") - if remove: - self.app.remove_overrides() - else: - self.app.update_overrides(context, overrides, remove) - - def apply_overrides(self, context, overrides): - """Configuration changes are made in the config YAML file and - require restart, so this is a no-op. - """ - pass - - def get_data_center(self, context): - return self.app.get_data_center() - - def get_rack(self, context): - return self.app.get_rack() - - def set_seeds(self, context, seeds): - self.app.set_seeds(seeds) - - def get_seeds(self, context): - return self.app.get_seeds() - - def set_auto_bootstrap(self, context, enabled): - self.app.set_auto_bootstrap(enabled) - - def node_cleanup_begin(self, context): - self.app.node_cleanup_begin() - - def node_cleanup(self, context): - self.app.node_cleanup() - - def node_decommission(self, context): - self.app.node_decommission() - - def cluster_secure(self, context, password): - os_admin = self.app.cluster_secure(password) - self._admin = self.app.build_admin() - return os_admin - - def get_admin_credentials(self, context): - return self.app.get_admin_credentials() - - def store_admin_credentials(self, context, admin_credentials): - self.app.store_admin_credentials(admin_credentials) - self._admin = self.app.build_admin() - - def _reset_app(self): - """ - A function for reseting app and admin properties. - It is useful when we want to force reload application. - Possible usages: loading new configuration files, loading new - datastore password - """ - self._app = None - self._admin = None diff --git a/trove/guestagent/datastore/experimental/cassandra/service.py b/trove/guestagent/datastore/experimental/cassandra/service.py deleted file mode 100644 index 3ec2a38361..0000000000 --- a/trove/guestagent/datastore/experimental/cassandra/service.py +++ /dev/null @@ -1,1314 +0,0 @@ -# Copyright 2013 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import re -import stat - -from cassandra.auth import PlainTextAuthProvider -from cassandra.cluster import Cluster -from cassandra.cluster import NoHostAvailable -from cassandra import OperationTimedOut -from cassandra.policies import ConstantReconnectionPolicy -from oslo_log import log as logging -from oslo_utils import netutils - -from trove.common import cfg -from trove.common.db.cassandra import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common.stream_codecs import IniCodec -from trove.common.stream_codecs import PropertiesCodec -from trove.common.stream_codecs import SafeYamlCodec -from trove.common.stream_codecs import XmlCodec -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import OneFileOverrideStrategy -from trove.guestagent.common import guestagent_utils -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore import service -from trove.guestagent import pkg - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -packager = pkg.Package() - - -class CassandraApp(object): - """Prepares DBaaS on a Guest container.""" - - _ADMIN_USER = 'os_admin' - - _CONF_AUTH_SEC = 'authentication' - _CONF_USR_KEY = 'username' - _CONF_PWD_KEY = 'password' - _CONF_DIR_MODS = stat.S_IRWXU - _CONF_FILE_MODS = stat.S_IRUSR - - CASSANDRA_CONF_FILE = "cassandra.yaml" - CASSANDRA_TOPOLOGY_FILE = 'cassandra-rackdc.properties' - CASSANDRA_LOGBACK_FILE = "logback.xml" - - _TOPOLOGY_CODEC = PropertiesCodec( - delimiter='=', unpack_singletons=True, string_mappings={ - 'true': True, 'false': False}) - - CASSANDRA_KILL_CMD = "sudo killall java || true" - - def __init__(self): - self.state_change_wait_time = CONF.state_change_wait_time - self.status = CassandraAppStatus(self.get_current_superuser()) - - revision_dir = guestagent_utils.build_file_path( - os.path.dirname(self.cassandra_conf), - ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) - self.configuration_manager = ConfigurationManager( - self.cassandra_conf, - self.cassandra_owner, self.cassandra_owner, - SafeYamlCodec(default_flow_style=False), requires_root=True, - override_strategy=OneFileOverrideStrategy(revision_dir)) - - lb_revision_dir = guestagent_utils.build_file_path( - os.path.dirname(self.cassandra_logback), 'logback-overrides') - self.logback_conf_manager = ConfigurationManager( - self.cassandra_logback, - self.cassandra_owner, self.cassandra_owner, - XmlCodec(), requires_root=True, - override_strategy=OneFileOverrideStrategy(lb_revision_dir)) - - @property - def service_candidates(self): - return ['cassandra'] - - @property - def cassandra_conf_dir(self): - return { - operating_system.REDHAT: "/etc/cassandra/default.conf/", - operating_system.DEBIAN: "/etc/cassandra/", - operating_system.SUSE: "/etc/cassandra/default.conf/" - }[operating_system.get_os()] - - @property - def cassandra_conf(self): - return guestagent_utils.build_file_path(self.cassandra_conf_dir, - self.CASSANDRA_CONF_FILE) - - @property - def cassandra_topology(self): - return guestagent_utils.build_file_path(self.cassandra_conf_dir, - self.CASSANDRA_TOPOLOGY_FILE) - - @property - def cassandra_owner(self): - return 'cassandra' - - @property - def cassandra_data_dir(self): - return guestagent_utils.build_file_path( - self.cassandra_working_dir, 'data') - - @property - 
def cassandra_working_dir(self): - return "/var/lib/cassandra" - - @property - def cassandra_system_log_file(self): - return guestagent_utils.build_file_path( - self.cassandra_log_dir, 'system', 'log') - - @property - def cassandra_log_dir(self): - return "/var/log/cassandra" - - @property - def cassandra_logback(self): - return guestagent_utils.build_file_path(self.cassandra_conf_dir, - self.CASSANDRA_LOGBACK_FILE) - - @property - def default_superuser_password(self): - return "cassandra" - - @property - def default_superuser_pwd_hash(self): - # Default 'salted_hash' value for 'cassandra' user on Cassandra 2.1. - return "$2a$10$wPEVuXBU7WE2Uwzqq3t19ObRJyoKztzC/Doyfr0VtDmVXC4GDAV3e" - - @property - def cqlsh_conf_path(self): - return "~/.cassandra/cqlshrc" - - def build_admin(self): - return CassandraAdmin(self.get_current_superuser()) - - def install_if_needed(self, packages): - """Prepare the guest machine with a Cassandra server installation.""" - LOG.info("Preparing Guest as a Cassandra Server") - if not packager.pkg_is_installed(packages): - self._install_db(packages) - LOG.debug("Cassandra install_if_needed complete") - - def init_storage_structure(self, mount_point): - try: - operating_system.create_directory(mount_point, as_root=True) - except exception.ProcessExecutionError: - LOG.exception("Error while initiating storage structure.") - - def start_db(self, update_db=False, enable_on_boot=True): - self.status.start_db_service( - self.service_candidates, self.state_change_wait_time, - enable_on_boot=enable_on_boot, update_db=update_db) - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - self.status.stop_db_service( - self.service_candidates, self.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def restart(self): - self.status.restart_db_service( - self.service_candidates, self.state_change_wait_time) - - def _install_db(self, packages): - """Install Cassandra server""" - LOG.debug("Installing Cassandra server.") - packager.pkg_install(packages, None, 10000) - LOG.debug("Finished installing Cassandra server") - - def _remove_system_tables(self): - """ - Clean up the system keyspace. - - System tables are initialized on the first boot. - They store certain properties, such as 'cluster_name', - that cannot be easily changed once afterwards. - The system keyspace needs to be cleaned up first. The - tables will be regenerated on the next startup. - Make sure to also cleanup the commitlog and caches to avoid - startup errors due to inconsistencies. - - The service should not be running at this point. - """ - if self.status.is_running: - raise RuntimeError(_("Cannot remove system tables. 
" - "The service is still running.")) - - LOG.info('Removing existing system tables.') - system_keyspace_dir = guestagent_utils.build_file_path( - self.cassandra_data_dir, 'system') - commitlog_file = guestagent_utils.build_file_path( - self.cassandra_working_dir, 'commitlog') - chaches_dir = guestagent_utils.build_file_path( - self.cassandra_working_dir, 'saved_caches') - - operating_system.remove(system_keyspace_dir, - force=True, recursive=True, as_root=True) - operating_system.remove(commitlog_file, - force=True, recursive=True, as_root=True) - operating_system.remove(chaches_dir, - force=True, recursive=True, as_root=True) - - operating_system.create_directory( - system_keyspace_dir, - user=self.cassandra_owner, group=self.cassandra_owner, - force=True, as_root=True) - operating_system.create_directory( - commitlog_file, - user=self.cassandra_owner, group=self.cassandra_owner, - force=True, as_root=True) - operating_system.create_directory( - chaches_dir, - user=self.cassandra_owner, group=self.cassandra_owner, - force=True, as_root=True) - - def _apply_post_restore_updates(self, backup_info): - """The service should not be running at this point. - - The restored database files carry some properties over from the - original instance that need to be updated with appropriate - values for the new instance. - These include: - - - Reset the 'cluster_name' property to match the new unique - ID of this instance. - This is to ensure that the restored instance is a part of a new - single-node cluster rather than forming a one with the - original node. - - Reset the administrator's password. - The original password from the parent instance may be - compromised or long lost. - - A general procedure is: - - update the configuration property with the current value - so that the service can start up - - reset the superuser password - - restart the service - - change the cluster name - - restart the service - - :seealso: _reset_admin_password - :seealso: change_cluster_name - """ - - if self.status.is_running: - raise RuntimeError(_("Cannot reset the cluster name. " - "The service is still running.")) - - LOG.debug("Applying post-restore updates to the database.") - - try: - # Change the 'cluster_name' property to the current in-database - # value so that the database can start up. - self._update_cluster_name_property(backup_info['instance_id']) - - # Reset the superuser password so that we can log-in. - self._reset_admin_password() - - # Start the database and update the 'cluster_name' to the - # new value. - self.start_db(update_db=False) - self.change_cluster_name(CONF.guest_id) - finally: - self.stop_db() # Always restore the initial state of the service. - - def cluster_secure(self, password): - return self.secure(password=password).serialize() - - def secure(self, update_user=None, password=None): - """Configure the Trove administrative user. - Update an existing user if given. - Create a new one using the default database credentials - otherwise and drop the built-in user when finished. 
- """ - LOG.info('Configuring Trove superuser.') - - if password is None: - password = utils.generate_random_password() - - admin_username = update_user.name if update_user else self._ADMIN_USER - os_admin = models.CassandraUser(admin_username, password) - - if update_user: - CassandraAdmin(update_user).alter_user_password(os_admin) - else: - cassandra = models.CassandraUser( - models.CassandraUser.root_username, - self.default_superuser_password) - CassandraAdmin(cassandra)._create_superuser(os_admin) - CassandraAdmin(os_admin).drop_user(cassandra) - - self._update_admin_credentials(os_admin) - - return os_admin - - def _update_admin_credentials(self, user): - self.__create_cqlsh_config({self._CONF_AUTH_SEC: - {self._CONF_USR_KEY: user.name, - self._CONF_PWD_KEY: user.password}}) - - # Update the internal status with the new user. - self.status = CassandraAppStatus(user) - - def store_admin_credentials(self, admin_credentials): - user = models.CassandraUser.deserialize(admin_credentials) - self._update_admin_credentials(user) - - def get_admin_credentials(self): - return self.get_current_superuser().serialize() - - def _reset_admin_password(self): - """ - Reset the password of the Trove's administrative superuser. - - The service should not be running at this point. - - A general password reset procedure is: - - disable user authentication and remote access - - restart the service - - update the password in the 'system_auth.credentials' table - - re-enable authentication and make the host reachable - - restart the service - """ - if self.status.is_running: - raise RuntimeError(_("Cannot reset the administrative password. " - "The service is still running.")) - - try: - # Disable automatic startup in case the node goes down before - # we have the superuser secured. - operating_system.disable_service_on_boot(self.service_candidates) - - self.__disable_remote_access() - self.__disable_authentication() - - # We now start up the service and immediately re-enable - # authentication in the configuration file (takes effect after - # restart). - # Then we reset the superuser password to its default value - # and restart the service to get user functions back. - self.start_db(update_db=False, enable_on_boot=False) - self.__enable_authentication() - os_admin = self.__reset_user_password_to_default(self._ADMIN_USER) - self.status = CassandraAppStatus(os_admin) - self.restart() - - # Now change the administrative password to a new secret value. - self.secure(update_user=os_admin) - finally: - self.stop_db() # Always restore the initial state of the service. - - # At this point, we should have a secured database with new Trove-only - # superuser password. - # Proceed to re-enable remote access and automatic startup. - self.__enable_remote_access() - operating_system.enable_service_on_boot(self.service_candidates) - - def __reset_user_password_to_default(self, username): - LOG.debug("Resetting the password of user '%(user)s' to '%(pw)s'.", - {'user': username, 'pw': self.default_superuser_password}) - - user = models.CassandraUser(username, self.default_superuser_password) - with CassandraLocalhostConnection(user) as client: - client.execute( - "UPDATE system_auth.credentials SET salted_hash=%s " - "WHERE username='{}';", (user.name,), - (self.default_superuser_pwd_hash,)) - - return user - - def change_cluster_name(self, cluster_name): - """Change the 'cluster_name' property of an exesting running instance. - Cluster name is stored in the database and is required to match the - configuration value. 
def change_cluster_name(self, cluster_name): - """Change the 'cluster_name' property of an existing running instance. - Cluster name is stored in the database and is required to match the - configuration value. Cassandra fails to start otherwise. - """ - - if not self.status.is_running: - raise RuntimeError(_("Cannot change the cluster name. " - "The service is not running.")) - - LOG.debug("Changing the cluster name to '%s'.", cluster_name) - - # Update the in-database value. - self.__reset_cluster_name(cluster_name) - - # Update the configuration property. - self._update_cluster_name_property(cluster_name) - - self.restart() - - def __reset_cluster_name(self, cluster_name): - # Reset the in-database value stored locally on this node. - current_superuser = self.get_current_superuser() - with CassandraLocalhostConnection(current_superuser) as client: - client.execute( - "UPDATE system.local SET cluster_name = '{}' " - "WHERE key='local';", (cluster_name,)) - - # Newer versions of Cassandra require a flush to ensure the changes - # to the local system keyspace persist. - self.flush_tables('system', 'local') - - def __create_cqlsh_config(self, sections): - config_path = self._get_cqlsh_conf_path() - config_dir = os.path.dirname(config_path) - if not os.path.exists(config_dir): - os.mkdir(config_dir, self._CONF_DIR_MODS) - else: - os.chmod(config_dir, self._CONF_DIR_MODS) - operating_system.write_file(config_path, sections, codec=IniCodec()) - os.chmod(config_path, self._CONF_FILE_MODS) - - def get_current_superuser(self): - """ - Build the Trove superuser. - Use the stored credentials. - If not available, fall back to the defaults. - """ - if self.has_user_config(): - return self._load_current_superuser() - - LOG.warning( - "Trove administrative user has not been configured yet. " - "Using the built-in default: %s", - models.CassandraUser.root_username) - return models.CassandraUser(models.CassandraUser.root_username, - self.default_superuser_password) - - def has_user_config(self): - """ - Return TRUE if there is a client configuration file available - on the guest. - """ - return os.path.exists(self._get_cqlsh_conf_path()) - - def _load_current_superuser(self): - config = operating_system.read_file(self._get_cqlsh_conf_path(), - codec=IniCodec()) - return models.CassandraUser( - config[self._CONF_AUTH_SEC][self._CONF_USR_KEY], - config[self._CONF_AUTH_SEC][self._CONF_PWD_KEY] - ) - - def apply_initial_guestagent_configuration(self, cluster_name=None): - """Update guestagent-controlled configuration properties. - These changes to the default template are necessary in order to make - the database service bootable and accessible in the guestagent context. - - :param cluster_name: The 'cluster_name' configuration property. - Use the unique guest id by default. - :type cluster_name: string - """ - self.configuration_manager.apply_system_override( - {'data_file_directories': [self.cassandra_data_dir]}) - self._make_host_reachable() - self._update_cluster_name_property(cluster_name or CONF.guest_id) - # A single-node instance may use the SimpleSnitch - # (keyspaces use SimpleStrategy). - # A network-aware snitch has to be used otherwise. - if cluster_name is None: - updates = {'endpoint_snitch': 'SimpleSnitch'} - else: - updates = {'endpoint_snitch': 'GossipingPropertyFileSnitch'} - self.configuration_manager.apply_system_override(updates) - - def _make_host_reachable(self): - """ - Some of these settings may be overridden by user-defined - configuration groups. - - authenticator and authorizer - - Necessary to enable users and permissions. - rpc_address - Enable remote connections on all interfaces. - broadcast_rpc_address - RPC address to broadcast to drivers and - other clients. 
Must be set if - rpc_address = 0.0.0.0 and can never be - 0.0.0.0 itself. - listen_address - The address on which the node communicates with - other nodes. Can never be 0.0.0.0. - seed_provider - A list of discovery contact points. - """ - self.__enable_authentication() - self.__enable_remote_access() - - def __enable_remote_access(self): - updates = { - 'rpc_address': "0.0.0.0", - 'broadcast_rpc_address': netutils.get_my_ipv4(), - 'listen_address': netutils.get_my_ipv4(), - 'seed_provider': {'parameters': - [{'seeds': netutils.get_my_ipv4()}] - } - } - - self.configuration_manager.apply_system_override(updates) - - def __disable_remote_access(self): - updates = { - 'rpc_address': "127.0.0.1", - 'listen_address': '127.0.0.1', - 'seed_provider': {'parameters': - [{'seeds': '127.0.0.1'}] - } - } - - self.configuration_manager.apply_system_override(updates) - - def __enable_authentication(self): - updates = { - 'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator', - 'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer' - } - - self.configuration_manager.apply_system_override(updates) - - def __disable_authentication(self): - updates = { - 'authenticator': 'org.apache.cassandra.auth.AllowAllAuthenticator', - 'authorizer': 'org.apache.cassandra.auth.AllowAllAuthorizer' - } - - self.configuration_manager.apply_system_override(updates) - - def _update_cluster_name_property(self, name): - """This 'cluster_name' property prevents nodes from one - logical cluster from talking to another. - All nodes in a cluster must have the same value. - """ - self.configuration_manager.apply_system_override({'cluster_name': - name}) - - def update_overrides(self, context, overrides, remove=False): - if overrides: - self.configuration_manager.apply_user_override(overrides) - - def remove_overrides(self): - self.configuration_manager.remove_user_override() - - def write_cluster_topology(self, data_center, rack, prefer_local=True): - LOG.info('Saving Cassandra cluster topology configuration.') - - config = {'dc': data_center, - 'rack': rack, - 'prefer_local': prefer_local} - - operating_system.write_file(self.cassandra_topology, config, - codec=self._TOPOLOGY_CODEC, as_root=True) - operating_system.chown( - self.cassandra_topology, - self.cassandra_owner, self.cassandra_owner, as_root=True) - operating_system.chmod( - self.cassandra_topology, FileMode.ADD_READ_ALL, as_root=True) - - def start_db_with_conf_changes(self, config_contents): - LOG.debug("Starting database with configuration changes.") - if self.status.is_running: - raise RuntimeError(_("The service is still running.")) - - self.configuration_manager.save_configuration(config_contents) - # The configuration template has to be updated with - # guestagent-controlled settings. 
- self.apply_initial_guestagent_configuration() - self.start_db(True) - - def reset_configuration(self, configuration): - LOG.debug("Resetting configuration.") - config_contents = configuration['config_contents'] - self.configuration_manager.save_configuration(config_contents) - - def _get_cqlsh_conf_path(self): - return os.path.expanduser(self.cqlsh_conf_path) - - def get_data_center(self): - config = operating_system.read_file(self.cassandra_topology, - codec=self._TOPOLOGY_CODEC) - return config['dc'] - - def get_rack(self): - config = operating_system.read_file(self.cassandra_topology, - codec=self._TOPOLOGY_CODEC) - return config['rack'] - - def set_seeds(self, seeds): - LOG.debug("Setting seed nodes: %s", seeds) - updates = { - 'seed_provider': {'parameters': - [{'seeds': ','.join(seeds)}] - } - } - - self.configuration_manager.apply_system_override(updates) - - def get_seeds(self): - """Return a list of seed node IPs if any. - - The seed IPs are stored as a comma-separated string in the - seed-provider parameters: - [{'class_name': '<name>', 'parameters': [{'seeds': '<ip>,<ip>'}, ...]}] - """ - - def find_first(key, dict_list): - for item in dict_list: - if key in item: - return item[key] - return [] - - sp_property = self.configuration_manager.get_value('seed_provider', []) - seeds_str = find_first('seeds', find_first('parameters', sp_property)) - return seeds_str.split(',') if seeds_str else [] - - def set_auto_bootstrap(self, enabled): - """Auto-bootstrap makes new (non-seed) nodes automatically migrate the - right data to themselves. - The feature has to be turned OFF when initializing a fresh cluster - without data. - It must be turned back ON once the cluster is initialized. - """ - LOG.debug("Setting auto-bootstrapping: %s", enabled) - updates = {'auto_bootstrap': enabled} - self.configuration_manager.apply_system_override(updates) - - def node_cleanup_begin(self): - """Suspend periodic status updates and mark the instance busy - throughout the operation. - """ - self.status.begin_restart() - self.status.set_status(rd_instance.ServiceStatuses.BLOCKED) - - def node_cleanup(self): - """Cassandra does not automatically remove data from nodes that - lose part of their partition range to a newly added node. - Cleans up keyspaces and partition keys no longer belonging to the node. - - Do not treat cleanup failures as fatal. Resume the heartbeat after - finishing and let it signal the true state of the instance to the - caller. - """ - LOG.debug("Running node cleanup.") - # nodetool -h <host ip> -p <port> -u <user> -pw <password> cleanup - try: - self._run_nodetool_command('cleanup') - self.status.set_status(rd_instance.ServiceStatuses.RUNNING) - except Exception: - LOG.exception("The node failed to complete its cleanup.") - finally: - self.status.end_restart()
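get_seeds above has to dig the seed list out of the nested seed-provider structure; a standalone sketch of the same extraction (the sample data is illustrative):

def get_seed_ips(seed_provider):
    # seed_provider mirrors cassandra.yaml:
    # [{'class_name': '<name>', 'parameters': [{'seeds': '<ip>,<ip>'}, ...]}]
    for provider in seed_provider:
        for params in provider.get('parameters', []):
            if 'seeds' in params:
                return params['seeds'].split(',')
    return []

sample = [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider',
           'parameters': [{'seeds': '10.0.0.5,10.0.0.6'}]}]
assert get_seed_ips(sample) == ['10.0.0.5', '10.0.0.6']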
- """ - LOG.debug("Decommissioning the node.") - # nodetool -h -p -u -pw decommission - self.status.begin_restart() - try: - self._run_nodetool_command('decommission') - except Exception: - LOG.exception("The node failed to decommission itself.") - self.status.set_status(rd_instance.ServiceStatuses.FAILED) - return - finally: - # Cassandra connections have ability to automatically discover and - # fallback to other cluster nodes whenever a node goes down. - # Reset the status after decomissioning to ensure the heartbeat - # connection talks to this node only. - self.status = CassandraAppStatus(self.get_current_superuser()) - - try: - self.stop_db(update_db=True, do_not_start_on_reboot=True) - finally: - self.status.end_restart() - - def flush_tables(self, keyspace, *tables): - """Flushes one or more tables from the memtable. - """ - LOG.debug("Flushing tables.") - # nodetool -h -p -u -pw flush -- - # ( ... ) - self._run_nodetool_command('flush', keyspace, *tables) - - def set_logging_level(self, log_level): - """Set the log Cassandra's system log verbosity level. - """ - # Apply the change at runtime. - self._run_nodetool_command('setlogginglevel', 'root', log_level) - - # Persist the change. - self.logback_conf_manager.apply_system_override( - {'configuration': {'root': {'@level': log_level}}}) - - def drain(self): - """Drains Cassandra node so that it can upgraded safely. - """ - LOG.debug("Draining node.") - self._run_nodetool_command('drain') - - def upgrade_sstables(self): - """Upgrades sstables to match new datastore version. - """ - LOG.debug("Upgrading sstables.") - self._run_nodetool_command('upgradesstables') - - def _run_nodetool_command(self, cmd, *args, **kwargs): - """Execute a nodetool command on this node. - """ - return utils.execute('nodetool', '-h', 'localhost', - cmd, *args, **kwargs) - - def enable_root(self, root_password=None): - """Cassandra's 'root' user is called 'cassandra'. - Create a new superuser if it does not exist and grant it full - superuser-level access to all keyspaces. - """ - cassandra = models.CassandraUser.root(password=root_password) - admin = self.build_admin() - if self.is_root_enabled(): - admin.alter_user_password(cassandra) - else: - admin._create_superuser(cassandra) - - return cassandra.serialize() - - def is_root_enabled(self): - """The Trove administrative user ('os_admin') should normally be the - only superuser in the system. - """ - found = self.build_admin().list_superusers() - return len([user for user in found - if user.name != self._ADMIN_USER]) > 0 - - -class CassandraAppStatus(service.BaseDbStatus): - - def __init__(self, superuser): - """ - :param superuser: User account the Status uses for connecting - to the database. - :type superuser: CassandraUser - """ - super(CassandraAppStatus, self).__init__() - self.__user = superuser - self.__client = None - - @property - def client(self): - if self.__client is None: - self.__client = CassandraLocalhostConnection(self.__user) - return self.__client - - def _get_actual_db_status(self): - try: - if self.client.local_node_is_up(): - return rd_instance.ServiceStatuses.RUNNING - except NoHostAvailable: - return rd_instance.ServiceStatuses.SHUTDOWN - except Exception: - LOG.exception("Error getting Cassandra status.") - - return rd_instance.ServiceStatuses.SHUTDOWN - - def cleanup_stalled_db_services(self): - utils.execute_with_timeout(CassandraApp.CASSANDRA_KILL_CMD, shell=True) - - -class CassandraAdmin(object): - """Handles administrative tasks on the Cassandra database. 
- - -class CassandraAdmin(object): - """Handles administrative tasks on the Cassandra database. - - In Cassandra only SUPERUSERS can create other users and grant permissions - to database resources. Trove uses the 'cassandra' superuser to perform its - administrative tasks. - - The users it creates are all 'normal' (NOSUPERUSER) accounts. - The permissions it can grant are also limited to non-superuser operations. - This is to prevent anybody from creating a new superuser via the Trove API. - """ - - # Non-superuser grant modifiers. - __NO_SUPERUSER_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT') - - _KS_NAME_REGEX = re.compile(r'^<keyspace (\w+)>$') - - def __init__(self, user): - self.__admin_user = user - self.__client = None - - @property - def client(self): - if self.__client is None: - self.__client = CassandraLocalhostConnection(self.__admin_user) - return self.__client - - def create_user(self, context, users): - """ - Create new non-superuser accounts. - New users are by default granted full access to all database resources. - """ - for item in users: - self._create_user_and_grant(self.client, - self._deserialize_user(item)) - - def _create_user_and_grant(self, client, user): - """ - Create a new non-superuser account and grant it full access to its - databases. - """ - self._create_user(client, user) - for db in user.databases: - self._grant_full_access_on_keyspace( - client, self._deserialize_keyspace(db), user) - - def _create_user(self, client, user): - # Create only NOSUPERUSER accounts here. - LOG.debug("Creating a new user '%s'.", user.name) - client.execute("CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;", - (user.name,), (user.password,)) - - def _create_superuser(self, user): - """Create a new superuser account and grant it full superuser-level - access to all keyspaces. - """ - LOG.debug("Creating a new superuser '%s'.", user.name) - self.client.execute("CREATE USER '{}' WITH PASSWORD %s SUPERUSER;", - (user.name,), (user.password,)) - self.client.execute( - "GRANT ALL PERMISSIONS ON ALL KEYSPACES TO '{}';", (user.name,)) - - def delete_user(self, context, user): - self.drop_user(self._deserialize_user(user)) - - def drop_user(self, user): - self._drop_user(self.client, user) - - def _drop_user(self, client, user): - LOG.debug("Deleting user '%s'.", user.name) - client.execute("DROP USER '{}';", (user.name, )) - - def get_user(self, context, username, hostname): - user = self._find_user(self.client, username) - return user.serialize() if user is not None else None - - def _find_user(self, client, username): - """ - Look up a user with a given username. - Omit user names on the ignore list. - Return a new Cassandra user instance or None if no match is found. - """ - return next((user for user in self._get_listed_users(client) - if user.name == username), None) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - """ - List all non-superuser accounts. Omit names on the ignored list. - Return an empty set if there are none. - """ - return guestagent_utils.serialize_list( - self._get_listed_users(self.client), - limit=limit, marker=marker, include_marker=include_marker) - - def _get_listed_users(self, client): - """ - Return a set of unique user instances. - Omit user names on the ignore list. - """ - return self._get_users( - client, lambda user: user.name not in self.ignore_users) - - def _get_users(self, client, matcher=None): - """ - :param matcher: Filter expression. 
- :type matcher: callable - """ - acl = self._get_acl(client) - return {self._build_user(user.name, acl) - for user in client.execute("LIST USERS;") - if not matcher or matcher(user)} - - def _load_user(self, client, username, check_reserved=True): - if check_reserved: - models.CassandraUser(username).check_reserved() - - acl = self._get_acl(client, username=username) - return self._build_user(username, acl) - - def _build_user(self, username, acl): - user = models.CassandraUser(username) - for ks, permissions in acl.get(username, {}).items(): - if permissions: - user.databases.append(models.CassandraSchema(ks).serialize()) - return user - - def _get_acl(self, client, username=None): - """Return the ACL for a database user. - Return ACLs for all users if no particular username is specified. - - The ACL has the following format: - {username #1: - {keyspace #1: {access mod(s)...}, - keyspace #2: {...}}, - username #2: - {keyspace #1: {...}, - keyspace #3: {...}} - } - """ - - def build_list_query(username): - query_tokens = ["LIST ALL PERMISSIONS"] - if username: - query_tokens.extend(["OF", "'%s'" % username]) - query_tokens.append("NORECURSIVE;") - return ' '.join(query_tokens) - - def parse_keyspace_name(resource): - """Parse a keyspace name from a resource string. - The resource string has the following form: - <object <name>> - where 'object' is one of the database objects (keyspace, table...). - Return the name as a singleton set. Return an empty set if no match - is found. - """ - match = self._KS_NAME_REGEX.match(resource) - if match: - return {match.group(1)} - return {} - - def update_acl(username, keyspace, permission, acl): - permissions = acl.get(username, {}).get(keyspace) - if permissions is None: - guestagent_utils.update_dict({username: {keyspace: {permission}}}, - acl) - else: - permissions.add(permission) - - all_keyspace_names = None - acl = dict() - for item in client.execute(build_list_query(username)): - user = item.username - resource = item.resource - permission = item.permission - if user and resource and permission: - if resource == '<all keyspaces>': - # Cache the full keyspace list to improve performance and - # ensure consistent results for all users. - if all_keyspace_names is None: - all_keyspace_names = { - item.name - for item in self._get_available_keyspaces(client) - } - keyspaces = all_keyspace_names - else: - keyspaces = parse_keyspace_name(resource) - - for keyspace in keyspaces: - update_acl(user, keyspace, permission, acl) - - return acl - - def list_superusers(self): - """List all system users existing in the database.""" - return self._get_users(self.client, lambda user: user.super) - - def grant_access(self, context, username, hostname, databases): - """ - Grant full access on keyspaces to a given username. - """ - user = models.CassandraUser(username) - for db in databases: - self._grant_full_access_on_keyspace( - self.client, models.CassandraSchema(db), user) - - def revoke_access(self, context, username, hostname, database): - """ - Revoke all permissions on any database resources from a given username. - """ - user = models.CassandraUser(username) - self._revoke_all_access_on_keyspace( - self.client, models.CassandraSchema(database), user)
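A standalone sketch of folding (user, keyspace, permission) rows into the nested ACL shape documented above (the sample rows are illustrative):

def build_acl(rows):
    # rows: iterable of (username, keyspace, permission) triples.
    # Result: {username: {keyspace: {permission, ...}}}
    acl = {}
    for username, keyspace, permission in rows:
        acl.setdefault(username, {}).setdefault(keyspace, set()).add(permission)
    return acl

rows = [('alice', 'ks1', 'SELECT'), ('alice', 'ks1', 'MODIFY'),
        ('bob', 'ks2', 'SELECT')]
assert build_acl(rows) == {'alice': {'ks1': {'SELECT', 'MODIFY'}},
                           'bob': {'ks2': {'SELECT'}}}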
- """ - if check_reserved: - user.check_reserved() - keyspace.check_reserved() - - for access in self.__NO_SUPERUSER_MODIFIERS: - self._grant_permission_on_keyspace(client, access, keyspace, user) - - def _grant_permission_on_keyspace(self, client, modifier, keyspace, user): - """ - Grant a non-superuser permission on a keyspace to a given user. - Raise an exception if the caller attempts to grant a superuser access. - """ - LOG.debug("Granting '%(mod)s' access on '%(keyspace_name)s' to " - "user '%(user)s'.", - {'mod': modifier, 'keyspace_name': keyspace.name, - 'user': user.name}) - if modifier in self.__NO_SUPERUSER_MODIFIERS: - client.execute("GRANT {} ON KEYSPACE \"{}\" TO '{}';", - (modifier, keyspace.name, user.name)) - else: - raise exception.UnprocessableEntity( - "Invalid permission modifier (%s). Allowed values are: '%s'" - % (modifier, ', '.join(self.__NO_SUPERUSER_MODIFIERS))) - - def _revoke_all_access_on_keyspace(self, client, keyspace, user, - check_reserved=True): - if check_reserved: - user.check_reserved() - keyspace.check_reserved() - - LOG.debug("Revoking all permissions on '%(keyspace_name)s' " - "from user '%(user)s'.", {'keyspace_name': keyspace.name, - 'user': user.name}) - client.execute("REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';", - (keyspace.name, user.name)) - - def update_attributes(self, context, username, hostname, user_attrs): - user = self._load_user(self.client, username) - new_name = user_attrs.get('name') - new_password = user_attrs.get('password') - self._update_user(self.client, user, new_name, new_password) - - def _update_user(self, client, user, new_username, new_password): - """ - Update a user of a given username. - Updatable attributes include username and password. - If a new username and password are given a new user with those - attributes is created and all permissions from the original - user get transferred to it. The original user is then dropped - therefore revoking its permissions. - If only new password is specified the existing user gets altered - with that password. - """ - if new_username is not None and user.name != new_username: - if new_password is not None: - self._rename_user(client, user, new_username, new_password) - else: - raise exception.UnprocessableEntity( - _("Updating username requires specifying a password " - "as well.")) - elif new_password is not None and user.password != new_password: - user.password = new_password - self._alter_user_password(client, user) - - def _rename_user(self, client, user, new_username, new_password): - """ - Rename a given user also updating its password. - Transfer the current permissions to the new username. - Drop the old username therefore revoking its permissions. 
- """ - LOG.debug("Renaming user '%(old)s' to '%(new)s'", - {'old': user.name, 'new': new_username}) - new_user = models.CassandraUser(new_username, new_password) - new_user.databases.extend(user.databases) - self._create_user_and_grant(client, new_user) - self._drop_user(client, user) - - def alter_user_password(self, user): - self._alter_user_password(self.client, user) - - def change_passwords(self, context, users): - for user in users: - self._alter_user_password(self.client, - self._deserialize_user(user)) - - def _alter_user_password(self, client, user): - LOG.debug("Changing password of user '%s'.", user.name) - client.execute("ALTER USER '{}' " - "WITH PASSWORD %s;", (user.name,), (user.password,)) - - def create_database(self, context, databases): - for item in databases: - self._create_single_node_keyspace( - self.client, self._deserialize_keyspace(item)) - - def _create_single_node_keyspace(self, client, keyspace): - """ - Create a single-replica keyspace. - - Cassandra stores replicas on multiple nodes to ensure reliability and - fault tolerance. All replicas are equally important; - there is no primary or master. - A replication strategy determines the nodes where - replicas are placed. SimpleStrategy is for a single data center only. - The total number of replicas across the cluster is referred to as the - replication factor. - - Replication Strategy: - 'SimpleStrategy' is not optimized for multiple data centers. - 'replication_factor' The number of replicas of data on multiple nodes. - Required for SimpleStrategy; otherwise, not used. - - Keyspace names are case-insensitive by default. - To make a name case-sensitive, enclose it in double quotation marks. - """ - client.execute("CREATE KEYSPACE \"{}\" WITH REPLICATION = " - "{{ 'class' : 'SimpleStrategy', " - "'replication_factor' : 1 }};", (keyspace.name,)) - - def delete_database(self, context, database): - self._drop_keyspace(self.client, - self._deserialize_keyspace(database)) - - def _drop_keyspace(self, client, keyspace): - LOG.debug("Dropping keyspace '%s'.", keyspace.name) - client.execute("DROP KEYSPACE \"{}\";", (keyspace.name,)) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - return guestagent_utils.serialize_list( - self._get_available_keyspaces(self.client), - limit=limit, marker=marker, include_marker=include_marker) - - def _get_available_keyspaces(self, client): - """ - Return a set of unique keyspace instances. - Omit keyspace names on the ignore list. 
- """ - return {models.CassandraSchema(db.keyspace_name) - for db in client.execute("SELECT * FROM " - "system.schema_keyspaces;") - if db.keyspace_name not in self.ignore_dbs} - - def list_access(self, context, username, hostname): - user = self._find_user(self.client, username) - if user: - return user.databases - - raise exception.UserNotFound(uuid=username) - - def _deserialize_keyspace(self, keyspace_dict, check_reserved=True): - if keyspace_dict: - db = models.CassandraSchema.deserialize(keyspace_dict) - if check_reserved: - db.check_reserved() - - return db - - return None - - def _deserialize_user(self, user_dict, check_reserved=True): - if user_dict: - user = models.CassandraUser.deserialize(user_dict) - if check_reserved: - user.check_reserved() - - return user - - return None - - @property - def ignore_users(self): - return cfg.get_ignored_users() - - @property - def ignore_dbs(self): - return cfg.get_ignored_dbs() - - -class CassandraConnection(object): - """A wrapper to manage a Cassandra connection.""" - - # Cassandra 2.1 only supports protocol versions 3 and lower. - NATIVE_PROTOCOL_VERSION = 3 - CONNECTION_TIMEOUT_SEC = CONF.agent_call_high_timeout - RECONNECT_DELAY_SEC = 3 - - def __init__(self, contact_points, user): - self.__user = user - # A Cluster is initialized with a set of initial contact points. - # After the driver connects to one of the nodes it will automatically - # discover the rest. - # Will connect to '127.0.0.1' if None contact points are given. - # - # Set the 'reconnection_policy' so that dead connections recover fast. - self._cluster = Cluster( - contact_points=contact_points, - auth_provider=PlainTextAuthProvider(user.name, user.password), - protocol_version=self.NATIVE_PROTOCOL_VERSION, - connect_timeout=self.CONNECTION_TIMEOUT_SEC, - control_connection_timeout=self.CONNECTION_TIMEOUT_SEC, - reconnection_policy=ConstantReconnectionPolicy( - self.RECONNECT_DELAY_SEC, max_attempts=None)) - self.__session = None - - self._connect() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._disconnect() - - def execute(self, query, identifiers=None, data_values=None, timeout=None): - """ - Execute a query with a given sequence or dict of data values to bind. - If a sequence is used, '%s' should be used the placeholder for each - argument. If a dict is used, '%(name)s' style placeholders must - be used. - Only data values should be supplied this way. Other items, - such as keyspaces, table names, and column names should be set - ahead of time. Use the '{}' style placeholders and - 'identifiers' parameter for those. - Raise an exception if the operation exceeds the given timeout (sec). - There is no timeout if set to None. - Return a set of rows or an empty list if None. - """ - if self.is_active(): - try: - rows = self.__session.execute(self.__bind(query, identifiers), - data_values, timeout) - return rows or [] - except OperationTimedOut: - LOG.error("Query execution timed out.") - raise - - LOG.debug("Cannot perform this operation on a closed connection.") - raise exception.UnprocessableEntity() - - def __bind(self, query, identifiers): - if identifiers: - return query.format(*identifiers) - return query - - def node_is_up(self, host_ip): - """Test whether the Cassandra node located at the given IP is up. - """ - for host in self._cluster.metadata.all_hosts(): - if host.address == host_ip: - return host.is_up - return False - - def local_node_is_up(self): - """Test whether Cassandra is up on the localhost. 
- """ - return (self.node_is_up('127.0.0.1') or - self.node_is_up(netutils.get_my_ipv4())) - - def _connect(self): - if not self._cluster.is_shutdown: - LOG.debug("Connecting to a Cassandra cluster as '%s'.", - self.__user.name) - if not self.is_active(): - self.__session = self._cluster.connect() - else: - LOG.debug("Connection already open.") - LOG.debug("Connected to cluster: '%s'", - self._cluster.metadata.cluster_name) - for host in self._cluster.metadata.all_hosts(): - LOG.debug("Connected to node: '%(address)s' in rack " - "'%(rack)s' at datacenter '%(datacenter)s'", - {'address': host.address, 'rack': host.rack, - 'datacenter': host.datacenter}) - else: - LOG.debug("Cannot perform this operation on a terminated cluster.") - raise exception.UnprocessableEntity() - - def _disconnect(self): - if self.is_active(): - try: - LOG.debug("Disconnecting from cluster: '%s'", - self._cluster.metadata.cluster_name) - self._cluster.shutdown() - except Exception: - LOG.debug("Failed to disconnect from a Cassandra cluster.") - - def is_active(self): - return self.__session and not self.__session.is_shutdown - - def __del__(self): - # The connections would survive the parent object's GC. - # We need to close it explicitly. - self._disconnect() - - -class CassandraLocalhostConnection(CassandraConnection): - """ - A connection to the localhost Cassandra server. - """ - - def __init__(self, user): - super(CassandraLocalhostConnection, self).__init__(None, user) diff --git a/trove/guestagent/datastore/experimental/couchbase/manager.py b/trove/guestagent/datastore/experimental/couchbase/manager.py deleted file mode 100644 index cca76afa49..0000000000 --- a/trove/guestagent/datastore/experimental/couchbase/manager.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2013 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import instance as rd_instance -from trove.common.notification import EndNotification -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.couchbase import service -from trove.guestagent.datastore.experimental.couchbase import system -from trove.guestagent.datastore import manager -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - """ - This is Couchbase Manager class. 
diff --git a/trove/guestagent/datastore/experimental/couchbase/manager.py b/trove/guestagent/datastore/experimental/couchbase/manager.py deleted file mode 100644 index cca76afa49..0000000000 --- a/trove/guestagent/datastore/experimental/couchbase/manager.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) 2013 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import instance as rd_instance -from trove.common.notification import EndNotification -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.couchbase import service -from trove.guestagent.datastore.experimental.couchbase import system -from trove.guestagent.datastore import manager -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - """ - This is the Couchbase Manager class. It is dynamically loaded - based on the datastore of the Trove instance. - """ - def __init__(self): - self.appStatus = service.CouchbaseAppStatus() - self.app = service.CouchbaseApp(self.appStatus) - super(Manager, self).__init__('couchbase') - - @property - def status(self): - return self.appStatus - - def reset_configuration(self, context, configuration): - self.app.reset_configuration(configuration) - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - self.app.install_if_needed(packages) - if device_path: - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - device.mount(mount_point) - LOG.debug('Mounted the volume (%s).', device_path) - self.app.start_db_with_conf_changes(config_contents) - LOG.debug('Securing Couchbase now.') - self.app.initial_setup() - if backup_info: - LOG.debug('Now going to perform restore.') - self._perform_restore(backup_info, - context, - mount_point) - - def restart(self, context): - """ - Restart this Couchbase instance. - This method is called when the guest agent - gets a restart message from the taskmanager. - """ - self.app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - self.app.start_db_with_conf_changes(config_contents) - - def stop_db(self, context, do_not_start_on_reboot=False): - """ - Stop this Couchbase instance. - This method is called when the guest agent - gets a stop message from the taskmanager. - """ - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def enable_root(self, context): - LOG.debug("Enabling root.") - return self.app.enable_root() - - def enable_root_with_password(self, context, root_password=None): - return self.app.enable_root(root_password) - - def is_root_enabled(self, context): - LOG.debug("Checking if root is enabled.") - return os.path.exists(system.pwd_file) - - def _perform_restore(self, backup_info, context, restore_location): - """ - Restores all Couchbase buckets and their documents from the - backup. - """ - LOG.info("Restoring database from backup %s", backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - except Exception as e: - LOG.error("Error performing restore from backup %s", - backup_info['id']) - LOG.error(e) - self.status.set_status(rd_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully") - - def create_backup(self, context, backup_info): - """ - Back up all Couchbase buckets and their documents. - """ - with EndNotification(context): - backup.backup(context, backup_info) diff --git a/trove/guestagent/datastore/experimental/couchbase/service.py b/trove/guestagent/datastore/experimental/couchbase/service.py deleted file mode 100644 index c10e821077..0000000000 --- a/trove/guestagent/datastore/experimental/couchbase/service.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) 2013 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os -import stat -import subprocess -import tempfile - -from oslo_log import log as logging -from oslo_utils import netutils -import pexpect -import six - -from trove.common import cfg -from trove.common.db import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.couchbase import system -from trove.guestagent.datastore import service -from trove.guestagent import pkg - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -packager = pkg.Package() - - -class CouchbaseApp(object): - """ - Handles installation and configuration of Couchbase - on a Trove instance. - """ - - def __init__(self, status, state_change_wait_time=None): - """ - Sets the default status and state_change_wait_time. - """ - if state_change_wait_time: - self.state_change_wait_time = state_change_wait_time - else: - self.state_change_wait_time = CONF.state_change_wait_time - self.status = status - - def install_if_needed(self, packages): - """ - Install Couchbase if needed; do nothing if it is already installed. - """ - LOG.info('Preparing Guest as Couchbase Server.') - if not packager.pkg_is_installed(packages): - LOG.debug('Installing Couchbase.') - self._install_couchbase(packages) - - def initial_setup(self): - self.ip_address = netutils.get_my_ipv4() - mount_point = CONF.couchbase.mount_point - try: - LOG.info('Changing the Couchbase Server data directory path.') - operating_system.chown(mount_point, 'couchbase', 'couchbase', - as_root=True) - pwd = CouchbaseRootAccess.get_password() - utils.execute_with_timeout( - (system.cmd_node_init - % {'data_path': mount_point, - 'IP': self.ip_address, - 'PWD': pwd}), shell=True) - operating_system.remove(system.INSTANCE_DATA_DIR, force=True, - as_root=True) - LOG.debug('Initializing the Couchbase Server cluster.') - utils.execute_with_timeout( - (system.cmd_cluster_init - % {'IP': self.ip_address, 'PWD': pwd}), - shell=True) - utils.execute_with_timeout(system.cmd_set_swappiness, shell=True) - utils.execute_with_timeout(system.cmd_update_sysctl_conf, - shell=True) - LOG.info('Couchbase Server initial setup finished.') - except exception.ProcessExecutionError: - LOG.exception('Error performing initial Couchbase setup.') - raise RuntimeError(_("Couchbase Server initial setup failed")) - - def _install_couchbase(self, packages): - """ - Install the Couchbase Server. - """ - LOG.debug('Installing Couchbase Server. 
Creating %s', - system.COUCHBASE_CONF_DIR) - operating_system.create_directory(system.COUCHBASE_CONF_DIR, - as_root=True) - pkg_opts = {} - packager.pkg_install(packages, pkg_opts, system.TIME_OUT) - self.start_db() - LOG.debug('Finished installing Couchbase Server.') - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - self.status.stop_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def restart(self): - self.status.restart_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time) - - def start_db(self, update_db=False): - self.status.start_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - enable_on_boot=True, update_db=update_db) - - def enable_root(self, root_password=None): - return CouchbaseRootAccess.enable_root(root_password) - - def start_db_with_conf_changes(self, config_contents): - LOG.info("Starting Couchbase with configuration changes.\n" - "Configuration contents:\n %s.", config_contents) - if self.status.is_running: - LOG.error("Cannot start Couchbase with configuration changes. " - "Couchbase state == %s.", self.status) - raise RuntimeError(_("Couchbase is not stopped.")) - self._write_config(config_contents) - self.start_db(True) - - def reset_configuration(self, configuration): - config_contents = configuration['config_contents'] - LOG.debug("Resetting configuration.") - self._write_config(config_contents) - - def _write_config(self, config_contents): - """ - Update contents of Couchbase configuration file - """ - return - - -class CouchbaseAppStatus(service.BaseDbStatus): - """ - Handles all of the status updating for the couchbase guest agent. - """ - - def _get_actual_db_status(self): - self.ip_address = netutils.get_my_ipv4() - pwd = None - try: - pwd = CouchbaseRootAccess.get_password() - return self._get_status_from_couchbase(pwd) - except exception.ProcessExecutionError: - # log the exception, but continue with native config approach - LOG.exception("Error getting the Couchbase status.") - - try: - out, err = utils.execute_with_timeout( - system.cmd_get_password_from_config, shell=True) - except exception.ProcessExecutionError: - LOG.exception("Error getting the root password from the " - "native Couchbase config file.") - return rd_instance.ServiceStatuses.SHUTDOWN - - config_pwd = out.strip() if out is not None else None - if not config_pwd or config_pwd == pwd: - LOG.debug("The root password from the native Couchbase config " - "file is either empty or already matches the " - "stored value.") - return rd_instance.ServiceStatuses.SHUTDOWN - - try: - status = self._get_status_from_couchbase(config_pwd) - except exception.ProcessExecutionError: - LOG.exception("Error getting Couchbase status using the " - "password parsed from the native Couchbase " - "config file.") - return rd_instance.ServiceStatuses.SHUTDOWN - - # if the parsed root password worked, update the stored value to - # avoid having to consult/parse the couchbase config file again. 
- LOG.debug("Updating the stored value for the Couchbase " - "root password.") - CouchbaseRootAccess().write_password_to_file(config_pwd) - return status - - def _get_status_from_couchbase(self, pwd): - out, err = utils.execute_with_timeout( - (system.cmd_couchbase_status % - {'IP': self.ip_address, 'PWD': pwd}), - shell=True) - server_stats = json.loads(out) - if not err and server_stats["clusterMembership"] == "active": - return rd_instance.ServiceStatuses.RUNNING - else: - return rd_instance.ServiceStatuses.SHUTDOWN - - def cleanup_stalled_db_services(self): - utils.execute_with_timeout(system.cmd_kill) - - -class CouchbaseRootAccess(object): - - @classmethod - def enable_root(cls, root_password=None): - user = models.DatastoreUser.root(password=root_password) - - if root_password: - CouchbaseRootAccess().write_password_to_file(root_password) - else: - CouchbaseRootAccess().set_password(user.password) - return user.serialize() - - def set_password(self, root_password): - self.ip_address = netutils.get_my_ipv4() - child = pexpect.spawn(system.cmd_reset_pwd % {'IP': self.ip_address}) - try: - child.expect('.*password.*') - child.sendline(root_password) - child.expect('.*(yes/no).*') - child.sendline('yes') - child.expect('.*successfully.*') - except pexpect.TIMEOUT: - child.delayafterclose = 1 - child.delayafterterminate = 1 - try: - child.close(force=True) - except pexpect.ExceptionPexpect: - # Close fails to terminate a sudo process on some OSes. - subprocess.call(['sudo', 'kill', str(child.pid)]) - - self.write_password_to_file(root_password) - - def write_password_to_file(self, root_password): - operating_system.create_directory(system.COUCHBASE_CONF_DIR, - as_root=True) - try: - tempfd, tempname = tempfile.mkstemp() - os.fchmod(tempfd, stat.S_IRUSR | stat.S_IWUSR) - if isinstance(root_password, six.text_type): - root_password = root_password.encode('utf-8') - os.write(tempfd, root_password) - os.fchmod(tempfd, stat.S_IRUSR) - os.close(tempfd) - except OSError as err: - message = _("An error occurred in saving password " - "(%(errno)s). %(strerror)s.") % { - "errno": err.errno, - "strerror": err.strerror} - LOG.exception(message) - raise RuntimeError(message) - - operating_system.move(tempname, system.pwd_file, as_root=True) - - @staticmethod - def get_password(): - pwd = "password" - if os.path.exists(system.pwd_file): - with open(system.pwd_file) as file: - pwd = file.readline().strip() - return pwd diff --git a/trove/guestagent/datastore/experimental/couchbase/system.py b/trove/guestagent/datastore/experimental/couchbase/system.py deleted file mode 100644 index 4b266f66fe..0000000000 --- a/trove/guestagent/datastore/experimental/couchbase/system.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2013 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from trove.common import cfg -CONF = cfg.CONF - -TIME_OUT = 1200 -COUCHBASE_DUMP_DIR = '/tmp/backups' -COUCHBASE_CONF_DIR = '/etc/couchbase' -COUCHBASE_WEBADMIN_PORT = '8091' -COUCHBASE_REST_API = 'http://localhost:' + COUCHBASE_WEBADMIN_PORT -BUCKETS_JSON = '/buckets.json' -SECRET_KEY = '/secret_key' -SERVICE_CANDIDATES = ["couchbase-server"] -INSTANCE_DATA_DIR = '/opt/couchbase/var/lib/couchbase/data' -cmd_couchbase_status = ('sudo /opt/couchbase/bin/couchbase-cli server-info ' - '-c %(IP)s:8091 -u root -p %(PWD)s') -cmd_node_init = ('sudo /opt/couchbase/bin/couchbase-cli node-init ' - '-c %(IP)s:8091 --node-init-data-path=%(data_path)s ' - '-u root -p %(PWD)s') -cmd_cluster_init = ('sudo /opt/couchbase/bin/couchbase-cli cluster-init ' - '-c %(IP)s:8091 --cluster-init-username=root ' - '--cluster-init-password=%(PWD)s ' - '--cluster-init-port=8091 ' - '-u root -p %(PWD)s') -cmd_kill = 'sudo pkill -u couchbase' -""" For optimal Couchbase operations, the VM's swappiness should be set to 0. -Reference link: http://docs.couchbase.com/couchbase-manual-2.5/cb-admin/#using-couchbase-in-the-cloud """ -cmd_set_swappiness = 'sudo sysctl vm.swappiness=0' -cmd_update_sysctl_conf = ('echo "vm.swappiness = 0" | sudo tee -a ' - '/etc/sysctl.conf') -cmd_reset_pwd = 'sudo /opt/couchbase/bin/cbreset_password %(IP)s:8091' -pwd_file = COUCHBASE_CONF_DIR + SECRET_KEY -cmd_get_password_from_config = ( - r"""sudo /opt/couchbase/bin/erl -noinput -eval 'case file:read_file(""" - r""""/opt/couchbase/var/lib/couchbase/config/config.dat") of {ok, B} ->""" - r"""io:format("~p~n", [binary_to_term(B)]) end.' -run init stop""" - r""" | grep '\[{"root",\[{password,' | awk -F\" '{print $4}'""") diff --git a/trove/guestagent/datastore/experimental/couchdb/manager.py b/trove/guestagent/datastore/experimental/couchdb/manager.py deleted file mode 100644 index aeb3f9be19..0000000000 --- a/trove/guestagent/datastore/experimental/couchdb/manager.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import instance as rd_instance -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.couchdb import service -from trove.guestagent.datastore import manager -from trove.guestagent import volume - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - """ - This is the CouchDB Manager class. It is dynamically loaded - based on the datastore of the Trove instance. 
- """ - - def __init__(self): - self.appStatus = service.CouchDBAppStatus() - self.app = service.CouchDBApp(self.appStatus) - super(Manager, self).__init__('couchdb') - - @property - def status(self): - return self.appStatus - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - self.app.install_if_needed(packages) - if device_path: - self.app.stop_db() - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - if os.path.exists(mount_point): - device.migrate_data(mount_point) - device.mount(mount_point) - LOG.debug('Mounted the volume (%s).', device_path) - self.app.start_db() - self.app.change_permissions() - self.app.make_host_reachable() - if backup_info: - self._perform_restore(backup_info, context, mount_point) - self.app.secure() - - def stop_db(self, context, do_not_start_on_reboot=False): - """ - Stop this CouchDB instance. - This method is called when the guest agent - gets a stop message from the taskmanager. - """ - LOG.debug("Stopping the CouchDB instance.") - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def restart(self, context): - """ - Restart this CouchDB instance. - This method is called when the guest agent - gets a restart message from the taskmanager. - """ - LOG.debug("Restarting the CouchDB instance.") - self.app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - LOG.debug("Starting CouchDB with configuration changes.") - self.app.start_db_with_conf_changes(config_contents) - - def _perform_restore(self, backup_info, context, restore_location): - """ - Restores all CouchDB databases and their documents from the - backup. 
- """ - LOG.info("Restoring database from backup %s", - backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - except Exception: - LOG.exception("Error performing restore from backup %s", - backup_info['id']) - self.status.set_status(rd_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully") - - def create_backup(self, context, backup_info): - LOG.debug("Creating backup for CouchDB.") - backup.backup(context, backup_info) - - def create_admin_user(self, context, password): - self.app.create_admin_user(password) - - def store_admin_password(self, context, password): - self.app.store_admin_password(password) - - def create_user(self, context, users): - LOG.debug("Creating user(s).") - return service.CouchDBAdmin().create_user(users) - - def delete_user(self, context, user): - LOG.debug("Deleting user.") - return service.CouchDBAdmin().delete_user(user) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("Listing users.") - return service.CouchDBAdmin().list_users(limit, marker, include_marker) - - def get_user(self, context, username, hostname): - LOG.debug("Show details of user %s.", username) - return service.CouchDBAdmin().get_user(username, hostname) - - def grant_access(self, context, username, hostname, databases): - LOG.debug("Granting access.") - return service.CouchDBAdmin().grant_access(username, databases) - - def revoke_access(self, context, username, hostname, database): - LOG.debug("Revoking access.") - return service.CouchDBAdmin().revoke_access(username, database) - - def list_access(self, context, username, hostname): - LOG.debug("Listing access.") - return service.CouchDBAdmin().list_access(username, hostname) - - def enable_root(self, context): - LOG.debug("Enabling root.") - return service.CouchDBAdmin().enable_root() - - def enable_root_with_password(self, context, root_password=None): - return service.CouchDBAdmin().enable_root(root_pwd=root_password) - - def is_root_enabled(self, context): - LOG.debug("Checking if root is enabled.") - return service.CouchDBAdmin().is_root_enabled() - - def create_database(self, context, databases): - LOG.debug("Creating database(s).") - return service.CouchDBAdmin().create_database(databases) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("Listing databases.") - return service.CouchDBAdmin().list_databases(limit, marker, - include_marker) - - def delete_database(self, context, database): - LOG.debug("Deleting database.") - return service.CouchDBAdmin().delete_database(database) diff --git a/trove/guestagent/datastore/experimental/couchdb/service.py b/trove/guestagent/datastore/experimental/couchdb/service.py deleted file mode 100644 index 71c6d8a85f..0000000000 --- a/trove/guestagent/datastore/experimental/couchdb/service.py +++ /dev/null @@ -1,584 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ast -import getpass -import json -from oslo_log import log as logging - -from trove.common import cfg -from trove.common.db.couchdb import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common import pagination -from trove.common.stream_codecs import JsonCodec -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.experimental.couchdb import system -from trove.guestagent.datastore import service -from trove.guestagent import pkg - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -packager = pkg.Package() - -COUCHDB_LIB_DIR = "/var/lib/couchdb" -COUCHDB_LOG_DIR = "/var/log/couchdb" -COUCHDB_CONFIG_DIR = "/etc/couchdb" -COUCHDB_BIN_DIR = "/var/run/couchdb" - - -class CouchDBApp(object): - """ - Handles installation and configuration of CouchDB - on a Trove instance. - """ - - def __init__(self, status, state_change_wait_time=None): - """ - Sets the default status and state_change_wait_time. - """ - self.state_change_wait_time = ( - state_change_wait_time if state_change_wait_time else - CONF.state_change_wait_time - ) - LOG.debug("state_change_wait_time = %s.", self.state_change_wait_time) - self.status = status - - def install_if_needed(self, packages): - """ - Install CouchDB if needed; do nothing if it is already installed. - """ - LOG.info('Preparing guest as a CouchDB server.') - if not packager.pkg_is_installed(packages): - LOG.debug("Installing packages: %s.", str(packages)) - packager.pkg_install(packages, {}, system.TIME_OUT) - LOG.info("Finished installing CouchDB server.") - - def change_permissions(self): - """ - When CouchDB is installed, a default user 'couchdb' is created. - In order to start/stop/restart the CouchDB service as the current - OS user, add the current OS user to the 'couchdb' group and provide - read/write access to the 'couchdb' group. - """ - try: - LOG.debug("Changing permissions.") - for dir in [COUCHDB_LIB_DIR, COUCHDB_LOG_DIR, - COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR]: - operating_system.chown(dir, 'couchdb', 'couchdb', as_root=True) - operating_system.chmod(dir, FileMode.ADD_GRP_RW, as_root=True) - - operating_system.change_user_group(getpass.getuser(), 'couchdb', - as_root=True) - LOG.debug("Successfully changed permissions.") - except exception.ProcessExecutionError: - LOG.exception("Error changing permissions.") - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - self.status.stop_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def start_db(self, update_db=False): - self.status.start_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - enable_on_boot=True, update_db=update_db) - - def restart(self): - self.status.restart_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time) - - def make_host_reachable(self): - try: - LOG.debug("Changing bind address to 0.0.0.0.") - self.stop_db() - out, err = utils.execute_with_timeout( - system.UPDATE_BIND_ADDRESS, shell=True - ) - self.start_db() - except exception.ProcessExecutionError: - LOG.exception("Error while trying to update bind address of" - " CouchDB server.")
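CouchDBAppStatus further down verifies liveness by querying the server's root endpoint (the curl probe described in its docstring); a minimal Python 3 stdlib sketch of the same check:

import json
import urllib.request

def couchdb_is_running(url='http://127.0.0.1:5984/'):
    # A healthy server answers {"couchdb": "Welcome", "version": ...}.
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            return json.loads(resp.read().decode()).get('couchdb') == 'Welcome'
    except OSError:
        return False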
Currently all this method does is start - the CouchDB server without any configuration changes. It will - need a real implementation to enable volume resize on the guest - agent side. - ''' - LOG.info("Starting CouchDB with configuration changes.") - self.start_db(True) - - def store_admin_password(self, password): - LOG.debug('Storing the admin password.') - creds = CouchDBCredentials(username=system.COUCHDB_ADMIN_NAME, - password=password) - creds.write(system.COUCHDB_ADMIN_CREDS_FILE) - return creds - - def create_admin_user(self, password): - ''' - Create the admin user, os_admin, for the CouchDB instance. - ''' - LOG.debug('Creating the admin user.') - creds = self.store_admin_password(password) - out, err = utils.execute_with_timeout( - system.COUCHDB_CREATE_ADMIN % {'password': creds.password}, - shell=True) - LOG.debug('Created admin user.') - - def secure(self): - ''' - Create the Trove admin user. - The service should not be running at this point. - ''' - self.start_db(update_db=False) - password = utils.generate_random_password() - self.create_admin_user(password) - LOG.debug("CouchDB secure complete.") - - @property - def admin_password(self): - creds = CouchDBCredentials() - creds.read(system.COUCHDB_ADMIN_CREDS_FILE) - return creds.password - - -class CouchDBAppStatus(service.BaseDbStatus): - """ - Handles all of the status updating for the CouchDB guest agent. - We can verify that CouchDB is running by running the command: - curl http://127.0.0.1:5984/ - The response will be similar to: - {"couchdb":"Welcome","version":"1.6.0"} - """ - - def _get_actual_db_status(self): - try: - out, err = utils.execute_with_timeout( - system.COUCHDB_SERVER_STATUS, shell=True - ) - LOG.debug("CouchDB status = %r", out) - server_status = json.loads(out) - status = server_status["couchdb"] - if status == 'Welcome': - LOG.debug("Status of CouchDB is active.") - return rd_instance.ServiceStatuses.RUNNING - else: - LOG.debug("Status of CouchDB is not active.") - return rd_instance.ServiceStatuses.SHUTDOWN - except exception.ProcessExecutionError: - LOG.exception("Error getting CouchDB status.") - return rd_instance.ServiceStatuses.SHUTDOWN - - -class CouchDBAdmin(object): - '''Handles administrative functions on CouchDB.''' - - # user is cached by making it a class attribute - admin_user = None - - def _admin_user(self): - if not type(self).admin_user: - creds = CouchDBCredentials() - creds.read(system.COUCHDB_ADMIN_CREDS_FILE) - user = models.CouchDBUser(creds.username, creds.password) - type(self).admin_user = user - return type(self).admin_user - - def _is_modifiable_user(self, name): - if name in cfg.get_ignored_users(): - return False - elif name == system.COUCHDB_ADMIN_NAME: - return False - return True - - def _is_modifiable_database(self, name): - return name not in cfg.get_ignored_dbs() - - def create_user(self, users): - LOG.debug("Creating user(s) for accessing CouchDB database(s).") - self._admin_user() - try: - for item in users: - user = models.CouchDBUser.deserialize(item) - try: - LOG.debug("Creating user: %s.", user.name) - utils.execute_with_timeout( - system.CREATE_USER_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'username': user.name, - 'password': user.password}, - shell=True) - except exception.ProcessExecutionError: - LOG.exception("Error creating user: %s.", user.name) - - for database in user.databases: - mydb = models.CouchDBSchema.deserialize(database) - try: -
LOG.debug("Granting user: %(user)s access to " - "database: %(db)s.", - {'user': user.name, 'db': mydb.name}) - out, err = utils.execute_with_timeout( - system.GRANT_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': mydb.name, - 'username': user.name}, - shell=True) - except exception.ProcessExecutionError as pe: - LOG.debug("Error granting user: %(user)s access to " - "database: %(db)s.", - {'user': user.name, 'db': mydb.name}) - LOG.debug(pe) - except exception.ProcessExecutionError as pe: - LOG.exception("An error occurred creating users: %s.", str(pe)) - - def delete_user(self, user): - LOG.debug("Delete a given CouchDB user.") - couchdb_user = models.CouchDBUser.deserialize(user) - db_names = self.list_database_names() - - for db in db_names: - userlist = [] - try: - out, err = utils.execute_with_timeout( - system.DB_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': db}, - shell=True) - except exception.ProcessExecutionError: - LOG.debug( - "Error while trying to get the users for database: %s.", - db) - continue - - evalout = ast.literal_eval(out) - if evalout: - members = evalout['members'] - names = members['names'] - for i in range(0, len(names)): - couchdb_user.databases = db - userlist.append(names[i]) - if couchdb_user.name in userlist: - userlist.remove(couchdb_user.name) - out2, err2 = utils.execute_with_timeout( - system.REVOKE_ACCESS_COMMAND % { - 'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': db, - 'username': userlist}, - shell=True) - - try: - out2, err = utils.execute_with_timeout( - system.DELETE_REV_ID % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password}, - shell=True) - evalout2 = ast.literal_eval(out2) - rows = evalout2['rows'] - userlist = [] - - for i in range(0, len(rows)): - row = rows[i] - username = "org.couchdb.user:" + couchdb_user.name - if row['key'] == username: - rev = row['value'] - revid = rev['rev'] - utils.execute_with_timeout( - system.DELETE_USER_COMMAND % { - 'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'username': couchdb_user.name, - 'revid': revid}, - shell=True) - except exception.ProcessExecutionError as pe: - LOG.exception( - "There was an error while deleting user: %s.", pe) - raise exception.GuestError(original_message=_( - "Unable to delete user: %s.") % couchdb_user.name) - - def list_users(self, limit=None, marker=None, include_marker=False): - '''List all users and the databases they have access to.''' - users = [] - db_names = self.list_database_names() - try: - out, err = utils.execute_with_timeout( - system.ALL_USERS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password}, - shell=True) - except exception.ProcessExecutionError: - LOG.debug("Error while trying to get list of all couchdb users") - evalout = ast.literal_eval(out) - rows = evalout['rows'] - userlist = [] - for i in range(0, len(rows)): - row = rows[i] - uname = row['key'] - if not self._is_modifiable_user(uname): - break - elif uname[17:]: - userlist.append(uname[17:]) - for i in range(len(userlist)): - user = models.CouchDBUser(userlist[i]) - for db in db_names: - try: - out2, err = utils.execute_with_timeout( - system.DB_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': db}, - 
shell=True) - except exception.ProcessExecutionError: - LOG.debug( - "Error while trying to get users for database: %s.", - db) - continue - evalout2 = ast.literal_eval(out2) - if evalout2: - members = evalout2['members'] - names = members['names'] - for i in range(0, len(names)): - if user.name == names[i]: - user.databases = db - users.append(user.serialize()) - next_marker = None - return users, next_marker - - def get_user(self, username, hostname): - '''Get information about the given user.''' - LOG.debug('Getting user %s.', username) - user = self._get_user(username, hostname) - if not user: - return None - return user.serialize() - - def _get_user(self, username, hostname): - user = models.CouchDBUser(username) - db_names = self.list_database_names() - for db in db_names: - try: - out, err = utils.execute_with_timeout( - system.DB_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': db}, - shell=True) - except exception.ProcessExecutionError: - LOG.debug( - "Error while trying to get the users for database: %s.", - db) - continue - - evalout = ast.literal_eval(out) - if evalout: - members = evalout['members'] - names = members['names'] - for i in range(0, len(names)): - if user.name == names[i]: - user.databases = db - return user - - def grant_access(self, username, databases): - if self._get_user(username, None).name != username: - raise exception.BadRequest(_( - 'Cannot grant access for non-existent user: ' - '%(user)s') % {'user': username}) - else: - user = models.CouchDBUser(username) - if not self._is_modifiable_user(user.name): - LOG.warning('Cannot grant access for reserved user ' - '%(user)s', {'user': username}) - if not user: - raise exception.BadRequest(_( - 'Cannot grant access for reserved or non-existent user ' - '%(user)s') % {'user': username}) - for db_name in databases: - out, err = utils.execute_with_timeout( - system.GRANT_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': db_name, - 'username': username}, - shell=True) - - def revoke_access(self, username, database): - userlist = [] - if self._is_modifiable_user(username): - out, err = utils.execute_with_timeout( - system.DB_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': database}, - shell=True) - evalout = ast.literal_eval(out) - members = evalout['members'] - names = members['names'] - for i in range(0, len(names)): - userlist.append(names[i]) - if username in userlist: - userlist.remove(username) - out2, err2 = utils.execute_with_timeout( - system.REVOKE_ACCESS_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': database, - 'username': userlist}, - shell=True) - - def list_access(self, username, hostname): - '''Returns a list of all databases which the user has access to.''' - user = self._get_user(username, hostname) - return user.databases - - def enable_root(self, root_pwd=None): - '''Create admin user root.''' - root_user = models.CouchDBUser.root(password=root_pwd) - out, err = utils.execute_with_timeout( - system.ENABLE_ROOT % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'password': root_pwd}, - shell=True) - return root_user.serialize() - - def is_root_enabled(self): - '''Check if user root exists.''' - out, err = utils.execute_with_timeout( - system.IS_ROOT_ENABLED % - {'admin_name':
self._admin_user().name, - 'admin_password': self._admin_user().password}, - shell=True) - evalout = ast.literal_eval(out) - if evalout['root']: - return True - else: - return False - - def create_database(self, databases): - '''Create the given database(s).''' - dbName = None - db_create_failed = [] - LOG.debug("Creating CouchDB databases.") - - for database in databases: - dbName = models.CouchDBSchema.deserialize(database).name - if self._is_modifiable_database(dbName): - LOG.debug('Creating CouchDB database %s', dbName) - try: - utils.execute_with_timeout( - system.CREATE_DB_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': dbName}, - shell=True) - except exception.ProcessExecutionError: - LOG.exception( - "There was an error creating database: %s.", dbName) - db_create_failed.append(dbName) - else: - LOG.warning('Cannot create database with a reserved name ' - '%(db)s', {'db': dbName}) - db_create_failed.append(dbName) - if len(db_create_failed) > 0: - LOG.error("Creating the following databases failed: %s.", - db_create_failed) - - def list_database_names(self): - '''Get the list of database names.''' - out, err = utils.execute_with_timeout( - system.LIST_DB_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password}, - shell=True) - dbnames_list = ast.literal_eval(out) - for hidden in cfg.get_ignored_dbs(): - if hidden in dbnames_list: - dbnames_list.remove(hidden) - return dbnames_list - - def list_databases(self, limit=None, marker=None, include_marker=False): - '''Lists all the CouchDB databases.''' - databases = [] - db_names = self.list_database_names() - pag_dblist, marker = pagination.paginate_list(db_names, limit, marker, - include_marker) - databases = [models.CouchDBSchema(db_name).serialize() - for db_name in pag_dblist] - LOG.debug('databases = %s', databases) - return databases, marker - - def delete_database(self, database): - '''Delete the specified database.''' - dbName = models.CouchDBSchema.deserialize(database).name - if self._is_modifiable_database(dbName): - try: - LOG.debug("Deleting CouchDB database: %s.", dbName) - utils.execute_with_timeout( - system.DELETE_DB_COMMAND % - {'admin_name': self._admin_user().name, - 'admin_password': self._admin_user().password, - 'dbname': dbName}, - shell=True) - except exception.ProcessExecutionError: - LOG.exception( - "There was an error while deleting database: %s.", dbName) - raise exception.GuestError(original_message=_( - "Unable to delete database: %s.") % dbName) - else: - LOG.warning('Cannot delete a reserved database ' - '%(db)s', {'db': dbName}) - - -class CouchDBCredentials(object): - """Handles storing/retrieving credentials.
Stored as json in files""" - - def __init__(self, username=None, password=None): - self.username = username - self.password = password - - def read(self, filename): - credentials = operating_system.read_file(filename, codec=JsonCodec()) - self.username = credentials['username'] - self.password = credentials['password'] - - def write(self, filename): - self.clear_file(filename) - credentials = {'username': self.username, - 'password': self.password} - operating_system.write_file(filename, credentials, codec=JsonCodec()) - operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW) - - @staticmethod - def clear_file(filename): - LOG.debug("Creating clean file %s", filename) - if operating_system.file_discovery([filename]): - operating_system.remove(filename) - # force file creation by just opening it - open(filename, 'wb') - operating_system.chmod(filename, - operating_system.FileMode.SET_USR_RW, - as_root=True) diff --git a/trove/guestagent/datastore/experimental/couchdb/system.py b/trove/guestagent/datastore/experimental/couchdb/system.py deleted file mode 100644 index 4376b15c12..0000000000 --- a/trove/guestagent/datastore/experimental/couchdb/system.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
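(The CouchDBCredentials class above persists the admin credentials as a small JSON file readable only by its owner. A rough standard-library equivalent of that read/write round trip, illustrative only; the deleted code goes through Trove's operating_system helpers and JsonCodec:

import json
import os


def write_creds(path, username, password):
    # Write the credentials, then restrict the file to owner read/write.
    with open(path, 'w') as f:
        json.dump({'username': username, 'password': password}, f)
    os.chmod(path, 0o600)


def read_creds(path):
    with open(path) as f:
        creds = json.load(f)
    return creds['username'], creds['password']

The 0o600 mode mirrors the FileMode.SET_USR_RW used above.)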
- -from os import path - -SERVICE_CANDIDATES = ["couchdb"] -UPDATE_BIND_ADDRESS = ( - "sudo sed -i -r 's/;bind_address = 127.0.0.1/bind_address = 0.0.0.0/' " - "/etc/couchdb/local.ini") -TIME_OUT = 1200 -COUCHDB_HTTPD_PORT = "5984" -COUCHDB_SERVER_STATUS = "curl http://127.0.0.1:" + COUCHDB_HTTPD_PORT -COUCHDB_ADMIN_NAME = 'os_admin' -COUCHDB_CREATE_ADMIN = ( - "curl -X PUT http://127.0.0.1:" + COUCHDB_HTTPD_PORT + - "/_config/admins/" + COUCHDB_ADMIN_NAME + " -d '\"%(password)s\"'") -COUCHDB_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), - '.os_couchdb_admin_creds.json') -CREATE_USER_COMMAND = ( - "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s -H \"Accept:" - " application/json\" -H \"Content-Type: application/json\" -d \'{\"name\"" - ": \"%(username)s\", \"password\": \"%(password)s\", \"roles\": []," - " \"type\":\"user\"}\'") -DELETE_REV_ID = ( - "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/_users/_all_docs") -DELETE_USER_COMMAND = ( - "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/_users/org.couchdb.user:%(username)s?rev=" - "%(revid)s") -ALL_USERS_COMMAND = ( - "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/_users/_all_docs") -DB_ACCESS_COMMAND = ( - "curl -s http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/%(dbname)s/_security") -GRANT_ACCESS_COMMAND = ( - "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/%(dbname)s/_security -d \'{\"admins\":{\"names\"" - ":[], \"roles\":[]}, \"members\":{\"" + "names\":[\"%(username)s\"],\"" - "roles\":[]}}\'") -REVOKE_ACCESS_COMMAND = ( - "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/%(dbname)s/_security" + " -d \'{\"admins\":{\"" - "names\":[], \"roles\":[]}, \"members\":{\"" + "names\":%(username)s,\"" - "roles\":[]}}\'") -ENABLE_ROOT = ( - "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:5984" - "/_config/admins/root -d '\"%(password)s\"'") -IS_ROOT_ENABLED = ( - "curl -s http://%(admin_name)s:%(admin_password)s@localhost:5984/_config/" - "admins") -CREATE_DB_COMMAND = ( - "curl -X PUT http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/%(dbname)s") -LIST_DB_COMMAND = ( - "curl -X GET http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/_all_dbs") -DELETE_DB_COMMAND = ( - "curl -X DELETE http://%(admin_name)s:%(admin_password)s@localhost:" + - COUCHDB_HTTPD_PORT + "/%(dbname)s") diff --git a/trove/guestagent/datastore/experimental/db2/__init__.py b/trove/guestagent/datastore/experimental/db2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/db2/manager.py b/trove/guestagent/datastore/experimental/db2/manager.py deleted file mode 100644 index ac4b0a0693..0000000000 --- a/trove/guestagent/datastore/experimental/db2/manager.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright 2015 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import instance as ds_instance -from trove.common.notification import EndNotification -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.db2 import service -from trove.guestagent.datastore import manager -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - """ - This is DB2 Manager class. It is dynamically loaded - based off of the datastore of the Trove instance. - """ - def __init__(self): - self.appStatus = service.DB2AppStatus() - self.app = service.DB2App(self.appStatus) - self.admin = service.DB2Admin() - super(Manager, self).__init__('db2') - - @property - def status(self): - return self.appStatus - - @property - def configuration_manager(self): - return self.app.configuration_manager - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - if device_path: - device = volume.VolumeDevice(device_path) - device.unmount_device(device_path) - device.format() - if os.path.exists(mount_point): - device.migrate_data(mount_point) - device.mount(mount_point) - LOG.debug("Mounted the volume.") - self.app.update_hostname() - self.app.change_ownership(mount_point) - self.app.start_db() - if backup_info: - self._perform_restore(backup_info, context, mount_point) - if config_contents: - self.app.configuration_manager.save_configuration( - config_contents) - - def restart(self, context): - """ - Restart this DB2 instance. - This method is called when the guest agent - gets a restart message from the taskmanager. - """ - LOG.debug("Restart a DB2 server instance.") - self.app.restart() - - def stop_db(self, context, do_not_start_on_reboot=False): - """ - Stop this DB2 instance. - This method is called when the guest agent - gets a stop message from the taskmanager. 
- """ - LOG.debug("Stop a given DB2 server instance.") - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def create_database(self, context, databases): - LOG.debug("Creating database(s) %s.", databases) - with EndNotification(context): - self.admin.create_database(databases) - - def delete_database(self, context, database): - LOG.debug("Deleting database %s.", database) - with EndNotification(context): - return self.admin.delete_database(database) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("Listing all databases.") - return self.admin.list_databases(limit, marker, include_marker) - - def create_user(self, context, users): - LOG.debug("Create user(s).") - with EndNotification(context): - self.admin.create_user(users) - - def delete_user(self, context, user): - LOG.debug("Delete a user %s.", user) - with EndNotification(context): - self.admin.delete_user(user) - - def get_user(self, context, username, hostname): - LOG.debug("Show details of user %s.", username) - return self.admin.get_user(username, hostname) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("List all users.") - return self.admin.list_users(limit, marker, include_marker) - - def list_access(self, context, username, hostname): - LOG.debug("List all the databases the user has access to.") - return self.admin.list_access(username, hostname) - - def start_db_with_conf_changes(self, context, config_contents): - LOG.debug("Starting DB2 with configuration changes.") - self.app.start_db_with_conf_changes(config_contents) - - def _perform_restore(self, backup_info, context, restore_location): - LOG.info("Restoring database from backup %s.", backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - except Exception: - LOG.exception("Error performing restore from backup %s.", - backup_info['id']) - self.status.set_status(ds_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully.") - - def create_backup(self, context, backup_info): - LOG.debug("Creating backup.") - backup.backup(context, backup_info) - - def update_overrides(self, context, overrides, remove=False): - LOG.debug("Updating overrides.") - if remove: - self.app.remove_overrides() - else: - self.app.update_overrides(context, overrides) - - def apply_overrides(self, context, overrides): - if overrides: - LOG.debug("Applying overrides: %s", str(overrides)) - self.app.apply_overrides(overrides) diff --git a/trove/guestagent/datastore/experimental/db2/service.py b/trove/guestagent/datastore/experimental/db2/service.py deleted file mode 100644 index d863a073fa..0000000000 --- a/trove/guestagent/datastore/experimental/db2/service.py +++ /dev/null @@ -1,626 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from oslo_log import log as logging -from oslo_utils import encodeutils - -from trove.common import cfg -from trove.common.db import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common.stream_codecs import PropertiesCodec -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common import guestagent_utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.db2 import system -from trove.guestagent.datastore import service - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -MOUNT_POINT = CONF.db2.mount_point -FAKE_CFG = os.path.join(MOUNT_POINT, "db2.cfg.fake") -DB2_DEFAULT_CFG = os.path.join(MOUNT_POINT, "db2_default_dbm.cfg") - - -class DB2App(object): - """ - Handles installation and configuration of DB2 - on a Trove instance. - """ - def __init__(self, status, state_change_wait_time=None): - LOG.debug("Initialize DB2App.") - self.state_change_wait_time = ( - state_change_wait_time if state_change_wait_time else - CONF.state_change_wait_time - ) - LOG.debug("state_change_wait_time = %s.", self.state_change_wait_time) - self.status = status - self.dbm_default_config = {} - self.init_config() - ''' - If DB2 guest agent has been configured for online backups, - every database that is created will be configured for online - backups. Since online backups are done using archive logging, - we need to create a directory to store the archived logs. - ''' - if CONF.db2.backup_strategy == 'DB2OnlineBackup': - create_db2_dir(system.DB2_ARCHIVE_LOGS_DIR) - - def init_config(self): - if not operating_system.exists(MOUNT_POINT, True): - operating_system.create_directory(MOUNT_POINT, - system.DB2_INSTANCE_OWNER, - system.DB2_INSTANCE_OWNER, - as_root=True) - """ - The database manager configuration file - db2systm is stored under the - /home/db2inst1/sqllib directory. To update the configuration - parameters, DB2 recommends using the command - UPDATE DBM CONFIGURATION - commands instead of directly updating the config file. - - The existing PropertiesCodec implementation has been reused to handle - text-file operations. Configuration overrides are implemented using - the ImportOverrideStrategy of the guestagent configuration manager. - """ - LOG.debug("Initialize DB2 configuration") - revision_dir = ( - guestagent_utils.build_file_path( - os.path.join(MOUNT_POINT, - os.path.dirname(system.DB2_INSTANCE_OWNER)), - ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) - ) - if not operating_system.exists(FAKE_CFG): - operating_system.write_file(FAKE_CFG, '', as_root=True) - operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER, - system.DB2_INSTANCE_OWNER, as_root=True) - self.configuration_manager = ( - ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER, - system.DB2_INSTANCE_OWNER, - PropertiesCodec(delimiter='='), - requires_root=True, - override_strategy=ImportOverrideStrategy( - revision_dir, "cnf")) - ) - ''' - Below we are getting the database manager default configuration and - saving it to the DB2_DEFAULT_CFG file. This is done to help with - correctly resetting the configurations to the original values when - user wants to detach a user-defined configuration group from an - instance. 
DB2 provides a command - to reset the database manager configuration parameters - (RESET DBM CONFIGURATION) but this command resets all the - configuration parameters to the system defaults. When we build a - DB2 guest image there are certain configuration parameters like - SVCENAME which we set so that the instance can start correctly. - Hence resetting these values to the system defaults would leave - the instance in an unstable state. Instead, the recommended way - to reset a subset of configuration parameters is to save the - output of GET DBM CONFIGURATION of the original configuration - and then call UPDATE DBM CONFIGURATION to reset the values. - http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/ - com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html - ''' - if not operating_system.exists(DB2_DEFAULT_CFG): - run_command(system.GET_DBM_CONFIGURATION % { - "dbm_config": DB2_DEFAULT_CFG}) - self.process_default_dbm_config() - - def process_default_dbm_config(self): - """ - Once the default database manager configuration is saved to - DB2_DEFAULT_CFG, we try to store the configuration parameters - and values into a dictionary object, dbm_default_config. For - example, a sample content of the database manager configuration - file looks like this: - Buffer pool (DFT_MON_BUFPOOL) = OFF - We need to process this so that we key it on the configuration - parameter DFT_MON_BUFPOOL. - """ - with open(DB2_DEFAULT_CFG) as cfg_file: - for line in cfg_file: - if '=' in line: - item = line.rstrip('\n').split(' = ') - fIndex = item[0].rfind('(') - lIndex = item[0].rfind(')') - if fIndex > -1: - param = item[0][fIndex + 1: lIndex] - value = item[1] - ''' - Some of the configuration parameters have the keyword - AUTOMATIC to indicate that DB2 will automatically - adjust the setting depending on system resources. - For some configuration parameters, DB2 also allows - setting a starting value along with the AUTOMATIC - setting. In the configuration parameter listing, - this is displayed as: - MON_HEAP_SZ = AUTOMATIC(90) - This can be set using the following command: - db2 update dbm cfg using mon_heap_sz 90 automatic - ''' - if not value: - value = 'NULL' - elif 'AUTOMATIC' in value: - fIndex = item[1].rfind('(') - lIndex = item[1].rfind(')') - if fIndex > -1: - default_value = item[1][fIndex + 1: lIndex] - value = default_value + " AUTOMATIC" - self.dbm_default_config.update({param: value}) - - def update_hostname(self): - """ - When the DB2 server is installed, it uses the hostname of the - instance where the image was built. This needs to be updated - to reflect the guest instance. - """ - LOG.debug("Update the hostname of the DB2 instance.") - try: - run_command(system.UPDATE_HOSTNAME, - superuser='root') - except exception.ProcessExecutionError: - raise RuntimeError(_("Command to update the hostname failed.")) - - def change_ownership(self, mount_point): - """ - When the DB2 server instance is installed, it does not have the - DB2 local database directory created (/home/db2inst1/db2inst1). - This gets created when we mount the cinder volume. So we need - to change ownership of this directory to the DB2 instance user - - db2inst1.
- """ - LOG.debug("Changing ownership of the DB2 data directory.") - try: - operating_system.chown(mount_point, - system.DB2_INSTANCE_OWNER, - system.DB2_INSTANCE_OWNER, - recursive=False, as_root=True) - except exception.ProcessExecutionError: - raise RuntimeError(_( - "Command to change ownership of DB2 data directory failed.")) - - def _enable_db_on_boot(self): - LOG.debug("Enable DB on boot.") - try: - run_command(system.ENABLE_AUTOSTART) - except exception.ProcessExecutionError: - raise RuntimeError(_( - "Command to enable DB2 server on boot failed.")) - - def _disable_db_on_boot(self): - LOG.debug("Disable DB2 on boot.") - try: - run_command(system.DISABLE_AUTOSTART) - except exception.ProcessExecutionError: - raise RuntimeError(_( - "Command to disable DB2 server on boot failed.")) - - def start_db_with_conf_changes(self, config_contents): - LOG.info("Starting DB2 with configuration changes.") - self.configuration_manager.save_configuration(config_contents) - self.start_db(True) - - def start_db(self, update_db=False): - LOG.debug("Start the DB2 server instance.") - self._enable_db_on_boot() - try: - run_command(system.START_DB2) - except exception.ProcessExecutionError: - pass - - if not self.status.wait_for_real_status_to_change_to( - rd_instance.ServiceStatuses.RUNNING, - self.state_change_wait_time, update_db): - LOG.error("Start of DB2 server instance failed.") - self.status.end_restart() - raise RuntimeError(_("Could not start DB2.")) - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - LOG.debug("Stop the DB2 server instance.") - if do_not_start_on_reboot: - self._disable_db_on_boot() - try: - run_command(system.STOP_DB2) - except exception.ProcessExecutionError: - pass - - if not (self.status.wait_for_real_status_to_change_to( - rd_instance.ServiceStatuses.SHUTDOWN, - self.state_change_wait_time, update_db)): - LOG.error("Could not stop DB2.") - self.status.end_restart() - raise RuntimeError(_("Could not stop DB2.")) - - def restart(self): - LOG.debug("Restarting DB2 server instance.") - try: - self.status.begin_restart() - self.stop_db() - self.start_db() - finally: - self.status.end_restart() - - def update_overrides(self, context, overrides, remove=False): - if overrides: - self.apply_overrides(overrides) - - def remove_overrides(self): - config = self.configuration_manager.get_user_override() - self._reset_config(config) - self.configuration_manager.remove_user_override() - - def apply_overrides(self, overrides): - self._apply_config(overrides) - self.configuration_manager.apply_user_override(overrides) - - def _update_dbm_config(self, param, value): - try: - run_command( - system.UPDATE_DBM_CONFIGURATION % { - "parameter": param, - "value": value}) - except exception.ProcessExecutionError: - LOG.exception("Failed to update config %s", param) - raise - - def _reset_config(self, config): - try: - for k, v in config.items(): - default_cfg_value = self.dbm_default_config[k] - self._update_dbm_config(k, default_cfg_value) - except Exception: - LOG.exception("DB2 configuration reset failed.") - raise RuntimeError(_("DB2 configuration reset failed.")) - LOG.info("DB2 configuration reset completed.") - - def _apply_config(self, config): - try: - for k, v in config.items(): - self._update_dbm_config(k, v) - except Exception: - LOG.exception("DB2 configuration apply failed") - raise RuntimeError(_("DB2 configuration apply failed")) - LOG.info("DB2 config apply completed.") - - -class DB2AppStatus(service.BaseDbStatus): - """ - Handles all of the status updating 
for the DB2 guest agent. - """ - def _get_actual_db_status(self): - LOG.debug("Getting the status of the DB2 server instance.") - try: - out, err = utils.execute_with_timeout( - system.DB2_STATUS, shell=True) - if "0" not in out: - return rd_instance.ServiceStatuses.RUNNING - else: - return rd_instance.ServiceStatuses.SHUTDOWN - except exception.ProcessExecutionError: - LOG.exception("Error getting the DB2 server status.") - return rd_instance.ServiceStatuses.CRASHED - - -def run_command(command, superuser=system.DB2_INSTANCE_OWNER, - timeout=system.TIMEOUT): - return utils.execute_with_timeout("sudo", "su", "-", superuser, "-c", - command, timeout=timeout) - - -def create_db2_dir(dir_name): - if not operating_system.exists(dir_name, True): - operating_system.create_directory(dir_name, - system.DB2_INSTANCE_OWNER, - system.DB2_INSTANCE_OWNER, - as_root=True) - - -def remove_db2_dir(dir_name): - operating_system.remove(dir_name, - force=True, - as_root=True) - - -class DB2Admin(object): - """ - Handles administrative tasks on the DB2 instance. - """ - def create_database(self, databases): - """Create the given database(s).""" - dbName = None - db_create_failed = [] - LOG.debug("Creating DB2 databases.") - for item in databases: - mydb = models.DatastoreSchema.deserialize(item) - mydb.check_create() - dbName = mydb.name - LOG.debug("Creating DB2 database: %s.", dbName) - try: - run_command(system.CREATE_DB_COMMAND % {'dbname': dbName}) - except exception.ProcessExecutionError: - LOG.exception( - "There was an error creating database: %s.", dbName) - db_create_failed.append(dbName) - - ''' - Configure each database to do archive logging for online - backups. Once the database is configured, it will go in to a - BACKUP PENDING state. In this state, the database will not - be accessible for any operations. To get the database back to - normal mode, we have to do a full offline backup as soon as we - configure it for archive logging. 
- ''' - try: - if CONF.db2.backup_strategy == 'DB2OnlineBackup': - run_command(system.UPDATE_DB_LOG_CONFIGURATION % { - 'dbname': dbName}) - run_command(system.RECOVER_FROM_BACKUP_PENDING_MODE % { - 'dbname': dbName}) - except exception.ProcessExecutionError: - LOG.exception( - "There was an error while configuring the database for " - "online backup: %s.", dbName) - - if len(db_create_failed) > 0: - LOG.error("Creating the following databases failed: %s.", - db_create_failed) - - def delete_database(self, database): - """Delete the specified database.""" - dbName = None - try: - mydb = models.DatastoreSchema.deserialize(database) - mydb.check_delete() - dbName = mydb.name - LOG.debug("Deleting DB2 database: %s.", dbName) - run_command(system.DELETE_DB_COMMAND % {'dbname': dbName}) - except exception.ProcessExecutionError: - LOG.exception( - "There was an error while deleting database: %s.", dbName) - raise exception.GuestError(original_message=_( - "Unable to delete database: %s.") % dbName) - - def list_databases(self, limit=None, marker=None, include_marker=False): - LOG.debug("Listing all the DB2 databases.") - databases = [] - next_marker = None - - try: - out, err = run_command(system.LIST_DB_COMMAND) - dblist = out.split() - result = iter(dblist) - count = 0 - - if marker is not None: - try: - item = next(result) - while item != marker: - item = next(result) - - if item == marker: - marker = None - except StopIteration: - pass - - try: - item = next(result) - while item: - count = count + 1 - if (limit and count <= limit) or limit is None: - db2_db = models.DatastoreSchema(name=item) - LOG.debug("database = %s.", item) - next_marker = db2_db.name - databases.append(db2_db.serialize()) - item = next(result) - else: - next_marker = None - break - except StopIteration: - next_marker = None - LOG.debug("databases = %s.", str(databases)) - except exception.ProcessExecutionError as pe: - err_msg = encodeutils.exception_to_unicode(pe) - LOG.exception("An error occurred listing databases: %s.", - err_msg) - return databases, next_marker - - def create_user(self, users): - LOG.debug("Creating user(s) for accessing DB2 database(s).") - try: - for item in users: - user = models.DatastoreUser.deserialize(item) - user.check_create() - try: - LOG.debug("Creating OS user: %s.", user.name) - utils.execute_with_timeout( - system.CREATE_USER_COMMAND % { - 'login': user.name, - 'passwd': user.password}, shell=True) - except exception.ProcessExecutionError: - LOG.exception("Error creating user: %s.", user.name) - continue - - for database in user.databases: - mydb = models.DatastoreSchema.deserialize(database) - try: - LOG.debug("Granting user: %(user)s access to " - "database: %(db)s.", - {'user': user.name, 'db': mydb.name}) - run_command(system.GRANT_USER_ACCESS % { - 'dbname': mydb.name, 'login': user.name}) - except exception.ProcessExecutionError as pe: - LOG.debug("Error granting user: %(user)s access to " - "database: %(db)s.", - {'user': user.name, 'db': mydb.name}) - LOG.debug(pe) - except exception.ProcessExecutionError as pe: - LOG.exception("An error occurred creating users: %s.", str(pe)) - - def delete_user(self, user): - LOG.debug("Delete a given user.") - db2_user = models.DatastoreUser.deserialize(user) - db2_user.check_delete() - userName = db2_user.name - user_dbs = db2_user.databases - LOG.debug("For user %(user)s, databases to be deleted = %(dbs)r.", - {'user': userName, 'dbs': user_dbs}) - - if len(user_dbs) == 0: - databases =
self.list_access(db2_user.name, None) - else: - databases = user_dbs - - LOG.debug("databases for user = %r.", databases) - for database in databases: - mydb = models.DatastoreSchema.deserialize(database) - try: - run_command(system.REVOKE_USER_ACCESS % { - 'dbname': mydb.name, - 'login': userName}) - LOG.debug("Revoked access for user:%(user)s on " - "database:%(db)s.", - {'user': userName, 'db': mydb.name}) - except exception.ProcessExecutionError: - LOG.debug("Error occurred while revoking access to %s.", - mydb.name) - try: - utils.execute_with_timeout(system.DELETE_USER_COMMAND % { - 'login': db2_user.name.lower()}, shell=True) - except exception.ProcessExecutionError as pe: - LOG.exception( - "There was an error while deleting user: %s.", pe) - raise exception.GuestError(original_message=_( - "Unable to delete user: %s.") % userName) - - def list_users(self, limit=None, marker=None, include_marker=False): - LOG.debug( - "List all users for all the databases in a DB2 server instance.") - users = [] - user_map = {} - next_marker = None - count = 0 - - databases, marker = self.list_databases() - for database in databases: - db2_db = models.DatastoreSchema.deserialize(database) - out = None - try: - out, err = run_command( - system.LIST_DB_USERS % {'dbname': db2_db.name}) - except exception.ProcessExecutionError: - LOG.debug( - "There was an error while listing users for database: %s.", - db2_db.name) - continue - - userlist = [] - for item in out.split('\n'): - LOG.debug("item = %r", item) - user = item.split() if item != "" else None - LOG.debug("user = %r", user) - if (user is not None - and (user[0] not in cfg.get_ignored_users() - and user[1] == 'Y')): - userlist.append(user[0]) - result = iter(userlist) - - if marker is not None: - try: - item = next(result) - while item != marker: - item = next(result) - - if item == marker: - marker = None - except StopIteration: - pass - - try: - item = next(result) - - while item: - ''' - Check if the user has already been discovered. If so, - add this database to the database list for this user. - ''' - if item in user_map: - db2user = user_map.get(item) - db2user.databases = db2_db.name - item = next(result) - continue - ''' - If this user was not previously discovered, then add - this to the user's list. 
- ''' - count = count + 1 - if (limit and count <= limit) or limit is None: - db2_user = models.DatastoreUser(name=item, - databases=db2_db.name) - users.append(db2_user.serialize()) - user_map.update({item: db2_user}) - item = next(result) - else: - next_marker = None - break - except StopIteration: - next_marker = None - - if count == limit: - break - return users, next_marker - - def get_user(self, username, hostname): - LOG.debug("Get details of a given database user.") - user = self._get_user(username, hostname) - if not user: - return None - return user.serialize() - - def _get_user(self, username, hostname): - LOG.debug("Get details of a given database user %s.", username) - user = models.DatastoreUser(name=username) - databases, marker = self.list_databases() - out = None - for database in databases: - db2_db = models.DatastoreSchema.deserialize(database) - try: - out, err = run_command( - system.LIST_DB_USERS % {'dbname': db2_db.name}) - except exception.ProcessExecutionError: - LOG.debug( - "Error while trying to get the users for database: %s.", - db2_db.name) - continue - - for item in out.split('\n'): - user_access = item.split() if item != "" else None - if (user_access is not None and - user_access[0].lower() == username.lower() and - user_access[1] == 'Y'): - user.databases = db2_db.name - break - return user - - def list_access(self, username, hostname): - """ - Show all the databases to which the user has more than - USAGE granted. - """ - LOG.debug("Listing databases that user: %s has access to.", username) - user = self._get_user(username, hostname) - return user.databases diff --git a/trove/guestagent/datastore/experimental/db2/system.py b/trove/guestagent/datastore/experimental/db2/system.py deleted file mode 100644 index 28e10f5110..0000000000 --- a/trove/guestagent/datastore/experimental/db2/system.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2015 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
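(The list_databases()/list_users() hunks above implement marker-based pagination by hand over an iterator: skip through the marker, return at most limit items, and hand back a next_marker only when the page filled up. The same contract, condensed into a small sketch under those assumed semantics:

def paginate(names, limit=None, marker=None):
    result = iter(names)
    if marker is not None:
        for item in result:
            if item == marker:
                break  # everything up to and including the marker is skipped
    page = []
    for item in result:
        page.append(item)
        if limit is not None and len(page) >= limit:
            break
    # A next_marker is only meaningful when the page was cut off by the limit.
    next_marker = page[-1] if limit and len(page) == limit else None
    return page, next_marker


# e.g. paginate(['a', 'b', 'c', 'd'], limit=2, marker='a') -> (['b', 'c'], 'c'))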
- -from trove.common import cfg - -CONF = cfg.CONF -TIMEOUT = 1200 -DB2_INSTANCE_OWNER = "db2inst1" -MOUNT_POINT = CONF.db2.mount_point -DB2_BACKUP_DIR = MOUNT_POINT + "/backup" -DB2_ARCHIVE_LOGS_DIR = MOUNT_POINT + "/ArchiveLogs" -UPDATE_HOSTNAME = ( - 'source /home/db2inst1/sqllib/db2profile;' - 'db2set -g DB2SYSTEM="$(hostname)"') -ENABLE_AUTOSTART = ( - "/opt/ibm/db2/current/instance/db2iauto -on " + DB2_INSTANCE_OWNER) -DISABLE_AUTOSTART = ( - "/opt/ibm/db2/current/instance/db2iauto -off " + DB2_INSTANCE_OWNER) -START_DB2 = "db2start" -QUIESCE_DB2 = ("db2 QUIESCE INSTANCE DB2INST1 RESTRICTED ACCESS IMMEDIATE " - "FORCE CONNECTIONS") -UNQUIESCE_DB2 = "db2 UNQUIESCE INSTANCE DB2INST1" -STOP_DB2 = "db2 force application all; db2 terminate; db2stop" -DB2_STATUS = ("ps -ef | grep " + DB2_INSTANCE_OWNER + " | grep db2sysc |" - "grep -v grep | wc -l") -CREATE_DB_COMMAND = "db2 create database %(dbname)s" -DELETE_DB_COMMAND = "db2 drop database %(dbname)s" -LIST_DB_COMMAND = ( - "db2 list database directory | grep -B6 -i indirect | " - "grep 'Database name' | sed 's/.*= //'") -CREATE_USER_COMMAND = ( - 'sudo useradd -m -d /home/%(login)s %(login)s;' - 'sudo echo %(login)s:%(passwd)s |sudo chpasswd') -GRANT_USER_ACCESS = ( - "db2 connect to %(dbname)s; " - "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " - "ON DATABASE TO USER %(login)s; db2 connect reset") -DELETE_USER_COMMAND = 'sudo userdel -r %(login)s' -REVOKE_USER_ACCESS = ( - "db2 connect to %(dbname)s; " - "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " - "ON DATABASE FROM USER %(login)s; db2 connect reset") -LIST_DB_USERS = ( - "db2 +o connect to %(dbname)s; " - "db2 -x select grantee, dataaccessauth from sysibm.sysdbauth; " - "db2 connect reset") -OFFLINE_BACKUP_DB = "db2 backup database %(dbname)s to " + DB2_BACKUP_DIR -RESTORE_OFFLINE_DB = ( - "db2 restore database %(dbname)s from " + DB2_BACKUP_DIR) -GET_DB_SIZE = ( - "db2 +o connect to %(dbname)s;" - r"db2 call get_dbsize_info\(?, ?, ?, -1\) | " - "grep -A1 'DATABASESIZE' | grep 'Parameter Value' | sed 's/.*[:]//' |" - " tr -d '\n'; db2 +o connect reset") -GET_DB_NAMES = ("find /home/db2inst1/db2inst1/backup/ -type f -name '*.001' |" - " grep -Po \"(?<=backup/)[^.']*(?=\\.)\"") -GET_DBM_CONFIGURATION = "db2 get dbm configuration > %(dbm_config)s" -UPDATE_DBM_CONFIGURATION = ("db2 update database manager configuration using " - "%(parameter)s %(value)s") -UPDATE_DB_LOG_CONFIGURATION = ( - "db2 update database configuration for " - "%(dbname)s using LOGARCHMETH1 'DISK:" + DB2_ARCHIVE_LOGS_DIR + "'") -LOG_UTILIZATION = ( - "db2 +o connect to %(dbname)s;" - "db2 -x SELECT TOTAL_LOG_USED_KB FROM SYSIBMADM.LOG_UTILIZATION | " - "tr -d '\n';db2 +o connect reset") -ONLINE_BACKUP_DB = ( - "db2 backup database %(dbname)s ONLINE to " + - DB2_BACKUP_DIR + " INCLUDE LOGS") -RESTORE_ONLINE_DB = ( - "db2 RESTORE DATABASE %(dbname)s FROM " + DB2_BACKUP_DIR - + " LOGTARGET " + DB2_ARCHIVE_LOGS_DIR) -ROLL_FORWARD_DB = ( - "db2 ROLLFORWARD DATABASE %(dbname)s TO END OF BACKUP " - "AND COMPLETE OVERFLOW LOG PATH '(" + DB2_ARCHIVE_LOGS_DIR + ")'") -RECOVER_FROM_BACKUP_PENDING_MODE = ( - "db2 backup database %(dbname)s to /dev/null") diff --git a/trove/guestagent/datastore/experimental/mariadb/__init__.py b/trove/guestagent/datastore/experimental/mariadb/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/mariadb/manager.py b/trove/guestagent/datastore/experimental/mariadb/manager.py deleted file mode 100644 index 
9f0f7534a9..0000000000 --- a/trove/guestagent/datastore/experimental/mariadb/manager.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from trove.guestagent.datastore.experimental.mariadb import ( - service as mariadb_service) -from trove.guestagent.datastore.galera_common import manager as galera_manager -from trove.guestagent.datastore.mysql_common import service as mysql_service - - -class Manager(galera_manager.GaleraManager): - - def __init__(self): - super(Manager, self).__init__( - mariadb_service.MariaDBApp, - mysql_service.BaseMySqlAppStatus, - mariadb_service.MariaDBAdmin) diff --git a/trove/guestagent/datastore/experimental/mariadb/service.py b/trove/guestagent/datastore/experimental/mariadb/service.py deleted file mode 100644 index 22de268950..0000000000 --- a/trove/guestagent/datastore/experimental/mariadb/service.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
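(The deleted MariaDB manager configures the shared Galera manager purely through constructor injection: it passes in the datastore-specific app, status, and admin classes instead of overriding behavior. A toy version of that wiring, all names hypothetical:

class GaleraManagerBase(object):
    def __init__(self, app_cls, status_cls, admin_cls):
        self.app_cls = app_cls
        self.status_cls = status_cls
        self.admin_cls = admin_cls

    def build_app(self):
        # Late instantiation keeps the base manager datastore-agnostic.
        return self.app_cls(self.status_cls())


class MariaDBStatus(object):
    pass


class MariaDBApp(object):
    def __init__(self, status):
        self.status = status


class Manager(GaleraManagerBase):
    def __init__(self):
        super(Manager, self).__init__(MariaDBApp, MariaDBStatus, admin_cls=None)

This confines MySQL-variant differences to which classes get passed in.)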
-# - -from oslo_log import log as logging - -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.galera_common import service as galera_service -from trove.guestagent.datastore.mysql_common import service as mysql_service - -LOG = logging.getLogger(__name__) - - -class MariaDBApp(galera_service.GaleraApp): - - OS = operating_system.get_os() - - def __init__(self, status): - super(MariaDBApp, self).__init__( - status, mysql_service.BaseLocalSqlClient, - mysql_service.BaseKeepAliveConnection) - - @property - def service_candidates(self): - service_candidates = super(MariaDBApp, self).service_candidates - return { - operating_system.DEBIAN: ["mariadb"] + service_candidates, - operating_system.REDHAT: ["mariadb"], - operating_system.SUSE: service_candidates - }[self.OS] - - @property - def mysql_service(self): - result = super(MariaDBApp, self).mysql_service - if result['type'] == 'sysvinit': - result['cmd_bootstrap_galera_cluster'] = ( - "sudo service %s bootstrap" - % result['service']) - elif result['type'] == 'systemd': - if operating_system.find_executable('galera_new_cluster'): - result['cmd_bootstrap_galera_cluster'] = ( - "sudo galera_new_cluster") - else: - result['cmd_bootstrap_galera_cluster'] = ( - "sudo systemctl start %s@bootstrap.service" - % result['service']) - return result - - @property - def cluster_configuration(self): - return self.configuration_manager.get_value('galera') - - def _get_slave_status(self): - with self.local_sql_client(self.get_engine()) as client: - return client.execute('SHOW SLAVE STATUS').first() - - def _get_master_UUID(self): - slave_status = self._get_slave_status() - return slave_status and slave_status['Master_Server_Id'] or None - - def _get_gtid_executed(self): - with self.local_sql_client(self.get_engine()) as client: - return client.execute('SELECT @@global.gtid_binlog_pos').first()[0] - - def get_last_txn(self): - master_UUID = self._get_master_UUID() - last_txn_id = '0' - gtid_executed = self._get_gtid_executed() - for gtid_set in gtid_executed.split(','): - uuid_set = gtid_set.split('-') - if uuid_set[1] == master_UUID: - last_txn_id = uuid_set[-1] - break - return master_UUID, int(last_txn_id) - - def get_latest_txn_id(self): - LOG.info("Retrieving latest txn id.") - return self._get_gtid_executed() - - def wait_for_txn(self, txn): - LOG.info("Waiting on txn '%s'.", txn) - with self.local_sql_client(self.get_engine()) as client: - client.execute("SELECT MASTER_GTID_WAIT('%s')" % txn) - - -class MariaDBRootAccess(mysql_service.BaseMySqlRootAccess): - def __init__(self): - super(MariaDBRootAccess, self).__init__( - mysql_service.BaseLocalSqlClient, - MariaDBApp(mysql_service.BaseMySqlAppStatus.get())) - - -class MariaDBAdmin(mysql_service.BaseMySqlAdmin): - def __init__(self): - super(MariaDBAdmin, self).__init__( - mysql_service.BaseLocalSqlClient, MariaDBRootAccess(), - MariaDBApp) diff --git a/trove/guestagent/datastore/experimental/mongodb/__init__.py b/trove/guestagent/datastore/experimental/mongodb/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/mongodb/manager.py b/trove/guestagent/datastore/experimental/mongodb/manager.py deleted file mode 100644 index 16872c5295..0000000000 --- a/trove/guestagent/datastore/experimental/mongodb/manager.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import instance as ds_instance -from trove.common.notification import EndNotification -from trove.guestagent import backup -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.mongodb import service -from trove.guestagent.datastore.experimental.mongodb import system -from trove.guestagent.datastore import manager -from trove.guestagent import dbaas -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - - def __init__(self): - self.app = service.MongoDBApp() - super(Manager, self).__init__('mongodb') - - @property - def status(self): - return self.app.status - - @property - def configuration_manager(self): - return self.app.configuration_manager - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - self.app.install_if_needed(packages) - self.status.wait_for_database_service_start( - self.app.state_change_wait_time) - self.app.stop_db() - self.app.clear_storage() - mount_point = system.MONGODB_MOUNT_POINT - if device_path: - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - if os.path.exists(system.MONGODB_MOUNT_POINT): - device.migrate_data(mount_point) - device.mount(mount_point) - operating_system.chown(mount_point, - system.MONGO_USER, system.MONGO_USER, - as_root=True) - - LOG.debug("Mounted the volume %(path)s as %(mount)s.", - {'path': device_path, "mount": mount_point}) - - if config_contents: - # Save resolved configuration template first. - self.app.configuration_manager.save_configuration(config_contents) - - # Apply guestagent specific configuration changes. - self.app.apply_initial_guestagent_configuration( - cluster_config, mount_point) - - if not cluster_config: - # Create the Trove admin user. - self.app.secure() - - # Don't start mongos until add_config_servers is invoked, - # don't start members as they should already be running. 
- if not (self.app.is_query_router or self.app.is_cluster_member): - self.app.start_db(update_db=True) - - if not cluster_config and backup_info: - self._perform_restore(backup_info, context, mount_point, self.app) - if service.MongoDBAdmin().is_root_enabled(): - self.app.status.report_root(context) - - def restart(self, context): - LOG.debug("Restarting MongoDB.") - self.app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - LOG.debug("Starting MongoDB with configuration changes.") - self.app.start_db_with_conf_changes(config_contents) - - def stop_db(self, context, do_not_start_on_reboot=False): - LOG.debug("Stopping MongoDB.") - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def get_filesystem_stats(self, context, fs_path): - """Gets the filesystem stats for the path given.""" - LOG.debug("Getting file system status.") - # TODO(peterstac) - why is this hard-coded? - return dbaas.get_filesystem_volume_stats(system.MONGODB_MOUNT_POINT) - - def change_passwords(self, context, users): - LOG.debug("Changing password.") - with EndNotification(context): - return service.MongoDBAdmin().change_passwords(users) - - def update_attributes(self, context, username, hostname, user_attrs): - LOG.debug("Updating database attributes.") - with EndNotification(context): - return service.MongoDBAdmin().update_attributes(username, - user_attrs) - - def create_database(self, context, databases): - LOG.debug("Creating database(s).") - with EndNotification(context): - return service.MongoDBAdmin().create_database(databases) - - def create_user(self, context, users): - LOG.debug("Creating user(s).") - with EndNotification(context): - return service.MongoDBAdmin().create_users(users) - - def delete_database(self, context, database): - LOG.debug("Deleting database.") - with EndNotification(context): - return service.MongoDBAdmin().delete_database(database) - - def delete_user(self, context, user): - LOG.debug("Deleting user.") - with EndNotification(context): - return service.MongoDBAdmin().delete_user(user) - - def get_user(self, context, username, hostname): - LOG.debug("Getting user.") - return service.MongoDBAdmin().get_user(username) - - def grant_access(self, context, username, hostname, databases): - LOG.debug("Granting access.") - return service.MongoDBAdmin().grant_access(username, databases) - - def revoke_access(self, context, username, hostname, database): - LOG.debug("Revoking access.") - return service.MongoDBAdmin().revoke_access(username, database) - - def list_access(self, context, username, hostname): - LOG.debug("Listing access.") - return service.MongoDBAdmin().list_access(username) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("Listing databases.") - return service.MongoDBAdmin().list_databases(limit, marker, - include_marker) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - LOG.debug("Listing users.") - return service.MongoDBAdmin().list_users(limit, marker, include_marker) - - def enable_root(self, context): - LOG.debug("Enabling root.") - return service.MongoDBAdmin().enable_root() - - def enable_root_with_password(self, context, root_password=None): - return service.MongoDBAdmin().enable_root(root_password) - - def is_root_enabled(self, context): - LOG.debug("Checking if root is enabled.") - return service.MongoDBAdmin().is_root_enabled() - - def _perform_restore(self, backup_info, context, restore_location, app): - LOG.info("Restoring database from 
backup %s.", backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - except Exception: - LOG.exception("Error performing restore from backup %s.", - backup_info['id']) - self.status.set_status(ds_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully.") - - def create_backup(self, context, backup_info): - LOG.debug("Creating backup.") - with EndNotification(context): - backup.backup(context, backup_info) - - def update_overrides(self, context, overrides, remove=False): - LOG.debug("Updating overrides.") - if remove: - self.app.remove_overrides() - else: - self.app.update_overrides(context, overrides, remove) - - def apply_overrides(self, context, overrides): - LOG.debug("Overrides will be applied after restart.") - pass - - def add_members(self, context, members): - try: - LOG.debug("add_members called.") - LOG.debug("args: members=%s.", members) - self.app.add_members(members) - LOG.debug("add_members call has finished.") - except Exception: - self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) - raise - - def add_config_servers(self, context, config_servers): - try: - LOG.debug("add_config_servers called.") - LOG.debug("args: config_servers=%s.", config_servers) - self.app.add_config_servers(config_servers) - LOG.debug("add_config_servers call has finished.") - except Exception: - self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) - raise - - def add_shard(self, context, replica_set_name, replica_set_member): - try: - LOG.debug("add_shard called.") - LOG.debug("args: replica_set_name=%(name)s, " - "replica_set_member=%(member)s.", - {'name': replica_set_name, 'member': replica_set_member}) - self.app.add_shard(replica_set_name, replica_set_member) - LOG.debug("add_shard call has finished.") - except Exception: - self.app.status.set_status(ds_instance.ServiceStatuses.FAILED) - raise - - def get_key(self, context): - # Return the cluster key - LOG.debug("Getting the cluster key.") - return self.app.get_key() - - def prep_primary(self, context): - LOG.debug("Preparing to be primary member.") - self.app.prep_primary() - - def create_admin_user(self, context, password): - self.app.create_admin_user(password) - - def store_admin_password(self, context, password): - self.app.store_admin_password(password) - - def get_replica_set_name(self, context): - # Return this nodes replica set name - LOG.debug("Getting the replica set name.") - return self.app.replica_set_name - - def get_admin_password(self, context): - # Return the admin password from this instance - LOG.debug("Getting the admin password.") - return self.app.admin_password - - def is_shard_active(self, context, replica_set_name): - return self.app.is_shard_active(replica_set_name) diff --git a/trove/guestagent/datastore/experimental/mongodb/service.py b/trove/guestagent/datastore/experimental/mongodb/service.py deleted file mode 100644 index 668bfeed0d..0000000000 --- a/trove/guestagent/datastore/experimental/mongodb/service.py +++ /dev/null @@ -1,843 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging -from oslo_utils import netutils -import pymongo - -from trove.common import cfg -from trove.common.db.mongodb import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as ds_instance -from trove.common.stream_codecs import JsonCodec, SafeYamlCodec -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import OneFileOverrideStrategy -from trove.guestagent.common import guestagent_utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.mongodb import system -from trove.guestagent.datastore import service - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONFIG_FILE = operating_system.file_discovery(system.CONFIG_CANDIDATES) -MANAGER = CONF.datastore_manager if CONF.datastore_manager else 'mongodb' - -# Configuration group for clustering-related settings. -CNF_CLUSTER = 'clustering' - -MONGODB_PORT = CONF.mongodb.mongodb_port -CONFIGSVR_PORT = CONF.mongodb.configsvr_port - - -class MongoDBApp(object): - """Prepares DBaaS on a Guest container.""" - - def __init__(self): - self.state_change_wait_time = CONF.state_change_wait_time - - revision_dir = guestagent_utils.build_file_path( - os.path.dirname(CONFIG_FILE), - ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) - self.configuration_manager = ConfigurationManager( - CONFIG_FILE, system.MONGO_USER, system.MONGO_USER, - SafeYamlCodec(default_flow_style=False), - requires_root=True, - override_strategy=OneFileOverrideStrategy(revision_dir)) - - self.is_query_router = False - self.is_cluster_member = False - self.status = MongoDBAppStatus() - - def install_if_needed(self, packages): - """Prepare the guest machine with a MongoDB installation.""" - LOG.info("Preparing Guest as MongoDB.") - if not system.PACKAGER.pkg_is_installed(packages): - LOG.debug("Installing packages: %s.", str(packages)) - system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT) - LOG.info("Finished installing MongoDB server.") - - def _get_service_candidates(self): - if self.is_query_router: - return system.MONGOS_SERVICE_CANDIDATES - return system.MONGOD_SERVICE_CANDIDATES - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - self.status.stop_db_service( - self._get_service_candidates(), self.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def restart(self): - self.status.restart_db_service( - self._get_service_candidates(), self.state_change_wait_time) - - def start_db(self, update_db=False): - self.status.start_db_service( - self._get_service_candidates(), self.state_change_wait_time, - enable_on_boot=True, update_db=update_db) - - def update_overrides(self, context, overrides, remove=False): - if overrides: - self.configuration_manager.apply_user_override(overrides) - - def remove_overrides(self): - self.configuration_manager.remove_user_override() - - def start_db_with_conf_changes(self, config_contents): - LOG.info('Starting MongoDB 
with configuration changes.') - if self.status.is_running: - format = 'Cannot start_db_with_conf_changes because status is %s.' - LOG.debug(format, self.status) - raise RuntimeError(format % self.status) - LOG.info("Initiating config.") - self.configuration_manager.save_configuration(config_contents) - # The configuration template has to be updated with - # guestagent-controlled settings. - self.apply_initial_guestagent_configuration( - None, mount_point=system.MONGODB_MOUNT_POINT) - self.start_db(True) - - def apply_initial_guestagent_configuration( - self, cluster_config, mount_point=None): - LOG.debug("Applying initial configuration.") - - # Mongodb init scripts assume the PID-file path is writable by the - # database service. - # See: https://jira.mongodb.org/browse/SERVER-20075 - self._initialize_writable_run_dir() - - self.configuration_manager.apply_system_override( - {'processManagement.fork': False, - 'processManagement.pidFilePath': system.MONGO_PID_FILE, - 'systemLog.destination': 'file', - 'systemLog.path': system.MONGO_LOG_FILE, - 'systemLog.logAppend': True - }) - - if mount_point: - self.configuration_manager.apply_system_override( - {'storage.dbPath': mount_point}) - - if cluster_config is not None: - self._configure_as_cluster_instance(cluster_config) - else: - self._configure_network(MONGODB_PORT) - - def _initialize_writable_run_dir(self): - """Create a writable directory for Mongodb's runtime data - (e.g. PID-file). - """ - mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE) - LOG.debug("Initializing a runtime directory: %s", mongodb_run_dir) - operating_system.create_directory( - mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER, - force=True, as_root=True) - - def _configure_as_cluster_instance(self, cluster_config): - """Configure this guest as a cluster instance and return its - new status. - """ - if cluster_config['instance_type'] == "query_router": - self._configure_as_query_router() - elif cluster_config["instance_type"] == "config_server": - self._configure_as_config_server() - elif cluster_config["instance_type"] == "member": - self._configure_as_cluster_member( - cluster_config['replica_set_name']) - else: - LOG.error("Bad cluster configuration; instance type " - "given as %s.", cluster_config['instance_type']) - return ds_instance.ServiceStatuses.FAILED - - if 'key' in cluster_config: - self._configure_cluster_security(cluster_config['key']) - - def _configure_as_query_router(self): - LOG.info("Configuring instance as a cluster query router.") - self.is_query_router = True - - # FIXME(pmalik): We should really have a separate configuration - # template for the 'mongos' process. - # Remove all storage configurations from the template. - # They apply only to 'mongod' processes. - # Already applied overrides will be integrated into the base file and - # their current groups removed. - config = guestagent_utils.expand_dict( - self.configuration_manager.parse_configuration()) - if 'storage' in config: - LOG.debug("Removing 'storage' directives from the configuration " - "template.") - del config['storage'] - self.configuration_manager.save_configuration( - guestagent_utils.flatten_dict(config)) - - # Apply 'mongos' configuration. 
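# Editorial illustration (assumes the flatten/expand helpers behave as their
# use above implies): parse_configuration() yields dotted keys that
# expand_dict() nests and flatten_dict() restores, e.g.
#
#   flat = {'storage.dbPath': '/var/lib/mongodb', 'net.port': 27017}
#   expand_dict(flat)
#   # -> {'storage': {'dbPath': '/var/lib/mongodb'}, 'net': {'port': 27017}}
#
# so deleting config['storage'] drops every storage.* directive before the
# template is flattened and saved back.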
-        self._configure_network(MONGODB_PORT)
-        self.configuration_manager.apply_system_override(
-            {'sharding.configDB': ''}, CNF_CLUSTER)
-
-    def _configure_as_config_server(self):
-        LOG.info("Configuring instance as a cluster config server.")
-        self._configure_network(CONFIGSVR_PORT)
-        self.configuration_manager.apply_system_override(
-            {'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)
-
-    def _configure_as_cluster_member(self, replica_set_name):
-        LOG.info("Configuring instance as a cluster member.")
-        self.is_cluster_member = True
-        self._configure_network(MONGODB_PORT)
-        # We do not want these instances to think they are in a replica set
-        # yet, as that would prevent us from creating the admin user, so
-        # start mongod before updating the config. The replica set itself
-        # is initiated later by the cluster taskmanager.
-        self.start_db()
-        self.configuration_manager.apply_system_override(
-            {'replication.replSetName': replica_set_name}, CNF_CLUSTER)
-
-    def _configure_cluster_security(self, key_value):
-        """Force cluster key-file-based authentication.
-
-        This will enable RBAC.
-        """
-        # Store the cluster member authentication key.
-        self.store_key(key_value)
-
-        self.configuration_manager.apply_system_override(
-            {'security.clusterAuthMode': 'keyFile',
-             'security.keyFile': self.get_key_file()}, CNF_CLUSTER)
-
-    def _configure_network(self, port=None):
-        """Make the service accessible on the given port (or the default
-        port if none is given).
-        """
-        instance_ip = netutils.get_my_ipv4()
-        bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
-        options = {'net.bindIp': bind_interfaces_string}
-        if port is not None:
-            guestagent_utils.update_dict({'net.port': port}, options)
-
-        self.configuration_manager.apply_system_override(options)
-        self.status.set_host(instance_ip, port=port)
-
-    def clear_storage(self):
-        mount_point = "/var/lib/mongodb/*"
-        LOG.debug("Clearing storage at %s.", mount_point)
-        try:
-            operating_system.remove(mount_point, force=True, as_root=True)
-        except exception.ProcessExecutionError:
-            LOG.exception("Error clearing storage.")
-
-    def _has_config_db(self):
-        value_string = self.configuration_manager.get_value(
-            'sharding', {}).get('configDB')
-
-        return value_string is not None
-
-    # FIXME(pmalik): This method should really be called 'set_config_servers'.
-    # The current name suggests it adds more config servers, but it
-    # rather replaces the existing ones.
-    def add_config_servers(self, config_server_hosts):
-        """Set config servers on a query router (mongos) instance.
-        """
-        config_servers_string = ','.join(['%s:%s' % (host, CONFIGSVR_PORT)
-                                          for host in config_server_hosts])
-        LOG.info("Setting config servers: %s", config_servers_string)
-        self.configuration_manager.apply_system_override(
-            {'sharding.configDB': config_servers_string}, CNF_CLUSTER)
-        self.start_db(True)
-
-    def add_shard(self, replica_set_name, replica_set_member):
-        """
-        This method is used by query router (mongos) instances.
-        """
-        url = "%(rs)s/%(host)s:%(port)s"\
-            % {'rs': replica_set_name,
-               'host': replica_set_member,
-               'port': MONGODB_PORT}
-        MongoDBAdmin().add_shard(url)
-
-    def add_members(self, members):
-        """
-        This method is used by a replica-set member instance.
-        """
-        def check_initiate_status():
-            """
-            Check that replSetInitiate has completed and this node has
-            become the PRIMARY member.
-            """
-            status = MongoDBAdmin().get_repl_status()
-            return ((status["ok"] == 1) and
-                    (status["members"][0]["stateStr"] == "PRIMARY") and
-                    (status["myState"] == 1))
-
-        def check_rs_status():
-            """
-            Check that all members have joined the replica set and that
-            exactly one PRIMARY exists.
-            """
-            status = MongoDBAdmin().get_repl_status()
-            primary_count = 0
-
-            if status["ok"] != 1:
-                return False
-            if len(status["members"]) != (len(members) + 1):
-                return False
-            for rs_member in status["members"]:
-                if rs_member["state"] not in [1, 2, 7]:
-                    return False
-                if rs_member["health"] != 1:
-                    return False
-                if rs_member["state"] == 1:
-                    primary_count += 1
-
-            return primary_count == 1
-
-        MongoDBAdmin().rs_initiate()
-        # TODO(ramashri) see if hardcoded values can be removed
-        utils.poll_until(check_initiate_status, sleep_time=30,
-                         time_out=CONF.mongodb.add_members_timeout)
-
-        # Add the replica-set members.
-        MongoDBAdmin().rs_add_members(members)
-        # TODO(ramashri) see if hardcoded values can be removed
-        utils.poll_until(check_rs_status, sleep_time=10,
-                         time_out=CONF.mongodb.add_members_timeout)
-
-    def _set_localhost_auth_bypass(self, enabled):
-        """When active, the localhost exception allows connections from the
-        localhost interface to create the first user on the admin database.
-        The exception applies only when there are no users created in the
-        MongoDB instance.
-        """
-        self.configuration_manager.apply_system_override(
-            {'setParameter': {'enableLocalhostAuthBypass': enabled}})
-
-    def list_all_dbs(self):
-        return MongoDBAdmin().list_database_names()
-
-    def db_data_size(self, db_name):
-        schema = models.MongoDBSchema(db_name)
-        return MongoDBAdmin().db_stats(schema.serialize())['dataSize']
-
-    def admin_cmd_auth_params(self):
-        return MongoDBAdmin().cmd_admin_auth_params
-
-    def get_key_file(self):
-        return system.MONGO_KEY_FILE
-
-    def get_key(self):
-        return operating_system.read_file(
-            system.MONGO_KEY_FILE, as_root=True).rstrip()
-
-    def store_key(self, key):
-        """Store the cluster key."""
-        LOG.debug('Storing key for MongoDB cluster.')
-        operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
-        operating_system.chmod(system.MONGO_KEY_FILE,
-                               operating_system.FileMode.SET_USR_RO,
-                               as_root=True)
-        operating_system.chown(system.MONGO_KEY_FILE,
-                               system.MONGO_USER, system.MONGO_USER,
-                               as_root=True)
-
-    def store_admin_password(self, password):
-        LOG.debug('Storing admin password.')
-        creds = MongoDBCredentials(username=system.MONGO_ADMIN_NAME,
-                                   password=password)
-        creds.write(system.MONGO_ADMIN_CREDS_FILE)
-        return creds
-
-    def create_admin_user(self, password):
-        """Create the admin user while the localhost exception is active."""
-        LOG.debug('Creating the admin user.')
-        creds = self.store_admin_password(password)
-        user = models.MongoDBUser(name='admin.%s' % creds.username,
-                                  password=creds.password)
-        user.roles = system.MONGO_ADMIN_ROLES
-        # The driver engine is already cached, but we need to repoint it at
-        # localhost while the localhost auth bypass is in effect.
-        with MongoDBClient(None, host='localhost',
-                           port=MONGODB_PORT) as client:
-            MongoDBAdmin().create_validated_user(user, client=client)
-        # Now revert to the normal engine.
-        self.status.set_host(host=netutils.get_my_ipv4(),
-                             port=MONGODB_PORT)
-        LOG.debug('Created admin user.')
-
-    def secure(self):
-        """Create the Trove admin user.
-
-        The service should not be running at this point.
-        This will enable role-based access control (RBAC) by default.
-        """
-        if self.status.is_running:
-            raise RuntimeError(_("Cannot secure the instance. 
" - "The service is still running.")) - - try: - self.configuration_manager.apply_system_override( - {'security.authorization': 'enabled'}) - self._set_localhost_auth_bypass(True) - self.start_db(update_db=False) - password = utils.generate_random_password() - self.create_admin_user(password) - LOG.debug("MongoDB secure complete.") - finally: - self._set_localhost_auth_bypass(False) - self.stop_db() - - def get_configuration_property(self, name, default=None): - """Return the value of a MongoDB configuration property. - """ - return self.configuration_manager.get_value(name, default) - - def prep_primary(self): - # Prepare the primary member of a replica set. - password = utils.generate_random_password() - self.create_admin_user(password) - self.restart() - - @property - def replica_set_name(self): - return MongoDBAdmin().get_repl_status()['set'] - - @property - def admin_password(self): - creds = MongoDBCredentials() - creds.read(system.MONGO_ADMIN_CREDS_FILE) - return creds.password - - def is_shard_active(self, replica_set_name): - shards = MongoDBAdmin().list_active_shards() - if replica_set_name in [shard['_id'] for shard in shards]: - LOG.debug('Replica set %s is active.', replica_set_name) - return True - else: - LOG.debug('Replica set %s is not active.', replica_set_name) - return False - - -class MongoDBAppStatus(service.BaseDbStatus): - - def __init__(self, host='localhost', port=None): - super(MongoDBAppStatus, self).__init__() - self.set_host(host, port=port) - - def set_host(self, host, port=None): - # This forces refresh of the 'pymongo' engine cached in the - # MongoDBClient class. - # Authentication is not required to check the server status. - MongoDBClient(None, host=host, port=port) - - def _get_actual_db_status(self): - try: - with MongoDBClient(None) as client: - client.server_info() - return ds_instance.ServiceStatuses.RUNNING - except (pymongo.errors.ServerSelectionTimeoutError, - pymongo.errors.AutoReconnect): - return ds_instance.ServiceStatuses.SHUTDOWN - except Exception: - LOG.exception("Error getting MongoDB status.") - - return ds_instance.ServiceStatuses.SHUTDOWN - - def cleanup_stalled_db_services(self): - pid, err = utils.execute_with_timeout(system.FIND_PID, shell=True) - utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) - - -class MongoDBAdmin(object): - """Handles administrative tasks on MongoDB.""" - - # user is cached by making it a class attribute - admin_user = None - - def _admin_user(self): - if not type(self).admin_user: - creds = MongoDBCredentials() - creds.read(system.MONGO_ADMIN_CREDS_FILE) - user = models.MongoDBUser( - 'admin.%s' % creds.username, - creds.password - ) - type(self).admin_user = user - return type(self).admin_user - - @property - def cmd_admin_auth_params(self): - """Returns a list of strings that constitute MongoDB command line - authentication parameters. - """ - user = self._admin_user() - return ['--username', user.username, - '--password', user.password, - '--authenticationDatabase', user.database.name] - - def _create_user_with_client(self, user, client): - """Run the add user command.""" - client[user.database.name].add_user( - user.username, password=user.password, roles=user.roles - ) - - def create_validated_user(self, user, client=None): - """Creates a user on their database. The caller should ensure that - this action is valid. 
-        :param user: a MongoDBUser object
-        """
-        LOG.debug('Creating user %(user)s on database %(db)s with roles '
-                  '%(role)s.',
-                  {'user': user.username, 'db': user.database.name,
-                   'role': str(user.roles)})
-        if client:
-            self._create_user_with_client(user, client)
-        else:
-            with MongoDBClient(self._admin_user()) as admin_client:
-                self._create_user_with_client(user, admin_client)
-
-    def create_users(self, users):
-        """Create the given user(s).
-        :param users: list of serialized user objects
-        """
-        with MongoDBClient(self._admin_user()) as client:
-            for item in users:
-                user = models.MongoDBUser.deserialize(item)
-                # this could be called to create multiple users at once;
-                # catch exceptions, log the message, and continue
-                try:
-                    user.check_create()
-                    if self._get_user_record(user.name, client=client):
-                        raise ValueError(_('User with name %(user)s already '
-                                           'exists.') % {'user': user.name})
-                    self.create_validated_user(user, client=client)
-                except (ValueError, pymongo.errors.PyMongoError) as e:
-                    LOG.error(e)
-                    LOG.warning('Skipping creation of user with name '
-                                '%(user)s', {'user': user.name})
-
-    def delete_validated_user(self, user):
-        """Deletes a user from their database. The caller should ensure that
-        this action is valid.
-        :param user: a MongoDBUser object
-        """
-        LOG.debug('Deleting user %(user)s from database %(db)s.',
-                  {'user': user.username, 'db': user.database.name})
-        with MongoDBClient(self._admin_user()) as admin_client:
-            admin_client[user.database.name].remove_user(user.username)
-
-    def delete_user(self, user):
-        """Delete the given user.
-        :param user: a serialized user object
-        """
-        user = models.MongoDBUser.deserialize(user)
-        user.check_delete()
-        self.delete_validated_user(user)
-
-    def _get_user_record(self, name, client=None):
-        """Get the user's record."""
-        user = models.MongoDBUser(name)
-        if user.is_ignored:
-            LOG.warning('Skipping retrieval of user with reserved '
-                        'name %(user)s', {'user': user.name})
-            return None
-        if client:
-            user_info = client.admin.system.users.find_one(
-                {'user': user.username, 'db': user.database.name})
-        else:
-            with MongoDBClient(self._admin_user()) as admin_client:
-                user_info = admin_client.admin.system.users.find_one(
-                    {'user': user.username, 'db': user.database.name})
-        if not user_info:
-            return None
-        user.roles = user_info['roles']
-        return user
-
-    def get_existing_user(self, name):
-        """Check that a user exists."""
-        user = self._get_user_record(name)
-        if not user:
-            raise ValueError(_('User with name %(user)s does not '
-                               'exist.') % {'user': name})
-        return user
-
-    def get_user(self, name):
-        """Get information for the given user."""
-        LOG.debug('Getting user %s.', name)
-        user = self._get_user_record(name)
-        if not user:
-            return None
-        return user.serialize()
-
-    def list_users(self, limit=None, marker=None, include_marker=False):
-        """Get a list of all users."""
-        users = []
-        with MongoDBClient(self._admin_user()) as admin_client:
-            for user_info in admin_client.admin.system.users.find():
-                user = models.MongoDBUser(name=user_info['_id'])
-                user.roles = user_info['roles']
-                if not user.is_ignored:
-                    users.append(user)
-        LOG.debug('users = %s', users)
-        return guestagent_utils.serialize_list(
-            users,
-            limit=limit, marker=marker, include_marker=include_marker)
-
-    def change_passwords(self, users):
-        with MongoDBClient(self._admin_user()) as admin_client:
-            for item in users:
-                user = models.MongoDBUser.deserialize(item)
-                # this could be called to change multiple passwords at once;
-                # catch exceptions, 
log the message, and continue - try: - user.check_create() - self.get_existing_user(user.name) - self.create_validated_user(user, admin_client) - LOG.debug('Changing password for user %(user)s', - {'user': user.name}) - self._create_user_with_client(user, admin_client) - except (ValueError, pymongo.errors.PyMongoError) as e: - LOG.error(e) - LOG.warning('Skipping password change for user with ' - 'name %(user)s', {'user': user.name}) - - def update_attributes(self, name, user_attrs): - """Update user attributes.""" - user = self.get_existing_user(name) - password = user_attrs.get('password') - if password: - user.password = password - self.change_passwords([user.serialize()]) - if user_attrs.get('name'): - LOG.warning('Changing user name is not supported.') - if user_attrs.get('host'): - LOG.warning('Changing user host is not supported.') - - def enable_root(self, password=None): - """Create a user 'root' with role 'root'.""" - if not password: - LOG.debug('Generating root user password.') - password = utils.generate_random_password() - root_user = models.MongoDBUser.root(password=password) - root_user.roles = {'db': 'admin', 'role': 'root'} - root_user.check_create() - self.create_validated_user(root_user) - return root_user.serialize() - - def is_root_enabled(self): - """Check if user 'admin.root' exists.""" - with MongoDBClient(self._admin_user()) as admin_client: - return bool(admin_client.admin.system.users.find_one( - {'roles.role': 'root'} - )) - - def _update_user_roles(self, user): - with MongoDBClient(self._admin_user()) as admin_client: - admin_client[user.database.name].add_user( - user.username, roles=user.roles - ) - - def grant_access(self, username, databases): - """Adds the RW role to the user for each specified database.""" - user = self.get_existing_user(username) - for db_name in databases: - # verify the database name - models.MongoDBSchema(db_name) - role = {'db': db_name, 'role': 'readWrite'} - if role not in user.roles: - LOG.debug('Adding role %(role)s to user %(user)s.', - {'role': str(role), 'user': username}) - user.roles = role - else: - LOG.debug('User %(user)s already has role %(role)s.', - {'user': username, 'role': str(role)}) - LOG.debug('Updating user %s.', username) - self._update_user_roles(user) - - def revoke_access(self, username, database): - """Removes the RW role from the user for the specified database.""" - user = self.get_existing_user(username) - # verify the database name - models.MongoDBSchema(database) - role = {'db': database, 'role': 'readWrite'} - LOG.debug('Removing role %(role)s from user %(user)s.', - {'role': str(role), 'user': username}) - user.revoke_role(role) - LOG.debug('Updating user %s.', username) - self._update_user_roles(user) - - def list_access(self, username): - """Returns a list of all databases for which the user has the RW role. - """ - user = self.get_existing_user(username) - return user.databases - - def create_database(self, databases): - """Forces creation of databases. - For each new database creates a dummy document in a dummy collection, - then drops the collection. 
- """ - tmp = 'dummy' - with MongoDBClient(self._admin_user()) as admin_client: - for item in databases: - schema = models.MongoDBSchema.deserialize(item) - schema.check_create() - LOG.debug('Creating MongoDB database %s', schema.name) - db = admin_client[schema.name] - # FIXME(songjian):can not create database with null content, - # so create a collection - # db[tmp].insert({'dummy': True}) - # db.drop_collection(tmp) - db.create_collection(tmp) - - def delete_database(self, database): - """Deletes the database.""" - with MongoDBClient(self._admin_user()) as admin_client: - schema = models.MongoDBSchema.deserialize(database) - schema.check_delete() - admin_client.drop_database(schema.name) - - def list_database_names(self): - """Get the list of database names.""" - with MongoDBClient(self._admin_user()) as admin_client: - return admin_client.database_names() - - def list_databases(self, limit=None, marker=None, include_marker=False): - """Lists the databases.""" - databases = [] - for db_name in self.list_database_names(): - schema = models.MongoDBSchema(name=db_name) - if not schema.is_ignored(): - databases.append(schema) - LOG.debug('databases = ' + str(databases)) - return guestagent_utils.serialize_list( - databases, - limit=limit, marker=marker, include_marker=include_marker) - - def add_shard(self, url): - """Runs the addShard command.""" - with MongoDBClient(self._admin_user()) as admin_client: - admin_client.admin.command({'addShard': url}) - - def get_repl_status(self): - """Runs the replSetGetStatus command.""" - with MongoDBClient(self._admin_user()) as admin_client: - status = admin_client.admin.command('replSetGetStatus') - LOG.debug('Replica set status: %s', status) - return status - - def rs_initiate(self): - """Runs the replSetInitiate command.""" - with MongoDBClient(self._admin_user()) as admin_client: - return admin_client.admin.command('replSetInitiate') - - def rs_add_members(self, members): - """Adds the given members to the replication set.""" - with MongoDBClient(self._admin_user()) as admin_client: - # get the current config, add the new members, then save it - config = admin_client.admin.command('replSetGetConfig')['config'] - config['version'] += 1 - next_id = max([m['_id'] for m in config['members']]) + 1 - for member in members: - config['members'].append({'_id': next_id, 'host': member}) - next_id += 1 - admin_client.admin.command('replSetReconfig', config) - - def db_stats(self, database, scale=1): - """Gets the stats for the given database.""" - with MongoDBClient(self._admin_user()) as admin_client: - db_name = models.MongoDBSchema.deserialize(database).name - return admin_client[db_name].command('dbStats', scale=scale) - - def list_active_shards(self): - """Get a list of shards active in this cluster.""" - with MongoDBClient(self._admin_user()) as admin_client: - return [shard for shard in admin_client.config.shards.find()] - - -class MongoDBClient(object): - """A wrapper to manage a MongoDB connection.""" - - # engine information is cached by making it a class attribute - engine = {} - - def __init__(self, user, host=None, port=None): - """Get the client. Specifying host and/or port updates cached values. 
- :param user: MongoDBUser instance used to authenticate - :param host: server address, defaults to localhost - :param port: server port, defaults to 27017 - :return: - """ - new_client = False - self._logged_in = False - if not type(self).engine: - # no engine cached - type(self).engine['host'] = (host if host else 'localhost') - type(self).engine['port'] = (port if port else MONGODB_PORT) - new_client = True - elif host or port: - LOG.debug("Updating MongoDB client.") - if host: - type(self).engine['host'] = host - if port: - type(self).engine['port'] = port - new_client = True - if new_client: - host = type(self).engine['host'] - port = type(self).engine['port'] - LOG.debug("Creating MongoDB client to %(host)s:%(port)s.", - {'host': host, 'port': port}) - type(self).engine['client'] = pymongo.MongoClient(host=host, - port=port, - connect=False) - self.session = type(self).engine['client'] - if user: - db_name = user.database.name - LOG.debug("Authenticating MongoDB client on %s.", db_name) - self._db = self.session[db_name] - self._db.authenticate(user.username, password=user.password) - self._logged_in = True - - def __enter__(self): - return self.session - - def __exit__(self, exc_type, exc_value, traceback): - LOG.debug("Disconnecting from MongoDB.") - if self._logged_in: - self._db.logout() - self.session.close() - - -class MongoDBCredentials(object): - """Handles storing/retrieving credentials. Stored as json in files.""" - - def __init__(self, username=None, password=None): - self.username = username - self.password = password - - def read(self, filename): - credentials = operating_system.read_file(filename, codec=JsonCodec()) - self.username = credentials['username'] - self.password = credentials['password'] - - def write(self, filename): - credentials = {'username': self.username, - 'password': self.password} - - operating_system.write_file(filename, credentials, codec=JsonCodec()) - operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW) diff --git a/trove/guestagent/datastore/experimental/mongodb/system.py b/trove/guestagent/datastore/experimental/mongodb/system.py deleted file mode 100644 index b36509ebd2..0000000000 --- a/trove/guestagent/datastore/experimental/mongodb/system.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2014 Mirantis, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
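# Editorial sketch (not part of the original patch; values are hypothetical):
# how the service classes above fit together. MongoDBClient caches host/port
# at class level and yields a pymongo client when used as a context manager;
# MongoDBCredentials round-trips the admin credentials as JSON.
#
#   with MongoDBClient(None, host='10.0.0.5', port=27017) as client:
#       client.server_info()   # liveness probe; no authentication required
#
#   creds = MongoDBCredentials(username='os_admin', password='s3cret')
#   creds.write('/home/ubuntu/.os_mongo_admin_creds.json')
#   # resulting file: {"username": "os_admin", "password": "s3cret"}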
- -from os import path - -from trove.guestagent.common import operating_system -from trove.guestagent import pkg - -OS_NAME = operating_system.get_os() - -MONGODB_MOUNT_POINT = "/var/lib/mongodb" -MONGO_PID_FILE = '/var/run/mongodb/mongodb.pid' -MONGO_LOG_FILE = '/var/log/mongodb/mongod.log' - -CONFIG_CANDIDATES = ["/etc/mongodb.conf", "/etc/mongod.conf"] -MONGO_ADMIN_NAME = 'os_admin' -MONGO_ADMIN_ROLES = [{'db': 'admin', 'role': 'userAdminAnyDatabase'}, - {'db': 'admin', 'role': 'dbAdminAnyDatabase'}, - {'db': 'admin', 'role': 'clusterAdmin'}, - {'db': 'admin', 'role': 'readWriteAnyDatabase'}] -MONGO_ADMIN_CREDS_FILE = path.join(path.expanduser('~'), - '.os_mongo_admin_creds.json') -MONGO_KEY_FILE = '/etc/mongo_key' -MONGOS_SERVICE_CANDIDATES = ["mongos"] -MONGOD_SERVICE_CANDIDATES = ["mongodb", "mongod"] -MONGODB_KILL = "sudo kill %s" -FIND_PID = "ps xaco pid,cmd | awk '/mongo(d|db|s)/ {print $1}'" -TIME_OUT = 1000 - -MONGO_USER = {operating_system.REDHAT: "mongod", - operating_system.DEBIAN: "mongodb", - operating_system.SUSE: "mongod"}[OS_NAME] - -PACKAGER = pkg.Package() diff --git a/trove/guestagent/datastore/experimental/percona/__init__.py b/trove/guestagent/datastore/experimental/percona/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/percona/manager.py b/trove/guestagent/datastore/experimental/percona/manager.py deleted file mode 100644 index b51d1fb68e..0000000000 --- a/trove/guestagent/datastore/experimental/percona/manager.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_utils import importutils -from trove.guestagent.datastore.mysql_common import manager - - -MYSQL_APP = ("trove.guestagent.datastore.experimental.percona.service." - "MySqlApp") -MYSQL_APP_STATUS = ("trove.guestagent.datastore.experimental.percona.service." - "MySqlAppStatus") -MYSQL_ADMIN = ("trove.guestagent.datastore.experimental.percona.service." - "MySqlAdmin") - - -class Manager(manager.MySqlManager): - - def __init__(self): - mysql_app = importutils.import_class(MYSQL_APP) - mysql_app_status = importutils.import_class(MYSQL_APP_STATUS) - mysql_admin = importutils.import_class(MYSQL_ADMIN) - - super(Manager, self).__init__(mysql_app, mysql_app_status, mysql_admin) diff --git a/trove/guestagent/datastore/experimental/percona/service.py b/trove/guestagent/datastore/experimental/percona/service.py deleted file mode 100644 index a021e2430f..0000000000 --- a/trove/guestagent/datastore/experimental/percona/service.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from trove.guestagent.datastore.mysql_common import service - -LOG = logging.getLogger(__name__) - - -class KeepAliveConnection(service.BaseKeepAliveConnection): - pass - - -class MySqlAppStatus(service.BaseMySqlAppStatus): - pass - - -class LocalSqlClient(service.BaseLocalSqlClient): - pass - - -class MySqlApp(service.BaseMySqlApp): - def __init__(self, status): - super(MySqlApp, self).__init__(status, LocalSqlClient, - KeepAliveConnection) - - def _get_slave_status(self): - with self.local_sql_client(self.get_engine()) as client: - return client.execute('SHOW SLAVE STATUS').first() - - def _get_master_UUID(self): - slave_status = self._get_slave_status() - return slave_status and slave_status['Master_UUID'] or None - - def _get_gtid_executed(self): - with self.local_sql_client(self.get_engine()) as client: - return client.execute('SELECT @@global.gtid_executed').first()[0] - - def get_last_txn(self): - master_UUID = self._get_master_UUID() - last_txn_id = '0' - gtid_executed = self._get_gtid_executed() - for gtid_set in gtid_executed.split(','): - uuid_set = gtid_set.split(':') - if uuid_set[0] == master_UUID: - last_txn_id = uuid_set[-1].split('-')[-1] - break - return master_UUID, int(last_txn_id) - - def get_latest_txn_id(self): - LOG.info("Retrieving latest txn id.") - return self._get_gtid_executed() - - def wait_for_txn(self, txn): - LOG.info("Waiting on txn '%s'.", txn) - with self.local_sql_client(self.get_engine()) as client: - client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')" - % txn) - - -class MySqlRootAccess(service.BaseMySqlRootAccess): - def __init__(self): - super(MySqlRootAccess, self).__init__(LocalSqlClient, - MySqlApp(MySqlAppStatus.get())) - - -class MySqlAdmin(service.BaseMySqlAdmin): - def __init__(self): - super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(), - MySqlApp) diff --git a/trove/guestagent/datastore/experimental/postgresql/__init__.py b/trove/guestagent/datastore/experimental/postgresql/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/postgresql/manager.py b/trove/guestagent/datastore/experimental/postgresql/manager.py deleted file mode 100644 index 6d4d790900..0000000000 --- a/trove/guestagent/datastore/experimental/postgresql/manager.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
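# Worked example (editorial; illustrative values) for the Percona GTID
# helpers above. With
#   gtid_executed = "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5"
# and Master_UUID = "3e11fa47-71ca-11e1-9e33-c80aa9429562",
# get_last_txn() splits each GTID set on ':', matches the UUID, and takes
# the upper bound of the "1-5" interval, returning
#   ("3e11fa47-71ca-11e1-9e33-c80aa9429562", 5)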
- - -import os - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common.db.postgresql import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as trove_instance -from trove.common.notification import EndNotification -from trove.common import utils -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.postgresql.service import ( - PgSqlAdmin) -from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp -from trove.guestagent.datastore import manager -from trove.guestagent import guest_log -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class Manager(manager.Manager): - - def __init__(self, manager_name='postgresql'): - super(Manager, self).__init__(manager_name) - self._app = None - self._admin = None - - @property - def status(self): - return self.app.status - - @property - def app(self): - if self._app is None: - self._app = self.build_app() - return self._app - - def build_app(self): - return PgSqlApp() - - @property - def admin(self): - if self._admin is None: - self._admin = self.app.build_admin() - return self._admin - - @property - def configuration_manager(self): - return self.app.configuration_manager - - def get_datastore_log_defs(self): - owner = self.app.pgsql_owner - long_query_time = CONF.get(self.manager).get( - 'guest_log_long_query_time') - general_log_file = self.build_log_file_name( - self.GUEST_LOG_DEFS_GENERAL_LABEL, owner, - datastore_dir=self.app.pgsql_log_dir) - general_log_dir, general_log_filename = os.path.split(general_log_file) - return { - self.GUEST_LOG_DEFS_GENERAL_LABEL: { - self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER, - self.GUEST_LOG_USER_LABEL: owner, - self.GUEST_LOG_FILE_LABEL: general_log_file, - self.GUEST_LOG_ENABLE_LABEL: { - 'logging_collector': 'on', - 'log_destination': self._quote_str('stderr'), - 'log_directory': self._quote_str(general_log_dir), - 'log_filename': self._quote_str(general_log_filename), - 'log_statement': self._quote_str('all'), - 'debug_print_plan': 'on', - 'log_min_duration_statement': long_query_time, - }, - self.GUEST_LOG_DISABLE_LABEL: { - 'logging_collector': 'off', - }, - self.GUEST_LOG_RESTART_LABEL: True, - }, - } - - def _quote_str(self, value): - return "'%s'" % value - - def grant_access(self, context, username, hostname, databases): - self.admin.grant_access(context, username, hostname, databases) - - def revoke_access(self, context, username, hostname, database): - self.admin.revoke_access(context, username, hostname, database) - - def list_access(self, context, username, hostname): - return self.admin.list_access(context, username, hostname) - - def update_overrides(self, context, overrides, remove=False): - self.app.update_overrides(context, overrides, remove) - - def apply_overrides(self, context, overrides): - self.app.apply_overrides(context, overrides) - - def reset_configuration(self, context, configuration): - self.app.reset_configuration(context, configuration) - - def start_db_with_conf_changes(self, context, config_contents): - self.app.start_db_with_conf_changes(context, config_contents) - - def create_database(self, context, databases): - with EndNotification(context): - self.admin.create_database(context, databases) - - def delete_database(self, context, database): - with EndNotification(context): - self.admin.delete_database(context, database) - - def list_databases( - self, context, limit=None, 
marker=None, include_marker=False): - return self.admin.list_databases( - context, limit=limit, marker=marker, include_marker=include_marker) - - def install(self, context, packages): - self.app.install(context, packages) - - def stop_db(self, context, do_not_start_on_reboot=False): - self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def restart(self, context): - self.app.restart() - self.set_guest_log_status(guest_log.LogStatus.Restart_Completed) - - def pre_upgrade(self, context): - LOG.debug('Preparing Postgresql for upgrade.') - self.app.status.begin_restart() - self.app.stop_db() - mount_point = self.app.pgsql_base_data_dir - upgrade_info = self.app.save_files_pre_upgrade(mount_point) - upgrade_info['mount_point'] = mount_point - return upgrade_info - - def post_upgrade(self, context, upgrade_info): - LOG.debug('Finalizing Postgresql upgrade.') - self.app.stop_db() - if 'device' in upgrade_info: - self.mount_volume(context, mount_point=upgrade_info['mount_point'], - device_path=upgrade_info['device'], - write_to_fstab=True) - self.app.restore_files_post_upgrade(upgrade_info) - self.app.start_db() - - def is_root_enabled(self, context): - return self.app.is_root_enabled(context) - - def enable_root(self, context, root_password=None): - return self.app.enable_root(context, root_password=root_password) - - def disable_root(self, context): - self.app.disable_root(context) - - def enable_root_with_password(self, context, root_password=None): - return self.app.enable_root_with_password( - context, - root_password=root_password) - - def create_user(self, context, users): - with EndNotification(context): - self.admin.create_user(context, users) - - def list_users( - self, context, limit=None, marker=None, include_marker=False): - return self.admin.list_users( - context, limit=limit, marker=marker, include_marker=include_marker) - - def delete_user(self, context, user): - with EndNotification(context): - self.admin.delete_user(context, user) - - def get_user(self, context, username, hostname): - return self.admin.get_user(context, username, hostname) - - def change_passwords(self, context, users): - with EndNotification(context): - self.admin.change_passwords(context, users) - - def update_attributes(self, context, username, hostname, user_attrs): - with EndNotification(context): - self.admin.update_attributes( - context, - username, - hostname, - user_attrs) - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, config_contents, - root_password, overrides, cluster_config, snapshot): - self.app.install(context, packages) - LOG.debug("Waiting for database first boot.") - if (self.app.status.wait_for_real_status_to_change_to( - trove_instance.ServiceStatuses.RUNNING, - CONF.state_change_wait_time, - False)): - LOG.debug("Stopping database prior to initial configuration.") - self.app.stop_db() - - if device_path: - device = volume.VolumeDevice(device_path) - device.format() - if os.path.exists(mount_point): - device.migrate_data(mount_point) - device.mount(mount_point) - self.configuration_manager.save_configuration(config_contents) - self.app.apply_initial_guestagent_configuration() - - os_admin = models.PostgreSQLUser(self.app.ADMIN_USER) - - if backup_info: - backup.restore(context, backup_info, '/tmp') - self.app.set_current_admin_user(os_admin) - - if snapshot: - LOG.info("Found snapshot info: %s", str(snapshot)) - self.attach_replica(context, snapshot, snapshot['config']) - - self.app.start_db() - - if not 
backup_info: - self.app.secure(context) - - self._admin = PgSqlAdmin(os_admin) - - if not cluster_config and self.is_root_enabled(context): - self.status.report_root(context) - - def create_backup(self, context, backup_info): - with EndNotification(context): - self.app.enable_backups() - backup.backup(context, backup_info) - - def backup_required_for_replication(self, context): - return self.replication.backup_required_for_replication() - - def attach_replica(self, context, replica_info, slave_config): - self.replication.enable_as_slave(self.app, replica_info, None) - - def detach_replica(self, context, for_failover=False): - replica_info = self.replication.detach_slave(self.app, for_failover) - return replica_info - - def enable_as_master(self, context, replica_source_config): - self.app.enable_backups() - self.replication.enable_as_master(self.app, None) - - def make_read_only(self, context, read_only): - """There seems to be no way to flag this at the database level in - PostgreSQL at the moment -- see discussion here: - http://www.postgresql.org/message-id/flat/CA+TgmobWQJ-GCa_tWUc4=80A - 1RJ2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com#CA+TgmobWQJ-GCa_tWUc4=80A1RJ - 2_+Rq3w_MqaVguk_q018dqw@mail.gmail.com - """ - pass - - def get_replica_context(self, context): - LOG.debug("Getting replica context.") - return self.replication.get_replica_context(self.app) - - def get_latest_txn_id(self, context): - if self.app.pg_is_in_recovery(): - lsn = self.app.pg_last_xlog_replay_location() - else: - lsn = self.app.pg_current_xlog_location() - LOG.info("Last xlog location found: %s", lsn) - return lsn - - def get_last_txn(self, context): - master_host = self.app.pg_primary_host() - repl_offset = self.get_latest_txn_id(context) - return master_host, repl_offset - - def wait_for_txn(self, context, txn): - if not self.app.pg_is_in_recovery(): - raise RuntimeError(_("Attempting to wait for a txn on a server " - "not in recovery mode!")) - - def _wait_for_txn(): - lsn = self.app.pg_last_xlog_replay_location() - LOG.info("Last xlog location found: %s", lsn) - return lsn >= txn - try: - utils.poll_until(_wait_for_txn, time_out=120) - except exception.PollTimeOut: - raise RuntimeError(_("Timeout occurred waiting for xlog " - "offset to change to '%s'.") % txn) - - def cleanup_source_on_replica_detach(self, context, replica_info): - LOG.debug("Calling cleanup_source_on_replica_detach") - self.replication.cleanup_source_on_replica_detach(self.app, - replica_info) - - def demote_replication_master(self, context): - LOG.debug("Calling demote_replication_master") - self.replication.demote_master(self.app) - - def get_replication_snapshot(self, context, snapshot_info, - replica_source_config=None): - LOG.debug("Getting replication snapshot.") - - self.app.enable_backups() - self.replication.enable_as_master(self.app, None) - - snapshot_id, log_position = ( - self.replication.snapshot_for_replication(context, self.app, None, - snapshot_info)) - - mount_point = CONF.get(self.manager).mount_point - volume_stats = self.get_filesystem_stats(context, mount_point) - - replication_snapshot = { - 'dataset': { - 'datastore_manager': self.manager, - 'dataset_size': volume_stats.get('used', 0.0), - 'volume_size': volume_stats.get('total', 0.0), - 'snapshot_id': snapshot_id - }, - 'replication_strategy': self.replication_strategy, - 'master': self.replication.get_master_ref(self.app, snapshot_info), - 'log_position': log_position - } - - return replication_snapshot diff --git 
a/trove/guestagent/datastore/experimental/postgresql/pgsql_query.py b/trove/guestagent/datastore/experimental/postgresql/pgsql_query.py deleted file mode 100644 index f22e5f3c02..0000000000 --- a/trove/guestagent/datastore/experimental/postgresql/pgsql_query.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# Copyright (c) 2016 Tesora, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class DatabaseQuery(object): - - @classmethod - def list(cls, ignore=()): - """Query to list all databases.""" - - statement = ( - "SELECT datname, pg_encoding_to_char(encoding), " - "datcollate FROM pg_database " - "WHERE datistemplate = false" - ) - - for name in ignore: - statement += " AND datname != '{name}'".format(name=name) - - return statement - - @classmethod - def create(cls, name, encoding=None, collation=None): - """Query to create a database.""" - - statement = "CREATE DATABASE \"{name}\"".format(name=name) - if encoding is not None: - statement += " ENCODING = '{encoding}'".format( - encoding=encoding, - ) - if collation is not None: - statement += " LC_COLLATE = '{collation}'".format( - collation=collation, - ) - - return statement - - @classmethod - def drop(cls, name): - """Query to drop a database.""" - - return "DROP DATABASE IF EXISTS \"{name}\"".format(name=name) - - -class UserQuery(object): - - @classmethod - def list(cls, ignore=()): - """Query to list all users.""" - - statement = ( - "SELECT usename, datname, pg_encoding_to_char(encoding), " - "datcollate FROM pg_catalog.pg_user " - "LEFT JOIN pg_catalog.pg_database " - "ON CONCAT(usename, '=CTc/os_admin') = ANY(datacl::text[]) " - "WHERE (datistemplate ISNULL OR datistemplate = false)") - if ignore: - for name in ignore: - statement += " AND usename != '{name}'".format(name=name) - - return statement - - @classmethod - def list_root(cls, ignore=()): - """Query to list all superuser accounts.""" - - statement = ( - "SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true" - ) - - for name in ignore: - statement += " AND usename != '{name}'".format(name=name) - - return statement - - @classmethod - def get(cls, name): - """Query to get a single user.""" - - return cls.list() + " AND usename = '{name}'".format(name=name) - - @classmethod - def create(cls, name, password, encrypt_password=None, *options): - """Query to create a user with a password.""" - - create_clause = "CREATE USER \"{name}\"".format(name=name) - with_clause = cls._build_with_clause( - password, encrypt_password, *options) - return ' '.join([create_clause, with_clause]) - - @classmethod - def _build_with_clause(cls, password, encrypt_password=None, *options): - tokens = ['WITH'] - if password: - # Do not specify the encryption option if 'encrypt_password' - # is None. PostgreSQL will use the configuration default. 
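# For instance (editorial note, derived from the surrounding code):
#   _build_with_clause('s3cret', encrypt_password=True)
#     -> "WITH ENCRYPTED PASSWORD 's3cret'"
#   _build_with_clause(None)
#     -> ''   (no WITH clause is emitted at all)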
-            if encrypt_password is True:
-                tokens.append('ENCRYPTED')
-            elif encrypt_password is False:
-                tokens.append('UNENCRYPTED')
-            tokens.append('PASSWORD')
-            tokens.append("'{password}'".format(password=password))
-        if options:
-            tokens.extend(options)
-
-        if len(tokens) > 1:
-            return ' '.join(tokens)
-
-        return ''
-
-    @classmethod
-    def update_password(cls, name, password, encrypt_password=None):
-        """Query to update the password for a user."""
-
-        return cls.alter_user(name, password, encrypt_password)
-
-    @classmethod
-    def alter_user(cls, name, password, encrypt_password=None, *options):
-        """Query to alter a user."""
-
-        alter_clause = "ALTER USER \"{name}\"".format(name=name)
-        with_clause = cls._build_with_clause(
-            password, encrypt_password, *options)
-        return ' '.join([alter_clause, with_clause])
-
-    @classmethod
-    def update_name(cls, old, new):
-        """Query to update the name of a user.
-        This statement also results in an automatic permission transfer to the
-        new username.
-        """
-
-        return "ALTER USER \"{old}\" RENAME TO \"{new}\"".format(
-            old=old,
-            new=new,
-        )
-
-    @classmethod
-    def drop(cls, name):
-        """Query to drop a user."""
-
-        return "DROP USER \"{name}\"".format(name=name)
-
-
-class AccessQuery(object):
-
-    @classmethod
-    def grant(cls, user, database):
-        """Query to grant user access to a database."""
-
-        return "GRANT ALL ON DATABASE \"{database}\" TO \"{user}\"".format(
-            database=database,
-            user=user,
-        )
-
-    @classmethod
-    def revoke(cls, user, database):
-        """Query to revoke user access to a database."""
-
-        return "REVOKE ALL ON DATABASE \"{database}\" FROM \"{user}\"".format(
-            database=database,
-            user=user,
-        )
diff --git a/trove/guestagent/datastore/experimental/postgresql/service.py b/trove/guestagent/datastore/experimental/postgresql/service.py
deleted file mode 100644
index 52a24371ab..0000000000
--- a/trove/guestagent/datastore/experimental/postgresql/service.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-# Copyright (c) 2016 Tesora, Inc.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
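# Sample statements produced by the query builders above (editorial;
# identifiers are illustrative):
#   DatabaseQuery.create('db1', encoding='UTF8')
#     -> CREATE DATABASE "db1" ENCODING = 'UTF8'
#   UserQuery.update_name('alice', 'bob')
#     -> ALTER USER "alice" RENAME TO "bob"
#   AccessQuery.grant('bob', 'db1')
#     -> GRANT ALL ON DATABASE "db1" TO "bob"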
- -from collections import OrderedDict -import os -import re - -from oslo_log import log as logging -import psycopg2 - -from trove.common import cfg -from trove.common.db.postgresql import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance -from trove.common.stream_codecs import PropertiesCodec -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import OneFileOverrideStrategy -from trove.guestagent.common import guestagent_utils -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.experimental.postgresql import pgsql_query -from trove.guestagent.datastore import service -from trove.guestagent import pkg - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -BACKUP_CFG_OVERRIDE = 'PgBaseBackupConfig' -DEBUG_MODE_OVERRIDE = 'DebugLevelOverride' - - -class PgSqlApp(object): - - OS = operating_system.get_os() - LISTEN_ADDRESSES = ['*'] # Listen on all available IP (v4/v6) interfaces. - ADMIN_USER = 'os_admin' # Trove's administrative user. - - def __init__(self): - super(PgSqlApp, self).__init__() - - self._current_admin_user = None - self.status = PgSqlAppStatus(self.pgsql_extra_bin_dir) - - revision_dir = guestagent_utils.build_file_path( - os.path.dirname(self.pgsql_config), - ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) - self.configuration_manager = ConfigurationManager( - self.pgsql_config, self.pgsql_owner, self.pgsql_owner, - PropertiesCodec( - delimiter='=', - string_mappings={'on': True, 'off': False, "''": None}), - requires_root=True, - override_strategy=OneFileOverrideStrategy(revision_dir)) - - @property - def service_candidates(self): - return ['postgresql'] - - @property - def pgsql_owner(self): - return 'postgres' - - @property - def default_superuser_name(self): - return "postgres" - - @property - def pgsql_base_data_dir(self): - return '/var/lib/postgresql/' - - @property - def pgsql_pid_file(self): - return guestagent_utils.build_file_path(self.pgsql_run_dir, - 'postgresql.pid') - - @property - def pgsql_run_dir(self): - return '/var/run/postgresql/' - - @property - def pgsql_extra_bin_dir(self): - """Redhat and Ubuntu packages for PgSql do not place 'extra' important - binaries in /usr/bin, but rather in a directory like /usr/pgsql-9.4/bin - in the case of PostgreSQL 9.4 for RHEL/CentOS - """ - return { - operating_system.DEBIAN: '/usr/lib/postgresql/%s/bin/', - operating_system.REDHAT: '/usr/pgsql-%s/bin/', - operating_system.SUSE: '/usr/bin/' - }[self.OS] % self.pg_version[1] - - @property - def pgsql_config(self): - return self._find_config_file('postgresql.conf') - - @property - def pgsql_hba_config(self): - return self._find_config_file('pg_hba.conf') - - @property - def pgsql_ident_config(self): - return self._find_config_file('pg_ident.conf') - - def _find_config_file(self, name_pattern): - version_base = guestagent_utils.build_file_path(self.pgsql_config_dir, - self.pg_version[1]) - return sorted(operating_system.list_files_in_directory( - version_base, recursive=True, pattern=name_pattern, - as_root=True), key=len)[0] - - @property - def pgsql_config_dir(self): - return { - operating_system.DEBIAN: '/etc/postgresql/', - operating_system.REDHAT: '/var/lib/postgresql/', - operating_system.SUSE: '/var/lib/pgsql/' - }[self.OS] - - @property - def pgsql_log_dir(self): - return "/var/log/postgresql/" - - def 
build_admin(self): - return PgSqlAdmin(self.get_current_admin_user()) - - def update_overrides(self, context, overrides, remove=False): - if remove: - self.configuration_manager.remove_user_override() - elif overrides: - self.configuration_manager.apply_user_override(overrides) - - def set_current_admin_user(self, user): - self._current_admin_user = user - - def get_current_admin_user(self): - if self._current_admin_user is not None: - return self._current_admin_user - - if self.status.is_installed: - return models.PostgreSQLUser(self.ADMIN_USER) - - return models.PostgreSQLUser(self.default_superuser_name) - - def apply_overrides(self, context, overrides): - self.reload_configuration() - - def reload_configuration(self): - """Send a signal to the server, causing configuration files to be - reloaded by all server processes. - Active queries or connections to the database will not be - interrupted. - - NOTE: Do not use the 'SET' command as it only affects the current - session. - """ - self.build_admin().psql( - "SELECT pg_reload_conf()") - - def reset_configuration(self, context, configuration): - """Reset the PgSql configuration to the one given. - """ - config_contents = configuration['config_contents'] - self.configuration_manager.save_configuration(config_contents) - - def start_db_with_conf_changes(self, context, config_contents): - """Starts the PgSql instance with a new configuration.""" - if self.status.is_running: - raise RuntimeError(_("The service is still running.")) - - self.configuration_manager.save_configuration(config_contents) - # The configuration template has to be updated with - # guestagent-controlled settings. - self.apply_initial_guestagent_configuration() - self.start_db() - - def apply_initial_guestagent_configuration(self): - """Update guestagent-controlled configuration properties. - """ - LOG.debug("Applying initial guestagent configuration.") - file_locations = { - 'data_directory': self._quote(self.pgsql_data_dir), - 'hba_file': self._quote(self.pgsql_hba_config), - 'ident_file': self._quote(self.pgsql_ident_config), - 'external_pid_file': self._quote(self.pgsql_pid_file), - 'unix_socket_directories': self._quote(self.pgsql_run_dir), - 'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)), - 'port': cfg.get_configuration_property('postgresql_port')} - self.configuration_manager.apply_system_override(file_locations) - self._apply_access_rules() - - @staticmethod - def _quote(value): - return "'%s'" % value - - def _apply_access_rules(self): - LOG.debug("Applying database access rules.") - - # Connections to all resources are granted. - # - # Local access from administrative users is implicitly trusted. - # - # Remote access from the Trove's account is always rejected as - # it is not needed and could be used by malicious users to hijack the - # instance. - # - # Connections from other accounts always require a double-MD5-hashed - # password. - # - # Make the rules readable only by the Postgres service. - # - # NOTE: The order of entries is important. - # The first failure to authenticate stops the lookup. - # That is why the 'local' connections validate first. - # The OrderedDict is necessary to guarantee the iteration order. 
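[Reviewer sketch, not part of the patch] As context for the rule ordering described in the note above, here is a minimal standalone sketch of how an ordered rule map like the one below could flatten into pg_hba.conf entries, where the first matching entry wins. The rule values are illustrative, not Trove's actual codec output:

    from collections import OrderedDict

    # Illustrative rule map; first matching pg_hba.conf entry wins.
    access_rules = OrderedDict([
        ('local', [['all', 'postgres,os_admin', None, 'trust']]),
        ('host', [['all', 'all', '0.0.0.0/0', 'md5']]),
    ])

    def render_pg_hba(rules):
        lines = []
        for conn_type, entries in rules.items():
            for entry in entries:
                # 'local' entries have no ADDRESS column (None is dropped).
                lines.append('\t'.join([conn_type] +
                                       [f for f in entry if f is not None]))
        return '\n'.join(lines)

    # local  all  postgres,os_admin  trust
    # host   all  all  0.0.0.0/0  md5
    print(render_pg_hba(access_rules))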
- local_admins = ','.join([self.default_superuser_name, self.ADMIN_USER]) - remote_admins = self.ADMIN_USER - access_rules = OrderedDict( - [('local', [['all', local_admins, None, 'trust'], - ['replication', local_admins, None, 'trust'], - ['all', 'all', None, 'md5']]), - ('host', [['all', local_admins, '127.0.0.1/32', 'trust'], - ['all', local_admins, '::1/128', 'trust'], - ['all', local_admins, 'localhost', 'trust'], - ['all', remote_admins, '0.0.0.0/0', 'reject'], - ['all', remote_admins, '::/0', 'reject'], - ['all', 'all', '0.0.0.0/0', 'md5'], - ['all', 'all', '::/0', 'md5']]) - ]) - operating_system.write_file(self.pgsql_hba_config, access_rules, - PropertiesCodec( - string_mappings={'\t': None}), - as_root=True) - operating_system.chown(self.pgsql_hba_config, - self.pgsql_owner, self.pgsql_owner, - as_root=True) - operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO, - as_root=True) - - def disable_backups(self): - """Reverse overrides applied by PgBaseBackup strategy""" - if not self.configuration_manager.has_system_override( - BACKUP_CFG_OVERRIDE): - return - LOG.info("Removing configuration changes for backups") - self.configuration_manager.remove_system_override(BACKUP_CFG_OVERRIDE) - self.remove_wal_archive_dir() - self.restart() - - def enable_backups(self): - """Apply necessary changes to config to enable WAL-based backups - if we are using the PgBaseBackup strategy - """ - LOG.info("Checking if we need to apply changes to WAL config") - if 'PgBaseBackup' not in self.backup_strategy: - return - if self.configuration_manager.has_system_override(BACKUP_CFG_OVERRIDE): - return - - LOG.info("Applying changes to WAL config for use by base backups") - wal_arch_loc = self.wal_archive_location - if not os.path.isdir(wal_arch_loc): - raise RuntimeError(_("Cannot enable backup as WAL dir '%s' does " - "not exist.") % wal_arch_loc) - arch_cmd = "'test ! -f {wal_arch}/%f && cp %p {wal_arch}/%f'".format( - wal_arch=wal_arch_loc - ) - # Only support pg version > 9.6, wal_level set to replica, and - # remove parameter "checkpoint_segments". - opts = { - 'wal_level': 'replica', - 'archive_mode': 'on', - 'max_wal_senders': 8, - 'wal_log_hints': 'on', - 'wal_keep_segments': 8, - 'archive_command': arch_cmd - } - - self.configuration_manager.apply_system_override( - opts, BACKUP_CFG_OVERRIDE) - self.restart() - - def disable_debugging(self, level=1): - """Disable debug-level logging in postgres""" - self.configuration_manager.remove_system_override(DEBUG_MODE_OVERRIDE) - - def enable_debugging(self, level=1): - """Enable debug-level logging in postgres""" - opt = {'log_min_messages': 'DEBUG%s' % level} - self.configuration_manager.apply_system_override(opt, - DEBUG_MODE_OVERRIDE) - - def install(self, context, packages): - """Install one or more packages that postgresql needs to run. - - The packages parameter is a string representing the package names that - should be given to the system's package manager. 
- """ - - LOG.debug( - "{guest_id}: Beginning PgSql package installation.".format( - guest_id=CONF.guest_id - ) - ) - self.recreate_wal_archive_dir() - - packager = pkg.Package() - if not packager.pkg_is_installed(packages): - try: - LOG.info( - "{guest_id}: Installing ({packages}).".format( - guest_id=CONF.guest_id, - packages=packages, - ) - ) - packager.pkg_install(packages, {}, 1000) - except (pkg.PkgAdminLockError, pkg.PkgPermissionError, - pkg.PkgPackageStateError, pkg.PkgNotFoundError, - pkg.PkgTimeout, pkg.PkgScriptletError, - pkg.PkgDownloadError, pkg.PkgSignError, - pkg.PkgBrokenError): - LOG.exception( - "{guest_id}: There was a package manager error while " - "trying to install ({packages}).".format( - guest_id=CONF.guest_id, - packages=packages, - ) - ) - raise - except Exception: - LOG.exception( - "{guest_id}: The package manager encountered an unknown " - "error while trying to install ({packages}).".format( - guest_id=CONF.guest_id, - packages=packages, - ) - ) - raise - else: - self.start_db() - LOG.debug( - "{guest_id}: Completed package installation.".format( - guest_id=CONF.guest_id, - ) - ) - - @property - def pgsql_recovery_config(self): - return os.path.join(self.pgsql_data_dir, "recovery.conf") - - @property - def pgsql_data_dir(self): - return os.path.dirname(self.pg_version[0]) - - @property - def pg_version(self): - """Find the database version file stored in the data directory. - - :returns: A tuple with the path to the version file - (in the root of the data directory) and the version string. - """ - version_files = operating_system.list_files_in_directory( - self.pgsql_base_data_dir, recursive=True, pattern='PG_VERSION', - as_root=True) - version_file = sorted(version_files, key=len)[0] - version = operating_system.read_file(version_file, as_root=True) - return version_file, version.strip() - - def restart(self): - self.status.restart_db_service( - self.service_candidates, CONF.state_change_wait_time) - - def start_db(self, enable_on_boot=True, update_db=False): - self.status.start_db_service( - self.service_candidates, CONF.state_change_wait_time, - enable_on_boot=enable_on_boot, update_db=update_db) - - def stop_db(self, do_not_start_on_reboot=False, update_db=False): - self.status.stop_db_service( - self.service_candidates, CONF.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def secure(self, context): - """Create an administrative user for Trove. - Force password encryption. 
-
-        Also disable the built-in superuser.
-        """
-        password = utils.generate_random_password()
-
-        os_admin_db = models.PostgreSQLSchema(self.ADMIN_USER)
-        os_admin = models.PostgreSQLUser(self.ADMIN_USER, password)
-        os_admin.databases.append(os_admin_db.serialize())
-
-        postgres = models.PostgreSQLUser(self.default_superuser_name)
-        admin = PgSqlAdmin(postgres)
-        admin._create_database(context, os_admin_db)
-        admin._create_admin_user(context, os_admin,
-                                 encrypt_password=True)
-
-        PgSqlAdmin(os_admin).alter_user(context, postgres, None,
-                                        'NOSUPERUSER', 'NOLOGIN')
-
-        self.set_current_admin_user(os_admin)
-
-    def pg_current_xlog_location(self):
-        """Wrapper for pg_current_xlog_location().
-        Cannot be used against a running slave.
-        """
-        version = int(self.pg_version[1])
-        if version < 10:
-            query = "SELECT pg_current_xlog_location()"
-        else:
-            query = "SELECT pg_current_wal_lsn()"
-        r = self.build_admin().query(query)
-        return r[0][0]
-
-    def pg_last_xlog_replay_location(self):
-        """Wrapper for pg_last_xlog_replay_location().
-        For use on standby servers.
-        """
-        version = int(self.pg_version[1])
-        if version < 10:
-            query = "SELECT pg_last_xlog_replay_location()"
-        else:
-            query = "SELECT pg_last_wal_replay_lsn()"
-        r = self.build_admin().query(query)
-        return r[0][0]
-
-    def pg_is_in_recovery(self):
-        """Wrapper for pg_is_in_recovery() for detecting a server in
-        standby mode.
-        """
-        r = self.build_admin().query("SELECT pg_is_in_recovery()")
-        return r[0][0]
-
-    def pg_primary_host(self):
-        """There seems to be no way to programmatically determine this
-        on a hot standby, so grab what we have written to the recovery
-        file.
-        """
-        r = operating_system.read_file(self.pgsql_recovery_config,
-                                       as_root=True)
-        regexp = re.compile(r"host=(\d+\.\d+\.\d+\.\d+) ")
-        m = regexp.search(r)
-        return m.group(1)
-
-    def recreate_wal_archive_dir(self):
-        wal_archive_dir = self.wal_archive_location
-        operating_system.remove(wal_archive_dir, force=True, recursive=True,
-                                as_root=True)
-        operating_system.create_directory(wal_archive_dir,
-                                          user=self.pgsql_owner,
-                                          group=self.pgsql_owner,
-                                          force=True, as_root=True)
-
-    def remove_wal_archive_dir(self):
-        wal_archive_dir = self.wal_archive_location
-        operating_system.remove(wal_archive_dir, force=True, recursive=True,
-                                as_root=True)
-
-    def is_root_enabled(self, context):
-        """Return True if there is a superuser account enabled.
-        """
-        results = self.build_admin().query(
-            pgsql_query.UserQuery.list_root(),
-            timeout=30,
-        )
-
-        # There should be only one superuser (Trove's administrative account).
-        return len(results) > 1 or (results[0][0] != self.ADMIN_USER)
-
-    def enable_root(self, context, root_password=None):
-        """Create a superuser or reset the superuser password.
-
-        The default PostgreSQL administration account is 'postgres'.
-        This account always exists and cannot be removed.
-        Its attributes and access can however be altered.
-
-        Clients can connect from the localhost or remotely via TCP/IP:
-
-        Local clients (e.g. psql) can connect from a preset *system* account
-        called 'postgres'.
-        This system account has no password and is *locked* by default,
-        so that it can be used by *local* users only.
-        It should *never* be enabled (or its password set)!!!
-        That would just open up a new attack vector on the system account.
-
-        Remote clients should use a built-in *database* account of the same
-        name. Its password can be changed using the "ALTER USER" statement.
-
-        Access to this account is disabled by Trove and exposed only once
-        superuser access is requested.
-        Trove itself creates its own administrative account.
-
-            {"_name": "postgres", "_password": ""}
-        """
-        user = self.build_root_user(root_password)
-        self.build_admin().alter_user(
-            context, user, None, *PgSqlAdmin.ADMIN_OPTIONS)
-        return user.serialize()
-
-    def build_root_user(self, password=None):
-        return models.PostgreSQLUser.root(password=password)
-
-    def pg_start_backup(self, backup_label):
-        r = self.build_admin().query(
-            "SELECT pg_start_backup('%s', true)" % backup_label)
-        return r[0][0]
-
-    def pg_xlogfile_name(self, start_segment):
-        version = int(self.pg_version[1])
-        if version < 10:
-            query = "SELECT pg_xlogfile_name('%s')"
-        else:
-            query = "SELECT pg_walfile_name('%s')"
-        r = self.build_admin().query(query % start_segment)
-        return r[0][0]
-
-    def pg_stop_backup(self):
-        r = self.build_admin().query("SELECT pg_stop_backup()")
-        return r[0][0]
-
-    def disable_root(self, context):
-        """Generate a new random password for the public superuser account.
-        Do not disable its access rights. Once enabled the account should
-        stay that way.
-        """
-        self.enable_root(context)
-
-    def enable_root_with_password(self, context, root_password=None):
-        return self.enable_root(context, root_password)
-
-    @property
-    def wal_archive_location(self):
-        return cfg.get_configuration_property('wal_archive_location')
-
-    @property
-    def backup_strategy(self):
-        return cfg.get_configuration_property('backup_strategy')
-
-    def save_files_pre_upgrade(self, mount_point):
-        LOG.debug('Saving files pre-upgrade.')
-        mnt_etc_dir = os.path.join(mount_point, 'save_etc')
-        if self.OS not in [operating_system.REDHAT]:
-            # No need to store the config files away for Redhat because
-            # they are already stored in the data volume.
-            operating_system.remove(mnt_etc_dir, force=True, as_root=True)
-            operating_system.copy(self.pgsql_config_dir, mnt_etc_dir,
-                                  preserve=True, recursive=True, as_root=True)
-        return {'save_etc': mnt_etc_dir}
-
-    def restore_files_post_upgrade(self, upgrade_info):
-        LOG.debug('Restoring files post-upgrade.')
-        if self.OS not in [operating_system.REDHAT]:
-            # No need to restore the config files for Redhat because
-            # they are already in the data volume.
-            operating_system.copy('%s/.' % upgrade_info['save_etc'],
-                                  self.pgsql_config_dir,
-                                  preserve=True, recursive=True,
-                                  force=True, as_root=True)
-            operating_system.remove(upgrade_info['save_etc'], force=True,
-                                    as_root=True)
-        self.configuration_manager.refresh_cache()
-        self.status.set_ready()
-
-
-class PgSqlAppStatus(service.BaseDbStatus):
-
-    HOST = 'localhost'
-
-    def __init__(self, tools_dir):
-        super(PgSqlAppStatus, self).__init__()
-        self._cmd = guestagent_utils.build_file_path(tools_dir, 'pg_isready')
-
-    def _get_actual_db_status(self):
-        # Every path through the try block returns, so no fall-through
-        # return is needed after it.
-        try:
-            utils.execute_with_timeout(
-                self._cmd, '-h', self.HOST, log_output_on_error=True)
-            return instance.ServiceStatuses.RUNNING
-        except exception.ProcessExecutionError:
-            return instance.ServiceStatuses.SHUTDOWN
-        except utils.Timeout:
-            return instance.ServiceStatuses.BLOCKED
-        except Exception:
-            LOG.exception("Error getting Postgres status.")
-            return instance.ServiceStatuses.CRASHED
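[Reviewer sketch, not part of the patch] A standalone sketch of the pg_isready probe used by PgSqlAppStatus above. The binary path and the status strings are placeholders; the real code resolves the path per distribution and returns trove ServiceStatuses values:

    import subprocess

    def postgres_status(pg_isready='/usr/lib/postgresql/12/bin/pg_isready',
                        host='localhost', timeout=30):
        # pg_isready exits 0 when the server accepts connections.
        try:
            subprocess.run([pg_isready, '-h', host],
                           check=True, timeout=timeout)
            return 'RUNNING'
        except subprocess.CalledProcessError:
            return 'SHUTDOWN'   # probe ran, server refused connections
        except subprocess.TimeoutExpired:
            return 'BLOCKED'    # probe hung, host not answering
        except Exception:
            return 'CRASHED'    # anything unexpected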
-
-
-class PgSqlAdmin(object):
-
-    # Default set of options of an administrative account.
-    ADMIN_OPTIONS = (
-        'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION',
-        'BYPASSRLS', 'LOGIN'
-    )
-
-    def __init__(self, user):
-        port = cfg.get_configuration_property('postgresql_port')
-        self.__connection = PostgresLocalhostConnection(user.name, port=port)
-
-    def grant_access(self, context, username, hostname, databases):
-        """Give a user permission to use a given database.
-
-        The username and hostname parameters are strings.
-        The databases parameter is a list of strings representing the names of
-        the databases to grant permission on.
-        """
-        for database in databases:
-            LOG.info(
-                "{guest_id}: Granting user ({user}) access to database "
-                "({database}).".format(
-                    guest_id=CONF.guest_id,
-                    user=username,
-                    database=database,)
-            )
-            self.psql(
-                pgsql_query.AccessQuery.grant(
-                    user=username,
-                    database=database,
-                ),
-                timeout=30,
-            )
-
-    def revoke_access(self, context, username, hostname, database):
-        """Revoke a user's permission to use a given database.
-
-        The username and hostname parameters are strings.
-        The database parameter is a string representing the name of the
-        database.
-        """
-        LOG.info(
-            "{guest_id}: Revoking user ({user}) access to database "
-            "({database}).".format(
-                guest_id=CONF.guest_id,
-                user=username,
-                database=database,)
-        )
-        self.psql(
-            pgsql_query.AccessQuery.revoke(
-                user=username,
-                database=database,
-            ),
-            timeout=30,
-        )
-
-    def list_access(self, context, username, hostname):
-        """List the databases for which the given user has access.
-        Return a list of serialized Postgres databases.
-        """
-        user = self._find_user(context, username)
-        if user is not None:
-            return user.databases
-
-        raise exception.UserNotFound(uuid=username)
-
-    def create_database(self, context, databases):
-        """Create the list of specified databases.
-
-        The databases parameter is a list of serialized Postgres databases.
-        """
-        for database in databases:
-            self._create_database(
-                context,
-                models.PostgreSQLSchema.deserialize(database))
-
-    def _create_database(self, context, database):
-        """Create a database.
-
-        :param database: Database to be created.
-        :type database: PostgreSQLSchema
-        """
-        LOG.info(
-            "{guest_id}: Creating database {name}.".format(
-                guest_id=CONF.guest_id,
-                name=database.name,
-            )
-        )
-        self.psql(
-            pgsql_query.DatabaseQuery.create(
-                name=database.name,
-                encoding=database.character_set,
-                collation=database.collate,
-            ),
-            timeout=30,
-        )
-
-    def delete_database(self, context, database):
-        """Delete the specified database.
-        """
-        self._drop_database(
-            models.PostgreSQLSchema.deserialize(database))
-
-    def _drop_database(self, database):
-        """Drop a given Postgres database.
-
-        :param database: Database to be dropped.
-        :type database: PostgreSQLSchema
-        """
-        LOG.info(
-            "{guest_id}: Dropping database {name}.".format(
-                guest_id=CONF.guest_id,
-                name=database.name,
-            )
-        )
-        self.psql(
-            pgsql_query.DatabaseQuery.drop(name=database.name),
-            timeout=30,
-        )
-
-    def list_databases(self, context, limit=None, marker=None,
-                       include_marker=False):
-        """List all databases on the instance.
-        Return a paginated list of serialized Postgres databases.
- """ - - return guestagent_utils.serialize_list( - self._get_databases(), - limit=limit, marker=marker, include_marker=include_marker) - - def _get_databases(self): - """Return all non-system Postgres databases on the instance.""" - results = self.query( - pgsql_query.DatabaseQuery.list(ignore=self.ignore_dbs), - timeout=30, - ) - return [models.PostgreSQLSchema( - row[0].strip(), character_set=row[1], collate=row[2]) - for row in results] - - def create_user(self, context, users): - """Create users and grant privileges for the specified databases. - - The users parameter is a list of serialized Postgres users. - """ - for user in users: - self._create_user( - context, - models.PostgreSQLUser.deserialize(user), None) - - def _create_user(self, context, user, encrypt_password=None, *options): - """Create a user and grant privileges for the specified databases. - - :param user: User to be created. - :type user: PostgreSQLUser - - :param encrypt_password: Store passwords encrypted if True. - Fallback to configured default - behavior if None. - :type encrypt_password: boolean - - :param options: Other user options. - :type options: list - """ - LOG.info( - "{guest_id}: Creating user {user} {with_clause}." - .format( - guest_id=CONF.guest_id, - user=user.name, - with_clause=pgsql_query.UserQuery._build_with_clause( - '', - encrypt_password, - *options - ), - ) - ) - self.psql( - pgsql_query.UserQuery.create( - user.name, - user.password, - encrypt_password, - *options - ), - timeout=30, - ) - self._grant_access( - context, user.name, - [models.PostgreSQLSchema.deserialize(db) - for db in user.databases]) - - def _create_admin_user(self, context, user, encrypt_password=None): - self._create_user(context, user, encrypt_password, *self.ADMIN_OPTIONS) - - def _grant_access(self, context, username, databases): - self.grant_access( - context, - username, - None, - [db.name for db in databases], - ) - - def list_users( - self, context, limit=None, marker=None, include_marker=False): - """List all users on the instance along with their access permissions. - Return a paginated list of serialized Postgres users. - """ - return guestagent_utils.serialize_list( - self._get_users(context), - limit=limit, marker=marker, include_marker=include_marker) - - def _get_users(self, context): - """Return all non-system Postgres users on the instance.""" - results = self.query( - pgsql_query.UserQuery.list(ignore=self.ignore_users), - timeout=30, - ) - - names = set([row[0].strip() for row in results]) - return [self._build_user(context, name, results) for name in names] - - def _build_user(self, context, username, acl=None): - """Build a model representation of a Postgres user. - Include all databases it has access to. - """ - user = models.PostgreSQLUser(username) - if acl: - dbs = [models.PostgreSQLSchema(row[1].strip(), - character_set=row[2], - collate=row[3]) - for row in acl if row[0] == username and row[1] is not None] - for d in dbs: - user.databases.append(d.serialize()) - - return user - - def delete_user(self, context, user): - """Delete the specified user. - """ - self._drop_user( - context, models.PostgreSQLUser.deserialize(user)) - - def _drop_user(self, context, user): - """Drop a given Postgres user. - - :param user: User to be dropped. 
- :type user: PostgreSQLUser - """ - # Postgresql requires that you revoke grants before dropping the user - databases = list(self.list_access(context, user.name, None)) - for db in databases: - db_schema = models.PostgreSQLSchema.deserialize(db) - self.revoke_access(context, user.name, None, db_schema.name) - - LOG.info( - "{guest_id}: Dropping user {name}.".format( - guest_id=CONF.guest_id, - name=user.name, - ) - ) - self.psql( - pgsql_query.UserQuery.drop(name=user.name), - timeout=30, - ) - - def get_user(self, context, username, hostname): - """Return a serialized representation of a user with a given name. - """ - user = self._find_user(context, username) - return user.serialize() if user is not None else None - - def _find_user(self, context, username): - """Lookup a user with a given username. - Return a new Postgres user instance or None if no match is found. - """ - results = self.query( - pgsql_query.UserQuery.get(name=username), - timeout=30, - ) - - if results: - return self._build_user(context, username, results) - - return None - - def user_exists(self, username): - """Return whether a given user exists on the instance.""" - results = self.query( - pgsql_query.UserQuery.get(name=username), - timeout=30, - ) - - return bool(results) - - def change_passwords(self, context, users): - """Change the passwords of one or more existing users. - The users parameter is a list of serialized Postgres users. - """ - for user in users: - self.alter_user( - context, - models.PostgreSQLUser.deserialize(user), None) - - def alter_user(self, context, user, encrypt_password=None, *options): - """Change the password and options of an existing users. - - :param user: User to be altered. - :type user: PostgreSQLUser - - :param encrypt_password: Store passwords encrypted if True. - Fallback to configured default - behavior if None. - :type encrypt_password: boolean - - :param options: Other user options. - :type options: list - """ - LOG.info( - "{guest_id}: Altering user {user} {with_clause}." - .format( - guest_id=CONF.guest_id, - user=user.name, - with_clause=pgsql_query.UserQuery._build_with_clause( - '', - encrypt_password, - *options - ), - ) - ) - self.psql( - pgsql_query.UserQuery.alter_user( - user.name, - user.password, - encrypt_password, - *options), - timeout=30, - ) - - def update_attributes(self, context, username, hostname, user_attrs): - """Change the attributes of one existing user. - - The username and hostname parameters are strings. - The user_attrs parameter is a dictionary in the following form: - - {"password": "", "name": ""} - - Each key/value pair in user_attrs is optional. - """ - user = self._build_user(context, username) - new_username = user_attrs.get('name') - new_password = user_attrs.get('password') - - if new_username is not None: - self._rename_user(context, user, new_username) - # Make sure we can retrieve the renamed user. - user = self._find_user(context, new_username) - if user is None: - raise exception.TroveError(_( - "Renamed user %s could not be found on the instance.") - % new_username) - - if new_password is not None: - user.password = new_password - self.alter_user(context, user) - - def _rename_user(self, context, user, new_username): - """Rename a given Postgres user and transfer all access to the - new name. - - :param user: User to be renamed. 
- :type user: PostgreSQLUser - """ - LOG.info( - "{guest_id}: Changing username for {old} to {new}.".format( - guest_id=CONF.guest_id, - old=user.name, - new=new_username, - ) - ) - # PostgreSQL handles the permission transfer itself. - self.psql( - pgsql_query.UserQuery.update_name( - old=user.name, - new=new_username, - ), - timeout=30, - ) - - def psql(self, statement, timeout=30): - """Execute a non-returning statement (usually DDL); - Turn autocommit ON (this is necessary for statements that cannot run - within an implicit transaction, like CREATE DATABASE). - """ - return self.__connection.execute(statement) - - def query(self, query, timeout=30): - """Execute a query and return the result set. - """ - return self.__connection.query(query) - - @property - def ignore_users(self): - return cfg.get_ignored_users() - - @property - def ignore_dbs(self): - return cfg.get_ignored_dbs() - - -class PostgresConnection(object): - - def __init__(self, **connection_args): - self._connection_args = connection_args - - def execute(self, statement, identifiers=None, data_values=None): - """Execute a non-returning statement. - """ - self._execute_stmt(statement, identifiers, data_values, False, - autocommit=True) - - def query(self, query, identifiers=None, data_values=None): - """Execute a query and return the result set. - """ - return self._execute_stmt(query, identifiers, data_values, True) - - def _execute_stmt(self, statement, identifiers, data_values, fetch, - autocommit=False): - if statement: - with psycopg2.connect(**self._connection_args) as connection: - connection.autocommit = autocommit - with connection.cursor() as cursor: - cursor.execute( - self._bind(statement, identifiers), data_values) - if fetch: - return cursor.fetchall() - else: - raise exception.UnprocessableEntity(_("Invalid SQL statement: %s") - % statement) - - def _bind(self, statement, identifiers): - if identifiers: - return statement.format(*identifiers) - return statement - - -class PostgresLocalhostConnection(PostgresConnection): - - HOST = 'localhost' - - def __init__(self, user, password=None, port=5432): - super(PostgresLocalhostConnection, self).__init__( - user=user, password=password, - host=self.HOST, port=port) diff --git a/trove/guestagent/datastore/experimental/pxc/__init__.py b/trove/guestagent/datastore/experimental/pxc/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/pxc/manager.py b/trove/guestagent/datastore/experimental/pxc/manager.py deleted file mode 100644 index caf3fc3aad..0000000000 --- a/trove/guestagent/datastore/experimental/pxc/manager.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
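[Reviewer sketch, not part of the patch] The autocommit handling in PostgresConnection above matters because CREATE DATABASE cannot run inside a transaction block, and psycopg2 opens an implicit transaction on the first execute(). A minimal sketch; the connection details are illustrative:

    import psycopg2

    conn = psycopg2.connect(user='os_admin', host='localhost', port=5432)
    # Without autocommit, psycopg2 would wrap the statement in an implicit
    # transaction and CREATE DATABASE would fail.
    conn.autocommit = True
    with conn.cursor() as cursor:
        cursor.execute('CREATE DATABASE example_db')
    conn.close()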
-# - -from trove.guestagent.datastore.experimental.pxc import service as pxc_service -from trove.guestagent.datastore.galera_common import manager -from trove.guestagent.datastore.mysql_common import service as mysql_service - - -class Manager(manager.GaleraManager): - - def __init__(self): - super(Manager, self).__init__(pxc_service.PXCApp, - mysql_service.BaseMySqlAppStatus, - pxc_service.PXCAdmin) diff --git a/trove/guestagent/datastore/experimental/pxc/service.py b/trove/guestagent/datastore/experimental/pxc/service.py deleted file mode 100644 index 8b9ed09133..0000000000 --- a/trove/guestagent/datastore/experimental/pxc/service.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2015 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from trove.guestagent.datastore.galera_common import service as galera_service -from trove.guestagent.datastore.mysql_common import service as mysql_service - - -class PXCApp(galera_service.GaleraApp): - - def __init__(self, status): - super(PXCApp, self).__init__( - status, mysql_service.BaseLocalSqlClient, - mysql_service.BaseKeepAliveConnection) - - @property - def mysql_service(self): - result = super(PXCApp, self).mysql_service - if result['type'] == 'sysvinit': - result['cmd_bootstrap_galera_cluster'] = ( - "sudo service %s bootstrap-pxc" % result['service']) - elif result['type'] == 'systemd': - result['cmd_bootstrap_galera_cluster'] = ( - "sudo systemctl start %s@bootstrap.service" - % result['service']) - return result - - @property - def cluster_configuration(self): - return self.configuration_manager.get_value('mysqld') - - -class PXCRootAccess(mysql_service.BaseMySqlRootAccess): - - def __init__(self): - super(PXCRootAccess, self).__init__( - mysql_service.BaseLocalSqlClient, - PXCApp(mysql_service.BaseMySqlAppStatus.get())) - - -class PXCAdmin(mysql_service.BaseMySqlAdmin): - def __init__(self): - super(PXCAdmin, self).__init__( - mysql_service.BaseLocalSqlClient, PXCRootAccess(), PXCApp) diff --git a/trove/guestagent/datastore/experimental/redis/__init__.py b/trove/guestagent/datastore/experimental/redis/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/datastore/experimental/redis/manager.py b/trove/guestagent/datastore/experimental/redis/manager.py deleted file mode 100644 index 68a7d79c96..0000000000 --- a/trove/guestagent/datastore/experimental/redis/manager.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright (c) 2013 Rackspace -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_log import log as logging - -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common.notification import EndNotification -from trove.common import utils -from trove.guestagent import backup -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.redis import service, system -from trove.guestagent.datastore import manager -from trove.guestagent import guest_log -from trove.guestagent import volume - - -LOG = logging.getLogger(__name__) - - -class Manager(manager.Manager): - """ - This is the Redis manager class. It is dynamically loaded - based off of the service_type of the trove instance - """ - - GUEST_LOG_DEFS_REDIS_LABEL = 'redis' - - def __init__(self): - super(Manager, self).__init__('redis') - self._app = service.RedisApp() - - @property - def status(self): - return self._app.status - - @property - def configuration_manager(self): - return self._app.configuration_manager - - def get_datastore_log_defs(self): - logfile = self._app.get_logfile() - if not logfile: - return {} - return { - self.GUEST_LOG_DEFS_REDIS_LABEL: { - self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS, - self.GUEST_LOG_USER_LABEL: system.REDIS_OWNER, - self.GUEST_LOG_FILE_LABEL: logfile - } - } - - def _perform_restore(self, backup_info, context, restore_location, app): - """Perform a restore on this instance.""" - LOG.info("Restoring database from backup %s.", backup_info['id']) - try: - backup.restore(context, backup_info, restore_location) - except Exception: - LOG.exception("Error performing restore from backup %s.", - backup_info['id']) - app.status.set_status(rd_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully.") - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - if device_path: - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - device.mount(mount_point) - operating_system.chown(mount_point, 'redis', 'redis', - as_root=True) - LOG.debug('Mounted the volume.') - self._app.install_if_needed(packages) - LOG.info('Writing redis configuration.') - if cluster_config: - config_contents = (config_contents + "\n" - + "cluster-enabled yes\n" - + "cluster-config-file cluster.conf\n") - self._app.configuration_manager.save_configuration(config_contents) - self._app.apply_initial_guestagent_configuration() - if backup_info: - persistence_dir = self._app.get_working_dir() - self._perform_restore(backup_info, context, persistence_dir, - self._app) - else: - # If we're not restoring, we have to force a restart of the - # server manually so that the configuration stuff takes effect - self._app.restart() - if snapshot: - self.attach_replica(context, snapshot, snapshot['config']) - - def pre_upgrade(self, context): - mount_point = self._app.get_working_dir() - save_etc_dir = "%s/etc" % mount_point - home_save = "%s/trove_user" % mount_point - - self._app.status.begin_restart() - self._app.stop_db() - - operating_system.copy("%s/." % system.REDIS_CONF_DIR, save_etc_dir, - preserve=True, as_root=True) - - operating_system.copy("%s/." 
% os.path.expanduser('~'), home_save, - preserve=True, as_root=True) - - self.unmount_volume(context, mount_point=mount_point) - - return { - 'mount_point': mount_point, - 'save_etc_dir': save_etc_dir, - 'home_save': home_save - } - - def post_upgrade(self, context, upgrade_info): - self._app.stop_db() - - if 'device' in upgrade_info: - self.mount_volume(context, mount_point=upgrade_info['mount_point'], - device_path=upgrade_info['device'], - write_to_fstab=True) - operating_system.chown(path=upgrade_info['mount_point'], - user=system.REDIS_OWNER, - group=system.REDIS_OWNER, - recursive=True, as_root=True) - - self._restore_home_directory(upgrade_info['home_save']) - - self._restore_directory(upgrade_info['save_etc_dir'], - system.REDIS_CONF_DIR) - - self._app = service.RedisApp() - self._app.start_db() - self._app.status.end_restart() - - def restart(self, context): - """ - Restart this redis instance. - This method is called when the guest agent - gets a restart message from the taskmanager. - """ - LOG.debug("Restart called.") - self._app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - """ - Start this redis instance with new conf changes. - """ - LOG.debug("Start DB with conf changes called.") - self._app.start_db_with_conf_changes(config_contents) - - def stop_db(self, context, do_not_start_on_reboot=False): - """ - Stop this redis instance. - This method is called when the guest agent - gets a stop message from the taskmanager. - """ - LOG.debug("Stop DB called.") - self._app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) - - def create_backup(self, context, backup_info): - """Create a backup of the database.""" - LOG.debug("Creating backup.") - with EndNotification(context): - backup.backup(context, backup_info) - - def update_overrides(self, context, overrides, remove=False): - LOG.debug("Updating overrides.") - if remove: - self._app.remove_overrides() - else: - self._app.update_overrides(context, overrides, remove) - - def apply_overrides(self, context, overrides): - LOG.debug("Applying overrides.") - self._app.apply_overrides(self._app.admin, overrides) - - def backup_required_for_replication(self, context): - return self.replication.backup_required_for_replication() - - def get_replication_snapshot(self, context, snapshot_info, - replica_source_config=None): - LOG.debug("Getting replication snapshot.") - self.replication.enable_as_master(self._app, replica_source_config) - - snapshot_id, log_position = self.replication.snapshot_for_replication( - context, self._app, None, snapshot_info) - - volume_stats = self.get_filesystem_stats(context, None) - - replication_snapshot = { - 'dataset': { - 'datastore_manager': self.manager, - 'dataset_size': volume_stats.get('used', 0.0), - 'volume_size': volume_stats.get('total', 0.0), - 'snapshot_id': snapshot_id - }, - 'replication_strategy': self.replication_strategy, - 'master': self.replication.get_master_ref(self._app, - snapshot_info), - 'log_position': log_position - } - - return replication_snapshot - - def enable_as_master(self, context, replica_source_config): - LOG.debug("Calling enable_as_master.") - self.replication.enable_as_master(self._app, replica_source_config) - - def detach_replica(self, context, for_failover=False): - LOG.debug("Detaching replica.") - replica_info = self.replication.detach_slave(self._app, for_failover) - return replica_info - - def get_replica_context(self, context): - LOG.debug("Getting replica context.") - replica_info = 
self.replication.get_replica_context(self._app)
-        return replica_info
-
-    def _validate_slave_for_replication(self, context, replica_info):
-        if replica_info['replication_strategy'] != self.replication_strategy:
-            # NOTE: dict.update() returns None, so the info must be updated
-            # before it is passed to the exception.
-            replica_info.update({
-                'guest_strategy': self.replication_strategy
-            })
-            raise exception.IncompatibleReplicationStrategy(replica_info)
-
-    def attach_replica(self, context, replica_info, slave_config):
-        LOG.debug("Attaching replica.")
-        try:
-            if 'replication_strategy' in replica_info:
-                self._validate_slave_for_replication(context, replica_info)
-            self.replication.enable_as_slave(self._app, replica_info,
-                                             slave_config)
-        except Exception:
-            LOG.exception("Error enabling replication.")
-            raise
-
-    def make_read_only(self, context, read_only):
-        LOG.debug("Executing make_read_only(%s)", read_only)
-        self._app.make_read_only(read_only)
-
-    def _get_repl_info(self):
-        return self._app.admin.get_info('replication')
-
-    def _get_master_host(self):
-        slave_info = self._get_repl_info()
-        return slave_info and slave_info['master_host'] or None
-
-    def _get_repl_offset(self):
-        repl_info = self._get_repl_info()
-        LOG.debug("Got repl info: %s", repl_info)
-        offset_key = '%s_repl_offset' % repl_info['role']
-        offset = repl_info[offset_key]
-        LOG.debug("Found offset %(offset)s for key %(key)s.",
-                  {'offset': offset, 'key': offset_key})
-        return int(offset)
-
-    def get_last_txn(self, context):
-        master_host = self._get_master_host()
-        repl_offset = self._get_repl_offset()
-        return master_host, repl_offset
-
-    def get_latest_txn_id(self, context):
-        LOG.info("Retrieving latest repl offset.")
-        return self._get_repl_offset()
-
-    def wait_for_txn(self, context, txn):
-        LOG.info("Waiting on repl offset '%s'.", txn)
-
-        def _wait_for_txn():
-            current_offset = self._get_repl_offset()
-            LOG.debug("Current offset: %s.", current_offset)
-            return current_offset >= txn
-
-        try:
-            utils.poll_until(_wait_for_txn, time_out=120)
-        except exception.PollTimeOut:
-            raise RuntimeError(_("Timeout occurred waiting for Redis repl "
-                                 "offset to change to '%s'.") % txn)
-
-    def cleanup_source_on_replica_detach(self, context, replica_info):
-        LOG.debug("Cleaning up the source on the detach of a replica.")
-        self.replication.cleanup_source_on_replica_detach(self._app,
-                                                          replica_info)
-
-    def demote_replication_master(self, context):
-        LOG.debug("Demoting replica source.")
-        self.replication.demote_master(self._app)
-
-    def cluster_meet(self, context, ip, port):
-        LOG.debug("Executing cluster_meet to join node to cluster.")
-        self._app.cluster_meet(ip, port)
-
-    def get_node_ip(self, context):
-        LOG.debug("Retrieving cluster node ip address.")
-        return self._app.get_node_ip()
-
-    def get_node_id_for_removal(self, context):
-        LOG.debug("Validating removal of node from cluster.")
-        return self._app.get_node_id_for_removal()
-
-    def remove_nodes(self, context, node_ids):
-        LOG.debug("Removing nodes from cluster.")
-        self._app.remove_nodes(node_ids)
-
-    def cluster_addslots(self, context, first_slot, last_slot):
-        LOG.debug("Executing cluster_addslots to assign hash slots %s-%s.",
-                  first_slot, last_slot)
-        self._app.cluster_addslots(first_slot, last_slot)
-
-    def enable_root(self, context):
-        LOG.debug("Enabling authentication.")
-        return self._app.enable_root()
-
-    def enable_root_with_password(self, context, root_password=None):
-        LOG.debug("Enabling authentication with password.")
-        return self._app.enable_root(root_password)
-
-    def disable_root(self, context):
-        LOG.debug("Disabling authentication.")
-        return
self._app.disable_root() - - def get_root_password(self, context): - LOG.debug("Getting auth password.") - return self._app.get_auth_password() diff --git a/trove/guestagent/datastore/experimental/redis/service.py b/trove/guestagent/datastore/experimental/redis/service.py deleted file mode 100644 index f048f0db33..0000000000 --- a/trove/guestagent/datastore/experimental/redis/service.py +++ /dev/null @@ -1,561 +0,0 @@ -# Copyright (c) 2013 Rackspace -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import redis -from redis.exceptions import BusyLoadingError, ConnectionError - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common.db.redis.models import RedisRootUser -from trove.common import exception -from trove.common.i18n import _ -from trove.common import instance as rd_instance -from trove.common.stream_codecs import PropertiesCodec, StringConverter -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import OneFileOverrideStrategy -from trove.guestagent.common import guestagent_utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.redis import system -from trove.guestagent.datastore import service -from trove.guestagent import pkg - -LOG = logging.getLogger(__name__) -TIME_OUT = 1200 -CONF = cfg.CONF -CLUSTER_CFG = 'clustering' -SYS_OVERRIDES_AUTH = 'auth_password' -packager = pkg.Package() - - -class RedisAppStatus(service.BaseDbStatus): - """ - Handles all of the status updating for the redis guest agent. - """ - - def __init__(self, client): - super(RedisAppStatus, self).__init__() - self.__client = client - - def set_client(self, client): - self.__client = client - - def _get_actual_db_status(self): - try: - if self.__client.ping(): - return rd_instance.ServiceStatuses.RUNNING - except ConnectionError: - return rd_instance.ServiceStatuses.SHUTDOWN - except BusyLoadingError: - return rd_instance.ServiceStatuses.BLOCKED - except Exception: - LOG.exception("Error getting Redis status.") - - return rd_instance.ServiceStatuses.CRASHED - - def cleanup_stalled_db_services(self): - utils.execute_with_timeout('pkill', '-9', - 'redis-server', - run_as_root=True, - root_helper='sudo') - - -class RedisApp(object): - """ - Handles installation and configuration of redis - on a trove instance. 
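[Reviewer sketch, not part of the patch] A standalone sketch of the ping-based probe in RedisAppStatus above; the status strings are placeholders for the trove ServiceStatuses values. Note that in redis-py BusyLoadingError subclasses ConnectionError, so it must be caught first to stay distinguishable:

    import redis
    from redis.exceptions import BusyLoadingError, ConnectionError

    def redis_status(client):
        try:
            if client.ping():
                return 'RUNNING'
        except BusyLoadingError:   # dataset is still loading into memory
            return 'BLOCKED'
        except ConnectionError:    # server not accepting connections
            return 'SHUTDOWN'
        except Exception:
            return 'CRASHED'
        return 'CRASHED'

    client = redis.StrictRedis(unix_socket_path='/var/run/redis/redis.sock')
    print(redis_status(client))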
- """ - - def __init__(self, state_change_wait_time=None): - """ - Sets default status and state_change_wait_time - """ - if state_change_wait_time: - self.state_change_wait_time = state_change_wait_time - else: - self.state_change_wait_time = CONF.state_change_wait_time - - revision_dir = guestagent_utils.build_file_path( - os.path.dirname(system.REDIS_CONFIG), - ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) - config_value_mappings = {'yes': True, 'no': False, "''": None} - self._value_converter = StringConverter(config_value_mappings) - self.configuration_manager = ConfigurationManager( - system.REDIS_CONFIG, - system.REDIS_OWNER, system.REDIS_OWNER, - PropertiesCodec( - unpack_singletons=False, - string_mappings=config_value_mappings - ), requires_root=True, - override_strategy=OneFileOverrideStrategy(revision_dir)) - - self.admin = self._build_admin_client() - self.status = RedisAppStatus(self.admin) - - def _build_admin_client(self): - password = self.get_configuration_property('requirepass') - socket = self.get_configuration_property('unixsocket') - cmd = self.get_config_command_name() - - return RedisAdmin(password=password, unix_socket_path=socket, - config_cmd=cmd) - - def _refresh_admin_client(self): - self.admin = self._build_admin_client() - self.status.set_client(self.admin) - return self.admin - - def install_if_needed(self, packages): - """ - Install redis if needed do nothing if it is already installed. - """ - LOG.info('Preparing Guest as Redis Server.') - if not packager.pkg_is_installed(packages): - LOG.info('Installing Redis.') - self._install_redis(packages) - LOG.info('Redis installed completely.') - - def _install_redis(self, packages): - """ - Install the redis server. - """ - LOG.debug('Installing redis server.') - LOG.debug("Creating %s.", system.REDIS_CONF_DIR) - operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True) - pkg_opts = {} - packager.pkg_install(packages, pkg_opts, TIME_OUT) - self.start_db() - LOG.debug('Finished installing redis server.') - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - self.status.stop_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - disable_on_boot=do_not_start_on_reboot, update_db=update_db) - - def restart(self): - self.status.restart_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time) - - def update_overrides(self, context, overrides, remove=False): - if overrides: - self.configuration_manager.apply_user_override(overrides) - # apply requirepass at runtime - # TODO(zhaochao): updating 'requirepass' here will be removed - # in the future releases, Redis only use enable_root/disable_root - # to set this parameter. - if 'requirepass' in overrides: - self.admin.config_set('requirepass', overrides['requirepass']) - self._refresh_admin_client() - - def apply_overrides(self, client, overrides): - """Use the 'CONFIG SET' command to apply configuration at runtime. - - Commands that appear multiple times have values separated by a - white space. For instance, the following two 'save' directives from the - configuration file... - - save 900 1 - save 300 10 - - ... would be applied in a single command as: - - CONFIG SET save "900 1 300 10" - - Note that the 'CONFIG' command has been renamed to prevent - users from using it to bypass configuration groups. 
- """ - for prop_name, prop_args in overrides.items(): - args_string = self._join_lists( - self._value_converter.to_strings(prop_args), ' ') - client.config_set(prop_name, args_string) - # NOTE(zhaochao): requirepass applied in update_overrides is - # only kept for back compatibility. Now requirepass is set - # via enable_root/disable_root, Redis admin client should be - # refreshed here. - if prop_name == "requirepass": - client = self._refresh_admin_client() - - def _join_lists(self, items, sep): - """Join list items (including items from sub-lists) into a string. - Non-list inputs are returned unchanged. - - _join_lists('1234', ' ') = "1234" - _join_lists(['1','2','3','4'], ' ') = "1 2 3 4" - _join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4" - """ - if isinstance(items, list): - return sep.join([sep.join(e) if isinstance(e, list) else e - for e in items]) - return items - - def remove_overrides(self): - self.configuration_manager.remove_user_override() - - def make_read_only(self, read_only): - # Redis has no mechanism to make an instance read-only at present - pass - - def start_db_with_conf_changes(self, config_contents): - LOG.info('Starting redis with conf changes.') - if self.status.is_running: - format = 'Cannot start_db_with_conf_changes because status is %s.' - LOG.debug(format, self.status) - raise RuntimeError(format % self.status) - LOG.info("Initiating config.") - self.configuration_manager.save_configuration(config_contents) - # The configuration template has to be updated with - # guestagent-controlled settings. - self.apply_initial_guestagent_configuration() - self.start_db(True) - - def start_db(self, update_db=False): - self.status.start_db_service( - system.SERVICE_CANDIDATES, self.state_change_wait_time, - enable_on_boot=True, update_db=update_db) - - def apply_initial_guestagent_configuration(self): - """Update guestagent-controlled configuration properties. - """ - - # Hide the 'CONFIG' command from end users by mangling its name. - self.admin.set_config_command_name(self._mangle_config_command_name()) - - self.configuration_manager.apply_system_override( - {'daemonize': 'yes', - 'protected-mode': 'no', - 'supervised': 'systemd', - 'pidfile': system.REDIS_PID_FILE, - 'logfile': system.REDIS_LOG_FILE, - 'dir': system.REDIS_DATA_DIR}) - - def get_config_command_name(self): - """Get current name of the 'CONFIG' command. - """ - renamed_cmds = self.configuration_manager.get_value('rename-command') - if renamed_cmds: - for name_pair in renamed_cmds: - if name_pair[0] == 'CONFIG': - return name_pair[1] - - return None - - def _mangle_config_command_name(self): - """Hide the 'CONFIG' command from the clients by renaming it to a - random string known only to the guestagent. - Return the mangled name. - """ - mangled = utils.generate_random_password() - self._rename_command('CONFIG', mangled) - return mangled - - def _rename_command(self, old_name, new_name): - """It is possible to completely disable a command by renaming it - to an empty string. - """ - self.configuration_manager.apply_system_override( - {'rename-command': [old_name, new_name]}) - - def get_logfile(self): - """Specify the log file name. Also the empty string can be used to - force Redis to log on the standard output. - Note that if you use standard output for logging but daemonize, - logs will be sent to /dev/null - """ - return self.get_configuration_property('logfile') - - def get_db_filename(self): - """The filename where to dump the DB. 
- """ - return self.get_configuration_property('dbfilename') - - def get_working_dir(self): - """The DB will be written inside this directory, - with the filename specified the 'dbfilename' configuration directive. - The Append Only File will also be created inside this directory. - """ - return self.get_configuration_property('dir') - - def get_persistence_filepath(self): - """Returns the full path to the persistence file.""" - return guestagent_utils.build_file_path( - self.get_working_dir(), self.get_db_filename()) - - def get_port(self): - """Port for this instance or default if not set.""" - return self.get_configuration_property('port', system.REDIS_PORT) - - def get_auth_password(self): - """Client authentication password for this instance or None if not set. - """ - return self.get_configuration_property('requirepass') - - def is_appendonly_enabled(self): - """True if the Append Only File (AOF) persistence mode is enabled. - """ - return self.get_configuration_property('appendonly', False) - - def get_append_file_name(self): - """The name of the append only file (AOF). - """ - return self.get_configuration_property('appendfilename') - - def is_cluster_enabled(self): - """Only nodes that are started as cluster nodes can be part of a - Redis Cluster. - """ - return self.get_configuration_property('cluster-enabled', False) - - def enable_cluster(self): - """In order to start a Redis instance as a cluster node enable the - cluster support - """ - self.configuration_manager.apply_system_override( - {'cluster-enabled': 'yes'}, CLUSTER_CFG) - - def get_cluster_config_filename(self): - """Cluster node configuration file. - """ - return self.get_configuration_property('cluster-config-file') - - def set_cluster_config_filename(self, name): - """Make sure that instances running in the same system do not have - overlapping cluster configuration file names. - """ - self.configuration_manager.apply_system_override( - {'cluster-config-file': name}, CLUSTER_CFG) - - def get_cluster_node_timeout(self): - """Cluster node timeout is the amount of milliseconds a node must be - unreachable for it to be considered in failure state. - """ - return self.get_configuration_property('cluster-node-timeout') - - def get_configuration_property(self, name, default=None): - """Return the value of a Redis configuration property. - Returns a single value for single-argument properties or - a list otherwise. - """ - return utils.unpack_singleton( - self.configuration_manager.get_value(name, default)) - - def cluster_meet(self, ip, port): - try: - utils.execute_with_timeout('redis-cli', 'cluster', 'meet', - ip, port) - except exception.ProcessExecutionError: - LOG.exception('Error joining node to cluster at %s.', ip) - raise - - def cluster_addslots(self, first_slot, last_slot): - try: - group_size = 200 - # Create list of slots represented in strings - # eg. 
['10', '11', '12', '13'] - slots = list(map(str, range(first_slot, last_slot + 1))) - while slots: - cmd = (['redis-cli', 'cluster', 'addslots'] - + slots[0:group_size]) - out, err = utils.execute_with_timeout(*cmd, run_as_root=True, - root_helper='sudo') - if 'OK' not in out: - raise RuntimeError(_('Error executing addslots: %s') - % out) - del slots[0:group_size] - except exception.ProcessExecutionError: - LOG.exception('Error adding slots %(first_slot)s-%(last_slot)s' - ' to cluster.', - {'first_slot': first_slot, 'last_slot': last_slot}) - raise - - def _get_node_info(self): - try: - out, _ = utils.execute_with_timeout('redis-cli', '--csv', - 'cluster', 'nodes') - return [line.split(' ') for line in out.splitlines()] - except exception.ProcessExecutionError: - LOG.exception('Error getting node info.') - raise - - def _get_node_details(self): - for node_details in self._get_node_info(): - if 'myself' in node_details[2]: - return node_details - raise exception.TroveError(_("Unable to determine node details")) - - def get_node_ip(self): - """Returns [ip, port] where both values are strings""" - return self._get_node_details()[1].split(':') - - def get_node_id_for_removal(self): - node_details = self._get_node_details() - node_id = node_details[0] - my_ip = node_details[1].split(':')[0] - try: - slots, _ = utils.execute_with_timeout('redis-cli', '--csv', - 'cluster', 'slots') - return node_id if my_ip not in slots else None - except exception.ProcessExecutionError: - LOG.exception('Error validating node to for removal.') - raise - - def remove_nodes(self, node_ids): - try: - for node_id in node_ids: - utils.execute_with_timeout('redis-cli', 'cluster', - 'forget', node_id) - except exception.ProcessExecutionError: - LOG.exception('Error removing node from cluster.') - raise - - def enable_root(self, password=None): - if not password: - password = utils.generate_random_password() - redis_password = RedisRootUser(password=password) - try: - self.configuration_manager.apply_system_override( - {'requirepass': password, 'masterauth': password}, - change_id=SYS_OVERRIDES_AUTH) - self.apply_overrides( - self.admin, {'requirepass': password, 'masterauth': password}) - except exception.TroveError: - LOG.exception('Error enabling authentication for instance.') - raise - return redis_password.serialize() - - def disable_root(self): - try: - self.configuration_manager.remove_system_override( - change_id=SYS_OVERRIDES_AUTH) - self.apply_overrides(self.admin, - {'requirepass': '', 'masterauth': ''}) - except exception.TroveError: - LOG.exception('Error disabling authentication for instance.') - raise - - -class RedisAdmin(object): - """Handles administrative tasks on the Redis database. - """ - - DEFAULT_CONFIG_CMD = 'CONFIG' - - def __init__(self, password=None, unix_socket_path=None, config_cmd=None): - self.__client = redis.StrictRedis( - password=password, unix_socket_path=unix_socket_path) - self.__config_cmd_name = config_cmd or self.DEFAULT_CONFIG_CMD - - def set_config_command_name(self, name): - """Set name of the 'CONFIG' command or None for default. - """ - self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD - - def ping(self): - """Ping the Redis server and return True if a response is received. 
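[Reviewer sketch, not part of the patch] A standalone sketch of the batched slot assignment performed by cluster_addslots above. It assumes redis-cli is on PATH; the batch size mirrors the group_size of 200 used in the deleted code:

    import subprocess

    def addslots(first_slot, last_slot, group_size=200):
        slots = [str(s) for s in range(first_slot, last_slot + 1)]
        for i in range(0, len(slots), group_size):
            # Each batch becomes one redis-cli invocation of bounded length.
            cmd = (['redis-cli', 'cluster', 'addslots'] +
                   slots[i:i + group_size])
            out = subprocess.run(cmd, capture_output=True, text=True).stdout
            if 'OK' not in out:
                raise RuntimeError('Error executing addslots: %s' % out)

    # e.g. addslots(0, 5460) assigns the first third of the 16384 hash slots.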
- """ - return self.__client.ping() - - def get_info(self, section=None): - return self.__client.info(section=section) - - def persist_data(self): - save_cmd = 'SAVE' - last_save = self.__client.lastsave() - LOG.debug("Starting Redis data persist") - save_ok = True - try: - save_ok = self.__client.bgsave() - except redis.exceptions.ResponseError as re: - # If an auto-save is in progress just use it, since it must have - # just happened - if "Background save already in progress" in str(re): - LOG.info("Waiting for existing background save to finish") - else: - raise - if save_ok: - save_cmd = 'BGSAVE' - - def _timestamp_changed(): - return last_save != self.__client.lastsave() - - try: - utils.poll_until(_timestamp_changed, sleep_time=2, - time_out=TIME_OUT) - except exception.PollTimeOut: - raise RuntimeError(_("Timeout occurred waiting for Redis " - "persist (%s) to complete.") % save_cmd) - - # If the background save fails for any reason, try doing a foreground - # one. This blocks client connections, so we don't want it to be - # the default. - elif not self.__client.save(): - raise exception.BackupCreationError(_("Could not persist " - "Redis data (%s)") % save_cmd) - LOG.debug("Redis data persist (%s) completed", save_cmd) - - def set_master(self, host=None, port=None): - self.__client.slaveof(host, port) - - def config_set(self, name, value): - response = self.execute( - '%s %s' % (self.__config_cmd_name, 'SET'), name, value) - if not self._is_ok_response(response): - raise exception.UnprocessableEntity( - _("Could not set configuration property '%(name)s' to " - "'%(value)s'.") % {'name': name, 'value': value}) - - def _is_ok_response(self, response): - """Return True if a given Redis response is 'OK'. - """ - return response and redis.client.bool_ok(response) - - def execute(self, cmd_name, *cmd_args, **options): - """Execute a command and return a parsed response. - """ - try: - return self.__client.execute_command(cmd_name, *cmd_args, - **options) - except Exception as e: - LOG.exception(e) - raise exception.TroveError( - _("Redis command '%(cmd_name)s %(cmd_args)s' failed.") - % {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)}) - - def wait_until(self, key, wait_value, section=None, timeout=None): - """Polls redis until the specified 'key' changes to 'wait_value'.""" - timeout = timeout or CONF.usage_timeout - LOG.debug("Waiting for Redis '%(key)s' to be: %(value)s.", - {'key': key, 'value': wait_value}) - - def _check_info(): - redis_info = self.get_info(section) - if key in redis_info: - current_value = redis_info[key] - LOG.debug("Found '%(value)s' for field %(key)s.", - {'value': current_value, 'key': key}) - else: - LOG.error('Output from Redis command: %s', redis_info) - raise RuntimeError(_("Field %(field)s not found " - "(Section: '%(sec)s').") % - ({'field': key, 'sec': section})) - return current_value == wait_value - - try: - utils.poll_until(_check_info, time_out=timeout) - except exception.PollTimeOut: - raise RuntimeError(_("Timeout occurred waiting for Redis field " - "'%(field)s' to change to '%(val)s'.") % - {'field': key, 'val': wait_value}) diff --git a/trove/guestagent/datastore/experimental/redis/system.py b/trove/guestagent/datastore/experimental/redis/system.py deleted file mode 100644 index f2cbe0ff60..0000000000 --- a/trove/guestagent/datastore/experimental/redis/system.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2013 Rackspace -# All Rights Reserved. 
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Determines operating system version and OS dependent commands.
-"""
-
-from trove.guestagent.common.operating_system import get_os
-
-
-REDIS_OWNER = 'redis'
-REDIS_CONFIG = '/etc/redis/redis.conf'
-REDIS_PID_FILE = '/var/run/redis/redis-server.pid'
-REDIS_LOG_FILE = '/var/log/redis/server.log'
-REDIS_CONF_DIR = '/etc/redis'
-REDIS_DATA_DIR = '/var/lib/redis'
-REDIS_PORT = '6379'
-REDIS_INIT = '/etc/init/redis-server.conf'
-REDIS_PACKAGE = ''
-SERVICE_CANDIDATES = ['redis-server', 'redis']
-
-OS = get_os()
-if OS == 'redhat':
-    REDIS_CONFIG = '/etc/redis.conf'
-    REDIS_PACKAGE = 'redis'
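For reference, the persist_data() logic removed above hinges on Redis's LASTSAVE timestamp: a BGSAVE is confirmed complete only once LASTSAVE advances. A minimal standalone sketch of that pattern, assuming a local instance and the redis-py client (wait_for_bgsave is a hypothetical name, not part of Trove):

    import time

    import redis

    def wait_for_bgsave(client, timeout=60):
        # LASTSAVE only moves forward once a background save has finished.
        last_save = client.lastsave()
        client.bgsave()
        deadline = time.time() + timeout
        while client.lastsave() == last_save:
            if time.time() > deadline:
                raise TimeoutError("BGSAVE did not finish in %s seconds"
                                   % timeout)
            time.sleep(2)

    # wait_for_bgsave(redis.StrictRedis())  # assumes a local Redis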
diff --git a/trove/guestagent/datastore/experimental/vertica/__init__.py b/trove/guestagent/datastore/experimental/vertica/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/trove/guestagent/datastore/experimental/vertica/manager.py b/trove/guestagent/datastore/experimental/vertica/manager.py
deleted file mode 100644
index 0d66462898..0000000000
--- a/trove/guestagent/datastore/experimental/vertica/manager.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright [2015] Hewlett-Packard Development Company, L.P.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-from oslo_log import log as logging
-
-from trove.common.i18n import _
-from trove.common import instance as rd_ins
-from trove.guestagent.datastore.experimental.vertica.service import (
-    VerticaAppStatus)
-from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
-from trove.guestagent.datastore import manager
-from trove.guestagent import volume
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Manager(manager.Manager):
-
-    def __init__(self):
-        self.appStatus = VerticaAppStatus()
-        self.app = VerticaApp(self.appStatus)
-        super(Manager, self).__init__('vertica')
-
-    @property
-    def status(self):
-        return self.appStatus
-
-    @property
-    def configuration_manager(self):
-        return self.app.configuration_manager
-
-    def do_prepare(self, context, packages, databases, memory_mb, users,
-                   device_path, mount_point, backup_info,
-                   config_contents, root_password, overrides,
-                   cluster_config, snapshot):
-        """This is called from prepare in the base class."""
-        if device_path:
-            device = volume.VolumeDevice(device_path)
-            # unmount if device is already mounted
-            device.unmount_device(device_path)
-            device.format()
-            if os.path.exists(mount_point):
-                # rsync any existing data
-                device.migrate_data(mount_point)
-            # mount the volume
-            device.mount(mount_point)
-            LOG.debug("Mounted the volume.")
-        self.app.install_if_needed(packages)
-        self.app.prepare_for_install_vertica()
-        if cluster_config is None:
-            self.app.install_vertica()
-            self.app.create_db()
-            self.app.add_udls()
-
-            if config_contents:
-                self.app.configuration_manager.save_configuration(
-                    config_contents)
-
-        elif cluster_config['instance_type'] not in ["member", "master"]:
-            raise RuntimeError(_("Bad cluster configuration: instance type "
-                                 "given as %s.") %
-                               cluster_config['instance_type'])
-
-    def restart(self, context):
-        LOG.debug("Restarting the database.")
-        self.app.restart()
-        LOG.debug("Restarted the database.")
-
-    def stop_db(self, context, do_not_start_on_reboot=False):
-        LOG.debug("Stopping the database.")
-        self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
-        LOG.debug("Stopped the database.")
-
-    def enable_root(self, context):
-        LOG.debug("Enabling root.")
-        return self.app.enable_root()
-
-    def enable_root_with_password(self, context, root_password=None):
-        LOG.debug("Enabling root.")
-        return self.app.enable_root(root_password)
-
-    def is_root_enabled(self, context):
-        LOG.debug("Checking if root is enabled.")
-        return self.app.is_root_enabled()
-
-    def start_db_with_conf_changes(self, context, config_contents):
-        LOG.debug("Starting with configuration changes.")
-        self.app.start_db_with_conf_changes(config_contents)
-
-    def get_public_keys(self, context, user):
-        LOG.debug("Retrieving public keys for %s.", user)
-        return self.app.get_public_keys(user)
-
-    def authorize_public_keys(self, context, user, public_keys):
-        LOG.debug("Authorizing public keys for %s.", user)
-        return self.app.authorize_public_keys(user, public_keys)
-
-    def install_cluster(self, context, members):
-        try:
-            LOG.debug("Installing cluster on members: %s.", members)
-            self.app.install_cluster(members)
-            self.app.add_udls()
-            LOG.debug("install_cluster call has finished.")
-        except Exception:
-            LOG.exception('Cluster installation failed.')
-            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
-            raise
-
-    def update_overrides(self, context, overrides, remove=False):
-        LOG.debug("Updating overrides.")
-        if remove:
-            self.app.remove_overrides()
-        else:
-            self.app.update_overrides(context, overrides, remove)
-
-    def apply_overrides(self, context, overrides):
-        if overrides:
-            LOG.debug("Applying overrides: " + str(overrides))
-            self.app.apply_overrides(overrides)
-
-    def grow_cluster(self, context, members):
-        try:
-            LOG.debug("Growing cluster to members: %s.", members)
-            self.app.grow_cluster(members)
-            LOG.debug("grow_cluster call has finished.")
-        except Exception:
-            LOG.exception('Cluster grow failed.')
-            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
-            raise
-
-    def shrink_cluster(self, context, members):
-        try:
-            LOG.debug("Shrinking cluster members: %s.", members)
-            self.app.shrink_cluster(members)
-            LOG.debug("shrink_cluster call has finished.")
-        except Exception:
-            LOG.exception('Cluster shrink failed.')
-            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
-            raise
-
-    def mark_design_ksafe(self, context, k):
-        try:
-            LOG.debug("Setting vertica k-safety to %s.", k)
-            self.app.mark_design_ksafe(k)
-        except Exception:
-            LOG.exception('K-safety setting failed.')
-            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
-            raise
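The do_prepare() method above always prepares an attached data volume in a fixed order: unmount any stale mount, format, migrate existing data, then mount. A condensed sketch of just that sequence, reusing the volume.VolumeDevice calls shown above (device path and mount point are illustrative):

    import os

    from trove.guestagent import volume

    def prepare_data_volume(device_path, mount_point):
        device = volume.VolumeDevice(device_path)
        device.unmount_device(device_path)    # unmount if already mounted
        device.format()
        if os.path.exists(mount_point):
            device.migrate_data(mount_point)  # rsync any existing data
        device.mount(mount_point)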
diff --git a/trove/guestagent/datastore/experimental/vertica/service.py b/trove/guestagent/datastore/experimental/vertica/service.py
deleted file mode 100644
index 7dc490740f..0000000000
--- a/trove/guestagent/datastore/experimental/vertica/service.py
+++ /dev/null
@@ -1,618 +0,0 @@
-# Copyright [2015] Hewlett-Packard Development Company, L.P.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import subprocess
-import tempfile
-
-from oslo_log import log as logging
-from oslo_utils import netutils
-from six.moves import configparser
-
-from trove.common import cfg
-from trove.common.db import models
-from trove.common import exception
-from trove.common.i18n import _
-from trove.common import instance as rd_instance
-from trove.common.stream_codecs import PropertiesCodec
-from trove.common import utils
-from trove.guestagent.common.configuration import ConfigurationManager
-from trove.guestagent.common.configuration import ImportOverrideStrategy
-from trove.guestagent.common import guestagent_utils
-from trove.guestagent.common import operating_system
-from trove.guestagent.common.operating_system import FileMode
-from trove.guestagent.datastore.experimental.vertica import system
-from trove.guestagent.datastore import service
-from trove.guestagent import pkg
-from trove.guestagent import volume
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-packager = pkg.Package()
-DB_NAME = 'db_srvr'
-MOUNT_POINT = CONF.vertica.mount_point
-# We will use a fake configuration file for the options managed through
-# configuration groups that we apply directly with ALTER DB ... SET ...
-FAKE_CFG = os.path.join(MOUNT_POINT, "vertica.cfg.fake")
-
-
-class VerticaAppStatus(service.BaseDbStatus):
-
-    def _get_actual_db_status(self):
-        """Get the status of dbaas and report it back."""
-        try:
-            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
-                                            system.VERTICA_ADMIN)
-            if out.strip() == DB_NAME:
-                # UP status is confirmed
-                LOG.info("Service Status is RUNNING.")
-                return rd_instance.ServiceStatuses.RUNNING
-            else:
-                LOG.info("Service Status is SHUTDOWN.")
-                return rd_instance.ServiceStatuses.SHUTDOWN
-        except exception.ProcessExecutionError:
-            LOG.exception("Failed to get database status.")
-            return rd_instance.ServiceStatuses.CRASHED
-
-
-class VerticaApp(object):
-    """Prepares DBaaS on a Guest container."""
-
-    def __init__(self, status):
-        self.state_change_wait_time = CONF.state_change_wait_time
-        self.status = status
-        revision_dir = \
-            guestagent_utils.build_file_path(
-                os.path.join(MOUNT_POINT,
-                             os.path.dirname(system.VERTICA_ADMIN)),
-                ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
-
-        if not operating_system.exists(FAKE_CFG):
-            operating_system.write_file(FAKE_CFG, '', as_root=True)
-            operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
-                                   system.VERTICA_ADMIN_GRP, as_root=True)
-            operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
-                                   as_root=True)
-        self.configuration_manager = \
-            ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
-                                 system.VERTICA_ADMIN_GRP,
-                                 PropertiesCodec(delimiter='='),
-                                 requires_root=True,
-                                 override_strategy=ImportOverrideStrategy(
-                                     revision_dir, "cnf"))
-
-    def update_overrides(self, context, overrides, remove=False):
-        if overrides:
-            self.apply_overrides(overrides)
-
-    def remove_overrides(self):
-        config = self.configuration_manager.get_user_override()
-        self._reset_config(config)
-        self.configuration_manager.remove_user_override()
-
-    def apply_overrides(self, overrides):
-        self.configuration_manager.apply_user_override(overrides)
-        self._apply_config(overrides)
-
-    def _reset_config(self, config):
-        try:
-            db_password = self._get_database_password()
-            for k, v in config.items():
-                alter_db_cmd = system.ALTER_DB_RESET_CFG % (DB_NAME, str(k))
-                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
-                if err:
-                    if err.is_warning():
-                        LOG.warning(err)
-                    else:
-                        LOG.error(err)
-                        raise RuntimeError(_("Failed to remove config %s") % k)
-
-        except Exception:
-            LOG.exception("Vertica configuration remove failed.")
-            raise RuntimeError(_("Vertica configuration remove failed."))
-        LOG.info("Vertica configuration reset completed.")
-
-    def _apply_config(self, config):
-        try:
-            db_password = self._get_database_password()
-            for k, v in config.items():
-                alter_db_cmd = system.ALTER_DB_CFG % (DB_NAME, str(k), str(v))
-                out, err = system.exec_vsql_command(db_password, alter_db_cmd)
-                if err:
-                    if err.is_warning():
-                        LOG.warning(err)
-                    else:
-                        LOG.error(err)
-                        raise RuntimeError(_("Failed to apply config %s") % k)
-
-        except Exception:
-            LOG.exception("Vertica configuration apply failed")
-            raise RuntimeError(_("Vertica configuration apply failed"))
-        LOG.info("Vertica config apply completed.")
-
-    def _enable_db_on_boot(self):
-        try:
-            command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
-                       (system.SET_RESTART_POLICY % (DB_NAME, "always"))]
-            subprocess.Popen(command)
-            command = ["sudo", "su", "-", "root", "-c",
-                       (system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
-            subprocess.Popen(command)
-        except Exception:
-            LOG.exception("Failed to enable database on boot.")
-            raise RuntimeError(_("Could not enable database on boot."))
-
-    def _disable_db_on_boot(self):
-        try:
-            command = (system.SET_RESTART_POLICY % (DB_NAME, "never"))
-            system.shell_execute(command, system.VERTICA_ADMIN)
-            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
-            system.shell_execute(command)
-        except exception.ProcessExecutionError:
-            LOG.exception("Failed to disable database on boot.")
-            raise RuntimeError(_("Could not disable database on boot."))
-
-    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
-        """Stop the database."""
-        LOG.info("Stopping Vertica.")
-        if do_not_start_on_reboot:
-            self._disable_db_on_boot()
-
-        try:
-            # Stop vertica-agent service
-            command = (system.VERTICA_AGENT_SERVICE_COMMAND % "stop")
-            system.shell_execute(command)
-            # Using Vertica adminTools to stop db.
-            db_password = self._get_database_password()
-            stop_db_command = (system.STOP_DB % (DB_NAME, db_password))
-            out, err = system.shell_execute(system.STATUS_ACTIVE_DB,
-                                            system.VERTICA_ADMIN)
-            if out.strip() == DB_NAME:
-                system.shell_execute(stop_db_command, system.VERTICA_ADMIN)
-                if not self.status._is_restarting:
-                    if not self.status.wait_for_real_status_to_change_to(
-                            rd_instance.ServiceStatuses.SHUTDOWN,
-                            self.state_change_wait_time, update_db):
-                        LOG.error("Could not stop Vertica.")
-                        self.status.end_restart()
-                        raise RuntimeError(_("Could not stop Vertica!"))
-                LOG.debug("Database stopped.")
-            else:
-                LOG.debug("Database is not running.")
-        except exception.ProcessExecutionError:
-            LOG.exception("Failed to stop database.")
-            raise RuntimeError(_("Could not stop database."))
-
-    def start_db(self, update_db=False):
-        """Start the database."""
-        LOG.info("Starting Vertica.")
-        try:
-            self._enable_db_on_boot()
-            # Start vertica-agent service
-            command = ["sudo", "su", "-", "root", "-c",
-                       (system.VERTICA_AGENT_SERVICE_COMMAND % "start")]
-            subprocess.Popen(command)
-            # Using Vertica adminTools to start db.
-            db_password = self._get_database_password()
-            start_db_command = ["sudo", "su", "-", system.VERTICA_ADMIN, "-c",
-                                (system.START_DB % (DB_NAME, db_password))]
-            subprocess.Popen(start_db_command)
-            if not self.status._is_restarting:
-                self.status.end_restart()
-            LOG.debug("Database started.")
-        except Exception as e:
-            raise RuntimeError(_("Could not start Vertica due to %s") % e)
-
-    def start_db_with_conf_changes(self, config_contents):
-        """
-        Currently all this method does is start Vertica. It will need to be
-        extended to enable volume resize on the guestagent side.
-        """
-        LOG.info("Starting Vertica with configuration changes.")
-        if self.status.is_running:
-            format = 'Cannot start_db_with_conf_changes because status is %s.'
-            LOG.debug(format, self.status)
-            raise RuntimeError(format % self.status)
-        LOG.info("Initiating config.")
-        self.configuration_manager.save_configuration(config_contents)
-        self.start_db(True)
-
-    def restart(self):
-        """Restart the database."""
-        try:
-            self.status.begin_restart()
-            self.stop_db()
-            self.start_db()
-        finally:
-            self.status.end_restart()
-
-    def add_db_to_node(self, members=netutils.get_my_ipv4()):
-        """Add db to host with admintools"""
-        LOG.info("Calling admintools to add DB to host")
-        try:
-            # Create db after install
-            db_password = self._get_database_password()
-            create_db_command = (system.ADD_DB_TO_NODE % (members,
-                                                          DB_NAME,
-                                                          db_password))
-            system.shell_execute(create_db_command, "dbadmin")
-        except exception.ProcessExecutionError:
-            # Give vertica some time to get the node up, won't be available
-            # by the time adminTools -t db_add_node completes
-            LOG.info("adminTools failed as expected - wait for node")
-        self.wait_for_node_status()
-        LOG.info("Vertica add db to host completed.")
-
-    def remove_db_from_node(self, members=netutils.get_my_ipv4()):
-        """Remove db from node with admintools"""
-        LOG.info("Removing db from node")
-        try:
-            # Remove db from the node
-            db_password = self._get_database_password()
-            create_db_command = (system.REMOVE_DB_FROM_NODE % (members,
-                                                               DB_NAME,
-                                                               db_password))
-            system.shell_execute(create_db_command, "dbadmin")
-        except exception.ProcessExecutionError:
-            # Give vertica some time to get the node up, won't be available
-            # by the time adminTools -t db_add_node completes
-            LOG.info("adminTools failed as expected - wait for node")
-
-        # Give vertica some time to take the node down - it won't be available
-        # by the time adminTools -t db_add_node completes
-        self.wait_for_node_status()
-        LOG.info("Vertica remove host from db completed.")
-
-    def create_db(self, members=netutils.get_my_ipv4()):
-        """Create a Vertica database on the guest machine."""
-        LOG.info("Creating database on Vertica host.")
-        try:
-            # Create db after install
-            db_password = self._get_database_password()
-            create_db_command = (system.CREATE_DB % (members, DB_NAME,
-                                                     MOUNT_POINT, MOUNT_POINT,
-                                                     db_password))
-            system.shell_execute(create_db_command, system.VERTICA_ADMIN)
-        except Exception:
-            LOG.exception("Vertica database create failed.")
-            raise RuntimeError(_("Vertica database create failed."))
-        LOG.info("Vertica database create completed.")
-
-    def install_vertica(self, members=netutils.get_my_ipv4()):
-        """Install the Vertica server on the guest machine."""
-        LOG.info("Installing Vertica Server.")
-        try:
-            install_vertica_cmd = (system.INSTALL_VERTICA % (members,
-                                                             MOUNT_POINT))
-            system.shell_execute(install_vertica_cmd)
-        except exception.ProcessExecutionError:
-            LOG.exception("install_vertica failed.")
-            raise RuntimeError(_("install_vertica failed."))
-        self._generate_database_password()
-        LOG.info("install_vertica completed.")
-
-    def update_vertica(self, command, members=netutils.get_my_ipv4()):
-        LOG.info("Calling update_vertica with command %s", command)
-        try:
-            update_vertica_cmd = (system.UPDATE_VERTICA % (command, members,
-                                                           MOUNT_POINT))
-            system.shell_execute(update_vertica_cmd)
-        except exception.ProcessExecutionError:
-            LOG.exception("update_vertica failed.")
-            raise RuntimeError(_("update_vertica failed."))
-        # self._generate_database_password()
-        LOG.info("update_vertica completed.")
-
-    def add_udls(self):
-        """Load the user defined load libraries into the database."""
-        LOG.info("Adding configured user defined load libraries.")
-        password = self._get_database_password()
-        loaded_udls = []
-        for lib in system.UDL_LIBS:
-            func_name = lib['func_name']
-            lib_name = lib['lib_name']
-            language = lib['language']
-            factory = lib['factory']
-            path = lib['path']
-            if os.path.isfile(path):
-                LOG.debug("Adding the %(func)s library as %(lib)s.",
-                          {'func': func_name, 'lib': lib_name})
-                out, err = system.exec_vsql_command(
-                    password,
-                    system.CREATE_LIBRARY % (lib_name, path)
-                )
-                if err:
-                    if err.is_warning():
-                        LOG.warning(err)
-                    else:
-                        LOG.error(err)
-                        raise RuntimeError(_("Failed to create library %s.")
-                                           % lib_name)
-                out, err = system.exec_vsql_command(
-                    password,
-                    system.CREATE_SOURCE % (func_name, language,
-                                            factory, lib_name)
-                )
-                if err:
-                    if err.is_warning():
-                        LOG.warning(err)
-                    else:
-                        LOG.error(err)
-                        raise RuntimeError(_("Failed to create source %s.")
-                                           % func_name)
-                loaded_udls.append(func_name)
-            else:
-                LOG.warning("Skipping %(func)s as path %(path)s not "
-                            "found.", {"func": func_name, "path": path})
-        LOG.info("The following UDL functions are available for use: %s",
-                 loaded_udls)
-
-    def _generate_database_password(self):
-        """Generate and write the password to vertica.cnf file."""
-        config = configparser.ConfigParser()
-        config.add_section('credentials')
-        config.set('credentials', 'dbadmin_password',
-                   utils.generate_random_password())
-        self.write_config(config)
-
-    def write_config(self, config,
-                     unlink_function=os.unlink,
-                     temp_function=tempfile.NamedTemporaryFile):
-        """Write the configuration contents to vertica.cnf file."""
-        LOG.debug('Defining config holder at %s.', system.VERTICA_CONF)
-        tempfile = temp_function('w', delete=False)
-        try:
-            config.write(tempfile)
-            tempfile.close()
-            command = (("install -o root -g root -m 644 %(source)s %(target)s"
-                        ) % {'source': tempfile.name,
-                             'target': system.VERTICA_CONF})
-            system.shell_execute(command)
-            unlink_function(tempfile.name)
-        except Exception:
-            unlink_function(tempfile.name)
-            raise
-
-    def read_config(self):
-        """Reads and returns the Vertica config."""
-        try:
-            config = configparser.ConfigParser()
-            config.read(system.VERTICA_CONF)
-            return config
-        except Exception:
-            LOG.exception("Failed to read config %s.", system.VERTICA_CONF)
-            raise RuntimeError
-
-    def _get_database_password(self):
-        """Read the password from vertica.cnf file and return it."""
-        return self.read_config().get('credentials', 'dbadmin_password')
-
-    def install_if_needed(self, packages):
-        """Install Vertica package if needed."""
-        LOG.info("Preparing Guest as Vertica Server.")
-        if not packager.pkg_is_installed(packages):
-            LOG.debug("Installing Vertica Package.")
-            packager.pkg_install(packages, None, system.INSTALL_TIMEOUT)
-
-    def _set_readahead_for_disks(self):
-        """This method sets readahead size for disks as needed by Vertica."""
-        device = volume.VolumeDevice(CONF.device_path)
-        device.set_readahead_size(CONF.vertica.readahead_size)
-        LOG.debug("Set readahead size as required by Vertica.")
-
-    def prepare_for_install_vertica(self):
-        """This method executes preparatory methods before
-        executing install_vertica.
- """ - command = ("VERT_DBA_USR=%s VERT_DBA_HOME=/home/dbadmin " - "VERT_DBA_GRP=%s /opt/vertica/oss/python/bin/python" - " -m vertica.local_coerce" % - (system.VERTICA_ADMIN, system.VERTICA_ADMIN_GRP)) - try: - self._set_readahead_for_disks() - system.shell_execute(command) - except exception.ProcessExecutionError: - LOG.exception("Failed to prepare for install_vertica.") - raise - - def mark_design_ksafe(self, k): - """Wrapper for mark_design_ksafe function for setting k-safety """ - LOG.info("Setting Vertica k-safety to %s", str(k)) - out, err = system.exec_vsql_command(self._get_database_password(), - system.MARK_DESIGN_KSAFE % k) - # Only fail if we get an ERROR as opposed to a warning complaining - # about setting k = 0 - if "ERROR" in err: - LOG.error(err) - raise RuntimeError(_("Failed to set k-safety level %s.") % k) - - def _create_user(self, username, password, role=None): - """Creates a user, granting and enabling the given role for it.""" - LOG.info("Creating user in Vertica database.") - out, err = system.exec_vsql_command(self._get_database_password(), - system.CREATE_USER % - (username, password)) - if err: - if err.is_warning(): - LOG.warning(err) - else: - LOG.error(err) - raise RuntimeError(_("Failed to create user %s.") % username) - if role: - self._grant_role(username, role) - - def _grant_role(self, username, role): - """Grants a role to the user on the schema.""" - out, err = system.exec_vsql_command(self._get_database_password(), - system.GRANT_TO_USER - % (role, username)) - if err: - if err.is_warning(): - LOG.warning(err) - else: - LOG.error(err) - raise RuntimeError(_("Failed to grant role %(r)s to user " - "%(u)s.") - % {'r': role, 'u': username}) - out, err = system.exec_vsql_command(self._get_database_password(), - system.ENABLE_FOR_USER - % (username, role)) - if err: - LOG.warning(err) - - def enable_root(self, root_password=None): - """Resets the root password.""" - LOG.info("Enabling root.") - user = models.DatastoreUser.root(password=root_password) - if not self.is_root_enabled(): - self._create_user(user.name, user.password, 'pseudosuperuser') - else: - LOG.debug("Updating %s password.", user.name) - try: - out, err = system.exec_vsql_command( - self._get_database_password(), - system.ALTER_USER_PASSWORD % (user.name, user.password)) - if err: - if err.is_warning(): - LOG.warning(err) - else: - LOG.error(err) - raise RuntimeError(_("Failed to update %s " - "password.") % user.name) - except exception.ProcessExecutionError: - LOG.error("Failed to update %s password.", user.name) - raise RuntimeError(_("Failed to update %s password.") - % user.name) - return user.serialize() - - def is_root_enabled(self): - """Return True if root access is enabled else False.""" - LOG.debug("Checking is root enabled.") - try: - out, err = system.shell_execute(system.USER_EXISTS % - (self._get_database_password(), - 'root'), system.VERTICA_ADMIN) - if err: - LOG.error(err) - raise RuntimeError(_("Failed to query for root user.")) - except exception.ProcessExecutionError: - raise RuntimeError(_("Failed to query for root user.")) - return out.rstrip() == "1" - - def get_public_keys(self, user): - """Generates key (if not found), and sends public key for user.""" - LOG.debug("Public keys requested for user: %s.", user) - user_home_directory = os.path.expanduser('~' + user) - public_key_file_name = user_home_directory + '/.ssh/id_rsa.pub' - - try: - key_generate_command = (system.SSH_KEY_GEN % user_home_directory) - system.shell_execute(key_generate_command, user) - except 
-
-        try:
-            read_key_cmd = ("cat %(file)s" % {'file': public_key_file_name})
-            out, err = system.shell_execute(read_key_cmd)
-        except exception.ProcessExecutionError:
-            LOG.exception("Cannot read public key.")
-            raise
-        return out.strip()
-
-    def authorize_public_keys(self, user, public_keys):
-        """Adds public keys to authorized_keys for the user."""
-        LOG.debug("Public keys to be added for user: %s.", user)
-        user_home_directory = os.path.expanduser('~' + user)
-        authorized_file_name = user_home_directory + '/.ssh/authorized_keys'
-
-        try:
-            read_key_cmd = ("cat %(file)s" % {'file': authorized_file_name})
-            out, err = system.shell_execute(read_key_cmd)
-            public_keys.append(out.strip())
-        except exception.ProcessExecutionError:
-            LOG.debug("Cannot read authorized_keys.")
-        all_keys = '\n'.join(public_keys) + "\n"
-
-        try:
-            with tempfile.NamedTemporaryFile("w", delete=False) as tempkeyfile:
-                tempkeyfile.write(all_keys)
-            copy_key_cmd = (("install -o %(user)s -m 600 %(source)s %(target)s"
-                             ) % {'user': user, 'source': tempkeyfile.name,
-                                  'target': authorized_file_name})
-            system.shell_execute(copy_key_cmd)
-            os.remove(tempkeyfile.name)
-        except exception.ProcessExecutionError:
-            LOG.exception("Cannot install public keys.")
-            os.remove(tempkeyfile.name)
-            raise
-
-    def _export_conf_to_members(self, members):
-        """This method exports conf files to other members."""
-        try:
-            for member in members:
-                COPY_CMD = (system.SEND_CONF_TO_SERVER % (system.VERTICA_CONF,
-                                                          member,
-                                                          system.VERTICA_CONF))
-                system.shell_execute(COPY_CMD)
-        except exception.ProcessExecutionError:
-            LOG.exception("Cannot export configuration.")
-            raise
-
-    def install_cluster(self, members):
-        """Installs & configures cluster."""
-        cluster_members = ','.join(members)
-        LOG.debug("Installing cluster with members: %s.", cluster_members)
-        self.install_vertica(cluster_members)
-        self._export_conf_to_members(members)
-        LOG.debug("Creating database with members: %s.", cluster_members)
-        self.create_db(cluster_members)
-        LOG.debug("Cluster configured on members: %s.", cluster_members)
-
-    def grow_cluster(self, members):
-        """Adds nodes to cluster."""
-        cluster_members = ','.join(members)
-        LOG.debug("Growing cluster with members: %s.", cluster_members)
-        self.update_vertica("--add-hosts", cluster_members)
-        self._export_conf_to_members(members)
-        LOG.debug("Creating database with members: %s.", cluster_members)
-        self.add_db_to_node(cluster_members)
-        LOG.debug("Cluster configured on members: %s.", cluster_members)
-
-    def shrink_cluster(self, members):
-        """Removes nodes from cluster."""
-        cluster_members = ','.join(members)
-        LOG.debug("Shrinking cluster with members: %s.", cluster_members)
-        self.remove_db_from_node(cluster_members)
-        self.update_vertica("--remove-hosts", cluster_members)
-
-    def wait_for_node_status(self, status='UP'):
-        """Wait until all nodes have the given status."""
-        # select node_state from nodes where node_state <> 'UP'
-        def _wait_for_node_status():
-            out, err = system.exec_vsql_command(self._get_database_password(),
-                                                system.NODE_STATUS % status)
-            LOG.debug("Polled vertica node states: %s", out)
-
-            if err:
-                LOG.error(err)
-                raise RuntimeError(_("Failed to query node status."))
-
-            return "0 rows" in out
-
-        try:
-            utils.poll_until(_wait_for_node_status, time_out=600,
-                             sleep_time=15)
-        except exception.PollTimeOut:
-            raise RuntimeError(_("Timed out waiting for cluster to "
-                                 "change to status %s") % status)
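wait_for_node_status() above leans on trove.common.utils.poll_until, which keeps evaluating a predicate until it returns True or a timeout elapses. A rough stand-in for readers unfamiliar with the helper (the real one raises exception.PollTimeOut; TimeoutError is used here only to keep the sketch self-contained):

    import time

    def poll_until(predicate, sleep_time=15, time_out=600):
        deadline = time.time() + time_out
        while not predicate():
            if time.time() >= deadline:
                raise TimeoutError("condition not met within %s seconds"
                                   % time_out)
            time.sleep(sleep_time)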
diff --git a/trove/guestagent/datastore/experimental/vertica/system.py b/trove/guestagent/datastore/experimental/vertica/system.py
deleted file mode 100644
index 104d9c294e..0000000000
--- a/trove/guestagent/datastore/experimental/vertica/system.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright [2015] Hewlett-Packard Development Company, L.P.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-
-from trove.common.i18n import _
-from trove.common import utils
-
-ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s"
-ALTER_DB_RESET_CFG = "ALTER DATABASE %s CLEAR %s"
-ALTER_USER_PASSWORD = "ALTER USER %s IDENTIFIED BY '%s'"
-ADD_DB_TO_NODE = ("/opt/vertica/bin/adminTools -t db_add_node -a"
-                  " %s -d %s -p '%s'")
-REMOVE_DB_FROM_NODE = ("/opt/vertica/bin/adminTools -t db_remove_node -s"
-                       " %s -d %s -i -p '%s'")
-CREATE_DB = ("echo yes | /opt/vertica/bin/adminTools -t create_db -s"
-             " %s -d %s -c %s -D %s -p '%s'")
-CREATE_USER = "CREATE USER %s IDENTIFIED BY '%s'"
-ENABLE_FOR_USER = "ALTER USER %s DEFAULT ROLE %s"
-GRANT_TO_USER = "GRANT %s to %s"
-INSTALL_VERTICA = ("/opt/vertica/sbin/install_vertica -s %s"
-                   " -d %s -X -N -S default -r"
-                   " /vertica.deb -L CE -Y --no-system-checks"
-                   " --ignore-aws-instance-type"
-                   " --ignore-install-config")
-MARK_DESIGN_KSAFE = "SELECT MARK_DESIGN_KSAFE(%s)"
-NODE_STATUS = "SELECT node_state FROM nodes where node_state <> '%s'"
-STOP_DB = "/opt/vertica/bin/adminTools -t stop_db -F -d %s -p '%s'"
-START_DB = "/opt/vertica/bin/adminTools -t start_db -d %s -p '%s'"
-STATUS_ACTIVE_DB = "/opt/vertica/bin/adminTools -t show_active_db"
-STATUS_DB_DOWN = "/opt/vertica/bin/adminTools -t db_status -s DOWN"
-SET_RESTART_POLICY = ("/opt/vertica/bin/adminTools -t set_restart_policy "
-                      "-d %s -p '%s'")
-SEND_CONF_TO_SERVER = ("rsync -v -e 'ssh -o "
-                       "UserKnownHostsFile=/dev/null -o "
-                       "StrictHostKeyChecking=no' --perms --owner --group "
-                       "%s %s:%s")
-SSH_KEY_GEN = "ssh-keygen -f %s/.ssh/id_rsa -t rsa -N ''"
-UPDATE_VERTICA = ("/opt/vertica/sbin/update_vertica %s %s "
-                  " -d %s -X -N -S default -r"
-                  " /vertica.deb -L CE -Y --no-system-checks"
-                  " --ignore-aws-instance-type"
-                  " --ignore-install-config")
-UPDATE_REMOVE = ("/opt/vertica/sbin/update_vertica --remove-hosts %s "
-                 " -d %s -X -N -S default -r"
-                 " /vertica.deb -L CE -Y --no-system-checks"
-                 " --ignore-aws-instance-type"
-                 " --ignore-install-config")
-UPDATE_ADD = ("/opt/vertica/sbin/update_vertica --add-hosts %s "
-              " -d %s -X -N -S default -r"
-              " /vertica.deb -L CE -Y --no-system-checks"
-              " --ignore-aws-instance-type"
-              " --ignore-install-config")
-USER_EXISTS = ("/opt/vertica/bin/vsql -w '%s' -c "
-               "\"select 1 from users where user_name = '%s'\" "
-               "| grep row | awk '{print $1}' | cut -c2-")
-VERTICA_ADMIN = "dbadmin"
-VERTICA_ADMIN_GRP = "verticadba"
-VERTICA_AGENT_SERVICE_COMMAND = "service vertica_agent %s"
-VERTICA_CONF = "/etc/vertica.cnf"
-INSTALL_TIMEOUT = 1000
-CREATE_LIBRARY = "CREATE LIBRARY %s AS '%s'"
-CREATE_SOURCE = "CREATE SOURCE %s AS LANGUAGE '%s' NAME '%s' LIBRARY %s"
-UDL_LIBS = [
-    {
-        'func_name': "curl",
-        'lib_name': "curllib",
-        'language': "C++",
-        'factory': "CurlSourceFactory",
-        'path': "/opt/vertica/sdk/examples/build/cURLLib.so"
-    },
-]
-
-
-def shell_execute(command, command_executor="root"):
-    # This method encapsulates utils.execute for two purposes:
-    # 1. It helps in safe testing.
-    # 2. It helps in executing commands as another user, using their
-    #    environment.
-
-    # Note: This method uses su because sudo -i -u
-    # does not work with the vertica installer
-    # and it has problems executing remote commands.
-    return utils.execute("sudo", "su", "-", command_executor, "-c", "%s"
-                         % command)
-
-
-class VSqlError(object):
-    def __init__(self, stderr):
-        """Parse the stderr part of the VSql output.
-        stderr looks like: "ERROR 3117: Division by zero"
-        :param stderr: string from executing statement via vsql
-        """
-        parse = re.match(r"^(ERROR|WARNING) (\d+): (.+)$", stderr)
-        if not parse:
-            raise ValueError(_("VSql stderr %(msg)s not recognized.")
-                             % {'msg': stderr})
-        self.type = parse.group(1)
-        self.code = int(parse.group(2))
-        self.msg = parse.group(3)
-
-    def is_warning(self):
-        return bool(self.type == "WARNING")
-
-    def __str__(self):
-        return "Vertica %s (%s): %s" % (self.type, self.code, self.msg)
-
-
-def exec_vsql_command(dbadmin_password, command):
-    """Executes a VSQL command with the given dbadmin password."""
-    out, err = shell_execute("/opt/vertica/bin/vsql -w \'%s\' -c \"%s\""
-                             % (dbadmin_password, command),
-                             VERTICA_ADMIN)
-    if err:
-        err = VSqlError(err)
-    return out, err
diff --git a/trove/guestagent/datastore/galera_common/__init__.py b/trove/guestagent/datastore/galera_common/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
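The VSqlError class removed above recognizes vsql stderr of the form "ERROR 3117: Division by zero". The parse can be exercised in isolation with the same regular expression (the sample string is invented):

    import re

    stderr = "ERROR 3117: Division by zero"
    parse = re.match(r"^(ERROR|WARNING) (\d+): (.+)$", stderr)
    assert parse.group(1) == "ERROR"
    assert int(parse.group(2)) == 3117
    assert parse.group(3) == "Division by zero"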
diff --git a/trove/guestagent/datastore/galera_common/manager.py b/trove/guestagent/datastore/galera_common/manager.py
deleted file mode 100644
index 6af86cf5f6..0000000000
--- a/trove/guestagent/datastore/galera_common/manager.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2016 Tesora, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-
-from trove.common import instance as rd_instance
-from trove.guestagent.datastore.mysql_common import manager
-
-
-LOG = logging.getLogger(__name__)
-
-
-class GaleraManager(manager.MySqlManager):
-
-    def __init__(self, mysql_app, mysql_app_status, mysql_admin,
-                 manager_name='galera'):
-
-        super(GaleraManager, self).__init__(
-            mysql_app, mysql_app_status, mysql_admin, manager_name)
-        self._mysql_app = mysql_app
-        self._mysql_app_status = mysql_app_status
-        self._mysql_admin = mysql_admin
-
-        self.volume_do_not_start_on_reboot = False
-
-    def do_prepare(self, context, packages, databases, memory_mb, users,
-                   device_path, mount_point, backup_info,
-                   config_contents, root_password, overrides,
-                   cluster_config, snapshot):
-        self.volume_do_not_start_on_reboot = True
-        super(GaleraManager, self).do_prepare(
-            context, packages, databases, memory_mb, users,
-            device_path, mount_point, backup_info,
-            config_contents, root_password, overrides,
-            cluster_config, snapshot)
-
-    def install_cluster(self, context, replication_user, cluster_configuration,
-                        bootstrap):
-        app = self.mysql_app(self.mysql_app_status.get())
-        try:
-            app.install_cluster(
-                replication_user, cluster_configuration, bootstrap)
-            LOG.debug("install_cluster call has finished.")
-        except Exception:
-            LOG.exception('Cluster installation failed.')
-            app.status.set_status(
-                rd_instance.ServiceStatuses.FAILED)
-            raise
-
-    def reset_admin_password(self, context, admin_password):
-        LOG.debug("Storing the admin password on the instance.")
-        app = self.mysql_app(self.mysql_app_status.get())
-        app.reset_admin_password(admin_password)
-
-    def get_cluster_context(self, context):
-        LOG.debug("Getting the cluster context.")
-        app = self.mysql_app(self.mysql_app_status.get())
-        return app.get_cluster_context()
-
-    def write_cluster_configuration_overrides(self, context,
-                                              cluster_configuration):
-        LOG.debug("Apply the updated cluster configuration.")
-        app = self.mysql_app(self.mysql_app_status.get())
-        app.write_cluster_configuration_overrides(cluster_configuration)
-
-    def enable_root_with_password(self, context, root_password=None):
-        return self.mysql_admin().enable_root(root_password)
diff --git a/trove/guestagent/datastore/galera_common/service.py b/trove/guestagent/datastore/galera_common/service.py
deleted file mode 100644
index bee7e3c8e4..0000000000
--- a/trove/guestagent/datastore/galera_common/service.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2016 Tesora, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import abc
-
-from oslo_log import log as logging
-from sqlalchemy.sql.expression import text
-
-from trove.common.i18n import _
-from trove.common import utils
-from trove.guestagent.common import sql_query
-from trove.guestagent.datastore.mysql_common import service
-
-
-LOG = logging.getLogger(__name__)
-CONF = service.CONF
-
-
-class GaleraApp(service.BaseMySqlApp):
-
-    def __init__(self, status, local_sql_client, keep_alive_connection_cls):
-        super(GaleraApp, self).__init__(status, local_sql_client,
-                                        keep_alive_connection_cls)
-
-    def _grant_cluster_replication_privilege(self, replication_user):
-        LOG.info("Granting Replication Slave privilege.")
-        with self.local_sql_client(self.get_engine()) as client:
-            perms = ['REPLICATION CLIENT', 'RELOAD', 'LOCK TABLES']
-            g = sql_query.Grant(permissions=perms,
-                                user=replication_user['name'],
-                                clear=replication_user['password'])
-            t = text(str(g))
-            client.execute(t)
-
-    def _bootstrap_cluster(self, timeout=120):
-        LOG.info("Bootstrapping cluster.")
-        try:
-            utils.execute_with_timeout(
-                self.mysql_service['cmd_bootstrap_galera_cluster'],
-                shell=True, timeout=timeout)
-        except KeyError:
-            LOG.exception("Error bootstrapping cluster.")
-            raise RuntimeError(_("Service is not discovered."))
-
-    def write_cluster_configuration_overrides(self, cluster_configuration):
-        self.configuration_manager.apply_system_override(
-            cluster_configuration, 'cluster')
-
-    def install_cluster(self, replication_user, cluster_configuration,
-                        bootstrap=False):
-        LOG.info("Installing cluster configuration.")
-        self._grant_cluster_replication_privilege(replication_user)
-        self.stop_db()
-        self.write_cluster_configuration_overrides(cluster_configuration)
-        self.wipe_ib_logfiles()
-        LOG.debug("bootstrap the instance? : %s", bootstrap)
-        # Have to wait to sync up the joiner instances with the donor instance.
-        if bootstrap:
-            self._bootstrap_cluster(timeout=CONF.restore_usage_timeout)
-        else:
-            self.start_mysql(timeout=CONF.restore_usage_timeout)
-
-    @abc.abstractproperty
-    def cluster_configuration(self):
-        """
-        Returns the cluster section from the configuration manager.
-        """
-
-    def get_cluster_context(self):
-        auth = self.cluster_configuration.get(
-            "wsrep_sst_auth").replace('"', '')
-        cluster_name = self.cluster_configuration.get("wsrep_cluster_name")
-        return {
-            'replication_user': {
-                'name': auth.split(":")[0],
-                'password': auth.split(":")[1],
-            },
-            'cluster_name': cluster_name,
-            'admin_password': self.get_auth_password()
-        }
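get_cluster_context() above derives the replication credentials from the wsrep_sst_auth option, which Galera stores as a quoted "user:password" pair. The same parse in isolation (the credential values are invented):

    auth = '"sstuser:sstpassword"'.replace('"', '')
    replication_user = {'name': auth.split(':')[0],
                        'password': auth.split(':')[1]}
    assert replication_user == {'name': 'sstuser', 'password': 'sstpassword'}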
- """ - - def get_cluster_context(self): - auth = self.cluster_configuration.get( - "wsrep_sst_auth").replace('"', '') - cluster_name = self.cluster_configuration.get("wsrep_cluster_name") - return { - 'replication_user': { - 'name': auth.split(":")[0], - 'password': auth.split(":")[1], - }, - 'cluster_name': cluster_name, - 'admin_password': self.get_auth_password() - } diff --git a/trove/guestagent/datastore/manager.py b/trove/guestagent/datastore/manager.py index 9ec80648f8..0a639417c2 100644 --- a/trove/guestagent/datastore/manager.py +++ b/trove/guestagent/datastore/manager.py @@ -18,26 +18,25 @@ import abc import operator import os +import docker from oslo_config import cfg as oslo_cfg from oslo_log import log as logging from oslo_service import periodic_task -from oslo_utils import encodeutils from trove.common import cfg from trove.common import exception -from trove.common.i18n import _ from trove.common import instance +from trove.common.i18n import _ from trove.common.notification import EndNotification +from trove.guestagent import dbaas +from trove.guestagent import guest_log +from trove.guestagent import volume from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode -from trove.guestagent import dbaas -from trove.guestagent import guest_log from trove.guestagent.module import driver_manager from trove.guestagent.module import module_manager from trove.guestagent.strategies import replication as repl_strategy -from trove.guestagent import volume - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -66,6 +65,8 @@ class Manager(periodic_task.PeriodicTasks): MODULE_APPLY_TO_ALL = module_manager.ModuleManager.MODULE_APPLY_TO_ALL + docker_client = docker.from_env() + def __init__(self, manager_name): super(Manager, self).__init__(CONF) @@ -73,6 +74,7 @@ class Manager(periodic_task.PeriodicTasks): self.__manager_name = manager_name self.__manager = None self.__prepare_error = False + self.status = None # Guest log self._guest_log_context = None @@ -104,16 +106,10 @@ class Manager(periodic_task.PeriodicTasks): self.__prepare_error = prepare_error @property - def replication(self): - """If the datastore supports replication, return an instance of - the strategy. + def configuration_manager(self): + """If the datastore supports the new-style configuration manager, + it should override this to return it. """ - try: - return repl_strategy.get_instance(self.manager) - except Exception as ex: - LOG.warning("Cannot get replication instance for '%(manager)s': " - "%(msg)s", {'manager': self.manager, 'msg': str(ex)}) - return None @property @@ -127,51 +123,13 @@ class Manager(periodic_task.PeriodicTasks): return None - @abc.abstractproperty - def status(self): - """This should return an instance of a status class that has been - inherited from datastore.service.BaseDbStatus. Each datastore - must implement this property. - """ - return None - - @property - def configuration_manager(self): - """If the datastore supports the new-style configuration manager, - it should override this to return it. - """ - return None - - def get_datastore_log_defs(self): - """Any datastore-specific log files should be overridden in this dict - by the corresponding Manager class. 
-
-        Format of a dict entry:
-
-        'name_of_log': {self.GUEST_LOG_TYPE_LABEL:
-                            Specified by the Enum in guest_log.LogType,
-                        self.GUEST_LOG_USER_LABEL:
-                            User that owns the file,
-                        self.GUEST_LOG_FILE_LABEL:
-                            Path on filesystem where the log resides,
-                        self.GUEST_LOG_SECTION_LABEL:
-                            Section where to put config (if ini style)
-                        self.GUEST_LOG_ENABLE_LABEL: {
-                            Dict of config_group settings to enable log},
-                        self.GUEST_LOG_DISABLE_LABEL: {
-                            Dict of config_group settings to disable log},
-
-        See guestagent_log_defs for an example.
-        """
-        return {}
-
     @property
     def guestagent_log_defs(self):
         """These are log files that should be available on every Trove
         instance.  By definition, these should be of type LogType.SYS
         """
-        log_dir = CONF.get('log_dir', '/var/log/trove/')
-        log_file = CONF.get('log_file', 'trove-guestagent.log')
+        log_dir = CONF.log_dir or '/var/log/trove/'
+        log_file = CONF.log_file or 'trove-guestagent.log'
         guestagent_log = guestagent_utils.build_file_path(log_dir, log_file)
         return {
             self.GUEST_LOG_DEFS_GUEST_LABEL: {
@@ -181,13 +139,6 @@ class Manager(periodic_task.PeriodicTasks):
             },
         }
 
-    def get_guest_log_defs(self):
-        """Return all the guest log defs."""
-        if not self._guest_log_defs:
-            self._guest_log_defs = dict(self.get_datastore_log_defs())
-            self._guest_log_defs.update(self.guestagent_log_defs)
-        return self._guest_log_defs
-
     @property
     def guest_log_context(self):
         return self._guest_log_context
@@ -196,57 +147,11 @@ class Manager(periodic_task.PeriodicTasks):
     def guest_log_context(self, context):
         self._guest_log_context = context
 
-    def get_guest_log_cache(self):
-        """Make sure the guest_log_cache is loaded and return it."""
-        self._refresh_guest_log_cache()
-        return self._guest_log_cache
-
-    def _refresh_guest_log_cache(self):
-        if self._guest_log_cache:
-            # Replace the context if it's changed
-            if self._guest_log_loaded_context != self.guest_log_context:
-                for log_name in self._guest_log_cache.keys():
-                    self._guest_log_cache[log_name].context = (
-                        self.guest_log_context)
-        else:
-            # Load the initial cache
-            self._guest_log_cache = {}
-            if self.guest_log_context:
-                gl_defs = self.get_guest_log_defs()
-                try:
-                    exposed_logs = CONF.get(self.manager).get(
-                        'guest_log_exposed_logs')
-                except oslo_cfg.NoSuchOptError:
-                    exposed_logs = ''
-                LOG.debug("Available log defs: %s", ",".join(gl_defs.keys()))
-                exposed_logs = exposed_logs.lower().replace(',', ' ').split()
-                LOG.debug("Exposing log defs: %s", ",".join(exposed_logs))
-                expose_all = 'all' in exposed_logs
-                for log_name in gl_defs.keys():
-                    gl_def = gl_defs[log_name]
-                    exposed = expose_all or log_name in exposed_logs
-                    LOG.debug("Building guest log '%(name)s' from def: %(def)s"
-                              " (exposed: %(exposed)s)",
-                              {'name': log_name, 'def': gl_def,
-                               'exposed': exposed})
-                    self._guest_log_cache[log_name] = guest_log.GuestLog(
-                        self.guest_log_context, log_name,
-                        gl_def[self.GUEST_LOG_TYPE_LABEL],
-                        gl_def[self.GUEST_LOG_USER_LABEL],
-                        gl_def[self.GUEST_LOG_FILE_LABEL],
-                        exposed)
-
-        self._guest_log_loaded_context = self.guest_log_context
-
-    def get_service_status(self):
-        return self.status._get_actual_db_status()
-
     @periodic_task.periodic_task
     def update_status(self, context):
         """Update the status of the trove instance."""
-        if not self.status.is_installed or self.status._is_restarting:
-            LOG.info("Database service is not installed or is in restart "
-                     "mode, skip status check")
+        if not self.status.is_installed:
+            LOG.info("Database service is not installed, skip status check")
             return
 
         LOG.debug("Starting to check database service status")
@@ -254,6 +159,9 @@ class Manager(periodic_task.PeriodicTasks):
         status = self.get_service_status()
         self.status.set_status(status)
 
+    def get_service_status(self):
+        return self.status.get_actual_db_status()
+
     def rpc_ping(self, context):
         LOG.debug("Responding to RPC ping.")
         return True
@@ -264,19 +172,22 @@ class Manager(periodic_task.PeriodicTasks):
     def prepare(self, context, packages, databases, memory_mb, users,
                 device_path=None, mount_point=None, backup_info=None,
                 config_contents=None, root_password=None, overrides=None,
-                cluster_config=None, snapshot=None, modules=None):
+                cluster_config=None, snapshot=None, modules=None,
+                ds_version=None):
         """Set up datastore on a Guest Instance."""
         with EndNotification(context, instance_id=CONF.guest_id):
             self._prepare(context, packages, databases, memory_mb, users,
                           device_path, mount_point, backup_info,
                           config_contents, root_password, overrides,
-                          cluster_config, snapshot, modules)
+                          cluster_config, snapshot, modules,
+                          ds_version=ds_version)
 
     def _prepare(self, context, packages, databases, memory_mb, users,
                  device_path, mount_point, backup_info,
                  config_contents, root_password, overrides,
-                 cluster_config, snapshot, modules):
-        LOG.info("Starting datastore prepare for '%s'.", self.manager)
+                 cluster_config, snapshot, modules, ds_version=None):
+        LOG.info("Starting datastore prepare for '%s:%s'.", self.manager,
+                 ds_version)
         self.status.begin_install()
         post_processing = True if cluster_config else False
         try:
@@ -285,15 +196,10 @@ class Manager(periodic_task.PeriodicTasks):
             self.do_prepare(context, packages, databases, memory_mb,
                             users, device_path, mount_point, backup_info,
                             config_contents, root_password, overrides,
-                            cluster_config, snapshot)
-            if overrides:
-                LOG.info("Applying user-specified configuration "
-                         "(called from 'prepare').")
-                self.apply_overrides_on_prepare(context, overrides)
+                            cluster_config, snapshot, ds_version=ds_version)
         except Exception as ex:
             self.prepare_error = True
-            LOG.exception("An error occurred preparing datastore: %s",
-                          encodeutils.exception_to_unicode(ex))
+            LOG.exception("Failed to prepare datastore: %s", ex)
            raise
         finally:
             LOG.info("Ending datastore prepare for '%s'.", self.manager)
@@ -328,16 +234,16 @@ class Manager(periodic_task.PeriodicTasks):
                 self.create_database(context, databases)
                 LOG.info('Databases created successfully.')
             except Exception as ex:
-                LOG.exception("An error occurred creating databases: "
-                              "%s", str(ex))
+                LOG.warning("An error occurred creating databases: %s",
+                            str(ex))
             try:
                 if users:
                     LOG.info("Creating users (called from 'prepare')")
                     self.create_user(context, users)
                     LOG.info('Users created successfully.')
             except Exception as ex:
-                LOG.exception("An error occurred creating users: "
-                              "%s", str(ex))
+                LOG.warning("An error occurred creating users: "
+                            "%s", str(ex))
 
         # We only enable-root automatically if not restoring a backup
         # that may already have root enabled in which case we keep it
@@ -352,8 +258,7 @@ class Manager(periodic_task.PeriodicTasks):
                           "%s", str(ex))
 
         try:
-            LOG.info("Calling post_prepare for '%s' datastore.",
-                     self.manager)
+            LOG.info("Starting post prepare for '%s' datastore.", self.manager)
             self.post_prepare(context, packages, databases, memory_mb,
                               users, device_path, mount_point, backup_info,
                               config_contents, root_password, overrides,
@@ -365,17 +270,11 @@ class Manager(periodic_task.PeriodicTasks):
                           str(ex))
             raise
 
-    def apply_overrides_on_prepare(self, context, overrides):
-        self.update_overrides(context, overrides)
-        self.restart(context)
-
-    def enable_root_on_prepare(self, context, root_password):
-        self.enable_root_with_password(context, root_password)
-
     @abc.abstractmethod
     def do_prepare(self, context, packages, databases, memory_mb, users,
                    device_path, mount_point, backup_info, config_contents,
-                   root_password, overrides, cluster_config, snapshot):
+                   root_password, overrides, cluster_config, snapshot,
+                   ds_version=None):
         """This is called from prepare when the Trove instance first
         comes online.  'Prepare' is the first rpc message passed from the
         task manager.  do_prepare handles all the base configuration of
@@ -480,6 +379,20 @@ class Manager(periodic_task.PeriodicTasks):
             config_contents = configuration['config_contents']
             self.configuration_manager.save_configuration(config_contents)
 
+    def apply_overrides_on_prepare(self, context, overrides):
+        self.update_overrides(context, overrides)
+        self.restart(context)
+
+    def update_overrides(self, context, overrides, remove=False):
+        LOG.debug("Updating overrides.")
+        raise exception.DatastoreOperationNotSupported(
+            operation='update_overrides', datastore=self.manager)
+
+    def apply_overrides(self, context, overrides):
+        LOG.debug("Applying overrides.")
+        raise exception.DatastoreOperationNotSupported(
+            operation='apply_overrides', datastore=self.manager)
+
     #################
     # Cluster related
     #################
@@ -490,6 +403,78 @@ class Manager(periodic_task.PeriodicTasks):
     #############
     # Log related
     #############
+    def get_datastore_log_defs(self):
+        """Any datastore-specific log files should be overridden in this dict
+        by the corresponding Manager class.
+
+        Format of a dict entry:
+
+        'name_of_log': {self.GUEST_LOG_TYPE_LABEL:
+                            Specified by the Enum in guest_log.LogType,
+                        self.GUEST_LOG_USER_LABEL:
+                            User that owns the file,
+                        self.GUEST_LOG_FILE_LABEL:
+                            Path on filesystem where the log resides,
+                        self.GUEST_LOG_SECTION_LABEL:
+                            Section where to put config (if ini style)
+                        self.GUEST_LOG_ENABLE_LABEL: {
+                            Dict of config_group settings to enable log},
+                        self.GUEST_LOG_DISABLE_LABEL: {
+                            Dict of config_group settings to disable log},
+
+        See guestagent_log_defs for an example.
+        """
+        return {}
+
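A hypothetical datastore-specific override following the entry format documented above; the log name, user, file path, and config settings are all invented, and LogType.USER is assumed to exist in guest_log.LogType:

    # In a datastore Manager subclass (illustrative only):
    def get_datastore_log_defs(self):
        return {
            'general': {
                self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
                self.GUEST_LOG_USER_LABEL: 'mysql',
                self.GUEST_LOG_FILE_LABEL: '/var/log/mysql/general.log',
                self.GUEST_LOG_SECTION_LABEL: 'mysqld',
                self.GUEST_LOG_ENABLE_LABEL: {'general_log': 'on'},
                self.GUEST_LOG_DISABLE_LABEL: {'general_log': 'off'},
            },
        }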
+ """ + return {} + + def get_guest_log_defs(self): + """Return all the guest log defs.""" + if not self._guest_log_defs: + self._guest_log_defs = dict(self.get_datastore_log_defs()) + self._guest_log_defs.update(self.guestagent_log_defs) + return self._guest_log_defs + + def get_guest_log_cache(self): + """Make sure the guest_log_cache is loaded and return it.""" + self._refresh_guest_log_cache() + return self._guest_log_cache + + def _refresh_guest_log_cache(self): + if self._guest_log_cache: + # Replace the context if it's changed + if self._guest_log_loaded_context != self.guest_log_context: + for log_name in self._guest_log_cache.keys(): + self._guest_log_cache[log_name].context = ( + self.guest_log_context) + else: + # Load the initial cache + self._guest_log_cache = {} + if self.guest_log_context: + gl_defs = self.get_guest_log_defs() + try: + exposed_logs = CONF.get(self.manager).get( + 'guest_log_exposed_logs') + except oslo_cfg.NoSuchOptError: + exposed_logs = '' + LOG.debug("Available log defs: %s", ",".join(gl_defs.keys())) + exposed_logs = exposed_logs.lower().replace(',', ' ').split() + LOG.debug("Exposing log defs: %s", ",".join(exposed_logs)) + expose_all = 'all' in exposed_logs + for log_name in gl_defs.keys(): + gl_def = gl_defs[log_name] + exposed = expose_all or log_name in exposed_logs + LOG.debug("Building guest log '%(name)s' from def: %(def)s" + " (exposed: %(exposed)s)", + {'name': log_name, 'def': gl_def, + 'exposed': exposed}) + self._guest_log_cache[log_name] = guest_log.GuestLog( + self.guest_log_context, log_name, + gl_def[self.GUEST_LOG_TYPE_LABEL], + gl_def[self.GUEST_LOG_USER_LABEL], + gl_def[self.GUEST_LOG_FILE_LABEL], + exposed) + + self._guest_log_loaded_context = self.guest_log_context + def guest_log_list(self, context): LOG.info("Getting list of guest logs.") self.guest_log_context = context @@ -743,9 +728,6 @@ class Manager(periodic_task.PeriodicTasks): driver, module_type, id, name, datastore, ds_version) LOG.info("Deleted module: %s", name) - ############### - # Not Supported - ############### def change_passwords(self, context, users): LOG.debug("Changing passwords.") with EndNotification(context): @@ -762,6 +744,9 @@ class Manager(periodic_task.PeriodicTasks): raise exception.DatastoreOperationNotSupported( operation='enable_root', datastore=self.manager) + def enable_root_on_prepare(self, context, root_password): + self.enable_root_with_password(context, root_password) + def enable_root_with_password(self, context, root_password=None): LOG.debug("Enabling root with password.") raise exception.DatastoreOperationNotSupported( @@ -782,7 +767,7 @@ class Manager(periodic_task.PeriodicTasks): raise exception.DatastoreOperationNotSupported( operation='create_backup', datastore=self.manager) - def _perform_restore(self, backup_info, context, restore_location, app): + def perform_restore(self, context, restore_location, backup_info): LOG.debug("Performing restore.") raise exception.DatastoreOperationNotSupported( operation='_perform_restore', datastore=self.manager) @@ -854,16 +839,6 @@ class Manager(periodic_task.PeriodicTasks): raise exception.DatastoreOperationNotSupported( operation='get_configuration_changes', datastore=self.manager) - def update_overrides(self, context, overrides, remove=False): - LOG.debug("Updating overrides.") - raise exception.DatastoreOperationNotSupported( - operation='update_overrides', datastore=self.manager) - - def apply_overrides(self, context, overrides): - LOG.debug("Applying overrides.") - raise 
exception.DatastoreOperationNotSupported( - operation='apply_overrides', datastore=self.manager) - def get_replication_snapshot(self, context, snapshot_info, replica_source_config=None): LOG.debug("Getting replication snapshot.") @@ -895,6 +870,11 @@ class Manager(periodic_task.PeriodicTasks): raise exception.DatastoreOperationNotSupported( operation='enable_as_master', datastore=self.manager) + def demote_replication_master(self, context): + LOG.debug("Demoting replication master.") + raise exception.DatastoreOperationNotSupported( + operation='demote_replication_master', datastore=self.manager) + def get_txn_count(self, context): LOG.debug("Getting transaction count.") raise exception.DatastoreOperationNotSupported( @@ -909,8 +889,3 @@ class Manager(periodic_task.PeriodicTasks): LOG.debug("Waiting for transaction.") raise exception.DatastoreOperationNotSupported( operation='wait_for_txn', datastore=self.manager) - - def demote_replication_master(self, context): - LOG.debug("Demoting replication master.") - raise exception.DatastoreOperationNotSupported( - operation='demote_replication_master', datastore=self.manager) diff --git a/trove/guestagent/datastore/experimental/couchbase/__init__.py b/trove/guestagent/datastore/mariadb/__init__.py similarity index 100% rename from trove/guestagent/datastore/experimental/couchbase/__init__.py rename to trove/guestagent/datastore/mariadb/__init__.py diff --git a/trove/guestagent/datastore/mariadb/manager.py b/trove/guestagent/datastore/mariadb/manager.py new file mode 100644 index 0000000000..87d502f8f8 --- /dev/null +++ b/trove/guestagent/datastore/mariadb/manager.py @@ -0,0 +1,26 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from trove.guestagent.datastore.mariadb import service +from trove.guestagent.datastore.mysql_common import manager +from trove.guestagent.datastore.mysql_common import service as mysql_service + + +class Manager(manager.MySqlManager): + def __init__(self): + status = mysql_service.BaseMySqlAppStatus(self.docker_client) + app = service.MariaDBApp(status, self.docker_client) + adm = service.MariaDBAdmin(app) + + super(Manager, self).__init__(app, status, adm) diff --git a/trove/guestagent/datastore/mariadb/service.py b/trove/guestagent/datastore/mariadb/service.py new file mode 100644 index 0000000000..1edb012212 --- /dev/null +++ b/trove/guestagent/datastore/mariadb/service.py @@ -0,0 +1,88 @@ +# Copyright 2015 Tesora, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_log import log as logging + +from trove.guestagent.datastore.mysql_common import service as mysql_service +from trove.guestagent.utils import mysql as mysql_util +from trove.common import utils +from trove.common import exception + +LOG = logging.getLogger(__name__) + + +class MariaDBApp(mysql_service.BaseMySqlApp): + def __init__(self, status, docker_client): + super(MariaDBApp, self).__init__(status, docker_client) + + def wait_for_slave_status(self, status, client, max_time): + def verify_slave_status(): + actual_status = client.execute( + 'SHOW GLOBAL STATUS like "Slave_running";').first()[1] + return actual_status.upper() == status.upper() + + LOG.debug("Waiting for slave status %s with timeout %s", + status, max_time) + try: + utils.poll_until(verify_slave_status, sleep_time=3, + time_out=max_time) + LOG.info("Replication status: %s.", status) + except exception.PollTimeOut: + raise RuntimeError( + "Replication is not %(status)s after %(max)d seconds." % + {'status': status.lower(), 'max': max_time}) + + def _get_slave_status(self): + with mysql_util.SqlClient(self.get_engine()) as client: + return client.execute('SHOW SLAVE STATUS').first() + + def _get_master_UUID(self): + slave_status = self._get_slave_status() + return slave_status and slave_status['Master_Server_Id'] or None + + def _get_gtid_executed(self): + with mysql_util.SqlClient(self.get_engine()) as client: + return client.execute('SELECT @@global.gtid_binlog_pos').first()[0] + + def get_last_txn(self): + master_UUID = self._get_master_UUID() + last_txn_id = '0' + gtid_executed = self._get_gtid_executed() + for gtid_set in gtid_executed.split(','): + uuid_set = gtid_set.split('-') + if uuid_set[1] == master_UUID: + last_txn_id = uuid_set[-1] + break + return master_UUID, int(last_txn_id) + + def get_latest_txn_id(self): + return self._get_gtid_executed() + + def wait_for_txn(self, txn): + cmd = "SELECT MASTER_GTID_WAIT('%s')" % txn + with mysql_util.SqlClient(self.get_engine()) as client: + client.execute(cmd) + + +class MariaDBRootAccess(mysql_service.BaseMySqlRootAccess): + def __init__(self, app): + super(MariaDBRootAccess, self).__init__(app) + + +class MariaDBAdmin(mysql_service.BaseMySqlAdmin): + def __init__(self, app): + root_access = MariaDBRootAccess(app) + super(MariaDBAdmin, self).__init__(root_access, app) diff --git a/trove/guestagent/datastore/mysql/manager.py b/trove/guestagent/datastore/mysql/manager.py index 986b2787d6..6704984543 100644 --- a/trove/guestagent/datastore/mysql/manager.py +++ b/trove/guestagent/datastore/mysql/manager.py @@ -1,35 +1,25 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Rackspace Hosting -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. +# Copyright 2020 Catalyst Cloud # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-from oslo_utils import importutils
+from trove.guestagent.datastore.mysql import service
 from trove.guestagent.datastore.mysql_common import manager
 
-MYSQL_APP = "trove.guestagent.datastore.mysql.service.MySqlApp"
-MYSQL_APP_STATUS = "trove.guestagent.datastore.mysql.service.MySqlAppStatus"
-MYSQL_ADMIN = "trove.guestagent.datastore.mysql.service.MySqlAdmin"
-
-
 class Manager(manager.MySqlManager):
     def __init__(self):
-        mysql_app = importutils.import_class(MYSQL_APP)
-        mysql_app_status = importutils.import_class(MYSQL_APP_STATUS)
-        mysql_admin = importutils.import_class(MYSQL_ADMIN)
+        status = service.MySqlAppStatus(self.docker_client)
+        app = service.MySqlApp(status, self.docker_client)
+        adm = service.MySqlAdmin(app)
 
-        super(Manager, self).__init__(mysql_app, mysql_app_status, mysql_admin)
+        super(Manager, self).__init__(app, status, adm)
diff --git a/trove/guestagent/datastore/mysql/service.py b/trove/guestagent/datastore/mysql/service.py
index fe040e58fa..cbe59721e8 100644
--- a/trove/guestagent/datastore/mysql/service.py
+++ b/trove/guestagent/datastore/mysql/service.py
@@ -1,72 +1,43 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 Rackspace Hosting
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
+# Copyright 2020 Catalyst Cloud
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from oslo_log import log as logging
-
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
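+
+# A worked example (illustrative values) of the GTID bookkeeping used by
+# the replication methods below: SELECT @@global.gtid_executed returns a
+# GTID set such as
+#
+#     3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5
+#
+# i.e. <server_uuid>:<first>-<last>. get_last_txn() matches the UUID
+# portion against Master_UUID from SHOW SLAVE STATUS and returns the last
+# applied sequence number (5 here), while wait_for_txn() waits inside
+# MySQL via WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS for a given GTID set to be
+# applied.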
from trove.guestagent.datastore.mysql_common import service - -LOG = logging.getLogger(__name__) -CONF = service.CONF - - -class KeepAliveConnection(service.BaseKeepAliveConnection): - pass +from trove.guestagent.utils import mysql as mysql_util class MySqlAppStatus(service.BaseMySqlAppStatus): - pass - - -class LocalSqlClient(service.BaseLocalSqlClient): - pass + def __init__(self, docker_client): + super(MySqlAppStatus, self).__init__(docker_client) class MySqlApp(service.BaseMySqlApp): - def __init__(self, status): - super(MySqlApp, self).__init__(status, LocalSqlClient, - KeepAliveConnection) + def __init__(self, status, docker_client): + super(MySqlApp, self).__init__(status, docker_client) - # DEPRECATED: Mantain for API Compatibility - def get_txn_count(self): - LOG.info("Retrieving latest txn id.") - txn_count = 0 - with self.local_sql_client(self.get_engine()) as client: - result = client.execute('SELECT @@global.gtid_executed').first() - for uuid_set in result[0].split(','): - for interval in uuid_set.split(':')[1:]: - if '-' in interval: - iparts = interval.split('-') - txn_count += int(iparts[1]) - int(iparts[0]) - else: - txn_count += 1 - return txn_count + def _get_gtid_executed(self): + with mysql_util.SqlClient(self.get_engine()) as client: + return client.execute('SELECT @@global.gtid_executed').first()[0] def _get_slave_status(self): - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: return client.execute('SHOW SLAVE STATUS').first() def _get_master_UUID(self): slave_status = self._get_slave_status() return slave_status and slave_status['Master_UUID'] or None - def _get_gtid_executed(self): - with self.local_sql_client(self.get_engine()) as client: - return client.execute('SELECT @@global.gtid_executed').first()[0] + def get_latest_txn_id(self): + return self._get_gtid_executed() def get_last_txn(self): master_UUID = self._get_master_UUID() @@ -79,27 +50,18 @@ class MySqlApp(service.BaseMySqlApp): break return master_UUID, int(last_txn_id) - def get_latest_txn_id(self): - LOG.info("Retrieving latest txn id.") - return self._get_gtid_executed() - def wait_for_txn(self, txn): - LOG.info("Waiting on txn '%s'.", txn) - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: client.execute("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s')" % txn) class MySqlRootAccess(service.BaseMySqlRootAccess): - def __init__(self): - super(MySqlRootAccess, self).__init__(LocalSqlClient, - MySqlApp(MySqlAppStatus.get())) + def __init__(self, app): + super(MySqlRootAccess, self).__init__(app) class MySqlAdmin(service.BaseMySqlAdmin): - def __init__(self): - super(MySqlAdmin, self).__init__(LocalSqlClient, MySqlRootAccess(), - MySqlApp) - - -get_engine = MySqlApp.get_engine + def __init__(self, app): + root_access = MySqlRootAccess(app) + super(MySqlAdmin, self).__init__(root_access, app) diff --git a/trove/guestagent/datastore/mysql_common/manager.py b/trove/guestagent/datastore/mysql_common/manager.py index 3ffc4940f4..2d31e2f6e3 100644 --- a/trove/guestagent/datastore/mysql_common/manager.py +++ b/trove/guestagent/datastore/mysql_common/manager.py @@ -15,8 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
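+#
+# NOTE: a sketch of how this common manager is wired up under the new
+# model (mirroring trove/guestagent/datastore/mysql/manager.py above):
+# the concrete managers build their service objects around the guest's
+# docker client and pass them in, e.g.
+#
+#     status = service.MySqlAppStatus(self.docker_client)
+#     app = service.MySqlApp(status, self.docker_client)
+#     adm = service.MySqlAdmin(app)
+#     super(Manager, self).__init__(app, status, adm)
+#
+# The database itself runs in a container: start_db() resolves the image
+# from CONF.get(CONF.datastore_manager).docker_image, and
+# get_service_status() probes the service with a "SELECT 1" query over
+# the local unix socket.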
 #
-
-import os
+import tempfile
 
 from oslo_log import log as logging
 
@@ -24,55 +23,201 @@
 from trove.common import cfg
 from trove.common import configurations
 from trove.common import exception
 from trove.common import instance as rd_instance
+from trove.common import utils
 from trove.common.notification import EndNotification
-from trove.guestagent import backup
-from trove.guestagent.common import operating_system
-from trove.guestagent.datastore import manager
-from trove.guestagent.datastore.mysql_common import service
 from trove.guestagent import guest_log
 from trove.guestagent import volume
-
+from trove.guestagent.common import operating_system
+from trove.guestagent.datastore import manager
+from trove.guestagent.strategies import replication as repl_strategy
+from trove.guestagent.utils import docker as docker_util
+from trove.guestagent.utils import mysql as mysql_util
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 
 
 class MySqlManager(manager.Manager):
     def __init__(self, mysql_app, mysql_app_status, mysql_admin,
                  manager_name='mysql'):
         super(MySqlManager, self).__init__(manager_name)
-        self._mysql_app = mysql_app
-        self._mysql_app_status = mysql_app_status
-        self._mysql_admin = mysql_admin
+        self.app = mysql_app
+        self.status = mysql_app_status
+        self.adm = mysql_admin
         self.volume_do_not_start_on_reboot = False
 
-    @property
-    def mysql_app(self):
-        return self._mysql_app
-
-    @property
-    def mysql_app_status(self):
-        return self._mysql_app_status
-
-    @property
-    def mysql_admin(self):
-        return self._mysql_admin
-
-    @property
-    def status(self):
-        return self.mysql_app_status.get()
-
     @property
     def configuration_manager(self):
-        return self.mysql_app(
-            self.mysql_app_status.get()).configuration_manager
+        return self.app.configuration_manager
+
+    @property
+    def replication(self):
+        """If the datastore supports replication, return an instance of
+        the strategy.
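+
+        The strategy is resolved from the manager name (e.g. 'mysql' or
+        'mariadb') via repl_strategy.get_instance(); when no strategy can
+        be loaded, the failure is logged and None is returned, which
+        effectively disables the replication operations below.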
+ """ + try: + return repl_strategy.get_instance(self.manager) + except Exception as ex: + LOG.warning("Cannot get replication instance for '%(manager)s': " + "%(msg)s", {'manager': self.manager, 'msg': str(ex)}) + + return None + + def get_service_status(self): + try: + with mysql_util.SqlClient(self.app.get_engine()) as client: + cmd = "SELECT 1;" + client.execute(cmd) + + LOG.debug("Database service check: database query is responsive") + return rd_instance.ServiceStatuses.HEALTHY + except Exception: + return super(MySqlManager, self).get_service_status() + + def create_database(self, context, databases): + with EndNotification(context): + return self.adm.create_database(databases) + + def create_user(self, context, users): + with EndNotification(context): + self.adm.create_user(users) + + def delete_database(self, context, database): + with EndNotification(context): + return self.adm.delete_database(database) + + def delete_user(self, context, user): + with EndNotification(context): + self.adm.delete_user(user) + + def list_databases(self, context, limit=None, marker=None, + include_marker=False): + return self.adm.list_databases(limit, marker, include_marker) + + def list_users(self, context, limit=None, marker=None, + include_marker=False): + return self.adm.list_users(limit, marker, include_marker) + + def get_user(self, context, username, hostname): + return self.adm.get_user(username, hostname) + + def update_attributes(self, context, username, hostname, user_attrs): + with EndNotification(context): + self.adm.update_attributes(username, hostname, user_attrs) + + def grant_access(self, context, username, hostname, databases): + return self.adm.grant_access(username, hostname, databases) + + def revoke_access(self, context, username, hostname, database): + return self.adm.revoke_access(username, hostname, database) + + def list_access(self, context, username, hostname): + return self.adm.list_access(username, hostname) + + def enable_root(self, context): + return self.adm.enable_root() + + def enable_root_with_password(self, context, root_password=None): + return self.adm.enable_root(root_password) + + def is_root_enabled(self, context): + return self.adm.is_root_enabled() + + def disable_root(self, context): + return self.adm.disable_root() + + def change_passwords(self, context, users): + with EndNotification(context): + self.adm.change_passwords(users) + + def do_prepare(self, context, packages, databases, memory_mb, users, + device_path, mount_point, backup_info, + config_contents, root_password, overrides, + cluster_config, snapshot, ds_version=None): + """This is called from prepare in the base class.""" + data_dir = mount_point + '/data' + if device_path: + LOG.info('Preparing the storage for %s, mount path %s', + device_path, mount_point) + + self.app.stop_db() + + device = volume.VolumeDevice(device_path) + # unmount if device is already mounted + device.unmount_device(device_path) + device.format() + if operating_system.list_files_in_directory(mount_point): + # rsync existing data to a "data" sub-directory + # on the new volume + device.migrate_data(mount_point, target_subdir="data") + # mount the volume + device.mount(mount_point) + operating_system.chown(mount_point, CONF.database_service_uid, + CONF.database_service_uid, + recursive=True, as_root=True) + + operating_system.create_directory(data_dir, + user=CONF.database_service_uid, + group=CONF.database_service_uid, + as_root=True) + self.app.set_data_dir(data_dir) + + # Prepare mysql configuration + 
LOG.info('Preparing database configuration') + self.app.configuration_manager.save_configuration(config_contents) + self.app.update_overrides(overrides) + + # Restore data from backup and reset root password + if backup_info: + self.perform_restore(context, data_dir, backup_info) + self.reset_password_for_restore(ds_version=ds_version, + data_dir=data_dir) + + # Start database service. + # Cinder volume initialization(after formatted) may leave a + # lost+found folder + command = f'--ignore-db-dir=lost+found --datadir={data_dir}' + self.app.start_db(ds_version=ds_version, command=command) + + self.app.secure() + enable_remote_root = (backup_info and self.adm.is_root_enabled()) + if enable_remote_root: + self.status.report_root(context) + else: + self.app.secure_root() + + if snapshot: + # This instance is a replication slave + self.attach_replica(context, snapshot, snapshot['config']) + + def _validate_slave_for_replication(self, context, replica_info): + if replica_info['replication_strategy'] != self.replication_strategy: + raise exception.IncompatibleReplicationStrategy( + replica_info.update({ + 'guest_strategy': self.replication_strategy + })) + + volume_stats = self.get_filesystem_stats(context, None) + if (volume_stats.get('total', 0.0) < + replica_info['dataset']['dataset_size']): + raise exception.InsufficientSpaceForReplica( + replica_info.update({ + 'slave_volume_size': volume_stats.get('total', 0.0) + })) + + def stop_db(self, context): + self.app.stop_db() + + def restart(self, context): + self.app.restart() + + def start_db_with_conf_changes(self, context, config_contents): + self.app.start_db_with_conf_changes(config_contents) def get_datastore_log_defs(self): - owner = 'mysql' - datastore_dir = self.mysql_app.get_data_dir() + owner = cfg.get_configuration_property('database_service_uid') + datastore_dir = self.app.get_data_dir() server_section = configurations.MySQLConfParser.SERVER_CONF_SECTION long_query_time = CONF.get(self.manager).get( 'guest_log_long_query_time') / 1000 @@ -119,212 +264,14 @@ class MySqlManager(manager.Manager): }, } - def get_service_status(self): - try: - app = self.mysql_app(self.status) - with service.BaseLocalSqlClient(app.get_engine()) as client: - cmd = "SELECT 1;" - client.execute(cmd) + def apply_overrides(self, context, overrides): + LOG.info("Applying overrides (%s).", overrides) + self.app.apply_overrides(overrides) - LOG.debug("Database service check: database query is responsive") - return rd_instance.ServiceStatuses.HEALTHY - except Exception as e: - LOG.warning('Failed to query database, error: %s', str(e)) - return super(MySqlManager, self).get_service_status() - - def change_passwords(self, context, users): - with EndNotification(context): - self.mysql_admin().change_passwords(users) - - def update_attributes(self, context, username, hostname, user_attrs): - with EndNotification(context): - self.mysql_admin().update_attributes( - username, hostname, user_attrs) - - def reset_configuration(self, context, configuration): - app = self.mysql_app(self.mysql_app_status.get()) - app.reset_configuration(configuration) - - def create_database(self, context, databases): - with EndNotification(context): - return self.mysql_admin().create_database(databases) - - def create_user(self, context, users): - with EndNotification(context): - self.mysql_admin().create_user(users) - - def delete_database(self, context, database): - with EndNotification(context): - return self.mysql_admin().delete_database(database) - - def delete_user(self, context, 
user): - with EndNotification(context): - self.mysql_admin().delete_user(user) - - def get_user(self, context, username, hostname): - return self.mysql_admin().get_user(username, hostname) - - def grant_access(self, context, username, hostname, databases): - return self.mysql_admin().grant_access(username, hostname, databases) - - def revoke_access(self, context, username, hostname, database): - return self.mysql_admin().revoke_access(username, hostname, database) - - def list_access(self, context, username, hostname): - return self.mysql_admin().list_access(username, hostname) - - def list_databases(self, context, limit=None, marker=None, - include_marker=False): - return self.mysql_admin().list_databases(limit, marker, - include_marker) - - def list_users(self, context, limit=None, marker=None, - include_marker=False): - return self.mysql_admin().list_users(limit, marker, - include_marker) - - def enable_root(self, context): - return self.mysql_admin().enable_root() - - def enable_root_with_password(self, context, root_password=None): - return self.mysql_admin().enable_root(root_password) - - def is_root_enabled(self, context): - return self.mysql_admin().is_root_enabled() - - def disable_root(self, context): - return self.mysql_admin().disable_root() - - def _perform_restore(self, backup_info, context, restore_location, app): - LOG.info("Restoring database from backup %s, backup_info: %s", - backup_info['id'], backup_info) - try: - backup.restore(context, backup_info, restore_location) - except Exception: - LOG.exception("Error performing restore from backup %s.", - backup_info['id']) - app.status.set_status(rd_instance.ServiceStatuses.FAILED) - raise - LOG.info("Restored database successfully.") - - def do_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot): - """This is called from prepare in the base class.""" - app = self.mysql_app(self.mysql_app_status.get()) - app.install_if_needed(packages) - if device_path: - LOG.info('Prepare the storage for %s', device_path) - - app.stop_db( - do_not_start_on_reboot=self.volume_do_not_start_on_reboot - ) - - device = volume.VolumeDevice(device_path) - # unmount if device is already mounted - device.unmount_device(device_path) - device.format() - if os.path.exists(mount_point): - # rsync existing data to a "data" sub-directory - # on the new volume - device.migrate_data(mount_point, target_subdir="data") - # mount the volume - device.mount(mount_point) - operating_system.chown(mount_point, service.MYSQL_OWNER, - service.MYSQL_OWNER, - recursive=False, as_root=True) - - LOG.debug("Mounted the volume at %s", mount_point) - # We need to temporarily update the default my.cnf so that - # mysql will start after the volume is mounted. Later on it - # will be changed based on the config template - # (see MySqlApp.secure()) and restart. 
- app.set_data_dir(mount_point + '/data') - app.start_mysql() - - LOG.info('Finish to prepare the storage for %s', device_path) - if backup_info: - self._perform_restore(backup_info, context, - mount_point + "/data", app) - app.secure(config_contents) - enable_root_on_restore = (backup_info and - self.mysql_admin().is_root_enabled()) - if enable_root_on_restore: - app.secure_root(secure_remote_root=False) - self.mysql_app_status.get().report_root(context) - else: - app.secure_root(secure_remote_root=True) - - if snapshot: - self.attach_replica(context, snapshot, snapshot['config']) - - def pre_upgrade(self, context): - app = self.mysql_app(self.mysql_app_status.get()) - data_dir = app.get_data_dir() - mount_point, _data = os.path.split(data_dir) - save_dir = "%s/etc_mysql" % mount_point - save_etc_dir = "%s/etc" % mount_point - home_save = "%s/trove_user" % mount_point - - app.status.begin_restart() - app.stop_db() - - if operating_system.exists("/etc/my.cnf", as_root=True): - operating_system.create_directory(save_etc_dir, as_root=True) - operating_system.copy("/etc/my.cnf", save_etc_dir, - preserve=True, as_root=True) - - operating_system.copy("/etc/mysql/.", save_dir, - preserve=True, as_root=True) - - operating_system.copy("%s/." % os.path.expanduser('~'), home_save, - preserve=True, as_root=True) - - self.unmount_volume(context, mount_point=mount_point) - return { - 'mount_point': mount_point, - 'save_dir': save_dir, - 'save_etc_dir': save_etc_dir, - 'home_save': home_save - } - - def post_upgrade(self, context, upgrade_info): - app = self.mysql_app(self.mysql_app_status.get()) - app.stop_db() - if 'device' in upgrade_info: - self.mount_volume(context, mount_point=upgrade_info['mount_point'], - device_path=upgrade_info['device'], - write_to_fstab=True) - operating_system.chown(path=upgrade_info['mount_point'], - user=service.MYSQL_OWNER, - group=service.MYSQL_OWNER, - recursive=True, as_root=True) - - self._restore_home_directory(upgrade_info['home_save']) - - if operating_system.exists(upgrade_info['save_etc_dir'], - is_directory=True, as_root=True): - self._restore_directory(upgrade_info['save_etc_dir'], "/etc") - - self._restore_directory("%s/." % upgrade_info['save_dir'], - "/etc/mysql") - - self.configuration_manager.refresh_cache() - app.start_mysql() - app.status.end_restart() - - def restart(self, context): - app = self.mysql_app(self.mysql_app_status.get()) - app.restart() - - def start_db_with_conf_changes(self, context, config_contents): - app = self.mysql_app(self.mysql_app_status.get()) - app.start_db_with_conf_changes(config_contents) - - def stop_db(self, context, do_not_start_on_reboot=False): - app = self.mysql_app(self.mysql_app_status.get()) - app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot) + def update_overrides(self, context, overrides, remove=False): + if remove: + self.app.remove_overrides() + self.app.update_overrides(overrides) def create_backup(self, context, backup_info): """ @@ -333,22 +280,98 @@ class MySqlManager(manager.Manager): device_path is specified, it will be mounted based to a point specified in configuration. + :param context: User context object. :param backup_info: a dictionary containing the db instance id of the backup task, location, type, and other data. 
""" with EndNotification(context): - backup.backup(context, backup_info) + self.app.create_backup(context, backup_info) - def update_overrides(self, context, overrides, remove=False): - app = self.mysql_app(self.mysql_app_status.get()) - if remove: - app.remove_overrides() - app.update_overrides(overrides) + def perform_restore(self, context, restore_location, backup_info): + LOG.info("Starting to restore database from backup %s, " + "backup_info: %s", backup_info['id'], backup_info) - def apply_overrides(self, context, overrides): - LOG.debug("Applying overrides (%s).", overrides) - app = self.mysql_app(self.mysql_app_status.get()) - app.apply_overrides(overrides) + try: + self.app.restore_backup(context, backup_info, restore_location) + except Exception: + LOG.error("Failed to restore from backup %s.", backup_info['id']) + self.status.set_status(rd_instance.ServiceStatuses.FAILED) + raise + + LOG.info("Finished restore data from backup %s", backup_info['id']) + + def reset_password_for_restore(self, ds_version=None, + data_dir='/var/lib/mysql/data'): + """Reset the root password after restore the db data. + + We create a temporary database container by running mysqld_safe to + reset the root password. + """ + LOG.info('Starting to reset password for restore') + + try: + root_pass = self.app.get_auth_password(file="root.cnf") + except exception.UnprocessableEntity: + root_pass = utils.generate_random_password() + self.app.save_password('root', root_pass) + + with tempfile.NamedTemporaryFile(mode='w') as init_file, \ + tempfile.NamedTemporaryFile(suffix='.err') as err_file: + operating_system.write_file( + init_file.name, + f"ALTER USER 'root'@'localhost' IDENTIFIED BY '{root_pass}';" + ) + command = ( + f'mysqld_safe --init-file={init_file.name} ' + f'--log-error={err_file.name} ' + f'--datadir={data_dir}' + ) + extra_volumes = { + init_file.name: {"bind": init_file.name, "mode": "rw"}, + err_file.name: {"bind": err_file.name, "mode": "rw"}, + } + + # Allow database service user to access the temporary files. 
+            for file in [init_file.name, err_file.name]:
+                operating_system.chmod(file,
+                                       operating_system.FileMode.SET_ALL_RWX(),
+                                       force=True, as_root=True)
+
+            try:
+                self.app.start_db(ds_version=ds_version, command=command,
+                                  extra_volumes=extra_volumes)
+            except Exception as err:
+                LOG.error('Failed to reset password for restore, error: %s',
+                          str(err))
+                LOG.debug('Content in init error log file: %s',
+                          err_file.read())
+                raise err
+            finally:
+                LOG.debug(
+                    'The init container log: %s',
+                    docker_util.get_container_logs(self.app.docker_client)
+                )
+                docker_util.remove_container(self.app.docker_client)
+
+        LOG.info('Finished resetting password for restore')
+
+    def attach_replica(self, context, replica_info, slave_config):
+        LOG.info("Attaching replica, replica_info: %s", replica_info)
+        try:
+            if 'replication_strategy' in replica_info:
+                self._validate_slave_for_replication(context, replica_info)
+
+            self.replication.enable_as_slave(self.app, replica_info,
+                                             slave_config)
+        except Exception as err:
+            LOG.error("Error enabling replication, error: %s", str(err))
+            self.status.set_status(rd_instance.ServiceStatuses.FAILED)
+            raise
+
+    def detach_replica(self, context, for_failover=False):
+        LOG.info("Detaching replica.")
+        replica_info = self.replication.detach_slave(self.app, for_failover)
+        return replica_info
 
     def backup_required_for_replication(self, context):
         return self.replication.backup_required_for_replication()
@@ -357,12 +380,12 @@
                                  replica_source_config=None):
         LOG.info("Getting replication snapshot, snapshot_info: %s",
                  snapshot_info)
-        app = self.mysql_app(self.mysql_app_status.get())
-        self.replication.enable_as_master(app, replica_source_config)
+        self.replication.enable_as_master(self.app, replica_source_config)
+        LOG.info('Enabled as replication master')
 
         snapshot_id, log_position = self.replication.snapshot_for_replication(
-            context, app, None, snapshot_info)
+            context, self.app, self.adm, None, snapshot_info)
 
         volume_stats = self.get_filesystem_stats(context, None)
 
@@ -374,84 +397,37 @@
                 'snapshot_id': snapshot_id
             },
             'replication_strategy': self.replication_strategy,
-            'master': self.replication.get_master_ref(app, snapshot_info),
+            'master': self.replication.get_master_ref(self.app, snapshot_info),
             'log_position': log_position
         }
 
         return replication_snapshot
 
     def enable_as_master(self, context, replica_source_config):
-        LOG.debug("Calling enable_as_master.")
-        app =
self.mysql_app(self.mysql_app_status.get()) - replica_info = self.replication.get_replica_context(app) - return replica_info - - def _validate_slave_for_replication(self, context, replica_info): - if replica_info['replication_strategy'] != self.replication_strategy: - raise exception.IncompatibleReplicationStrategy( - replica_info.update({ - 'guest_strategy': self.replication_strategy - })) - - volume_stats = self.get_filesystem_stats(context, None) - if (volume_stats.get('total', 0.0) < - replica_info['dataset']['dataset_size']): - raise exception.InsufficientSpaceForReplica( - replica_info.update({ - 'slave_volume_size': volume_stats.get('total', 0.0) - })) - - def attach_replica(self, context, replica_info, slave_config): - LOG.info("Attaching replica.") - app = self.mysql_app(self.mysql_app_status.get()) - try: - if 'replication_strategy' in replica_info: - self._validate_slave_for_replication(context, replica_info) - self.replication.enable_as_slave(app, replica_info, slave_config) - except Exception: - LOG.exception("Error enabling replication.") - app.status.set_status(rd_instance.ServiceStatuses.FAILED) - raise + LOG.info("Enable as master") + self.replication.enable_as_master(self.app, replica_source_config) def make_read_only(self, context, read_only): - LOG.debug("Executing make_read_only(%s)", read_only) - app = self.mysql_app(self.mysql_app_status.get()) - app.make_read_only(read_only) + LOG.info("Executing make_read_only(%s)", read_only) + self.app.make_read_only(read_only) - def cleanup_source_on_replica_detach(self, context, replica_info): - LOG.debug("Cleaning up the source on the detach of a replica.") - self.replication.cleanup_source_on_replica_detach(self.mysql_admin(), - replica_info) + def get_latest_txn_id(self, context): + LOG.info("Calling get_latest_txn_id.") + return self.app.get_latest_txn_id() + + def get_last_txn(self, context): + LOG.info("Calling get_last_txn") + return self.app.get_last_txn() + + def wait_for_txn(self, context, txn): + LOG.info("Calling wait_for_txn.") + self.app.wait_for_txn(txn) + + def get_replica_context(self, context): + LOG.info("Getting replica context.") + replica_info = self.replication.get_replica_context(self.app, self.adm) + return replica_info def demote_replication_master(self, context): - LOG.debug("Demoting replication master.") - app = self.mysql_app(self.mysql_app_status.get()) - self.replication.demote_master(app) + LOG.info("Demoting replication master.") + self.replication.demote_master(self.app) diff --git a/trove/guestagent/datastore/mysql_common/service.py b/trove/guestagent/datastore/mysql_common/service.py index c562775e6a..c98b327912 100644 --- a/trove/guestagent/datastore/mysql_common/service.py +++ b/trove/guestagent/datastore/mysql_common/service.py @@ -1,246 +1,115 @@ -# Copyright 2013 OpenStack Foundation -# Copyright 2013 Rackspace Hosting -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. +# Copyright 2020 Catalyst Cloud # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import abc -from collections import defaultdict -import os import re -import six -import uuid from oslo_log import log as logging from oslo_utils import encodeutils -from pymysql import err as pymysql_err +from oslo_utils import timeutils +import six from six.moves import urllib import sqlalchemy from sqlalchemy import exc -from sqlalchemy import interfaces from sqlalchemy.sql.expression import text +from trove.backup.state import BackupState from trove.common import cfg +from trove.common import exception +from trove.common import instance +from trove.common import utils from trove.common.configurations import MySQLConfParser from trove.common.db.mysql import models -from trove.common import exception -from trove.common.exception import PollTimeOut from trove.common.i18n import _ -from trove.common import instance as rd_instance from trove.common.stream_codecs import IniCodec -from trove.common import utils -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import ImportOverrideStrategy +from trove.conductor import api as conductor_api from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common import sql_query +from trove.guestagent.common.configuration import ConfigurationManager +from trove.guestagent.common.configuration import ImportOverrideStrategy from trove.guestagent.datastore import service -from trove.guestagent import pkg +from trove.guestagent.datastore.mysql_common import service as commmon_service +from trove.guestagent.utils import docker as docker_util +from trove.guestagent.utils import mysql as mysql_util +LOG = logging.getLogger(__name__) +CONF = cfg.CONF ADMIN_USER_NAME = "os_admin" CONNECTION_STR_FORMAT = ("mysql+pymysql://%s:%s@localhost/?" 
"unix_socket=/var/run/mysqld/mysqld.sock") -LOG = logging.getLogger(__name__) -FLUSH = text(sql_query.FLUSH) ENGINE = None -DATADIR = None -PREPARING = False -UUID = False - -TMP_MYCNF = "/tmp/my.cnf.tmp" -MYSQL_BASE_DIR = "/var/lib/mysql" - -CONF = cfg.CONF - INCLUDE_MARKER_OPERATORS = { True: ">=", False: ">" } - -OS_NAME = operating_system.get_os() -MYSQL_CONFIG = {operating_system.REDHAT: "/etc/my.cnf", - operating_system.DEBIAN: "/etc/mysql/my.cnf", - operating_system.SUSE: "/etc/my.cnf"}[OS_NAME] -MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"] -MYSQL_OWNER = 'mysql' +MYSQL_CONFIG = "/etc/mysql/my.cnf" CNF_EXT = 'cnf' CNF_INCLUDE_DIR = '/etc/mysql/conf.d' CNF_MASTER = 'master-replication' CNF_SLAVE = 'slave-replication' -# Create a package impl -packager = pkg.Package() - - -def clear_expired_password(): - """ - Some mysql installations generate random root password - and save it in /root/.mysql_secret, this password is - expired and should be changed by client that supports expired passwords. - """ - LOG.debug("Removing expired password.") - secret_file = "/root/.mysql_secret" - try: - out, err = utils.execute("cat", secret_file, - run_as_root=True, root_helper="sudo") - except exception.ProcessExecutionError: - LOG.warning("/root/.mysql_secret does not exist.") - else: - m = re.match('# The random password set for the root user at .*: (.*)', - out) - if m: - try: - out, err = utils.execute("mysqladmin", "-p%s" % m.group(1), - "password", "", run_as_root=True, - root_helper="sudo") - except exception.ProcessExecutionError: - LOG.exception("Cannot change mysql password.") - return - operating_system.remove(secret_file, force=True, as_root=True) - LOG.debug("Expired password removed.") - - # The root user password will be changed in app.secure_root() later on - LOG.debug('Initializae the root password to empty') - try: - utils.execute("mysqladmin", "--user=root", "password", "", - run_as_root=True, root_helper="sudo") - except Exception: - LOG.exception("Failed to initializae the root password") - - -def load_mysqld_options(): - # find mysqld bin - for bin in MYSQL_BIN_CANDIDATES: - if os.path.isfile(bin): - mysqld_bin = bin - break - else: - return {} - try: - out, err = utils.execute(mysqld_bin, "--print-defaults", - run_as_root=True, root_helper="sudo") - arglist = re.split("\n", out)[1].split() - args = defaultdict(list) - for item in arglist: - if "=" in item: - key, value = item.split("=", 1) - args[key.lstrip("--")].append(value) - else: - args[item.lstrip("--")].append(None) - return args - except exception.ProcessExecutionError: - return {} +BACKUP_LOG = re.compile(r'.*Backup successfully, checksum: (?P.*), ' + r'location: (?P.*)') class BaseMySqlAppStatus(service.BaseDbStatus): + def __init__(self, docker_client): + super(BaseMySqlAppStatus, self).__init__(docker_client) - @classmethod - def get(cls): - if not cls._instance: - cls._instance = BaseMySqlAppStatus() - return cls._instance - - def _get_actual_db_status(self): - """Check database service status. - - The checks which don't need service app can be put here. 
- """ - try: - out, _ = utils.execute_with_timeout( - "/bin/ps", "-C", "mysqld", "h", - log_output_on_error=True - ) - pid = out.split()[0] - - LOG.debug('Database service check: service PID exists: %s', pid) - return rd_instance.ServiceStatuses.RUNNING - except exception.ProcessExecutionError: - LOG.warning("Database service check: Failed to get database " - "service status by ps, fall back to check PID file.") - - mysql_args = load_mysqld_options() - pid_file = mysql_args.get('pid_file', - ['/var/run/mysqld/mysqld.pid'])[0] - if os.path.exists(pid_file): - LOG.info("Database service check: MySQL Service Status is " - "CRASHED.") - return rd_instance.ServiceStatuses.CRASHED + def get_actual_db_status(self): + """Check database service status.""" + status = docker_util.get_container_status(self.docker_client) + if status == "running": + root_pass = commmon_service.BaseMySqlApp.get_auth_password( + file="root.cnf") + cmd = 'mysql -uroot -p%s -e "select 1;"' % root_pass + try: + docker_util.run_command(self.docker_client, cmd) + return instance.ServiceStatuses.HEALTHY + except Exception as exc: + LOG.warning('Failed to run docker command, error: %s', + str(exc)) + container_log = docker_util.get_container_logs( + self.docker_client, tail='all') + LOG.warning('container log: %s', '\n'.join(container_log)) + return instance.ServiceStatuses.RUNNING + elif status == "not running": + return instance.ServiceStatuses.SHUTDOWN + elif status == "paused": + return instance.ServiceStatuses.PAUSED + elif status == "exited": + return instance.ServiceStatuses.SHUTDOWN + elif status == "dead": + return instance.ServiceStatuses.CRASHED else: - LOG.info("Database service check: MySQL Service Status is " - "SHUTDOWN.") - return rd_instance.ServiceStatuses.SHUTDOWN - - -class BaseLocalSqlClient(object): - """A sqlalchemy wrapper to manage transactions.""" - - def __init__(self, engine, use_flush=True): - self.engine = engine - self.use_flush = use_flush - - def __enter__(self): - self.conn = self.engine.connect() - self.trans = self.conn.begin() - return self.conn - - def __exit__(self, type, value, traceback): - if self.trans: - if type is not None: # An error occurred - self.trans.rollback() - else: - if self.use_flush: - self.conn.execute(FLUSH) - self.trans.commit() - self.conn.close() - - def execute(self, t, **kwargs): - try: - return self.conn.execute(t, kwargs) - except Exception: - self.trans.rollback() - self.trans = None - raise + return instance.ServiceStatuses.UNKNOWN @six.add_metaclass(abc.ABCMeta) class BaseMySqlAdmin(object): """Handles administrative tasks on the MySQL database.""" - def __init__(self, local_sql_client, mysql_root_access, - mysql_app): - self._local_sql_client = local_sql_client - self._mysql_root_access = mysql_root_access - self._mysql_app = mysql_app(local_sql_client) - - @property - def local_sql_client(self): - return self._local_sql_client - - @property - def mysql_root_access(self): - return self._mysql_root_access - - @property - def mysql_app(self): - return self._mysql_app + def __init__(self, mysql_root_access, mysql_app): + self.mysql_root_access = mysql_root_access + self.mysql_app = mysql_app def _associate_dbs(self, user): """Internal. 
Given a MySQLUser, populate its databases attribute.""" LOG.debug("Associating dbs to user %(name)s at %(host)s.", {'name': user.name, 'host': user.host}) - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ["grantee", "table_schema"] q.tables = ["information_schema.SCHEMA_PRIVILEGES"] @@ -256,7 +125,7 @@ class BaseMySqlAdmin(object): def change_passwords(self, users): """Change the passwords of one or more existing users.""" LOG.debug("Changing the password of some users.") - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: for item in users: LOG.debug("Changing password for user %s.", item) user_dict = {'_name': item['name'], @@ -280,7 +149,7 @@ class BaseMySqlAdmin(object): if new_name or new_host or new_password: - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: if new_password is not None: uu = sql_query.SetPassword(user.name, host=user.host, @@ -298,7 +167,7 @@ class BaseMySqlAdmin(object): def create_database(self, databases): """Create the list of specified databases.""" - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: for item in databases: mydb = models.MySQLSchema.deserialize(item) mydb.check_create() @@ -313,7 +182,7 @@ class BaseMySqlAdmin(object): """Create users and grant them privileges for the specified databases. """ - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: for item in users: user = models.MySQLUser.deserialize(item) user.check_create() @@ -333,7 +202,7 @@ class BaseMySqlAdmin(object): def delete_database(self, database): """Delete the specified database.""" - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: mydb = models.MySQLSchema.deserialize(database) mydb.check_delete() dd = sql_query.DropDatabase(mydb.name) @@ -347,7 +216,7 @@ class BaseMySqlAdmin(object): self.delete_user_by_name(mysql_user.name, mysql_user.host) def delete_user_by_name(self, name, host='%'): - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: du = sql_query.DropUser(name, host=host) t = text(str(du)) LOG.debug("delete_user_by_name: %s", t) @@ -373,7 +242,7 @@ class BaseMySqlAdmin(object): ": %(reason)s") % {'user': username, 'reason': err_msg} ) - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: q = sql_query.Query() q.columns = ['User', 'Host'] q.tables = ['mysql.user'] @@ -395,7 +264,7 @@ class BaseMySqlAdmin(object): """Grant a user permission to use a given database.""" user = self._get_user(username, hostname) mydb = None # cache the model as we just want name validation - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: for database in databases: try: if mydb: @@ -416,8 +285,6 @@ class BaseMySqlAdmin(object): def is_root_enabled(self): """Return True if root access is enabled; False otherwise.""" - LOG.debug("Class type of mysql_root_access is %s ", - self.mysql_root_access) return 
self.mysql_root_access.is_root_enabled() def enable_root(self, root_password=None): @@ -432,13 +299,13 @@ class BaseMySqlAdmin(object): return self.mysql_root_access.disable_root() def list_databases(self, limit=None, marker=None, include_marker=False): - """List databases the user created on this mysql instance.""" - LOG.debug("---Listing Databases---") + """List databases on this mysql instance.""" + LOG.info("Listing Databases") ignored_database_names = "'%s'" % "', '".join(cfg.get_ignored_dbs()) LOG.debug("The following database names are on ignore list and will " "be omitted from the listing: %s", ignored_database_names) databases = [] - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: # If you have an external volume mounted at /var/lib/mysql # the lost+found directory will show up in mysql as a database # which will create errors if you try to do any database ops @@ -461,17 +328,16 @@ class BaseMySqlAdmin(object): t = text(str(q)) database_names = client.execute(t) next_marker = None - LOG.debug("database_names = %r.", database_names) for count, database in enumerate(database_names): if limit is not None and count >= limit: break - LOG.debug("database = %s.", str(database)) mysql_db = models.MySQLSchema(name=database[0], character_set=database[1], collate=database[2]) next_marker = mysql_db.name databases.append(mysql_db.serialize()) - LOG.debug("databases = %s", str(databases)) + + LOG.info("databases = %s", str(databases)) if limit is not None and database_names.rowcount <= limit: next_marker = None return databases, next_marker @@ -496,12 +362,12 @@ class BaseMySqlAdmin(object): Marker LIMIT :limit; ''' - LOG.debug("---Listing Users---") + LOG.info("Listing Users") ignored_user_names = "'%s'" % "', '".join(cfg.get_ignored_users()) LOG.debug("The following user names are on ignore list and will " "be omitted from the listing: %s", ignored_user_names) users = [] - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: iq = sql_query.Query() # Inner query. iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"] iq.tables = ['mysql.user'] @@ -524,7 +390,6 @@ class BaseMySqlAdmin(object): t = text(str(oq)) result = client.execute(t) next_marker = None - LOG.debug("result = %s", str(result)) for count, row in enumerate(result): if limit is not None and count >= limit: break @@ -537,14 +402,14 @@ class BaseMySqlAdmin(object): users.append(mysql_user.serialize()) if limit is not None and result.rowcount <= limit: next_marker = None - LOG.debug("users = %s", str(users)) + LOG.info("users = %s", str(users)) return users, next_marker def revoke_access(self, username, hostname, database): """Revoke a user's permission to use a given database.""" user = self._get_user(username, hostname) - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: r = sql_query.Revoke(database=database, user=user.name, host=user.host) @@ -559,63 +424,21 @@ class BaseMySqlAdmin(object): return user.databases -class BaseKeepAliveConnection(interfaces.PoolListener): - """ - A connection pool listener that ensures live connections are returned - from the connection pool at checkout. This alleviates the problem of - MySQL connections timing out. 
- """ - - def checkout(self, dbapi_con, con_record, con_proxy): - """Event triggered when a connection is checked out from the pool.""" - try: - try: - dbapi_con.ping(False) - except TypeError: - dbapi_con.ping() - except dbapi_con.OperationalError as ex: - if ex.args[0] in (2006, 2013, 2014, 2045, 2055): - raise exc.DisconnectionError() - else: - raise - # MariaDB seems to timeout the client in a different - # way than MySQL and PXC - except pymysql_err.InternalError as ex: - if "Packet sequence number wrong" in str(ex): - raise exc.DisconnectionError() - elif 'Connection was killed' in str(ex): - raise exc.DisconnectionError() - else: - raise - - @six.add_metaclass(abc.ABCMeta) class BaseMySqlApp(object): """Prepares DBaaS on a Guest container.""" - TIME_OUT = 1000 CFG_CODEC = IniCodec() - - @property - def local_sql_client(self): - return self._local_sql_client - - @property - def keep_alive_connection_cls(self): - return self._keep_alive_connection_cls - - @property - def service_candidates(self): - return ["mysql", "mysqld", "mysql-server"] - - @property - def mysql_service(self): - service_candidates = self.service_candidates - return operating_system.service_discovery(service_candidates) - configuration_manager = ConfigurationManager( - MYSQL_CONFIG, MYSQL_OWNER, MYSQL_OWNER, CFG_CODEC, requires_root=True, - override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT)) + MYSQL_CONFIG, CONF.database_service_uid, CONF.database_service_uid, + CFG_CODEC, requires_root=True, + override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT) + ) + + def __init__(self, status, docker_client): + """By default login with root no password for initial setup.""" + self.status = status + self.docker_client = docker_client def get_engine(self): """Create the default engine with the updated admin user. @@ -638,14 +461,19 @@ class BaseMySqlApp(object): CONNECTION_STR_FORMAT % (user, urllib.parse.quote(password.strip())), pool_recycle=120, echo=CONF.sql_query_logging, - listeners=[self.keep_alive_connection_cls()]) + listeners=[mysql_util.BaseKeepAliveConnection()]) return ENGINE + def execute_sql(self, sql_statement): + LOG.debug("Executing SQL: %s", sql_statement) + with mysql_util.SqlClient(self.get_engine()) as client: + return client.execute(sql_statement) + @classmethod - def get_auth_password(cls): + def get_auth_password(cls, file="os_admin.cnf"): auth_config = operating_system.read_file( - cls.get_client_auth_file(), codec=cls.CFG_CODEC) + cls.get_client_auth_file(file), codec=cls.CFG_CODEC) return auth_config['client']['password'] @classmethod @@ -659,22 +487,15 @@ class BaseMySqlApp(object): {MySQLConfParser.SERVER_CONF_SECTION: {'datadir': value}}) @classmethod - def get_client_auth_file(cls): - return guestagent_utils.build_file_path("~", ".my.cnf") - - def __init__(self, status, local_sql_client, keep_alive_connection_cls): - """By default login with root no password for initial setup.""" - self.state_change_wait_time = CONF.state_change_wait_time - self.status = status - self._local_sql_client = local_sql_client - self._keep_alive_connection_cls = keep_alive_connection_cls + def get_client_auth_file(cls, file="os_admin.cnf"): + return guestagent_utils.build_file_path("/opt/trove-guestagent", file) def _create_admin_user(self, client, password): """ Create a os_admin user with a random password with all privileges similar to the root user. 
""" - LOG.debug("Creating Trove admin user '%s'.", ADMIN_USER_NAME) + LOG.info("Creating Trove admin user '%s'.", ADMIN_USER_NAME) host = "localhost" try: cu = sql_query.CreateUser(ADMIN_USER_NAME, host=host, @@ -694,150 +515,40 @@ class BaseMySqlApp(object): host=host, grant_option=True) t = text(str(g)) client.execute(t) - LOG.debug("Trove admin user '%s' created.", ADMIN_USER_NAME) + LOG.info("Trove admin user '%s' created.", ADMIN_USER_NAME) @staticmethod - def _generate_root_password(client): - """Generate, set, and preserve a random password - for root@localhost when invoking mysqladmin to - determine the execution status of the mysql service. - """ - localhost = "localhost" - new_password = utils.generate_random_password() - uu = sql_query.SetPassword( - models.MySQLUser.root_username, host=localhost, - new_password=new_password) - t = text(str(uu)) - client.execute(t) + def save_password(user, password): + content = {'client': {'user': user, + 'password': password, + 'host': "localhost"}} + operating_system.write_file('/opt/trove-guestagent/%s.cnf' % user, + content, codec=IniCodec()) - # Save the password to root's private .my.cnf file - root_sect = {'client': {'user': 'root', - 'password': new_password, - 'host': localhost}} - operating_system.write_file('/root/.my.cnf', - root_sect, codec=IniCodec(), as_root=True) + def secure(self): + LOG.info("Securing MySQL now.") - def install_if_needed(self, packages): - """Prepare the guest machine with a secure - mysql server installation. - """ - LOG.info("Preparing Guest as MySQL Server.") - if not packager.pkg_is_installed(packages): - LOG.debug("Installing MySQL server.") - self._clear_mysql_config() - # set blank password on pkg configuration stage - pkg_opts = {'root_password': '', - 'root_password_again': ''} - packager.pkg_install(packages, pkg_opts, self.TIME_OUT) - self._create_mysql_confd_dir() - LOG.info("Finished installing MySQL server.") - self.start_mysql() - - def secure(self, config_contents): - LOG.debug("Securing MySQL now.") - clear_expired_password() - - LOG.debug("Generating admin password.") + root_pass = self.get_auth_password(file="root.cnf") admin_password = utils.generate_random_password() - # By default, MySQL does not require a password at all for connecting - # as root engine = sqlalchemy.create_engine( - CONNECTION_STR_FORMAT % ('root', ''), echo=True) - with self.local_sql_client(engine, use_flush=False) as client: + CONNECTION_STR_FORMAT % ('root', root_pass), echo=True) + with mysql_util.SqlClient(engine, use_flush=False) as client: self._create_admin_user(client, admin_password) - LOG.debug("Switching to the '%s' user now.", ADMIN_USER_NAME) engine = sqlalchemy.create_engine( CONNECTION_STR_FORMAT % (ADMIN_USER_NAME, urllib.parse.quote(admin_password)), echo=True) - with self.local_sql_client(engine) as client: + with mysql_util.SqlClient(engine) as client: self._remove_anonymous_user(client) - self.stop_db() - self._reset_configuration(config_contents, admin_password) - self.start_mysql() - LOG.debug("MySQL secure complete.") + self.save_password(ADMIN_USER_NAME, admin_password) + LOG.info("MySQL secure complete.") - def _reset_configuration(self, configuration, admin_password=None): - if not admin_password: - # Take the current admin password from the base configuration file - # if not given. 
- admin_password = self.get_auth_password() - - self.configuration_manager.save_configuration(configuration) - self._save_authentication_properties(admin_password) - self.wipe_ib_logfiles() - - def _save_authentication_properties(self, admin_password): - # Use localhost to connect with mysql using unix socket instead of ip - # and port. - client_sect = {'client': {'user': ADMIN_USER_NAME, - 'password': admin_password, - 'host': 'localhost'}} - operating_system.write_file(self.get_client_auth_file(), - client_sect, codec=self.CFG_CODEC) - - def secure_root(self, secure_remote_root=True): - with self.local_sql_client(self.get_engine()) as client: - LOG.info("Preserving root access from restore.") - self._generate_root_password(client) - if secure_remote_root: - self._remove_remote_root_access(client) - - def _clear_mysql_config(self): - """Clear old configs, which can be incompatible with new version.""" - LOG.debug("Clearing old MySQL config.") - random_uuid = str(uuid.uuid4()) - configs = ["/etc/my.cnf", "/etc/mysql/conf.d", "/etc/mysql/my.cnf"] - for config in configs: - try: - old_conf_backup = "%s_%s" % (config, random_uuid) - operating_system.move(config, old_conf_backup, as_root=True) - LOG.debug("%(cfg)s saved to %(saved_cfg)s_%(uuid)s.", - {'cfg': config, 'saved_cfg': config, - 'uuid': random_uuid}) - except exception.ProcessExecutionError: - pass - - def _create_mysql_confd_dir(self): - LOG.debug("Creating %s.", CNF_INCLUDE_DIR) - operating_system.create_directory(CNF_INCLUDE_DIR, as_root=True) - - def _enable_mysql_on_boot(self): - LOG.debug("Enabling MySQL on boot.") - try: - utils.execute_with_timeout(self.mysql_service['cmd_enable'], - shell=True) - except KeyError: - LOG.exception("Error enabling MySQL start on boot.") - raise RuntimeError(_("Service is not discovered.")) - - def _disable_mysql_on_boot(self): - try: - utils.execute_with_timeout(self.mysql_service['cmd_disable'], - shell=True) - except KeyError: - LOG.exception("Error disabling MySQL start on boot.") - raise RuntimeError(_("Service is not discovered.")) - - def stop_db(self, update_db=False, do_not_start_on_reboot=False): - LOG.info("Stopping MySQL.") - if do_not_start_on_reboot: - self._disable_mysql_on_boot() - try: - utils.execute_with_timeout(self.mysql_service['cmd_stop'], - shell=True) - except KeyError: - LOG.exception("Error stopping MySQL.") - raise RuntimeError(_("Service is not discovered.")) - if not self.status.wait_for_real_status_to_change_to( - rd_instance.ServiceStatuses.SHUTDOWN, - self.state_change_wait_time, update_db): - LOG.error("Could not stop MySQL.") - self.status.end_restart() - raise RuntimeError(_("Could not stop MySQL!")) + def secure_root(self): + with mysql_util.SqlClient(self.get_engine()) as client: + self._remove_remote_root_access(client) def _remove_anonymous_user(self, client): LOG.debug("Removing anonymous user.") @@ -846,33 +557,23 @@ class BaseMySqlApp(object): LOG.debug("Anonymous user removed.") def _remove_remote_root_access(self, client): - LOG.debug("Removing root access.") + LOG.debug("Removing remote root access.") t = text(sql_query.REMOVE_ROOT) client.execute(t) - LOG.debug("Root access removed.") - - def restart(self): - try: - self.status.begin_restart() - self.stop_db() - self.start_mysql() - finally: - self.status.end_restart() + LOG.debug("Root remote access removed.") def update_overrides(self, overrides): - self._apply_user_overrides(overrides) - - def _apply_user_overrides(self, overrides): - # All user-defined values go to the server section of the 
configuration
-        # file.
         if overrides:
             self.configuration_manager.apply_user_override(
                 {MySQLConfParser.SERVER_CONF_SECTION: overrides})

+    def remove_overrides(self):
+        self.configuration_manager.remove_user_override()
+
     def apply_overrides(self, overrides):
-        LOG.debug("Applying overrides to MySQL.")
-        with self.local_sql_client(self.get_engine()) as client:
-            LOG.debug("Updating override values in running MySQL.")
+        LOG.info("Applying overrides to running MySQL, overrides: %s",
+                 overrides)
+        with mysql_util.SqlClient(self.get_engine()) as client:
             for k, v in overrides.items():
                 byte_value = guestagent_utils.to_bytes(v)
                 q = sql_query.SetServerVariable(key=k, value=byte_value)
@@ -881,13 +582,89 @@ class BaseMySqlApp(object):
                     client.execute(t)
                 except exc.OperationalError:
                     output = {'key': k, 'value': byte_value}
-                    LOG.exception("Unable to set %(key)s with value "
-                                  "%(value)s.", output)
+                    LOG.error("Unable to set %(key)s with value %(value)s.",
+                              output)

-    def make_read_only(self, read_only):
-        with self.local_sql_client(self.get_engine()) as client:
-            q = "set global read_only = %s" % read_only
-            client.execute(text(str(q)))
+    def start_db(self, update_db=False, ds_version=None, command=None,
+                 extra_volumes=None):
+        docker_image = CONF.get(CONF.datastore_manager).docker_image
+        image = (f'{docker_image}:latest' if not ds_version else
+                 f'{docker_image}:{ds_version}')
+        command = command if command else ''
+
+        try:
+            root_pass = self.get_auth_password(file="root.cnf")
+        except exception.UnprocessableEntity:
+            root_pass = utils.generate_random_password()
+
+        # Run the container as the database service uid and gid.
+        user = "%s:%s" % (CONF.database_service_uid, CONF.database_service_uid)
+
+        # Create the directories MySQL needs on the guest host.
+        for folder in ['/etc/mysql', '/var/run/mysqld']:
+            operating_system.create_directory(
+                folder, user=CONF.database_service_uid,
+                group=CONF.database_service_uid, force=True,
+                as_root=True)
+
+        volumes = {
+            "/etc/mysql": {"bind": "/etc/mysql", "mode": "rw"},
+            "/var/run/mysqld": {"bind": "/var/run/mysqld",
+                                "mode": "rw"},
+            "/var/lib/mysql": {"bind": "/var/lib/mysql", "mode": "rw"},
+        }
+        if extra_volumes:
+            volumes.update(extra_volumes)
+
+        try:
+            LOG.info("Starting docker container, image: %s", image)
+            docker_util.start_container(
+                self.docker_client,
+                image,
+                volumes=volumes,
+                network_mode="host",
+                user=user,
+                environment={
+                    "MYSQL_ROOT_PASSWORD": root_pass,
+                },
+                command=command
+            )
+
+            # Save root password
+            LOG.debug("Saving root credentials to local host.")
+            self.save_password('root', root_pass)
+        except Exception:
+            LOG.exception("Failed to start mysql")
+            raise exception.TroveError(_("Failed to start mysql"))
+
+        if not self.status.wait_for_real_status_to_change_to(
+                instance.ServiceStatuses.HEALTHY,
+                CONF.state_change_wait_time, update_db):
+            raise exception.TroveError(_("Failed to start mysql"))
+
+    def start_db_with_conf_changes(self, config_contents):
+        if self.status.is_running:
+            LOG.info("Stopping MySQL before applying changes.")
+            self.stop_db()
+
+        LOG.info("Resetting configuration.")
+        self._reset_configuration(config_contents)
+
+        self.start_db(update_db=True)
+
+    def stop_db(self, update_db=False):
+        LOG.info("Stopping MySQL.")
+
+        try:
+            docker_util.stop_container(self.docker_client)
+        except Exception:
+            LOG.exception("Failed to stop mysql")
+            raise exception.TroveError("Failed to stop mysql")
+
+        if not self.status.wait_for_real_status_to_change_to(
+                instance.ServiceStatuses.SHUTDOWN,
+                CONF.state_change_wait_time, update_db):
+            raise exception.TroveError("Failed to stop mysql")
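# start_db()/stop_db() above drive the database through docker_util. A rough
# stand-alone equivalent with the docker SDK, to show the shape of that
# container lifecycle; the image tag, uid and password are illustrative, and
# docker_util's actual helper signatures may differ:
import docker

client = docker.from_env()
container = client.containers.run(
    'mariadb:10.4',                # i.e. f'{docker_image}:{ds_version}'
    detach=True,
    network_mode='host',           # database listens on the guest's address
    user='1001:1001',              # database_service_uid as user and group
    environment={'MYSQL_ROOT_PASSWORD': 'examplepass'},
    volumes={'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}})
container.stop()                   # roughly what stop_db() delegates to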

     def wipe_ib_logfiles(self):
         """Destroys the iblogfiles.
@@ -896,7 +673,6 @@ class BaseMySqlApp(object):
         current size of the files MySQL will fail to start, so we delete the
         files to be safe.
         """
-        LOG.info("Wiping ib_logfiles.")
         for index in range(2):
             try:
                 # On restarts, sometimes these are wiped. So it can be a race
@@ -910,13 +686,179 @@ class BaseMySqlApp(object):
                 LOG.exception("Could not delete logfile.")
                 raise

-    def remove_overrides(self):
-        self.configuration_manager.remove_user_override()
+    def _reset_configuration(self, configuration, admin_password=None):
+        self.configuration_manager.save_configuration(configuration)
+        if admin_password:
+            self.save_password(ADMIN_USER_NAME, admin_password)
+        self.wipe_ib_logfiles()

-    def _remove_replication_overrides(self, cnf_file):
-        LOG.info("Removing replication configuration file.")
-        if os.path.exists(cnf_file):
-            operating_system.remove(cnf_file, as_root=True)
+    def reset_configuration(self, configuration):
+        config_contents = configuration['config_contents']
+        LOG.info("Resetting configuration.")
+        self._reset_configuration(config_contents)
+
+    def restart(self):
+        LOG.info("Restarting MySQL.")
+
+        # Ensure folder permissions for the database.
+        for folder in ['/etc/mysql', '/var/run/mysqld']:
+            operating_system.create_directory(
+                folder, user=CONF.database_service_uid,
+                group=CONF.database_service_uid, force=True,
+                as_root=True)
+
+        try:
+            docker_util.restart_container(self.docker_client)
+        except Exception:
+            LOG.exception("Failed to restart mysql")
+            raise exception.TroveError("Failed to restart mysql")
+
+        if not self.status.wait_for_real_status_to_change_to(
+                instance.ServiceStatuses.HEALTHY,
+                CONF.state_change_wait_time, update_db=False):
+            raise exception.TroveError("Failed to restart mysql")
+
+        LOG.info("Finished restarting MySQL.")
+
+    def create_backup(self, context, backup_info):
+        storage_driver = CONF.storage_strategy
+        backup_driver = cfg.get_configuration_property('backup_strategy')
+        incremental = ''
+        backup_type = 'full'
+        if backup_info.get('parent'):
+            incremental = (
+                f'--incremental '
+                f'--parent-location={backup_info["parent"]["location"]} '
+                f'--parent-checksum={backup_info["parent"]["checksum"]}')
+            backup_type = 'incremental'
+
+        backup_id = backup_info["id"]
+        image = CONF.backup_docker_image
+        name = 'db_backup'
+        volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}}
+        admin_pass = self.get_auth_password()
+        user_token = context.auth_token
+        auth_url = CONF.service_credentials.auth_url
+        user_tenant = context.project_id
+        metadata = f'datastore:{backup_info["datastore"]},' \
+                   f'datastore_version:{backup_info["datastore_version"]}'
+
+        command = (
+            f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
+            f'--storage-driver={storage_driver} --driver={backup_driver} '
+            f'--db-user=os_admin --db-password={admin_pass} '
+            f'--db-host=127.0.0.1 '
+            f'--os-token={user_token} --os-auth-url={auth_url} '
+            f'--os-tenant-id={user_tenant} '
+            f'--swift-extra-metadata={metadata} '
+            f'{incremental}'
+        )
+
+        # Update the backup status in the Trove database via the conductor.
+        conductor = conductor_api.API(context)
+        mount_point = CONF.get(CONF.datastore_manager).mount_point
+        stats = guestagent_utils.get_filesystem_volume_stats(mount_point)
+        backup_state = {
+            'backup_id': backup_id,
+            'size': stats.get('used', 0.0),
+            'state': BackupState.BUILDING,
+            'backup_type': backup_type
+        }
+        conductor.update_backup(CONF.guest_id,
+                                sent=timeutils.utcnow_ts(microsecond=True),
+                                **backup_state)
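# create_backup() parses the backup container's final output line with the
# module-level BACKUP_LOG regex (defined earlier in this file) to recover the
# artifact checksum and location, as used in the try block below. A purely
# hypothetical pattern and parse, for illustration only; the real regex may
# differ:
import re

EXAMPLE_PATTERN = re.compile(
    r'.*checksum: (?P<checksum>\S+), location: (?P<location>\S+)')
example_line = ('Backup successful, checksum: 5f4dcc3b5aa7, '
                'location: https://swift.example.com/v1/AUTH_x/db.xbstream.gz')
match = EXAMPLE_PATTERN.match(example_line)
assert match and match.group('checksum') == '5f4dcc3b5aa7'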
LOG.debug("Updated state for %s to %s.", backup_id, backup_state) + + # Start to run backup inside a separate docker container + try: + LOG.info('Starting to create backup %s, command: %s', backup_id, + command) + output, ret = docker_util.run_container( + self.docker_client, image, name, + volumes=volumes, command=command) + result = output[-1] + if not ret: + msg = f'Failed to run backup container, error: {result}' + LOG.error(msg) + raise Exception(msg) + + backup_result = BACKUP_LOG.match(result) + if backup_result: + backup_state.update({ + 'checksum': backup_result.group('checksum'), + 'location': backup_result.group('location'), + 'success': True, + 'state': BackupState.COMPLETED, + }) + else: + backup_state.update({ + 'success': False, + 'state': BackupState.FAILED, + }) + except Exception as err: + LOG.error("Failed to create backup %s", backup_id) + backup_state.update({ + 'success': False, + 'state': BackupState.FAILED, + }) + raise exception.TroveError( + "Failed to create backup %s, error: %s" % + (backup_id, str(err)) + ) + finally: + LOG.info("Completed backup %s.", backup_id) + conductor.update_backup(CONF.guest_id, + sent=timeutils.utcnow_ts( + microsecond=True), + **backup_state) + LOG.debug("Updated state for %s to %s.", backup_id, backup_state) + + def restore_backup(self, context, backup_info, restore_location): + backup_id = backup_info['id'] + storage_driver = CONF.storage_strategy + backup_driver = cfg.get_configuration_property('backup_strategy') + user_token = context.auth_token + auth_url = CONF.service_credentials.auth_url + user_tenant = context.project_id + image = CONF.backup_docker_image + name = 'db_restore' + volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}} + + command = ( + f'/usr/bin/python3 main.py --nobackup ' + f'--storage-driver={storage_driver} --driver={backup_driver} ' + f'--os-token={user_token} --os-auth-url={auth_url} ' + f'--os-tenant-id={user_tenant} ' + f'--restore-from={backup_info["location"]} ' + f'--restore-checksum={backup_info["checksum"]}' + ) + + LOG.debug('Stop the database and clean up the data before restore ' + 'from %s', backup_id) + self.stop_db() + operating_system.chmod(restore_location, + operating_system.FileMode.SET_FULL, + as_root=True) + utils.clean_out(restore_location) + + # Start to run restore inside a separate docker container + LOG.info('Starting to restore backup %s, command: %s', backup_id, + command) + output, ret = docker_util.run_container( + self.docker_client, image, name, + volumes=volumes, command=command) + result = output[-1] + if not ret: + msg = f'Failed to run restore container, error: {result}' + LOG.error(msg) + raise Exception(msg) + + LOG.debug('Deleting ib_logfile files after restore from backup %s', + backup_id) + operating_system.chown(restore_location, CONF.database_service_uid, + CONF.database_service_uid, force=True, + as_root=True) + self.wipe_ib_logfiles() def exists_replication_source_overrides(self): return self.configuration_manager.has_system_override(CNF_MASTER) @@ -936,11 +878,10 @@ class BaseMySqlApp(object): self.configuration_manager.remove_system_override(CNF_SLAVE) def grant_replication_privilege(self, replication_user): - LOG.info("Granting Replication Slave privilege.") + LOG.info("Granting replication slave privilege for %s", + replication_user['name']) - LOG.debug("grant_replication_privilege: %s", replication_user) - - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: g = 
sql_query.Grant(permissions=['REPLICATION SLAVE'], user=replication_user['name'], clear=replication_user['password']) @@ -949,149 +890,73 @@ class BaseMySqlApp(object): client.execute(t) def get_port(self): - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: result = client.execute('SELECT @@port').first() return result[0] - def get_binlog_position(self): - with self.local_sql_client(self.get_engine()) as client: - result = client.execute('SHOW MASTER STATUS').first() - binlog_position = { - 'log_file': result['File'], - 'position': result['Position'] - } - return binlog_position + def wait_for_slave_status(self, status, client, max_time): + def verify_slave_status(): + ret = client.execute( + "SELECT SERVICE_STATE FROM " + "performance_schema.replication_connection_status").first() + if not ret: + actual_status = 'OFF' + else: + actual_status = ret[0] + return actual_status.upper() == status.upper() - def execute_on_client(self, sql_statement): - LOG.debug("Executing SQL: %s", sql_statement) - with self.local_sql_client(self.get_engine()) as client: - return client.execute(sql_statement) + LOG.debug("Waiting for slave status %s with timeout %s", + status, max_time) + try: + utils.poll_until(verify_slave_status, sleep_time=3, + time_out=max_time) + LOG.info("Replication status: %s.", status) + except exception.PollTimeOut: + raise RuntimeError( + _("Replication is not %(status)s after %(max)d seconds.") % { + 'status': status.lower(), 'max': max_time}) def start_slave(self): LOG.info("Starting slave replication.") - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: client.execute('START SLAVE') - self._wait_for_slave_status("ON", client, 180) + self.wait_for_slave_status("ON", client, 180) def stop_slave(self, for_failover): - replication_user = None LOG.info("Stopping slave replication.") - with self.local_sql_client(self.get_engine()) as client: + + replication_user = None + with mysql_util.SqlClient(self.get_engine()) as client: result = client.execute('SHOW SLAVE STATUS') replication_user = result.first()['Master_User'] client.execute('STOP SLAVE') client.execute('RESET SLAVE ALL') - self._wait_for_slave_status("OFF", client, 180) + self.wait_for_slave_status('OFF', client, 180) if not for_failover: - client.execute('DROP USER ' + replication_user) + client.execute('DROP USER IF EXISTS ' + replication_user) + return { 'replication_user': replication_user } def stop_master(self): LOG.info("Stopping replication master.") - with self.local_sql_client(self.get_engine()) as client: + with mysql_util.SqlClient(self.get_engine()) as client: client.execute('RESET MASTER') - def _wait_for_slave_status(self, status, client, max_time): - - def verify_slave_status(): - actual_status = client.execute( - "SHOW GLOBAL STATUS like 'slave_running'").first()[1] - return actual_status.upper() == status.upper() - - LOG.debug("Waiting for SLAVE_RUNNING to change to %s.", status) - try: - utils.poll_until(verify_slave_status, sleep_time=3, - time_out=max_time) - LOG.info("Replication is now %s.", status.lower()) - except PollTimeOut: - raise RuntimeError( - _("Replication is not %(status)s after %(max)d seconds.") % { - 'status': status.lower(), 'max': max_time}) - - def start_mysql(self, update_db=False, disable_on_boot=False, timeout=120): - LOG.info("Starting MySQL.") - # This is the site of all the trouble in the restart tests. 
- # Essentially what happens is that mysql start fails, but does not - # die. It is then impossible to kill the original, so - - if disable_on_boot: - self._disable_mysql_on_boot() - else: - self._enable_mysql_on_boot() - - try: - utils.execute_with_timeout(self.mysql_service['cmd_start'], - shell=True, timeout=timeout) - except KeyError: - raise RuntimeError(_("Service is not discovered.")) - except exception.ProcessExecutionError: - # it seems mysql (percona, at least) might come back with [Fail] - # but actually come up ok. we're looking into the timing issue on - # parallel, but for now, we'd like to give it one more chance to - # come up. so regardless of the execute_with_timeout() response, - # we'll assume mysql comes up and check its status for a while. - pass - if not self.status.wait_for_real_status_to_change_to( - rd_instance.ServiceStatuses.RUNNING, - self.state_change_wait_time, update_db): - LOG.error("Start up of MySQL failed.") - # If it won't start, but won't die either, kill it by hand so we - # don't let a rouge process wander around. - try: - utils.execute_with_timeout("sudo", "pkill", "-9", "mysql") - except exception.ProcessExecutionError: - LOG.exception("Error killing stalled MySQL start command.") - # There's nothing more we can do... - self.status.end_restart() - raise RuntimeError(_("Could not start MySQL!")) - - def start_db_with_conf_changes(self, config_contents): - LOG.info("Starting MySQL with conf changes.") - LOG.debug("Inside the guest - Status is_running = (%s).", - self.status.is_running) - if self.status.is_running: - LOG.error("Cannot execute start_db_with_conf_changes because " - "MySQL state == %s.", self.status) - raise RuntimeError(_("MySQL not stopped.")) - LOG.info("Resetting configuration.") - self._reset_configuration(config_contents) - self.start_mysql(True) - - def reset_configuration(self, configuration): - config_contents = configuration['config_contents'] - LOG.info("Resetting configuration.") - self._reset_configuration(config_contents) - - def reset_admin_password(self, admin_password): - """Replace the password in the my.cnf file.""" - # grant the new admin password - with self.local_sql_client(self.get_engine()) as client: - self._create_admin_user(client, admin_password) - # reset the ENGINE because the password could have changed - global ENGINE - ENGINE = None - self._save_authentication_properties(admin_password) + def make_read_only(self, read_only): + with mysql_util.SqlClient(self.get_engine()) as client: + q = "set global read_only = %s" % read_only + client.execute(text(str(q))) class BaseMySqlRootAccess(object): - - def __init__(self, local_sql_client, mysql_app): - self._local_sql_client = local_sql_client - self._mysql_app = mysql_app - - @property - def mysql_app(self): - return self._mysql_app - - @property - def local_sql_client(self): - return self._local_sql_client + def __init__(self, mysql_app): + self.mysql_app = mysql_app def is_root_enabled(self): """Return True if root access is enabled; False otherwise.""" - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: t = text(sql_query.ROOT_ENABLED) result = client.execute(t) LOG.debug("Found %s with remote root access.", result.rowcount) @@ -1102,7 +967,7 @@ class BaseMySqlRootAccess(object): reset the root password. 
""" user = models.MySQLUser.root(password=root_password) - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: try: cu = sql_query.CreateUser(user.name, host=user.host) t = text(str(cu)) @@ -1111,7 +976,7 @@ class BaseMySqlRootAccess(object): # Ignore, user is already created, just reset the password # TODO(rnirmal): More fine grained error checking later on LOG.debug(err) - with self.local_sql_client(self.mysql_app.get_engine()) as client: + with mysql_util.SqlClient(self.mysql_app.get_engine()) as client: uu = sql_query.SetPassword(user.name, host=user.host, new_password=user.password) t = text(str(uu)) diff --git a/trove/guestagent/datastore/service.py b/trove/guestagent/datastore/service.py index 4646a5ab1f..63a5464e44 100644 --- a/trove/guestagent/datastore/service.py +++ b/trove/guestagent/datastore/service.py @@ -12,8 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - - import os import time @@ -22,13 +20,12 @@ from oslo_utils import timeutils from trove.common import cfg from trove.common import context as trove_context -from trove.common.i18n import _ from trove.common import instance +from trove.common.i18n import _ from trove.conductor import api as conductor_api from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -40,33 +37,17 @@ class BaseDbStatus(object): the state of the application is determined by calling a series of commands. - This class also handles saving and load the status of the DB application - in the database. - The status is updated whenever the update() method is called, except - if the state is changed to building or restart mode using the - "begin_install" and "begin_restart" methods. - The building mode persists in the database while restarting mode does - not (so if there is a Python Pete crash update() will set the status to - show a failure). - These modes are exited and functionality to update() returns when - end_install or end_restart() is called, at which point the status again - reflects the actual status of the DB app. 
- This is a base class, subclasses must implement real logic for - determining current status of DB in _get_actual_db_status() + determining current status of DB in get_actual_db_status() """ - _instance = None - - GUESTAGENT_DIR = '~' + GUESTAGENT_DIR = '/opt/trove-guestagent' PREPARE_START_FILENAME = '.guestagent.prepare.start' PREPARE_END_FILENAME = '.guestagent.prepare.end' - def __init__(self): - if self._instance is not None: - raise RuntimeError(_("Cannot instantiate twice.")) + def __init__(self, docker_client): self.status = None - self.restart_mode = False + self.docker_client = docker_client self.__prepare_completed = None @@ -95,10 +76,6 @@ class BaseDbStatus(object): self.set_status(instance.ServiceStatuses.BUILDING, True) - def begin_restart(self): - """Called before restarting DB server.""" - self.restart_mode = True - def set_ready(self): prepare_end_file = guestagent_utils.build_file_path( self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME) @@ -123,22 +100,18 @@ class BaseDbStatus(object): LOG.info("Set final status to %s.", final_status) self.set_status(final_status, force=True) else: - self._end_install_or_restart(True) + self._end_install(True) - def end_restart(self): - self.restart_mode = False - LOG.info("Ending restart.") - self._end_install_or_restart(False) + def _end_install(self, force): + """Called after DB is installed. - def _end_install_or_restart(self, force): - """Called after DB is installed or restarted. Updates the database with the actual DB server status. """ - real_status = self._get_actual_db_status() + real_status = self.get_actual_db_status() LOG.info("Current database status is '%s'.", real_status) self.set_status(real_status, force=force) - def _get_actual_db_status(self): + def get_actual_db_status(self): raise NotImplementedError() @property @@ -149,15 +122,12 @@ class BaseDbStatus(object): """ return self.prepare_completed - @property - def _is_restarting(self): - return self.restart_mode - @property def is_running(self): """True if DB server is running.""" return (self.status is not None and - self.status == instance.ServiceStatuses.RUNNING) + self.status in [instance.ServiceStatuses.RUNNING, + instance.ServiceStatuses.HEALTHY]) def set_status(self, status, force=False): """Use conductor to update the DB app status.""" @@ -180,40 +150,10 @@ class BaseDbStatus(object): """Find and report status of DB on this machine. The database is updated and the status is also returned. """ - if self.is_installed and not self._is_restarting: - status = self._get_actual_db_status() + if self.is_installed: + status = self.get_actual_db_status() self.set_status(status) - def restart_db_service(self, service_candidates, timeout): - """Restart the database. - Do not change the service auto-start setting. - Disable the Trove instance heartbeat updates during the restart. - - 1. Stop the database service. - 2. Wait for the database to shutdown. - 3. Start the database service. - 4. Wait for the database to start running. - - :param service_candidates: List of possible system service names. - :type service_candidates: list - - :param timeout: Wait timeout in seconds. - :type timeout: integer - - :raises: :class:`RuntimeError` on failure. 
- """ - try: - self.begin_restart() - self.stop_db_service(service_candidates, timeout, - disable_on_boot=False, update_db=False) - self.start_db_service(service_candidates, timeout, - enable_on_boot=False, update_db=False) - except Exception as e: - LOG.exception(e) - raise RuntimeError(_("Database restart failed.")) - finally: - self.end_restart() - def start_db_service(self, service_candidates, timeout, enable_on_boot=True, update_db=False): """Start the database service and wait for the database to become @@ -344,7 +284,7 @@ class BaseDbStatus(object): loop = True while loop: - self.status = self._get_actual_db_status() + self.status = self.get_actual_db_status() if self.status == status: if update_db: self.set_status(self.status) diff --git a/trove/guestagent/datastore/technical-preview/__init__.py b/trove/guestagent/datastore/technical-preview/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/dbaas.py b/trove/guestagent/dbaas.py index 4399216b50..cb51da327b 100644 --- a/trove/guestagent/dbaas.py +++ b/trove/guestagent/dbaas.py @@ -32,33 +32,32 @@ from trove.common import cfg from trove.common.i18n import _ from trove.common import utils - LOG = logging.getLogger(__name__) defaults = { 'mysql': - 'trove.guestagent.datastore.mysql.manager.Manager', - 'percona': - 'trove.guestagent.datastore.experimental.percona.manager.Manager', - 'pxc': - 'trove.guestagent.datastore.experimental.pxc.manager.Manager', - 'redis': - 'trove.guestagent.datastore.experimental.redis.manager.Manager', - 'cassandra': - 'trove.guestagent.datastore.experimental.cassandra.manager.Manager', - 'couchbase': - 'trove.guestagent.datastore.experimental.couchbase.manager.Manager', - 'mongodb': - 'trove.guestagent.datastore.experimental.mongodb.manager.Manager', - 'postgresql': - 'trove.guestagent.datastore.experimental.postgresql.manager.Manager', - 'couchdb': - 'trove.guestagent.datastore.experimental.couchdb.manager.Manager', - 'vertica': - 'trove.guestagent.datastore.experimental.vertica.manager.Manager', - 'db2': - 'trove.guestagent.datastore.experimental.db2.manager.Manager', + 'trove.guestagent.datastore.mysql.manager.Manager', 'mariadb': - 'trove.guestagent.datastore.experimental.mariadb.manager.Manager' + 'trove.guestagent.datastore.mariadb.manager.Manager', + 'percona': + 'trove.guestagent.datastore.experimental.percona.manager.Manager', + 'pxc': + 'trove.guestagent.datastore.experimental.pxc.manager.Manager', + 'redis': + 'trove.guestagent.datastore.experimental.redis.manager.Manager', + 'cassandra': + 'trove.guestagent.datastore.experimental.cassandra.manager.Manager', + 'couchbase': + 'trove.guestagent.datastore.experimental.couchbase.manager.Manager', + 'mongodb': + 'trove.guestagent.datastore.experimental.mongodb.manager.Manager', + 'postgresql': + 'trove.guestagent.datastore.experimental.postgresql.manager.Manager', + 'couchdb': + 'trove.guestagent.datastore.experimental.couchdb.manager.Manager', + 'vertica': + 'trove.guestagent.datastore.experimental.vertica.manager.Manager', + 'db2': + 'trove.guestagent.datastore.experimental.db2.manager.Manager', } CONF = cfg.CONF diff --git a/trove/guestagent/strategies/backup/__init__.py b/trove/guestagent/strategies/backup/__init__.py deleted file mode 100644 index 5399208574..0000000000 --- a/trove/guestagent/strategies/backup/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from trove.common.strategies.strategy import Strategy - -LOG = logging.getLogger(__name__) - - -def get_backup_strategy(backup_driver, ns=__name__): - return Strategy.get_strategy(backup_driver, ns) diff --git a/trove/guestagent/strategies/backup/base.py b/trove/guestagent/strategies/backup/base.py deleted file mode 100644 index cc9f6fc167..0000000000 --- a/trove/guestagent/strategies/backup/base.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -import os -import signal - -from oslo_log import log as logging - -from eventlet.green import subprocess -from trove.common import cfg, utils -from trove.common.strategies.strategy import Strategy - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -class BackupError(Exception): - """Error running the Backup Command.""" - - -class UnknownBackupType(Exception): - """Unknown backup type.""" - - -class BackupRunner(Strategy): - """Base class for Backup Strategy implementations.""" - __strategy_type__ = 'backup_runner' - __strategy_ns__ = 'trove.guestagent.strategies.backup' - - # The actual system call to run the backup - cmd = None - is_zipped = CONF.backup_use_gzip_compression - is_encrypted = CONF.backup_use_openssl_encryption - encrypt_key = CONF.backup_aes_cbc_key - - def __init__(self, filename, **kwargs): - self.base_filename = filename - self.process = None - self.pid = None - kwargs.update({'filename': filename}) - self.command = self.cmd % kwargs - super(BackupRunner, self).__init__() - - @property - def backup_type(self): - return type(self).__name__ - - def _run(self): - LOG.debug("BackupRunner running cmd: %s", self.command) - self.process = subprocess.Popen(self.command, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - preexec_fn=os.setsid) - self.pid = self.process.pid - - def __enter__(self): - """Start up the process.""" - self._run_pre_backup() - self._run() - return self - - def __exit__(self, exc_type, exc_value, traceback): - """Clean up everything.""" - # NOTE(zhaochao): all child processes should always be killed even the - # context exits by an exception. 
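# The (since removed) runner relies on a session-group trick: _run() starts
# the pipeline with preexec_fn=os.setsid, making the shell a process-group
# leader, so os.killpg() below can tear down the whole backup/gzip/openssl
# chain with one signal. A minimal self-contained sketch of that pattern:
import os
import signal
import subprocess
import time

proc = subprocess.Popen('sleep 60 | cat', shell=True, preexec_fn=os.setsid)
time.sleep(0.1)                       # give the shell time to fork children
os.killpg(proc.pid, signal.SIGTERM)   # pgid == pid thanks to os.setsid
proc.wait()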
- if getattr(self, 'process', None): - try: - # Send a sigterm to the session leader, so that all - # child processes are killed and cleaned up on terminate - # (Ensures zombie processes aren't left around on a FAILURE) - # https://bugs.launchpad.net/trove/+bug/1253850 - os.killpg(self.process.pid, signal.SIGTERM) - self.process.terminate() - except OSError: - # Already stopped - pass - - if exc_type is not None: - return False - - utils.raise_if_process_errored(self.process, BackupError) - if not self.check_process(): - raise BackupError - - self._run_post_backup() - - return True - - def metadata(self): - """Hook for subclasses to store metadata from the backup.""" - return {} - - @property - def filename(self): - """Subclasses may overwrite this to declare a format (.tar).""" - return self.base_filename - - @property - def manifest(self): - return "%s%s%s" % (self.filename, - self.zip_manifest, - self.encrypt_manifest) - - @property - def zip_cmd(self): - return ' | gzip' if self.is_zipped else '' - - @property - def zip_manifest(self): - return '.gz' if self.is_zipped else '' - - @property - def encrypt_cmd(self): - return (' | openssl enc -aes-256-cbc -salt -pass pass:%s' % - self.encrypt_key) if self.is_encrypted else '' - - @property - def encrypt_manifest(self): - return '.enc' if self.is_encrypted else '' - - def check_process(self): - """Hook for subclasses to check process for errors.""" - return True - - def read(self, chunk_size): - return self.process.stdout.read(chunk_size) - - def _run_pre_backup(self): - pass - - def _run_post_backup(self): - pass diff --git a/trove/guestagent/strategies/backup/experimental/__init__.py b/trove/guestagent/strategies/backup/experimental/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/strategies/backup/experimental/cassandra_impl.py b/trove/guestagent/strategies/backup/experimental/cassandra_impl.py deleted file mode 100644 index 33398ef276..0000000000 --- a/trove/guestagent/strategies/backup/experimental/cassandra_impl.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2014 Mirantis Inc. -# All Rights Reserved. -# Copyright 2015 Tesora Inc. -# All Rights Reserved.s -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from trove.common import exception -from trove.common.i18n import _ -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.cassandra import service -from trove.guestagent.strategies.backup import base - -LOG = logging.getLogger(__name__) - - -class NodetoolSnapshot(base.BackupRunner): - """Implementation of backup using the Nodetool (http://goo.gl/QtXVsM) - utility. - """ - - # It is recommended to include the system keyspace in the backup. - # Keeping the system keyspace will reduce the restore time - # by avoiding need to rebuilding indexes. 
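# For reference, the nodetool calls this (now removed) strategy wrapped can
# be reproduced by hand against a local Cassandra node; the tag name below is
# illustrative:
import subprocess

tag = 'example-backup-tag'
subprocess.check_call(['nodetool', 'snapshot', '-t', tag])       # all keyspaces
subprocess.check_call(['nodetool', 'clearsnapshot', '-t', tag])  # clean up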
- - __strategy_name__ = 'nodetoolsnapshot' - _SNAPSHOT_EXTENSION = 'db' - - def __init__(self, filename, **kwargs): - self._app = service.CassandraApp() - super(NodetoolSnapshot, self).__init__(filename, **kwargs) - - def _run_pre_backup(self): - """Take snapshot(s) for all keyspaces. - Remove existing ones first if any. - Snapshot(s) will be stored in the data directory tree: - //
/snapshots/ - """ - - self._remove_snapshot(self.filename) - self._snapshot_all_keyspaces(self.filename) - - # Commonly 'self.command' gets resolved in the base constructor, - # but we can build the full command only after having taken the - # keyspace snapshot(s). - self.command = self._backup_cmd + self.command - - def _run_post_backup(self): - """Remove the created snapshot(s). - """ - - self._remove_snapshot(self.filename) - - def _remove_snapshot(self, snapshot_name): - LOG.debug('Clearing snapshot(s) for all keyspaces with snapshot name ' - '"%s".', snapshot_name) - utils.execute('nodetool', 'clearsnapshot', '-t %s' % snapshot_name) - - def _snapshot_all_keyspaces(self, snapshot_name): - LOG.debug('Creating snapshot(s) for all keyspaces with snapshot name ' - '"%s".', snapshot_name) - utils.execute('nodetool', 'snapshot', '-t %s' % snapshot_name) - - @property - def cmd(self): - return self.zip_cmd + self.encrypt_cmd - - @property - def _backup_cmd(self): - """Command to collect and package keyspace snapshot(s). - """ - - return self._build_snapshot_package_cmd(self._app.cassandra_data_dir, - self.filename) - - def _build_snapshot_package_cmd(self, data_dir, snapshot_name): - """Collect all files for a given snapshot and build a package - command for them. - Transform the paths such that the backup can be restored simply by - extracting the archive right to an existing data directory - (i.e. place the root into the and - remove the 'snapshots/' portion of the path). - Attempt to preserve access modifiers on the archived files. - Assert the backup is not empty as there should always be - at least the system keyspace. Fail if there is nothing to backup. - """ - - LOG.debug('Searching for all snapshot(s) with name "%s".', - snapshot_name) - snapshot_files = operating_system.list_files_in_directory( - data_dir, recursive=True, include_dirs=False, - pattern=r'.*/snapshots/%s/.*\.%s' % (snapshot_name, - self._SNAPSHOT_EXTENSION), - as_root=True) - num_snapshot_files = len(snapshot_files) - LOG.debug('Found %(num)d snapshot (*.%(ext)s) files.', - {'num': num_snapshot_files, 'ext': self._SNAPSHOT_EXTENSION}) - if num_snapshot_files > 0: - return ('sudo tar ' - '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"' - % (snapshot_name, data_dir, '" "'.join(snapshot_files))) - - # There should always be at least the system keyspace snapshot. - raise exception.BackupCreationError(_("No data found.")) diff --git a/trove/guestagent/strategies/backup/experimental/couchbase_impl.py b/trove/guestagent/strategies/backup/experimental/couchbase_impl.py deleted file mode 100644 index 80e70cdac8..0000000000 --- a/trove/guestagent/strategies/backup/experimental/couchbase_impl.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) 2014 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -import json - -from oslo_log import log as logging - -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.couchbase import service -from trove.guestagent.datastore.experimental.couchbase import system -from trove.guestagent.strategies.backup import base - - -LOG = logging.getLogger(__name__) -OUTFILE = '/tmp' + system.BUCKETS_JSON - - -class CbBackup(base.BackupRunner): - """ - Implementation of Backup Strategy for Couchbase. - """ - __strategy_name__ = 'cbbackup' - - pre_backup_commands = [ - ['rm', '-rf', system.COUCHBASE_DUMP_DIR], - ['mkdir', '-p', system.COUCHBASE_DUMP_DIR], - ] - - post_backup_commands = [ - ['rm', '-rf', system.COUCHBASE_DUMP_DIR], - ] - - @property - def cmd(self): - """ - Creates backup dump dir, tars it up, and encrypts it. - """ - cmd = 'tar cpPf - ' + system.COUCHBASE_DUMP_DIR - return cmd + self.zip_cmd + self.encrypt_cmd - - def _save_buckets_config(self, password): - url = system.COUCHBASE_REST_API + '/pools/default/buckets' - utils.execute_with_timeout('curl -u root:' + password + - ' ' + url + ' > ' + OUTFILE, - shell=True, timeout=300) - - def _backup(self, password): - utils.execute_with_timeout('/opt/couchbase/bin/cbbackup', - system.COUCHBASE_REST_API, - system.COUCHBASE_DUMP_DIR, - '-u', 'root', '-p', password, - timeout=600, run_as_root=True, - root_helper='sudo') - - def _run_pre_backup(self): - try: - for cmd in self.pre_backup_commands: - utils.execute_with_timeout(*cmd) - root = service.CouchbaseRootAccess() - pw = root.get_password() - self._save_buckets_config(pw) - with open(OUTFILE, "r") as f: - out = f.read() - if out != "[]": - d = json.loads(out) - all_memcached = True - for i in range(len(d)): - bucket_type = d[i]["bucketType"] - if bucket_type != "memcached": - all_memcached = False - break - if not all_memcached: - self._backup(pw) - else: - LOG.info("All buckets are memcached. " - "Skipping backup.") - operating_system.move(OUTFILE, system.COUCHBASE_DUMP_DIR) - if pw != "password": - # Not default password, backup generated root password - operating_system.copy(system.pwd_file, - system.COUCHBASE_DUMP_DIR, - preserve=True, as_root=True) - except exception.ProcessExecutionError: - LOG.exception("Error during pre-backup phase.") - raise - - def _run_post_backup(self): - try: - for cmd in self.post_backup_commands: - utils.execute_with_timeout(*cmd) - except exception.ProcessExecutionError: - LOG.exception("Error during post-backup phase.") - raise diff --git a/trove/guestagent/strategies/backup/experimental/couchdb_impl.py b/trove/guestagent/strategies/backup/experimental/couchdb_impl.py deleted file mode 100644 index 91bdf7b6d7..0000000000 --- a/trove/guestagent/strategies/backup/experimental/couchdb_impl.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2016 IBM Corporation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from trove.guestagent.datastore.experimental.couchdb import service -from trove.guestagent.strategies.backup import base - - -class CouchDBBackup(base.BackupRunner): - - __strategy_name__ = 'couchdbbackup' - - @property - def cmd(self): - """ - CouchDB backup is based on a simple filesystem copy of the database - files. Each database is a single fully contained append only file. - For example, if a user creates a database 'foo', then a corresponding - 'foo.couch' file will be created in the database directory which by - default is in '/var/lib/couchdb'. - """ - cmd = 'sudo tar cpPf - ' + service.COUCHDB_LIB_DIR - return cmd + self.zip_cmd + self.encrypt_cmd diff --git a/trove/guestagent/strategies/backup/experimental/db2_impl.py b/trove/guestagent/strategies/backup/experimental/db2_impl.py deleted file mode 100644 index d0fad8c279..0000000000 --- a/trove/guestagent/strategies/backup/experimental/db2_impl.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright 2016 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -from trove.common.db import models -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.db2 import service -from trove.guestagent.datastore.experimental.db2 import system -from trove.guestagent.strategies.backup import base - -LOG = logging.getLogger(__name__) - - -class DB2Backup(base.BackupRunner): - """ - Base class for DB2 backups - """ - def __init__(self, *args, **kwargs): - super(DB2Backup, self).__init__(*args, **kwargs) - self.admin = service.DB2Admin() - self.databases = self.list_dbnames() - - def list_dbnames(self): - dbNames = [] - databases, marker = self.admin.list_databases() - for database in databases: - mydb = models.DatastoreSchema.deserialize(database) - dbNames.append(mydb.name) - return dbNames - - def estimate_backup_size(self): - """ - Estimating the size of the backup based on the size of the data - returned from the get_db_size procedure. The size of the - backup is always going to be smaller than the size of the data. - """ - try: - size = 0 - for dbname in self.databases: - out = service.run_command(system.GET_DB_SIZE % {'dbname': - dbname}) - size = size + int(out[0]) - except exception.ProcessExecutionError: - LOG.exception("An error occurred while trying to " - "estimate backup size") - LOG.debug("Estimated size for databases: %d", size) - return size - - def estimate_log_size(self): - return 0.0 - - def run_backup(self): - pass - - def execute_backup_cmd(self, backup_command): - service.create_db2_dir(system.DB2_BACKUP_DIR) - for dbName in self.databases: - service.run_command(backup_command % {'dbname': dbName}) - - def _run_pre_backup(self): - """ - Before performing the actual backup we need to make sure that - there is enough space to store the backups. 
The backup size - is the sum of the size of the databases and if it is an online - backup, the size of the archived logs is also factored in. - """ - backup_size_bytes = self.estimate_backup_size() - log_size_bytes = self.estimate_log_size() - total_backup_size_gb = utils.to_gb(backup_size_bytes + log_size_bytes) - free_bytes = operating_system.get_bytes_free_on_fs(system.MOUNT_POINT) - free_gb = utils.to_gb(free_bytes) - - if total_backup_size_gb > free_gb: - raise exception.InsufficientSpaceForBackup % { - 'backup_size': total_backup_size_gb, - 'free': free_gb - } - self.run_backup() - - @property - def cmd(self): - cmd = 'sudo tar cPf - ' + system.DB2_BACKUP_DIR - return cmd + self.zip_cmd + self.encrypt_cmd - - def cleanup(self): - service.remove_db2_dir(system.DB2_BACKUP_DIR) - - def _run_post_backup(self): - self.cleanup() - - -class DB2OnlineBackup(DB2Backup): - """ - Implementation of Online Backup Strategy for DB2 - using archive logging. - """ - __strategy_name__ = 'db2onlinebackup' - - def __init__(self, *args, **kwargs): - super(DB2OnlineBackup, self).__init__(*args, **kwargs) - - def estimate_log_size(self): - """ - Estimate the log utilization for all databases. The LOG_UTILIZATION - administrative view returns information about log utilization for the - connected database. The TOTAL_LOG_USED_KB returns the log utilization - in KB. - """ - log_size = 0 - try: - for dbname in self.databases: - out = service.run_command( - system.LOG_UTILIZATION % {'dbname': dbname}) - log_size = log_size + int(out[0]) - log_size = log_size * 1024 - except exception.ProcessExecutionError: - LOG.exception("An error occurred while trying to estimate log " - "size") - LOG.debug("Estimated log size for all databases: %d", log_size) - return log_size - - def run_backup(self): - try: - self.execute_backup_cmd(system.ONLINE_BACKUP_DB) - except exception.ProcessExecutionError: - LOG.exception("An exception occurred while doing an online " - "backup.") - self.cleanup() - raise - - def cleanup(self): - super(DB2OnlineBackup, self).cleanup() - ''' - After a backup operation, we can delete the archived logs - from the archived log directory but we do not want to delete - the directory itself. Since archive logging is enabled for - all databases, this directory is needed to store archive logs. - ''' - service.remove_db2_dir(system.DB2_ARCHIVE_LOGS_DIR + "/*") - - -class DB2OfflineBackup(DB2Backup): - """ - Implementation of Offline Backup Strategy for DB2 using - circular logging which is the default. - """ - __strategy_name__ = 'db2offlinebackup' - - def __init__(self, *args, **kwargs): - super(DB2OfflineBackup, self).__init__(*args, **kwargs) - - def run_backup(self): - """Create archival contents in dump dir""" - try: - service.run_command(system.QUIESCE_DB2) - self.execute_backup_cmd(system.OFFLINE_BACKUP_DB) - service.run_command(system.UNQUIESCE_DB2) - except exception.ProcessExecutionError: - LOG.exception("An exception occurred while doing an offline " - "backup.") - self.cleanup() - raise diff --git a/trove/guestagent/strategies/backup/experimental/mariadb_impl.py b/trove/guestagent/strategies/backup/experimental/mariadb_impl.py deleted file mode 100644 index b3644aeb42..0000000000 --- a/trove/guestagent/strategies/backup/experimental/mariadb_impl.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2019 Catalyst Cloud Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import re - -from oslo_log import log as logging - -from trove.common.i18n import _ -from trove.guestagent.datastore.mysql import service as mysql_service -from trove.guestagent.datastore.mysql_common import service as common_service -from trove.guestagent.strategies.backup import base - -LOG = logging.getLogger(__name__) -BACKUP_LOG = '/tmp/mariabackup.log' - - -class MariaBackup(base.BackupRunner): - """Implementation of Backup Strategy for mariabackup.""" - __strategy_name__ = 'mariabackup' - - @property - def user_and_pass(self): - return ('--user=%(user)s --password=%(password)s --host=localhost ' - '--socket=%(socket_file)s' % - {'user': common_service.ADMIN_USER_NAME, - 'password': mysql_service.MySqlApp.get_auth_password(), - 'socket_file': '/var/run/mysqld/mysqld.sock'}) - - @property - def cmd(self): - cmd = ('sudo mariabackup --backup --stream=xbstream ' + - self.user_and_pass + ' 2>' + BACKUP_LOG) - return cmd + self.zip_cmd + self.encrypt_cmd - - def check_process(self): - """Check the output of mariabackup command for 'completed OK!'. - - Return True if no error, otherwise return False. - """ - LOG.debug('Checking mariabackup process output.') - - with open(BACKUP_LOG, 'r') as backup_log: - output = backup_log.read() - if not output: - LOG.error("mariabackup log file empty.") - return False - - LOG.debug(output) - - last_line = output.splitlines()[-1].strip() - if not re.search('completed OK!', last_line): - LOG.error("mariabackup command failed.") - return False - - return True - - def metadata(self): - LOG.debug('Getting metadata for backup %s', self.base_filename) - - meta = {} - lsn = re.compile(r"The latest check point \(for incremental\): " - r"'(\d+)'") - with open(BACKUP_LOG, 'r') as backup_log: - output = backup_log.read() - match = lsn.search(output) - if match: - meta = {'lsn': match.group(1)} - - LOG.info("Metadata for backup %s: %s", self.base_filename, meta) - return meta - - @property - def filename(self): - return '%s.xbstream' % self.base_filename - - -class MariaBackupIncremental(MariaBackup): - def __init__(self, *args, **kwargs): - if not kwargs.get('lsn'): - raise AttributeError(_('lsn attribute missing, bad parent?')) - super(MariaBackupIncremental, self).__init__(*args, **kwargs) - self.parent_location = kwargs.get('parent_location') - self.parent_checksum = kwargs.get('parent_checksum') - - @property - def cmd(self): - cmd = ( - 'sudo mariabackup --backup --stream=xbstream' - ' --incremental-lsn=%(lsn)s ' + - self.user_and_pass + - ' 2>' + - BACKUP_LOG - ) - return cmd + self.zip_cmd + self.encrypt_cmd - - def metadata(self): - meta = super(MariaBackupIncremental, self).metadata() - meta.update({ - 'parent_location': self.parent_location, - 'parent_checksum': self.parent_checksum, - }) - return meta diff --git a/trove/guestagent/strategies/backup/experimental/mongo_impl.py b/trove/guestagent/strategies/backup/experimental/mongo_impl.py deleted file mode 100644 index c363992669..0000000000 --- a/trove/guestagent/strategies/backup/experimental/mongo_impl.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2014 eBay Software Foundation -# 
Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common import exception -from trove.common.i18n import _ -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.mongodb import ( - service as mongo_service) -from trove.guestagent.datastore.experimental.mongodb import ( - system as mongo_system) -from trove.guestagent.strategies.backup import base - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) -MONGODB_DBPATH = CONF.mongodb.mount_point -MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" -LARGE_TIMEOUT = 1200 - - -class MongoDump(base.BackupRunner): - """Implementation of Backup Strategy for MongoDump.""" - __strategy_name__ = 'mongodump' - - def __init__(self, *args, **kwargs): - self.app = mongo_service.MongoDBApp() - super(MongoDump, self).__init__(*args, **kwargs) - - def _run_pre_backup(self): - """Create archival contents in dump dir""" - try: - est_dump_size = self.estimate_dump_size() - avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH) - if est_dump_size > avail: - self.cleanup() - # TODO(atomic77) Though we can fully recover from this error - # BackupRunner will leave the trove instance in a BACKUP state - raise OSError(_("Need more free space to run mongodump, " - "estimated %(est_dump_size)s" - " and found %(avail)s bytes free ") % - {'est_dump_size': est_dump_size, - 'avail': avail}) - - operating_system.create_directory(MONGO_DUMP_DIR, as_root=True) - operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER, - mongo_system.MONGO_USER, as_root=True) - - # high timeout here since mongodump can take a long time - utils.execute_with_timeout( - 'mongodump', '--out', MONGO_DUMP_DIR, - *(self.app.admin_cmd_auth_params()), - run_as_root=True, root_helper='sudo', - timeout=LARGE_TIMEOUT - ) - except exception.ProcessExecutionError: - LOG.debug("Caught exception when creating the dump") - self.cleanup() - raise - - @property - def cmd(self): - """Tars and streams the dump dir contents to - the stdout - """ - cmd = 'sudo tar cPf - ' + MONGO_DUMP_DIR - return cmd + self.zip_cmd + self.encrypt_cmd - - def cleanup(self): - operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True) - - def _run_post_backup(self): - self.cleanup() - - def estimate_dump_size(self): - """ - Estimate the space that the mongodump will take based on the output of - db.stats().dataSize. This seems to be conservative, as the actual bson - output in many cases is a fair bit smaller. 
- """ - dbs = self.app.list_all_dbs() - # mongodump does not dump the content of the local database - dbs.remove('local') - dbstats = dict([(d, 0) for d in dbs]) - for d in dbstats: - dbstats[d] = self.app.db_data_size(d) - - LOG.debug("Estimated size for databases: " + str(dbstats)) - return sum(dbstats.values()) diff --git a/trove/guestagent/strategies/backup/experimental/postgresql_impl.py b/trove/guestagent/strategies/backup/experimental/postgresql_impl.py deleted file mode 100644 index 8c1478cd62..0000000000 --- a/trove/guestagent/strategies/backup/experimental/postgresql_impl.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import stat - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common import exception -from trove.common.i18n import _ -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp -from trove.guestagent.strategies.backup import base - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -WAL_ARCHIVE_DIR = CONF.postgresql.wal_archive_location - - -class PgDump(base.BackupRunner): - """Implementation of Backup Strategy for pg_dump.""" - __strategy_name__ = 'pg_dump' - - @property - def cmd(self): - cmd = 'sudo -u postgres pg_dumpall ' - return cmd + self.zip_cmd + self.encrypt_cmd - - -class PgBaseBackupUtil(object): - - def most_recent_backup_wal(self, pos=0): - """ - Return the WAL file for the most recent backup - """ - mrb_file = self.most_recent_backup_file(pos=pos) - # just return the first part of the filename - return mrb_file.split('.')[0] - - def most_recent_backup_file(self, pos=0): - """ - Look for the most recent .backup file that basebackup creates - :return: a string like 000000010000000000000006.00000168.backup - """ - walre = re.compile("[0-9A-F]{24}.*.backup") - wal_files = [wal_file for wal_file in os.listdir(WAL_ARCHIVE_DIR) - if walre.search(wal_file)] - wal_files = sorted(wal_files, reverse=True) - if not wal_files: - return None - return wal_files[pos] - - def log_files_since_last_backup(self, pos=0): - """Return the WAL files since the provided last backup - pg_archivebackup depends on alphanumeric sorting to decide wal order, - so we'll do so too: - https://github.com/postgres/postgres/blob/REL9_4_STABLE/contrib - /pg_archivecleanup/pg_archivecleanup.c#L122 - """ - last_wal = self.most_recent_backup_wal(pos=pos) - walre = re.compile("^[0-9A-F]{24}$") - wal_files = [wal_file for wal_file in os.listdir(WAL_ARCHIVE_DIR) - if walre.search(wal_file) and wal_file >= last_wal] - return wal_files - - -class PgBaseBackup(base.BackupRunner, PgBaseBackupUtil): - """Base backups are taken with the pg_basebackup filesystem-level backup - tool pg_basebackup creates a copy of the binary files in the PostgreSQL - cluster data directory and 
enough WAL segments to allow the database to - be brought back to a consistent state. Associated with each backup is a - log location, normally indicated by the WAL file name and the position - inside the file. - """ - __strategy_name__ = 'pg_basebackup' - - def __init__(self, *args, **kwargs): - self._app = None - super(PgBaseBackup, self).__init__(*args, **kwargs) - self.label = None - self.stop_segment = None - self.start_segment = None - self.start_wal_file = None - self.stop_wal_file = None - self.checkpoint_location = None - self.mrb = None - - @property - def app(self): - if self._app is None: - self._app = self._build_app() - return self._app - - def _build_app(self): - return PgSqlApp() - - @property - def cmd(self): - cmd = ("pg_basebackup -h %s -U %s --pgdata=-" - " --label=%s --format=tar --xlog " % - (self.app.pgsql_run_dir, self.app.ADMIN_USER, - self.base_filename)) - - return cmd + self.zip_cmd + self.encrypt_cmd - - def base_backup_metadata(self, metadata_file): - """Parse the contents of the .backup file""" - metadata = {} - operating_system.chmod( - metadata_file, FileMode(add=[stat.S_IROTH]), as_root=True) - - start_re = re.compile(r"START WAL LOCATION: (.*) \(file (.*)\)") - stop_re = re.compile(r"STOP WAL LOCATION: (.*) \(file (.*)\)") - checkpt_re = re.compile("CHECKPOINT LOCATION: (.*)") - label_re = re.compile("LABEL: (.*)") - - metadata_contents = operating_system.read_file(metadata_file) - match = start_re.search(metadata_contents) - if match: - self.start_segment = match.group(1) - metadata['start-segment'] = self.start_segment - self.start_wal_file = match.group(2) - metadata['start-wal-file'] = self.start_wal_file - - match = stop_re.search(metadata_contents) - if match: - self.stop_segment = match.group(1) - metadata['stop-segment'] = self.stop_segment - self.stop_wal_file = match.group(2) - metadata['stop-wal-file'] = self.stop_wal_file - - match = checkpt_re.search(metadata_contents) - if match: - self.checkpoint_location = match.group(1) - metadata['checkpoint-location'] = self.checkpoint_location - - match = label_re.search(metadata_contents) - if match: - self.label = match.group(1) - metadata['label'] = self.label - return metadata - - def check_process(self): - # If any of the below variables were not set by either metadata() - # or direct retrieval from the pgsql backup commands, then something - # has gone wrong - if not self.start_segment or not self.start_wal_file: - LOG.info("Unable to determine starting WAL file/segment") - return False - if not self.stop_segment or not self.stop_wal_file: - LOG.info("Unable to determine ending WAL file/segment") - return False - if not self.label: - LOG.info("No backup label found") - return False - return True - - def metadata(self): - """pg_basebackup may complete, and we arrive here before the - history file is written to the wal archive. So we need to - handle two possibilities: - - this is the first backup, and no history file exists yet - - this isn't the first backup, and so the history file we retrieve - isn't the one we just ran! - """ - def _metadata_found(): - LOG.debug("Polling for backup metadata... 
") - self.mrb = self.most_recent_backup_file() - if not self.mrb: - LOG.debug("No history files found!") - return False - metadata = self.base_backup_metadata( - os.path.join(WAL_ARCHIVE_DIR, self.mrb)) - LOG.debug("Label to pg_basebackup: %(base_filename)s " - "label found: %(label)s", - {'base_filename': self.base_filename, - 'label': metadata['label']}) - LOG.info("Metadata for backup: %s.", str(metadata)) - return metadata['label'] == self.base_filename - - try: - utils.poll_until(_metadata_found, sleep_time=5, time_out=60) - except exception.PollTimeOut: - raise RuntimeError(_("Timeout waiting for backup metadata for" - " backup %s") % self.base_filename) - - return self.base_backup_metadata( - os.path.join(WAL_ARCHIVE_DIR, self.mrb)) - - def _run_post_backup(self): - """Get rid of WAL data we don't need any longer""" - arch_cleanup_bin = os.path.join(self.app.pgsql_extra_bin_dir, - "pg_archivecleanup") - bk_file = os.path.basename(self.most_recent_backup_file()) - cmd_full = " ".join((arch_cleanup_bin, WAL_ARCHIVE_DIR, bk_file)) - utils.execute("sudo", "su", "-", self.app.pgsql_owner, "-c", - "%s" % cmd_full) - - -class PgBaseBackupIncremental(PgBaseBackup): - """To restore an incremental backup from a previous backup, in PostgreSQL, - is effectively to replay the WAL entries to a designated point in time. - All that is required is the most recent base backup, and all WAL files - """ - - def __init__(self, *args, **kwargs): - if (not kwargs.get('parent_location') or - not kwargs.get('parent_checksum')): - raise AttributeError(_('Parent missing!')) - - super(PgBaseBackupIncremental, self).__init__(*args, **kwargs) - self.parent_location = kwargs.get('parent_location') - self.parent_checksum = kwargs.get('parent_checksum') - - def _run_pre_backup(self): - self.backup_label = self.base_filename - self.start_segment = self.app.pg_start_backup(self.backup_label) - - self.start_wal_file = self.app.pg_xlogfile_name(self.start_segment) - - self.stop_segment = self.app.pg_stop_backup() - - # We have to hack this because self.command is - # initialized in the base class before we get here, which is - # when we will know exactly what WAL files we want to archive - self.command = self._cmd() - - def _cmd(self): - wal_file_list = self.log_files_since_last_backup(pos=1) - cmd = 'sudo tar -cf - -C {wal_dir} {wal_list} '.format( - wal_dir=WAL_ARCHIVE_DIR, - wal_list=" ".join(wal_file_list)) - return cmd + self.zip_cmd + self.encrypt_cmd - - def metadata(self): - _meta = super(PgBaseBackupIncremental, self).metadata() - _meta.update({ - 'parent_location': self.parent_location, - 'parent_checksum': self.parent_checksum, - }) - return _meta diff --git a/trove/guestagent/strategies/backup/experimental/redis_impl.py b/trove/guestagent/strategies/backup/experimental/redis_impl.py deleted file mode 100644 index aa6f4cac78..0000000000 --- a/trove/guestagent/strategies/backup/experimental/redis_impl.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from trove.guestagent.datastore.experimental.redis import service -from trove.guestagent.strategies.backup import base - -LOG = logging.getLogger(__name__) - - -class RedisBackup(base.BackupRunner): - """Implementation of Backup Strategy for Redis.""" - __strategy_name__ = 'redisbackup' - - def __init__(self, filename, **kwargs): - self.app = service.RedisApp() - super(RedisBackup, self).__init__(filename, **kwargs) - - @property - def cmd(self): - cmd = 'sudo cat %s' % self.app.get_persistence_filepath() - return cmd + self.zip_cmd + self.encrypt_cmd - - def _run_pre_backup(self): - self.app.admin.persist_data() - LOG.debug('Redis data persisted.') diff --git a/trove/guestagent/strategies/backup/mysql_impl.py b/trove/guestagent/strategies/backup/mysql_impl.py deleted file mode 100644 index 25bd5c5229..0000000000 --- a/trove/guestagent/strategies/backup/mysql_impl.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2014 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import re - -from oslo_log import log as logging - -from trove.common.i18n import _ -from trove.guestagent.datastore.mysql.service import MySqlApp -from trove.guestagent.datastore.mysql_common.service import ADMIN_USER_NAME -from trove.guestagent.strategies.backup import base - -LOG = logging.getLogger(__name__) - - -class MySQLDump(base.BackupRunner): - """Implementation of Backup Strategy for MySQLDump.""" - __strategy_name__ = 'mysqldump' - - @property - def cmd(self): - user_and_pass = ( - ' --password=%(password)s -u %(user)s ' - '2>/tmp/mysqldump.log' % - {'password': MySqlApp.get_auth_password(), - 'user': ADMIN_USER_NAME}) - cmd = ('mysqldump' - ' --all-databases' - ' %(extra_opts)s' - ' --opt' + user_and_pass) - return cmd + self.zip_cmd + self.encrypt_cmd - - def check_process(self): - """Check the output from mysqldump ignoring 'Warning'.""" - LOG.debug('Checking mysqldump process output.') - with open('/tmp/mysqldump.log', 'r') as backup_log: - output = backup_log.read() - if not output: - return True - - LOG.debug(output) - for line in output.splitlines(): - if not re.search('Warning', line.strip()): - LOG.error("Mysqldump did not complete successfully.") - return False - - return True - - -class InnoBackupEx(base.BackupRunner): - """Implementation of Backup Strategy for InnoBackupEx.""" - __strategy_name__ = 'innobackupex' - - @property - def user_and_pass(self): - return ('--user=%(user)s --password=%(password)s --host=localhost ' - '--socket=%(socket_file)s' % - {'user': ADMIN_USER_NAME, - 'password': MySqlApp.get_auth_password(), - 'socket_file': '/var/run/mysqld/mysqld.sock'}) - - @property - def cmd(self): - cmd = ('sudo innobackupex' - ' --stream=xbstream' - ' %(extra_opts)s ' + - self.user_and_pass + ' ' + - MySqlApp.get_data_dir() + - ' 2>/tmp/innobackupex.log' - ) - 
return cmd + self.zip_cmd + self.encrypt_cmd - - def check_process(self): - """Check the output from innobackupex for 'completed OK!'.""" - LOG.debug('Checking innobackupex process output.') - with open('/tmp/innobackupex.log', 'r') as backup_log: - output = backup_log.read() - if not output: - LOG.error("Innobackupex log file empty.") - return False - - LOG.debug(output) - last_line = output.splitlines()[-1].strip() - if not re.search('completed OK!', last_line): - LOG.error("Innobackupex did not complete successfully.") - return False - - return True - - def metadata(self): - LOG.debug('Getting metadata for backup %s', self.base_filename) - meta = {} - lsn = re.compile(r"The latest check point \(for incremental\): " - r"'(\d+)'") - with open('/tmp/innobackupex.log', 'r') as backup_log: - output = backup_log.read() - match = lsn.search(output) - if match: - meta = {'lsn': match.group(1)} - LOG.info("Metadata for backup %s: %s", self.base_filename, meta) - return meta - - @property - def filename(self): - return '%s.xbstream' % self.base_filename - - -class InnoBackupExIncremental(InnoBackupEx): - """InnoBackupEx incremental backup.""" - - def __init__(self, *args, **kwargs): - if not kwargs.get('lsn'): - raise AttributeError(_('lsn attribute missing, bad parent?')) - super(InnoBackupExIncremental, self).__init__(*args, **kwargs) - self.parent_location = kwargs.get('parent_location') - self.parent_checksum = kwargs.get('parent_checksum') - - @property - def cmd(self): - cmd = ('sudo innobackupex' - ' --stream=xbstream' - ' --incremental' - ' --incremental-lsn=%(lsn)s' - ' %(extra_opts)s ' + - self.user_and_pass + ' ' + - MySqlApp.get_data_dir() + - ' 2>/tmp/innobackupex.log') - return cmd + self.zip_cmd + self.encrypt_cmd - - def metadata(self): - _meta = super(InnoBackupExIncremental, self).metadata() - _meta.update({ - 'parent_location': self.parent_location, - 'parent_checksum': self.parent_checksum, - }) - return _meta diff --git a/trove/guestagent/strategies/replication/base.py b/trove/guestagent/strategies/replication/base.py index 692c6b89c9..6e65592553 100644 --- a/trove/guestagent/strategies/replication/base.py +++ b/trove/guestagent/strategies/replication/base.py @@ -39,7 +39,7 @@ class Replication(Strategy): return True @abc.abstractmethod - def snapshot_for_replication(self, context, service, location, + def snapshot_for_replication(self, context, service, adm, location, snapshot_info): """Capture snapshot of master db.""" @@ -48,7 +48,7 @@ class Replication(Strategy): """Configure underlying database to act as master for replication.""" @abc.abstractmethod - def enable_as_slave(self, service, snapshot, slave_config): + def enable_as_slave(self, service, master_info, slave_config): """Configure underlying database as a slave of the given master.""" @abc.abstractmethod diff --git a/trove/guestagent/strategies/replication/experimental/__init__.py b/trove/guestagent/strategies/replication/experimental/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py b/trove/guestagent/strategies/replication/experimental/postgresql_impl.py deleted file mode 100644 index 7ffcdb2e32..0000000000 --- a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright 2014 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import os - -from oslo_log import log as logging -from oslo_utils import netutils -from trove.common import cfg -from trove.common.db.postgresql import models -from trove.common import exception -from trove.common.i18n import _ -from trove.common import stream_codecs -from trove.common import utils -from trove.guestagent.backup.backupagent import BackupAgent -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.strategies import backup -from trove.guestagent.strategies.replication import base - -AGENT = BackupAgent() -CONF = cfg.CONF - -REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.experimental' \ - '.postgresql_impl' - -LOG = logging.getLogger(__name__) - -TRIGGER_FILE = '/tmp/postgresql.trigger' -REPL_USER = 'replicator' -SLAVE_STANDBY_OVERRIDE = 'SlaveStandbyOverride' - - -class PostgresqlReplicationStreaming(base.Replication): - - def __init__(self, *args, **kwargs): - super(PostgresqlReplicationStreaming, self).__init__(*args, **kwargs) - - @property - def repl_backup_runner(self): - return backup.get_backup_strategy('PgBaseBackup', - REPL_BACKUP_NAMESPACE) - - @property - def repl_incr_backup_runner(self): - return backup.get_backup_strategy('PgBaseBackupIncremental', - REPL_BACKUP_NAMESPACE) - - @property - def repl_backup_extra_opts(self): - return CONF.backup_runner_options.get('PgBaseBackup', '') - - def get_master_ref(self, service, snapshot_info): - master_ref = { - 'host': netutils.get_my_ipv4(), - 'port': cfg.get_configuration_property('postgresql_port') - } - return master_ref - - def backup_required_for_replication(self): - return True - - def snapshot_for_replication(self, context, service, - location, snapshot_info): - - snapshot_id = snapshot_info['id'] - replica_number = snapshot_info.get('replica_number', 1) - - LOG.debug("Acquiring backup for replica number %d.", replica_number) - # Only create a backup if it's the first replica - if replica_number == 1: - AGENT.execute_backup( - context, snapshot_info, runner=self.repl_backup_runner, - extra_opts=self.repl_backup_extra_opts, - incremental_runner=self.repl_incr_backup_runner) - else: - LOG.info("Using existing backup created for previous replica.") - - repl_user_info = self._get_or_create_replication_user(service) - - log_position = { - 'replication_user': repl_user_info - } - - return snapshot_id, log_position - - def _get_or_create_replication_user(self, service): - """There are three scenarios we need to deal with here: - - This is a fresh master, with no replicator user created. - Generate a new u/p - - We are attaching a new slave and need to give it the login creds - Send the creds we have stored in PGDATA/.replpass - - This is a failed-over-to slave, who will have the replicator user - but not the credentials file. 
Recreate the repl user in this case - """ - - LOG.debug("Checking for replicator user") - pwfile = os.path.join(service.pgsql_data_dir, ".replpass") - admin = service.build_admin() - if admin.user_exists(REPL_USER): - if operating_system.exists(pwfile, as_root=True): - LOG.debug("Found existing .replpass, returning pw") - pw = operating_system.read_file(pwfile, as_root=True) - else: - LOG.debug("Found user but not .replpass, recreate") - u = models.PostgreSQLUser(REPL_USER) - admin._drop_user(context=None, user=u) - pw = self._create_replication_user(service, admin, pwfile) - else: - LOG.debug("Found no replicator user, create one") - pw = self._create_replication_user(service, admin, pwfile) - - repl_user_info = { - 'name': REPL_USER, - 'password': pw - } - - return repl_user_info - - def _create_replication_user(self, service, admin, pwfile): - """Create the replication user. Unfortunately, to be able to - run pg_rewind, we need SUPERUSER, not just REPLICATION privilege - """ - - pw = utils.generate_random_password() - operating_system.write_file(pwfile, pw, as_root=True) - operating_system.chown(pwfile, user=service.pgsql_owner, - group=service.pgsql_owner, as_root=True) - operating_system.chmod(pwfile, FileMode.SET_USR_RWX(), - as_root=True) - - repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw) - admin._create_user(context=None, user=repl_user) - admin.alter_user(None, repl_user, True, - 'REPLICATION', 'SUPERUSER', 'LOGIN') - - return pw - - def enable_as_master(self, service, master_config, for_failover=False): - """For a server to be a master in postgres, we need to enable - the replication user in pg_hba and ensure that WAL logging is - at the appropriate level (use the same settings as backups) - """ - LOG.debug("Enabling as master, with cfg: %s ", master_config) - self._get_or_create_replication_user(service) - hba_entry = "host replication replicator 0.0.0.0/0 md5 \n" - - tmp_hba = '/tmp/pg_hba' - operating_system.copy(service.pgsql_hba_config, tmp_hba, - force=True, as_root=True) - operating_system.chmod(tmp_hba, FileMode.SET_ALL_RWX(), - as_root=True) - with open(tmp_hba, 'a+') as hba_file: - hba_file.write(hba_entry) - - operating_system.copy(tmp_hba, service.pgsql_hba_config, - force=True, as_root=True) - operating_system.chmod(service.pgsql_hba_config, - FileMode.SET_USR_RWX(), - as_root=True) - operating_system.remove(tmp_hba, as_root=True) - service.reload_configuration() - - def enable_as_slave(self, service, snapshot, slave_config): - """Adds appropriate config options to postgresql.conf, and writes out - the recovery.conf file used to set up replication - """ - LOG.debug("Got slave_config: %s", str(slave_config)) - self._write_standby_recovery_file(service, snapshot, sslmode='prefer') - self.enable_hot_standby(service) - # Ensure the WAL arch is empty before restoring - service.recreate_wal_archive_dir() - - def detach_slave(self, service, for_failover): - """Touch trigger file in to disable recovery mode""" - LOG.info("Detaching slave, use trigger to disable recovery mode") - operating_system.write_file(TRIGGER_FILE, '') - operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner, - group=service.pgsql_owner, as_root=True) - - def _wait_for_failover(): - """Wait until slave has switched out of recovery mode""" - return not service.pg_is_in_recovery() - - try: - utils.poll_until(_wait_for_failover, time_out=120) - - except exception.PollTimeOut: - raise RuntimeError(_("Timeout occurred waiting for slave to exit " - "recovery mode")) - - def 
cleanup_source_on_replica_detach(self, admin_service, replica_info): - pass - - def _rewind_against_master(self, service): - """Call pg_rewind to resync datadir against state of new master - We should already have a recovery.conf file in PGDATA - """ - rconf = operating_system.read_file( - service.pgsql_recovery_config, - codec=stream_codecs.KeyValueCodec(line_terminator='\n'), - as_root=True) - conninfo = rconf['primary_conninfo'].strip() - - # The recovery.conf file we want should already be there, but pg_rewind - # will delete it, so copy it out first - rec = service.pgsql_recovery_config - tmprec = "/tmp/recovery.conf.bak" - operating_system.move(rec, tmprec, as_root=True) - - cmd_full = " ".join(["pg_rewind", - '--target-pgdata=' + service.pgsql_data_dir, - '--source-server=' + conninfo]) - out, err = utils.execute("sudo", "su", "-", service.pgsql_owner, - "-c", "%s" % cmd_full, check_exit_code=0) - LOG.debug("Got stdout %(out)s and stderr %(err)s from pg_rewind", - {'out': str(out), 'err': str(err)}) - operating_system.move(tmprec, rec, as_root=True) - - def demote_master(self, service): - """In order to demote a master we need to shutdown the server and call - pg_rewind against the new master to enable a proper timeline - switch. - """ - service.stop_db() - self._rewind_against_master(service) - service.start_db() - - def connect_to_master(self, service, snapshot): - """All that is required in postgresql to connect to a slave is to - restart with a recovery.conf file in the data dir, which contains - the connection information for the master. - """ - assert operating_system.exists(service.pgsql_recovery_config, - as_root=True) - service.restart() - - def _remove_recovery_file(self, service): - operating_system.remove(service.pgsql_recovery_config, as_root=True) - - def _write_standby_recovery_file(self, service, snapshot, - sslmode='prefer'): - LOG.info("Snapshot data received: %s", str(snapshot)) - - logging_config = snapshot['log_position'] - conninfo_params = \ - {'host': snapshot['master']['host'], - 'port': snapshot['master']['port'], - 'repl_user': logging_config['replication_user']['name'], - 'password': logging_config['replication_user']['password'], - 'sslmode': sslmode} - - conninfo = 'host=%(host)s ' \ - 'port=%(port)s ' \ - 'dbname=os_admin ' \ - 'user=%(repl_user)s ' \ - 'password=%(password)s ' \ - 'sslmode=%(sslmode)s ' % conninfo_params - - recovery_conf = "standby_mode = 'on'\n" - recovery_conf += "primary_conninfo = '" + conninfo + "'\n" - recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n" - recovery_conf += "recovery_target_timeline='latest'\n" - - operating_system.write_file(service.pgsql_recovery_config, - recovery_conf, - codec=stream_codecs.IdentityCodec(), - as_root=True) - operating_system.chown(service.pgsql_recovery_config, - user=service.pgsql_owner, - group=service.pgsql_owner, as_root=True) - - def enable_hot_standby(self, service): - # Only support pg version > 9.6, wal_level set to replica, and - # remove parameter "checkpoint_segments". 
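For reference, the override assembled just below amounts to this postgresql.conf fragment on PostgreSQL 9.6+ (a sketch; where the configuration manager actually writes the override file is datastore-specific):

    hot_standby = on        # serve read-only queries while in recovery
    wal_level = replica     # emit enough WAL for physical replication
    wal_log_hints = on      # required by pg_rewind after a failover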
- opts = {'hot_standby': 'on', - 'wal_level': 'replica', - 'wal_log_hints': 'on'} - - service.configuration_manager.\ - apply_system_override(opts, SLAVE_STANDBY_OVERRIDE) - - def get_replica_context(self, service): - LOG.debug("Calling get_replica_context") - repl_user_info = self._get_or_create_replication_user(service) - - log_position = { - 'replication_user': repl_user_info - } - - return { - 'master': self.get_master_ref(None, None), - 'log_position': log_position - } diff --git a/trove/guestagent/strategies/replication/experimental/redis_sync.py b/trove/guestagent/strategies/replication/experimental/redis_sync.py deleted file mode 100644 index ae3fbadba5..0000000000 --- a/trove/guestagent/strategies/replication/experimental/redis_sync.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2014 Tesora, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from oslo_log import log as logging -from oslo_utils import netutils - -from trove.guestagent.strategies.replication import base - -LOG = logging.getLogger(__name__) - - -class RedisSyncReplication(base.Replication): - """Redis Replication strategy.""" - - __strategy_ns__ = 'trove.guestagent.strategies.replication.experimental' - __strategy_name__ = 'RedisSyncReplication' - - CONF_LABEL_REPLICATION_MASTER = 'replication_master' - CONF_LABEL_REPLICATION_SLAVE = 'replication_slave' - - def get_master_ref(self, service, snapshot_info): - master_ref = { - 'host': netutils.get_my_ipv4(), - 'port': service.get_port(), - 'requirepass': service.get_auth_password(), - } - return master_ref - - def backup_required_for_replication(self): - LOG.debug('Request for replication backup: no backup required') - return False - - def snapshot_for_replication(self, context, service, - location, snapshot_info): - return None, None - - def enable_as_master(self, service, master_config): - service.configuration_manager.apply_system_override( - master_config, change_id=self.CONF_LABEL_REPLICATION_MASTER) - service.restart() - - def enable_as_slave(self, service, snapshot, slave_config): - service.configuration_manager.apply_system_override( - slave_config, change_id=self.CONF_LABEL_REPLICATION_SLAVE) - master_info = snapshot['master'] - master_host = master_info['host'] - master_port = master_info['port'] - connect_options = {'slaveof': [master_host, master_port]} - master_passwd = master_info.get('requirepass') - if master_passwd: - connect_options['masterauth'] = master_passwd - service.admin.config_set('masterauth', master_passwd) - else: - service.admin.config_set('masterauth', "") - service.configuration_manager.apply_system_override( - connect_options, change_id=self.CONF_LABEL_REPLICATION_SLAVE) - service.admin.set_master(host=master_host, port=master_port) - LOG.debug('Enabled as slave.') - - def detach_slave(self, service, for_failover): - service.configuration_manager.remove_system_override( - change_id=self.CONF_LABEL_REPLICATION_SLAVE) - service.admin.set_master(host=None, port=None) - service.admin.config_set('masterauth', "") 
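In redis-py terms, the detach performed here boils down to the following (a minimal sketch; localhost/6379 are assumed defaults, not values from this patch):

    import redis

    client = redis.StrictRedis(host='localhost', port=6379)
    client.config_set('masterauth', '')  # forget the old master's password
    client.slaveof()  # no arguments issues SLAVEOF NO ONE: stop replicating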
- return None - - def cleanup_source_on_replica_detach(self, service, replica_info): - # Nothing needs to be done to the master when a replica goes away. - pass - - def get_replica_context(self, service): - return { - 'master': self.get_master_ref(service, None), - } - - def demote_master(self, service): - service.configuration_manager.remove_system_override( - change_id=self.CONF_LABEL_REPLICATION_MASTER) diff --git a/trove/guestagent/strategies/replication/experimental/mariadb_gtid.py b/trove/guestagent/strategies/replication/mariadb_gtid.py similarity index 51% rename from trove/guestagent/strategies/replication/experimental/mariadb_gtid.py rename to trove/guestagent/strategies/replication/mariadb_gtid.py index 30e73948d7..b95853bb5e 100644 --- a/trove/guestagent/strategies/replication/experimental/mariadb_gtid.py +++ b/trove/guestagent/strategies/replication/mariadb_gtid.py @@ -17,7 +17,6 @@ from oslo_log import log as logging from trove.common import cfg -from trove.guestagent.strategies import backup from trove.guestagent.strategies.replication import mysql_base CONF = cfg.CONF @@ -27,45 +26,46 @@ LOG = logging.getLogger(__name__) class MariaDBGTIDReplication(mysql_base.MysqlReplicationBase): """MariaDB Replication coordinated by GTIDs.""" - @property - def repl_backup_runner(self): - return backup.get_backup_strategy( - CONF.mariadb.backup_strategy, - CONF.mariadb.backup_namespace - ) + def get_replica_context(self, service, adm): + """Get replication information as master.""" + master_info = super(MariaDBGTIDReplication, self).get_replica_context( + service, adm) - @property - def repl_incr_backup_runner(self): - strategy = CONF.mariadb.backup_incremental_strategy.get( - CONF.mariadb.backup_strategy, CONF.mariadb.backup_strategy - ) + get_pos_cmd = 'SELECT @@global.gtid_binlog_pos;' + gtid_pos = service.execute_sql(get_pos_cmd).first()[0] + LOG.debug('gtid_binlog_pos: %s', gtid_pos) + master_info['log_position']['gtid_pos'] = gtid_pos - return backup.get_backup_strategy( - strategy, - CONF.mariadb.backup_namespace - ) + return master_info - @property - def repl_backup_extra_opts(self): - return CONF.backup_runner_options.get( - CONF.mariadb.backup_strategy, '' - ) + def connect_to_master(self, service, master_info): + logging_config = master_info['log_position'] + last_gtid = '' + + if 'gtid_pos' in logging_config: + # This will happen during master failover. + last_gtid = logging_config['gtid_pos'] + elif 'dataset' in master_info: + # This will happen when initial replication is set up. 
+ last_gtid = self.read_last_master_gtid(service) + + set_gtid_cmd = "SET GLOBAL gtid_slave_pos='%s';" % last_gtid + service.execute_sql(set_gtid_cmd) - def connect_to_master(self, service, snapshot): - logging_config = snapshot['log_position'] - LOG.debug("connect_to_master %s", logging_config['replication_user']) change_master_cmd = ( - "CHANGE MASTER TO MASTER_HOST='%(host)s', " + "CHANGE MASTER TO " + "MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_CONNECT_RETRY=15, " "MASTER_USE_GTID=slave_pos" % { - 'host': snapshot['master']['host'], - 'port': snapshot['master']['port'], + 'host': master_info['master']['host'], + 'port': master_info['master']['port'], 'user': logging_config['replication_user']['name'], - 'password': logging_config['replication_user']['password'] + 'password': logging_config['replication_user']['password'], }) - service.execute_on_client(change_master_cmd) + service.execute_sql(change_master_cmd) + service.start_slave() diff --git a/trove/guestagent/strategies/replication/mysql_base.py b/trove/guestagent/strategies/replication/mysql_base.py index 6a3843afe1..4b3ca9698e 100644 --- a/trove/guestagent/strategies/replication/mysql_base.py +++ b/trove/guestagent/strategies/replication/mysql_base.py @@ -21,38 +21,19 @@ from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg -from trove.common.db.mysql import models +from trove.common import exception from trove.common import utils -from trove.guestagent.backup.backupagent import BackupAgent -from trove.guestagent.datastore.mysql.service import MySqlAdmin -from trove.guestagent.strategies import backup +from trove.common.db.mysql import models +from trove.guestagent.common import operating_system from trove.guestagent.strategies.replication import base -AGENT = BackupAgent() -CONF = cfg.CONF - -REPL_BACKUP_NAMESPACE = 'trove.guestagent.strategies.backup.mysql_impl' - LOG = logging.getLogger(__name__) +CONF = cfg.CONF class MysqlReplicationBase(base.Replication): """Base class for MySql Replication strategies.""" - @property - def repl_backup_runner(self): - return backup.get_backup_strategy('InnoBackupEx', - REPL_BACKUP_NAMESPACE) - - @property - def repl_incr_backup_runner(self): - return backup.get_backup_strategy('InnoBackupExIncremental', - REPL_BACKUP_NAMESPACE) - - @property - def repl_backup_extra_opts(self): - return CONF.backup_runner_options.get('InnoBackupEx', '') - def get_master_ref(self, service, snapshot_info): master_ref = { 'host': netutils.get_my_ipv4(), @@ -60,7 +41,7 @@ class MysqlReplicationBase(base.Replication): } return master_ref - def _create_replication_user(self): + def _create_replication_user(self, service, adm): replication_user = None replication_password = utils.generate_random_password(16) @@ -78,9 +59,11 @@ class MysqlReplicationBase(base.Replication): name=name, password=replication_password ) mysql_user.check_create() - MySqlAdmin().create_user([mysql_user.serialize()]) - LOG.debug("Trying to create replication user " + + + LOG.debug("Trying to create replication user %s", mysql_user.name) + adm.create_user([mysql_user.serialize()]) + replication_user = { 'name': mysql_user.name, 'password': replication_password @@ -93,52 +76,55 @@ class MysqlReplicationBase(base.Replication): return replication_user - def snapshot_for_replication(self, context, service, - location, snapshot_info): - snapshot_id = snapshot_info['id'] - replica_number = snapshot_info.get('replica_number', 1) 
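For orientation, the reworked snapshot_for_replication() below is driven roughly as follows by the master-side manager (a sketch; the caller lives outside this patch, and the dict shape simply mirrors what connect_to_master() reads back on the replica):

    def build_replication_snapshot(strategy, context, service, adm,
                                   snapshot_info):
        # master side: take the backup and create the replication user
        snapshot_id, log_position = strategy.snapshot_for_replication(
            context, service, adm, None, snapshot_info)
        # 'dataset' marks a freshly created snapshot; its presence is what
        # triggers reading the last GTID from the backup on the replica
        return {
            'master': strategy.get_master_ref(service, snapshot_info),
            'dataset': {'snapshot_id': snapshot_id},
            'log_position': log_position,
        }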
+ def snapshot_for_replication(self, context, service, adm, location, + snapshot_info): + LOG.info("Creating backup for replication") + service.create_backup(context, snapshot_info) - LOG.debug("Acquiring backup for replica number %d.", replica_number) - # Only create a backup if it's the first replica - if replica_number == 1: - AGENT.execute_backup( - context, snapshot_info, runner=self.repl_backup_runner, - extra_opts=self.repl_backup_extra_opts, - incremental_runner=self.repl_incr_backup_runner) - else: - LOG.debug("Using existing backup created for previous replica.") - - LOG.debug("Replication snapshot %(snapshot_id)s used for replica " - "number %(replica_number)d.", - {'snapshot_id': snapshot_id, - 'replica_number': replica_number}) - - replication_user = self._create_replication_user() + LOG.info('Creating replication user') + replication_user = self._create_replication_user(service, adm) service.grant_replication_privilege(replication_user) - # With streamed InnobackupEx, the log position is in - # the stream and will be decoded by the slave log_position = { 'replication_user': replication_user } - return snapshot_id, log_position + return snapshot_info['id'], log_position def enable_as_master(self, service, master_config): if not service.exists_replication_source_overrides(): service.write_replication_source_overrides(master_config) service.restart() + def read_last_master_gtid(self, service): + INFO_FILE = ('%s/xtrabackup_binlog_info' % service.get_data_dir()) + operating_system.chmod(INFO_FILE, + operating_system.FileMode.ADD_READ_ALL, + as_root=True) + + LOG.info("Reading last master GTID from %s", INFO_FILE) + try: + with open(INFO_FILE, 'r') as f: + content = f.read() + LOG.debug('Content in %s: "%s"', INFO_FILE, content) + ret = content.strip().split('\t') + return ret[2] if len(ret) == 3 else '' + except Exception as ex: + LOG.error('Failed to read last master GTID, error: %s', str(ex)) + raise exception.UnableToDetermineLastMasterGTID( + {'binlog_file': INFO_FILE}) + @abc.abstractmethod - def connect_to_master(self, service, snapshot): + def connect_to_master(self, service, master_info): """Connects a slave to a master""" - def enable_as_slave(self, service, snapshot, slave_config): + def enable_as_slave(self, service, master_info, slave_config): try: service.write_replication_replica_overrides(slave_config) service.restart() - self.connect_to_master(service, snapshot) - except Exception: - LOG.exception("Exception enabling guest as replica") + self.connect_to_master(service, master_info) + except Exception as err: + LOG.error("Exception enabling guest as replica, error: %s", + str(err)) raise def detach_slave(self, service, for_failover): @@ -147,8 +133,9 @@ class MysqlReplicationBase(base.Replication): service.restart() return replica_info - def get_replica_context(self, service): - replication_user = self._create_replication_user() + def get_replica_context(self, service, adm): + """Get replication information as master.""" + replication_user = self._create_replication_user(service, adm) service.grant_replication_privilege(replication_user) return { 'master': self.get_master_ref(service, None), diff --git a/trove/guestagent/strategies/replication/mysql_binlog.py b/trove/guestagent/strategies/replication/mysql_binlog.py deleted file mode 100644 index 854cd16dba..0000000000 --- a/trove/guestagent/strategies/replication/mysql_binlog.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2014 Tesora, Inc. -# All Rights Reserved. 
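The xtrabackup_binlog_info file that read_last_master_gtid() parses above is a single tab-separated line written by the backup tool; with GTIDs enabled it looks roughly like this (values illustrative):

    mysql-bin.000003    194    6a532432-c2a8-11e9-8080-808080808080:1-25

The third field is the GTID set current at the moment of the snapshot; when the file carries only a binlog file name and position (two fields), read_last_master_gtid() returns an empty string.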
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import csv - -from oslo_log import log as logging - -from trove.common import exception -from trove.common.i18n import _ -from trove.guestagent.backup.backupagent import BackupAgent -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.mysql.service import MySqlApp -from trove.guestagent.strategies.replication import mysql_base - -AGENT = BackupAgent() - -LOG = logging.getLogger(__name__) - - -class MysqlBinlogReplication(mysql_base.MysqlReplicationBase): - """MySql Replication coordinated by binlog position.""" - - class UnableToDetermineBinlogPosition(exception.TroveError): - message = _("Unable to determine binlog position " - "(from file %(binlog_file)s).") - - def connect_to_master(self, service, snapshot): - logging_config = snapshot['log_position'] - logging_config.update(self._read_log_position()) - change_master_cmd = ( - "CHANGE MASTER TO MASTER_HOST='%(host)s', " - "MASTER_PORT=%(port)s, " - "MASTER_USER='%(user)s', " - "MASTER_PASSWORD='%(password)s', " - "MASTER_LOG_FILE='%(log_file)s', " - "MASTER_LOG_POS=%(log_pos)s, " - "MASTER_CONNECT_RETRY=15" % - { - 'host': snapshot['master']['host'], - 'port': snapshot['master']['port'], - 'user': logging_config['replication_user']['name'], - 'password': logging_config['replication_user']['password'], - 'log_file': logging_config['log_file'], - 'log_pos': logging_config['log_position'] - }) - service.execute_on_client(change_master_cmd) - service.start_slave() - - def _read_log_position(self): - INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) - LOG.info("Setting read permissions on %s", INFO_FILE) - operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) - LOG.info("Reading log position from %s", INFO_FILE) - try: - with open(INFO_FILE, 'rb') as f: - row = next(csv.reader(f, delimiter='\t', - skipinitialspace=True)) - return { - 'log_file': row[0], - 'log_position': int(row[1]) - } - except (IOError, IndexError) as ex: - LOG.exception(ex) - raise self.UnableToDetermineBinlogPosition( - {'binlog_file': INFO_FILE}) diff --git a/trove/guestagent/strategies/replication/mysql_gtid.py b/trove/guestagent/strategies/replication/mysql_gtid.py index f76f45d304..261f750b94 100644 --- a/trove/guestagent/strategies/replication/mysql_gtid.py +++ b/trove/guestagent/strategies/replication/mysql_gtid.py @@ -14,71 +14,48 @@ # under the License. 
# from oslo_log import log as logging -from oslo_utils import encodeutils -from trove.common import exception -from trove.common.i18n import _ -from trove.guestagent.backup.backupagent import BackupAgent -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.mysql.service import MySqlApp from trove.guestagent.strategies.replication import mysql_base -AGENT = BackupAgent() - LOG = logging.getLogger(__name__) class MysqlGTIDReplication(mysql_base.MysqlReplicationBase): """MySql Replication coordinated by GTIDs.""" - - class UnableToDetermineLastMasterGTID(exception.TroveError): - message = _("Unable to determine last GTID executed on master " - "(from file %(binlog_file)s).") - - def connect_to_master(self, service, snapshot): - if 'dataset' in snapshot: + def connect_to_master(self, service, master_info): + if 'dataset' in master_info: # pull the last executed GTID from the master via # the xtrabackup metadata file. If that value is # provided we need to set the gtid_purged variable # before executing the CHANGE MASTER TO command - last_gtid = self._read_last_master_gtid() - LOG.debug("last_gtid value is %s", last_gtid) - # fix ['mysql-bin.000001', '154', '\n'] still existed last_gtid - # with '\n' value - if last_gtid and len(last_gtid) != 1: + last_gtid = self.read_last_master_gtid(service) + LOG.info("last_gtid value is %s", last_gtid) + if '-' in last_gtid: set_gtid_cmd = "SET GLOBAL gtid_purged='%s'" % last_gtid - LOG.debug("set gtid_purged with %s", set_gtid_cmd) - service.execute_on_client(set_gtid_cmd) + service.execute_sql(set_gtid_cmd) + + logging_config = master_info['log_position'] + LOG.info( + "Configure the slave, master: %s:%s, replication user: %s", + master_info['master']['host'], + master_info['master']['port'], + logging_config['replication_user']['name'] + ) - logging_config = snapshot['log_position'] - LOG.debug("connect_to_master %s", logging_config['replication_user']) change_master_cmd = ( - "CHANGE MASTER TO MASTER_HOST='%(host)s', " + "CHANGE MASTER TO " + "MASTER_HOST='%(host)s', " "MASTER_PORT=%(port)s, " "MASTER_USER='%(user)s', " "MASTER_PASSWORD='%(password)s', " "MASTER_AUTO_POSITION=1, " "MASTER_CONNECT_RETRY=15" % { - 'host': snapshot['master']['host'], - 'port': snapshot['master']['port'], + 'host': master_info['master']['host'], + 'port': master_info['master']['port'], 'user': logging_config['replication_user']['name'], 'password': logging_config['replication_user']['password'] }) - service.execute_on_client(change_master_cmd) - service.start_slave() + service.execute_sql(change_master_cmd) - def _read_last_master_gtid(self): - INFO_FILE = ('%s/xtrabackup_binlog_info' % MySqlApp.get_data_dir()) - LOG.info("Setting read permissions on %s", INFO_FILE) - operating_system.chmod(INFO_FILE, FileMode.ADD_READ_ALL, as_root=True) - LOG.info("Reading last master GTID from %s", INFO_FILE) - try: - with open(INFO_FILE, 'rb') as f: - row = f.read().split(b'\t') - return encodeutils.safe_decode(row[2]) - except (IOError, IndexError) as ex: - LOG.exception(ex) - raise self.UnableToDetermineLastMasterGTID( - {'binlog_file': INFO_FILE}) + service.start_slave() diff --git a/trove/guestagent/strategies/restore/__init__.py b/trove/guestagent/strategies/restore/__init__.py deleted file mode 100644 index 4c99e29451..0000000000 --- a/trove/guestagent/strategies/restore/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. 
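Taken together, the MariaDB and MySQL GTID strategies above now drive the same three-step sequence on the replica, differing only in the GTID bootstrap statement (values illustrative):

    -- MySQL
    SET GLOBAL gtid_purged = '6a532432-c2a8-11e9-8080-808080808080:1-25';
    CHANGE MASTER TO MASTER_HOST='10.0.0.5', MASTER_PORT=3306,
        MASTER_USER='replicator', MASTER_PASSWORD='***',
        MASTER_AUTO_POSITION=1, MASTER_CONNECT_RETRY=15;
    START SLAVE;

    -- MariaDB
    SET GLOBAL gtid_slave_pos = '0-1-25';
    CHANGE MASTER TO MASTER_HOST='10.0.0.5', MASTER_PORT=3306,
        MASTER_USER='replicator', MASTER_PASSWORD='***',
        MASTER_CONNECT_RETRY=15, MASTER_USE_GTID=slave_pos;
    START SLAVE;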
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from oslo_log import log as logging - -from trove.common.strategies.strategy import Strategy - -LOG = logging.getLogger(__name__) - - -def get_restore_strategy(restore_driver, ns=__name__): - LOG.debug("Getting restore strategy: %s.", restore_driver) - return Strategy.get_strategy(restore_driver, ns) diff --git a/trove/guestagent/strategies/restore/base.py b/trove/guestagent/strategies/restore/base.py deleted file mode 100644 index e77c2a4168..0000000000 --- a/trove/guestagent/strategies/restore/base.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -from eventlet.green import subprocess -from oslo_log import log as logging - -from trove.common import cfg -from trove.common.strategies.strategy import Strategy -from trove.common import utils - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CHUNK_SIZE = CONF.backup_chunk_size -BACKUP_USE_GZIP = CONF.backup_use_gzip_compression -BACKUP_USE_OPENSSL = CONF.backup_use_openssl_encryption -BACKUP_DECRYPT_KEY = CONF.backup_aes_cbc_key - - -class RestoreError(Exception): - """Error running the Backup Command.""" - - -class RestoreRunner(Strategy): - """Base class for Restore Strategy implementations.""" - """Restore a database from a previous backup.""" - - __strategy_type__ = 'restore_runner' - __strategy_ns__ = 'trove.guestagent.strategies.restore' - - # The actual system calls to run the restore and prepare - restore_cmd = None - - # The backup format type - restore_type = None - - # Decryption Parameters - is_zipped = BACKUP_USE_GZIP - is_encrypted = BACKUP_USE_OPENSSL - decrypt_key = BACKUP_DECRYPT_KEY - - def __init__(self, storage, **kwargs): - self.storage = storage - self.process = None - self.location = kwargs.pop('location') - self.checksum = kwargs.pop('checksum') - self.restore_location = kwargs.get('restore_location') - self.restore_cmd = (self.decrypt_cmd + - self.unzip_cmd + - (self.base_restore_cmd % kwargs)) - super(RestoreRunner, self).__init__() - - def pre_restore(self): - """Hook that is called before the restore command.""" - pass - - def post_restore(self): - """Hook that is called after the restore command.""" - pass - - def restore(self): - self.pre_restore() - content_length = self._run_restore() - self.post_restore() - return content_length - - def _run_restore(self): - return self._unpack(self.location, self.checksum, self.restore_cmd) - - def 
_unpack(self, location, checksum, command): - stream = self.storage.load(location, checksum) - self.process = subprocess.Popen(command, shell=True, - stdin=subprocess.PIPE, - stderr=subprocess.PIPE) - content_length = 0 - for chunk in stream: - self.process.stdin.write(chunk) - content_length += len(chunk) - self.process.stdin.close() - utils.raise_if_process_errored(self.process, RestoreError) - if not self.check_process(): - raise RestoreError - LOG.debug("Restored %s bytes from stream.", content_length) - - return content_length - - @property - def decrypt_cmd(self): - if self.is_encrypted: - return ('openssl enc -d -aes-256-cbc -salt -pass pass:%s | ' - % self.decrypt_key) - else: - return '' - - @property - def unzip_cmd(self): - return 'gzip -d -c | ' if self.is_zipped else '' - - def check_process(self): - """Hook for subclasses to check the restore process for errors.""" - return True diff --git a/trove/guestagent/strategies/restore/experimental/__init__.py b/trove/guestagent/strategies/restore/experimental/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/guestagent/strategies/restore/experimental/cassandra_impl.py b/trove/guestagent/strategies/restore/experimental/cassandra_impl.py deleted file mode 100644 index 87c9eb280e..0000000000 --- a/trove/guestagent/strategies/restore/experimental/cassandra_impl.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2014 Mirantis Inc. -# All Rights Reserved. -# Copyright 2015 Tesora Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.cassandra import service -from trove.guestagent.strategies.restore import base - -LOG = logging.getLogger(__name__) - - -class NodetoolSnapshot(base.RestoreRunner): - """Implementation of restore using the Nodetool (http://goo.gl/QtXVsM) - utility. - """ - - __strategy_name__ = 'nodetoolsnapshot' - - def __init__(self, storage, **kwargs): - self._app = service.CassandraApp() - kwargs.update({'restore_location': self._app.cassandra_data_dir}) - super(NodetoolSnapshot, self).__init__(storage, **kwargs) - - def pre_restore(self): - """Prepare the data directory for restored files. - The directory itself is not included in the backup archive - (i.e. the archive is rooted inside the data directory). - This is to make sure we can always restore an old backup - even if the standard guest agent data directory changes. - """ - - LOG.debug('Initializing a data directory.') - operating_system.create_directory( - self.restore_location, - user=self._app.cassandra_owner, group=self._app.cassandra_owner, - force=True, as_root=True) - - def post_restore(self): - """Updated ownership on the restored files. 
- """ - - LOG.debug('Updating ownership of the restored files.') - operating_system.chown( - self.restore_location, - self._app.cassandra_owner, self._app.cassandra_owner, - recursive=True, force=True, as_root=True) - - @property - def base_restore_cmd(self): - """Command to extract a backup archive into a given location. - Attempt to preserve access modifiers on the archived files. - """ - - return 'sudo tar -xpPf - -C "%(restore_location)s"' diff --git a/trove/guestagent/strategies/restore/experimental/couchbase_impl.py b/trove/guestagent/strategies/restore/experimental/couchbase_impl.py deleted file mode 100644 index 674c3d988e..0000000000 --- a/trove/guestagent/strategies/restore/experimental/couchbase_impl.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2014 eBay Software Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -import json -import os.path -import time - -from oslo_log import log as logging - -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.couchbase import service -from trove.guestagent.datastore.experimental.couchbase import system -from trove.guestagent.strategies.restore import base - - -LOG = logging.getLogger(__name__) - - -class CbBackup(base.RestoreRunner): - """ - Implementation of Restore Strategy for Couchbase. - """ - __strategy_name__ = 'cbbackup' - base_restore_cmd = 'sudo tar xpPf -' - - def __init__(self, *args, **kwargs): - super(CbBackup, self).__init__(*args, **kwargs) - - def pre_restore(self): - try: - operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True) - except exception.ProcessExecutionError: - LOG.exception("Error during pre-restore phase.") - raise - - def post_restore(self): - try: - # Root enabled for the backup - pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY - if os.path.exists(pwd_file): - with open(pwd_file, "r") as f: - pw = f.read().rstrip("\n") - root = service.CouchbaseRootAccess() - root.set_password(pw) - - # Get current root password - root = service.CouchbaseRootAccess() - root_pwd = root.get_password() - - # Iterate through each bucket config - buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON - with open(buckets_json, "r") as f: - out = f.read() - if out == "[]": - # No buckets or data to restore. Done. 
- return - d = json.loads(out) - for i in range(len(d)): - bucket_name = d[i]["name"] - bucket_type = d[i]["bucketType"] - if bucket_type == "membase": - bucket_type = "couchbase" - ram = int(utils.to_mb(d[i]["quota"]["ram"])) - auth_type = d[i]["authType"] - password = d[i]["saslPassword"] - port = d[i]["proxyPort"] - replica_number = d[i]["replicaNumber"] - replica_index = 1 if d[i]["replicaIndex"] else 0 - threads = d[i]["threadsNumber"] - flush = 1 if "flush" in d[i]["controllers"] else 0 - - # cbrestore requires you to manually create dest buckets - create_bucket_cmd = ('curl -X POST -u root:' + root_pwd + - ' -d name="' + - bucket_name + '"' + - ' -d bucketType="' + - bucket_type + '"' + - ' -d ramQuotaMB="' + - str(ram) + '"' + - ' -d authType="' + - auth_type + '"' + - ' -d saslPassword="' + - password + '"' + - ' -d proxyPort="' + - str(port) + '"' + - ' -d replicaNumber="' + - str(replica_number) + '"' + - ' -d replicaIndex="' + - str(replica_index) + '"' + - ' -d threadsNumber="' + - str(threads) + '"' + - ' -d flushEnabled="' + - str(flush) + '" ' + - system.COUCHBASE_REST_API + - '/pools/default/buckets') - utils.execute_with_timeout(create_bucket_cmd, - shell=True, timeout=300) - - if bucket_type == "memcached": - continue - - # Wait for couchbase (membase) bucket creation to complete - # (follows same logic as --wait for couchbase-cli) - timeout_in_seconds = 120 - start = time.time() - bucket_exist = False - while ((time.time() - start) <= timeout_in_seconds and - not bucket_exist): - url = (system.COUCHBASE_REST_API + - '/pools/default/buckets/') - outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all' - utils.execute_with_timeout('curl -u root:' + root_pwd + - ' ' + url + ' > ' + outfile, - shell=True, timeout=300) - with open(outfile, "r") as file: - out = file.read() - buckets = json.loads(out) - for bucket in buckets: - if bucket["name"] == bucket_name: - bucket_exist = True - break - if not bucket_exist: - time.sleep(2) - - if not bucket_exist: - raise base.RestoreError("Failed to create bucket '%s' " - "within %s seconds" - % (bucket_name, - timeout_in_seconds)) - - # Query status - # (follows same logic as --wait for couchbase-cli) - healthy = False - while ((time.time() - start) <= timeout_in_seconds): - url = (system.COUCHBASE_REST_API + - '/pools/default/buckets/' + - bucket_name) - outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name - utils.execute_with_timeout('curl -u root:' + root_pwd + - ' ' + url + ' > ' + outfile, - shell=True, timeout=300) - all_node_ready = True - with open(outfile, "r") as file: - out = file.read() - bucket = json.loads(out) - for node in bucket["nodes"]: - if node["status"] != "healthy": - all_node_ready = False - break - if not all_node_ready: - time.sleep(2) - else: - healthy = True - break - - if not healthy: - raise base.RestoreError("Bucket '%s' is created but " - "not ready to use within %s " - "seconds" - % (bucket_name, - timeout_in_seconds)) - - # Restore - restore_cmd = ('/opt/couchbase/bin/cbrestore ' + - system.COUCHBASE_DUMP_DIR + ' ' + - system.COUCHBASE_REST_API + - ' --bucket-source=' + bucket_name + - ' --bucket-destination=' + bucket_name + - ' -u root' + ' -p ' + root_pwd) - try: - utils.execute_with_timeout(restore_cmd, - shell=True, - timeout=300) - except exception.ProcessExecutionError: - # cbrestore fails or hangs at times: - # http://www.couchbase.com/issues/browse/MB-10832 - # Retrying typically works - LOG.exception("cbrestore failed. 
Retrying...") - utils.execute_with_timeout(restore_cmd, - shell=True, - timeout=300) - except exception.ProcessExecutionError as p: - LOG.error(p) - raise base.RestoreError("Couchbase restore failed.") diff --git a/trove/guestagent/strategies/restore/experimental/couchdb_impl.py b/trove/guestagent/strategies/restore/experimental/couchdb_impl.py deleted file mode 100644 index 559674adf4..0000000000 --- a/trove/guestagent/strategies/restore/experimental/couchdb_impl.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2016 IBM Corporation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.couchdb import service -from trove.guestagent.strategies.restore import base - - -class CouchDBBackup(base.RestoreRunner): - - __strategy_name__ = 'couchdbbackup' - base_restore_cmd = 'sudo tar xPf -' - - def __init__(self, *args, **kwargs): - self.appStatus = service.CouchDBAppStatus() - self.app = service.CouchDBApp(self.appStatus) - super(CouchDBBackup, self).__init__(*args, **kwargs) - - def post_restore(self): - """ - To restore from backup, all we need to do is untar the compressed - database files into the database directory and change its ownership. - """ - operating_system.chown(service.COUCHDB_LIB_DIR, - 'couchdb', - 'couchdb', - as_root=True) - self.app.restart() diff --git a/trove/guestagent/strategies/restore/experimental/db2_impl.py b/trove/guestagent/strategies/restore/experimental/db2_impl.py deleted file mode 100644 index 7cf2cb338d..0000000000 --- a/trove/guestagent/strategies/restore/experimental/db2_impl.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2016 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
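
For context: both DB2 strategies deleted below restore every database reported by GET_DB_NAMES, and the online variant then rolls the transaction logs forward so each database leaves the BACKUP PENDING state. Schematically, reusing the module's own imports (the db2 system module is not part of this diff, so the command expansions shown in the comments are assumptions about standard DB2 CLP syntax, not values from the patch):

    # Assumed CLP expansions (illustrative only):
    #   RESTORE_ONLINE_DB -> "db2 restore db %(dbname)s"
    #   ROLL_FORWARD_DB   -> "db2 rollforward db %(dbname)s to end of logs and stop"
    out, _ = utils.execute_with_timeout(system.GET_DB_NAMES, shell=True)
    for dbname in out.split():
        service.run_command(system.RESTORE_ONLINE_DB % {'dbname': dbname})
        service.run_command(system.ROLL_FORWARD_DB % {'dbname': dbname})
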
- -from oslo_log import log as logging - -from trove.common import exception -from trove.common import utils -from trove.guestagent.datastore.experimental.db2 import service -from trove.guestagent.datastore.experimental.db2 import system -from trove.guestagent.strategies.restore import base - -LOG = logging.getLogger(__name__) - - -class DB2Backup(base.RestoreRunner): - """ - Base class implementation of Restore strategy for DB2 - """ - base_restore_cmd = 'sudo tar xPf -' - - def __init__(self, *args, **kwargs): - super(DB2Backup, self).__init__(*args, **kwargs) - self.appStatus = service.DB2AppStatus() - self.app = service.DB2App(self.appStatus) - self.admin = service.DB2Admin() - self.restore_location = system.DB2_BACKUP_DIR - - def _post_restore(self, restore_command, rollforward_command=None): - """ - Restore from the directory that we untarred into - """ - out = "" - try: - out, err = utils.execute_with_timeout(system.GET_DB_NAMES, - shell=True) - except exception.ProcessExecutionError: - LOG.exception("Couldn't find any databases.") - - dbNames = out.split() - for dbName in dbNames: - service.run_command(restore_command % {'dbname': dbName}) - if rollforward_command: - service.run_command(system.ROLL_FORWARD_DB % {'dbname': - dbName}) - - LOG.info("Cleaning out restore location: %s.", - system.DB2_BACKUP_DIR) - service.remove_db2_dir(system.DB2_BACKUP_DIR) - - -class DB2OfflineBackup(DB2Backup): - """ - Implementation of Restore Strategy for full offline backups - using the default circular logging - """ - __strategy_name__ = 'db2offlinebackup' - - def post_restore(self): - self._post_restore(system.RESTORE_OFFLINE_DB) - - -class DB2OnlineBackup(DB2Backup): - """ - Implementation of restore strategy for full online backups using - archived logging. - """ - __strategy_name__ = 'db2onlinebackup' - - def post_restore(self): - """ - Once the databases are restored from a backup, we have to roll - forward the logs to the point of where the backup was taken. This - brings the database to a state were it can used, otherwise it - remains in a BACKUP PENDING state. After roll forwarding the logs, - we can delete the archived logs. - """ - self._post_restore(system.RESTORE_ONLINE_DB, system.ROLL_FORWARD_DB) - service.remove_db2_dir(system.DB2_ARCHIVE_LOGS_DIR + '/*') diff --git a/trove/guestagent/strategies/restore/experimental/mariadb_impl.py b/trove/guestagent/strategies/restore/experimental/mariadb_impl.py deleted file mode 100644 index b6c5ef159c..0000000000 --- a/trove/guestagent/strategies/restore/experimental/mariadb_impl.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2019 Catalyst Cloud Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
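
For context: the MariaBackup strategy deleted below extracts a full backup by streaming it through mbstream into the data directory, and handles incrementals by extracting each one into its own scratch directory and replaying it with mariabackup --prepare. After template expansion the commands look roughly as follows (assuming the default /var/lib/mysql mount point; <checksum> stands for the backup checksum used to name the scratch directory):

    # Full backup, extracted straight into the data directory:
    #   sudo mbstream -x -C /var/lib/mysql 2>/tmp/xbstream_extract.log
    # Incremental, extracted to a scratch directory and then applied:
    #   sudo mbstream -x -C /var/lib/mysql/<checksum> 2>/tmp/xbstream_extract.log
    #   sudo mariabackup --prepare --target-dir=/var/lib/mysql \
    #       --incremental-dir=/var/lib/mysql/<checksum> 2>/tmp/innoprepare.log
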
-import glob -import os - -from oslo_log import log as logging - -from trove.common import cfg -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.mariadb import service -from trove.guestagent.datastore.mysql_common import service as mysql_service -from trove.guestagent.strategies.restore import base -from trove.guestagent.strategies.restore import mysql_impl - -LOG = logging.getLogger(__name__) -PREPARE_LOG = '/tmp/innoprepare.log' - - -class MariaBackup(base.RestoreRunner, mysql_impl.MySQLRestoreMixin): - __strategy_name__ = 'mariabackup' - base_restore_cmd = ('sudo mbstream -x -C %(restore_location)s ' - '2>/tmp/xbstream_extract.log') - - def __init__(self, *args, **kwargs): - self._app = None - super(MariaBackup, self).__init__(*args, **kwargs) - - @property - def app(self): - if self._app is None: - self._app = service.MariaDBApp( - mysql_service.BaseMySqlAppStatus.get() - ) - return self._app - - def pre_restore(self): - self.app.stop_db() - LOG.debug("Cleaning out restore location: %s.", self.restore_location) - operating_system.chmod(self.restore_location, - operating_system.FileMode.SET_FULL, - as_root=True) - utils.clean_out(self.restore_location) - - def post_restore(self): - operating_system.chown(self.restore_location, 'mysql', None, - force=True, as_root=True) - - # When using Mariabackup from versions prior to MariaDB 10.2.10, you - # would also have to remove any pre-existing InnoDB redo log files. - self._delete_old_binlogs() - self.app.start_mysql() - LOG.debug("Finished post restore.") - - def _delete_old_binlogs(self): - files = glob.glob(os.path.join(self.restore_location, "ib_logfile*")) - for f in files: - os.unlink(f) - - def check_process(self): - LOG.debug('Checking return code of mbstream restore process.') - return_code = self.process.wait() - if return_code != 0: - LOG.error('mbstream exited with %s', return_code) - return False - - return True - - -class MariaBackupIncremental(MariaBackup): - __strategy_name__ = 'mariabackupincremental' - incremental_prep = ('sudo mariabackup --prepare ' - '--target-dir=%(restore_location)s ' - '%(incremental_args)s ' - '2>/tmp/innoprepare.log') - - def __init__(self, *args, **kwargs): - super(MariaBackupIncremental, self).__init__(*args, **kwargs) - self.content_length = 0 - - def _incremental_restore_cmd(self, incremental_dir): - """Return a command for a restore with a incremental location.""" - args = {'restore_location': incremental_dir} - return (self.decrypt_cmd + - self.unzip_cmd + - (self.base_restore_cmd % args)) - - def _incremental_prepare_cmd(self, incremental_dir): - if incremental_dir is not None: - incremental_arg = '--incremental-dir=%s' % incremental_dir - else: - incremental_arg = '' - - args = { - 'restore_location': self.restore_location, - 'incremental_args': incremental_arg, - } - - return self.incremental_prep % args - - def _incremental_prepare(self, incremental_dir): - prepare_cmd = self._incremental_prepare_cmd(incremental_dir) - - LOG.debug("Running mariabackup prepare: %s.", prepare_cmd) - utils.execute(prepare_cmd, shell=True) - LOG.debug("mariabackup prepare finished successfully.") - - def _incremental_restore(self, location, checksum): - """Recursively apply backups from all parents. - - If we are the parent then we restore to the restore_location and - we apply the logs to the restore_location only. - - Otherwise if we are an incremental we restore to a subfolder to - prevent stomping on the full restore data. 
Then we run apply log - with the '--incremental-dir' flag - """ - metadata = self.storage.load_metadata(location, checksum) - incremental_dir = None - if 'parent_location' in metadata: - LOG.info("Restoring parent: %(parent_location)s" - " checksum: %(parent_checksum)s.", metadata) - parent_location = metadata['parent_location'] - parent_checksum = metadata['parent_checksum'] - # Restore parents recursively so backup are applied sequentially - self._incremental_restore(parent_location, parent_checksum) - # for *this* backup set the incremental_dir - # just use the checksum for the incremental path as it is - # sufficiently unique /var/lib/mysql/ - incremental_dir = os.path.join( - cfg.get_configuration_property('mount_point'), checksum) - operating_system.create_directory(incremental_dir, as_root=True) - command = self._incremental_restore_cmd(incremental_dir) - else: - # The parent (full backup) use the same command from InnobackupEx - # super class and do not set an incremental_dir. - command = self.restore_cmd - - self.content_length += self._unpack(location, checksum, command) - self._incremental_prepare(incremental_dir) - - # Delete unpacked incremental backup metadata - if incremental_dir: - operating_system.remove(incremental_dir, force=True, as_root=True) - - def _run_restore(self): - """Run incremental restore.""" - self._incremental_restore(self.location, self.checksum) - return self.content_length diff --git a/trove/guestagent/strategies/restore/experimental/mongo_impl.py b/trove/guestagent/strategies/restore/experimental/mongo_impl.py deleted file mode 100644 index 353e7c4fae..0000000000 --- a/trove/guestagent/strategies/restore/experimental/mongo_impl.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2014 eBay Software Foundation -# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
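
Before moving on to MongoDB, a concrete reading of the recursion in MariaBackupIncremental._incremental_restore above: parents are always unpacked and prepared before their children, so for a chain full -> inc1 -> inc2 the effective order is (checksums c1/c2 hypothetical):

    # 1. unpack full into /var/lib/mysql;      prepare without --incremental-dir
    # 2. unpack inc1 into /var/lib/mysql/<c1>; prepare --incremental-dir=.../<c1>
    # 3. unpack inc2 into /var/lib/mysql/<c2>; prepare --incremental-dir=.../<c2>

Each scratch directory is removed again once its prepare step has been applied.
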
-# - -from oslo_utils import netutils - -from trove.common import cfg -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.mongodb import ( - service as mongo_service) -from trove.guestagent.strategies.restore import base - -CONF = cfg.CONF -IP = netutils.get_my_ipv4() -LARGE_TIMEOUT = 1200 -MONGODB_DBPATH = CONF.mongodb.mount_point -MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" - - -class MongoDump(base.RestoreRunner): - __strategy_name__ = 'mongodump' - base_restore_cmd = 'sudo tar xPf -' - - def __init__(self, *args, **kwargs): - super(MongoDump, self).__init__(*args, **kwargs) - self.app = mongo_service.MongoDBApp() - - def post_restore(self): - """ - Restore from the directory that we untarred into - """ - params = self.app.admin_cmd_auth_params() - params.append(MONGO_DUMP_DIR) - utils.execute_with_timeout('mongorestore', *params, - timeout=LARGE_TIMEOUT) - - operating_system.remove(MONGO_DUMP_DIR, force=True, as_root=True) diff --git a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py b/trove/guestagent/strategies/restore/experimental/postgresql_impl.py deleted file mode 100644 index c7cf0d63f2..0000000000 --- a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re - -from eventlet.green import subprocess -from oslo_log import log as logging - -from trove.common import cfg -from trove.common import stream_codecs -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.experimental.postgresql.service import PgSqlApp -from trove.guestagent.strategies.restore import base - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -WAL_ARCHIVE_DIR = CONF.postgresql.wal_archive_location - - -class PgDump(base.RestoreRunner): - """Implementation of Restore Strategy for pg_dump.""" - __strategy_name__ = 'pg_dump' - base_restore_cmd = 'psql -U os_admin' - - IGNORED_ERROR_PATTERNS = [ - re.compile(r'ERROR:\s*role "postgres" already exists'), - ] - - def restore(self): - """We are overriding the base class behavior - to perform custom error handling. - """ - self.pre_restore() - content_length = self._execute_postgres_restore() - self.post_restore() - return content_length - - def _execute_postgres_restore(self): - # Postgresql outputs few benign messages into the stderr stream - # during a normal restore procedure. - # We need to watch for those and avoid raising - # an exception in response. - # Message 'ERROR: role "postgres" already exists' - # is expected and does not pose any problems to the restore operation. 
- - stream = self.storage.load(self.location, self.checksum) - process = subprocess.Popen(self.restore_cmd, shell=True, - stdin=subprocess.PIPE, - stderr=subprocess.PIPE) - content_length = 0 - for chunk in stream: - process.stdin.write(chunk) - content_length += len(chunk) - process.stdin.close() - self._handle_errors(process) - LOG.info("Restored %s bytes from stream.", content_length) - - return content_length - - def _handle_errors(self, process): - # Handle messages in the error stream of a given process. - # Raise an exception if the stream is not empty and - # does not match the expected message sequence. - - try: - err = process.stderr.read() - # Empty error stream is always accepted as valid - # for future compatibility. - if err: - for message in err.splitlines(False): - if not any(regex.match(message) - for regex in self.IGNORED_ERROR_PATTERNS): - raise Exception(message) - except OSError: - pass - - -class PgBaseBackup(base.RestoreRunner): - """Implementation of Restore Strategy for pg_basebackup.""" - __strategy_name__ = 'pg_basebackup' - location = "" - base_restore_cmd = '' - - IGNORED_ERROR_PATTERNS = [ - re.compile(r'ERROR:\s*role "postgres" already exists'), - ] - - def __init__(self, *args, **kwargs): - self._app = None - self.base_restore_cmd = 'sudo -u %s tar xCf %s - ' % ( - self.app.pgsql_owner, self.app.pgsql_data_dir - ) - - super(PgBaseBackup, self).__init__(*args, **kwargs) - - @property - def app(self): - if self._app is None: - self._app = self._build_app() - return self._app - - def _build_app(self): - return PgSqlApp() - - def pre_restore(self): - self.app.stop_db() - LOG.info("Preparing WAL archive dir") - self.app.recreate_wal_archive_dir() - datadir = self.app.pgsql_data_dir - operating_system.remove(datadir, force=True, recursive=True, - as_root=True) - operating_system.create_directory(datadir, user=self.app.pgsql_owner, - group=self.app.pgsql_owner, - force=True, as_root=True) - - def post_restore(self): - operating_system.chmod(self.app.pgsql_data_dir, - FileMode.SET_USR_RWX(), - as_root=True, recursive=True, force=True) - - def write_recovery_file(self, restore=False): - metadata = self.storage.load_metadata(self.location, self.checksum) - recovery_conf = "" - recovery_conf += "recovery_target_name = '%s' \n" % metadata['label'] - recovery_conf += "recovery_target_timeline = '%s' \n" % 1 - - if restore: - recovery_conf += ("restore_command = '" + - self.pgsql_restore_cmd + "'\n") - - recovery_file = os.path.join(self.app.pgsql_data_dir, 'recovery.conf') - operating_system.write_file(recovery_file, recovery_conf, - codec=stream_codecs.IdentityCodec(), - as_root=True) - operating_system.chown(recovery_file, user=self.app.pgsql_owner, - group=self.app.pgsql_owner, as_root=True) - - -class PgBaseBackupIncremental(PgBaseBackup): - - def __init__(self, *args, **kwargs): - super(PgBaseBackupIncremental, self).__init__(*args, **kwargs) - self.content_length = 0 - self.incr_restore_cmd = 'sudo -u %s tar -xf - -C %s ' % ( - self.app.pgsql_owner, WAL_ARCHIVE_DIR - ) - self.pgsql_restore_cmd = "cp " + WAL_ARCHIVE_DIR + '/%f "%p"' - - def pre_restore(self): - self.app.stop_db() - - def post_restore(self): - self.write_recovery_file(restore=True) - - def _incremental_restore_cmd(self, incr=False): - args = {'restore_location': self.restore_location} - cmd = self.base_restore_cmd - if incr: - cmd = self.incr_restore_cmd - return self.decrypt_cmd + self.unzip_cmd + (cmd % args) - - def _incremental_restore(self, location, checksum): - - metadata = 
self.storage.load_metadata(location, checksum) - if 'parent_location' in metadata: - LOG.info("Found parent at %s", metadata['parent_location']) - parent_location = metadata['parent_location'] - parent_checksum = metadata['parent_checksum'] - self._incremental_restore(parent_location, parent_checksum) - cmd = self._incremental_restore_cmd(incr=True) - self.content_length += self._unpack(location, checksum, cmd) - - else: - # For the parent base backup, revert to the default restore cmd - LOG.info("Recursed back to full backup.") - - super(PgBaseBackupIncremental, self).pre_restore() - cmd = self._incremental_restore_cmd(incr=False) - self.content_length += self._unpack(location, checksum, cmd) - - operating_system.chmod(self.app.pgsql_data_dir, - FileMode.SET_USR_RWX(), - as_root=True, recursive=True, force=True) - - def _run_restore(self): - self._incremental_restore(self.location, self.checksum) - # content-length restored - return self.content_length diff --git a/trove/guestagent/strategies/restore/experimental/redis_impl.py b/trove/guestagent/strategies/restore/experimental/redis_impl.py deleted file mode 100644 index 551aff9cef..0000000000 --- a/trove/guestagent/strategies/restore/experimental/redis_impl.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. and Tesora, Inc -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
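
For context on the Redis strategy deleted below: the backup stream is written straight into Redis's persistence file (via tee), so the strategy stops the server first and, if append-only mode is enabled, temporarily overrides appendonly=no; once the data is back it triggers an AOF rewrite and waits for it to complete before restoring the original configuration. The command-level equivalent of that sequence, shown with redis-cli names purely for illustration (the code drives it through the app's admin client and configuration manager):

    # CONFIG SET appendonly no   (apply_system_override before the restore)
    # ... persistence file restored, server started ...
    # INFO persistence           (poll until loading == 0)
    # BGREWRITEAOF               (rebuild the AOF from the restored data)
    # INFO persistence           (poll until aof_rewrite_in_progress == 0)
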
- -import os -from oslo_log import log as logging - -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.guestagent.datastore.experimental.redis import service -from trove.guestagent.datastore.experimental.redis import system -from trove.guestagent.strategies.restore import base - -LOG = logging.getLogger(__name__) - - -class RedisBackup(base.RestoreRunner): - """Implementation of Restore Strategy for Redis.""" - __strategy_name__ = 'redisbackup' - - CONF_LABEL_AOF_TEMP_OFF = 'restore_aof_temp_off' - INFO_PERSISTENCE_SECTION = 'persistence' - - def __init__(self, storage, **kwargs): - self.app = service.RedisApp() - self.restore_location = self.app.get_persistence_filepath() - self.base_restore_cmd = 'tee %s' % self.restore_location - self.aof_set = self.app.is_appendonly_enabled() - self.aof_off_cfg = {'appendonly': 'no'} - kwargs.update({'restore_location': self.restore_location}) - super(RedisBackup, self).__init__(storage, **kwargs) - - def pre_restore(self): - self.app.stop_db() - LOG.info("Removing old persistence file: %s.", - self.restore_location) - operating_system.remove(self.restore_location, force=True, - as_root=True) - dir = os.path.dirname(self.restore_location) - operating_system.create_directory(dir, as_root=True) - operating_system.chmod(dir, FileMode.SET_FULL, as_root=True) - # IF AOF is set, we need to turn it off temporarily - if self.aof_set: - self.app.configuration_manager.apply_system_override( - self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF) - - def post_restore(self): - operating_system.chown(self.restore_location, - system.REDIS_OWNER, system.REDIS_OWNER, - as_root=True) - self.app.start_db() - - # IF AOF was set, we need to put back the original file - if self.aof_set: - self.app.admin.wait_until('loading', 0, - section=self.INFO_PERSISTENCE_SECTION) - self.app.admin.execute('BGREWRITEAOF') - self.app.admin.wait_until('aof_rewrite_in_progress', 0, - section=self.INFO_PERSISTENCE_SECTION) - self.app.stop_db() - self.app.configuration_manager.remove_system_override( - change_id=self.CONF_LABEL_AOF_TEMP_OFF) - self.app.start_db() diff --git a/trove/guestagent/strategies/restore/mysql_impl.py b/trove/guestagent/strategies/restore/mysql_impl.py deleted file mode 100644 index 372f5ea739..0000000000 --- a/trove/guestagent/strategies/restore/mysql_impl.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
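
A note on the reset_root_password machinery deleted below: it launches a throwaway server through mysqld_safe with an --init-file that blanks the localhost root password, polls until the server is up, scans the error log for failures, and then shuts the temporary instance down again. Stripped to its essentials (the real code generates both paths with NamedTemporaryFile; the ones shown here are hypothetical):

    # Contents of the init file (RESET_ROOT_MYSQL_COMMANDS):
    #   SET PASSWORD FOR 'root'@'localhost'='';
    # Command assembled by _start_mysqld_safe_with_init_file:
    #   sudo mysqld_safe --init-file=/tmp/init.sql --log-error=/tmp/mysqld.err
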
-# - -import glob -import os -import re -import tempfile - -from oslo_log import log as logging -import pexpect - -from trove.common import cfg -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -import trove.guestagent.datastore.mysql.service as dbaas -from trove.guestagent.strategies.restore import base - -LOG = logging.getLogger(__name__) - - -class MySQLRestoreMixin(object): - """Common utils for restoring MySQL databases.""" - RESET_ROOT_RETRY_TIMEOUT = 100 - RESET_ROOT_SLEEP_INTERVAL = 10 - - RESET_ROOT_MYSQL_COMMANDS = ("SET PASSWORD FOR " - "'root'@'localhost'='';") - # This is a suffix MySQL appends to the file name given in - # the '--log-error' startup parameter. - _ERROR_LOG_SUFFIX = '.err' - _ERROR_MESSAGE_PATTERN = re.compile(b"ERROR") - - def mysql_is_running(self): - try: - utils.execute_with_timeout("/usr/bin/mysqladmin", "ping") - LOG.debug("MySQL is up and running.") - return True - except exception.ProcessExecutionError: - LOG.debug("MySQL is not running.") - return False - - def mysql_is_not_running(self): - try: - utils.execute_with_timeout("/usr/bin/pgrep", "mysqld") - LOG.debug("MySQL is still running.") - return False - except exception.ProcessExecutionError: - LOG.debug("MySQL is not running.") - return True - - def poll_until_then_raise(self, event, exc): - try: - utils.poll_until(event, - sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, - time_out=self.RESET_ROOT_RETRY_TIMEOUT) - except exception.PollTimeOut: - raise exc - - def _start_mysqld_safe_with_init_file(self, init_file, err_log_file): - # This directory is added and removed by the mysql systemd service - # as the database is started and stopped. The restore operation - # takes place when the database is stopped, so the directory does - # not exist, but it is assumed to exist by the mysqld_safe command - # which starts the database. This command used to create this - # directory if it didn't exist, but it was changed recently to - # simply fail in this case. - run_dir = "/var/run/mysqld" - if not os.path.exists(run_dir): - utils.execute("mkdir", run_dir, - run_as_root=True, root_helper="sudo") - utils.execute("chown", "mysql:mysql", run_dir, err_log_file.name, - init_file.name, run_as_root=True, root_helper="sudo") - command_mysql_safe = ("sudo mysqld_safe" - " --init-file=%s" - " --log-error=%s" % - (init_file.name, err_log_file.name)) - LOG.debug("Spawning: %s" % command_mysql_safe) - child = pexpect.spawn(command_mysql_safe) - try: - index = child.expect(['Starting mysqld daemon']) - if index == 0: - LOG.info("Starting MySQL") - except pexpect.TIMEOUT: - LOG.exception("Got a timeout launching mysqld_safe") - finally: - # There is a race condition here where we kill mysqld before - # the init file been executed. We need to ensure mysqld is up. - # - # mysqld_safe will start even if init-file statement(s) fail. - # We therefore also check for errors in the log file. 
- self.poll_until_then_raise( - self.mysql_is_running, - base.RestoreError("Reset root password failed:" - " mysqld did not start!")) - first_err_message = self._find_first_error_message(err_log_file) - if first_err_message: - raise base.RestoreError("Reset root password failed: %s" - % first_err_message) - - LOG.info("Root password reset successfully.") - LOG.debug("Cleaning up the temp mysqld process.") - utils.execute_with_timeout("mysqladmin", "-uroot", - "--protocol=tcp", "shutdown") - LOG.debug("Polling for shutdown to complete.") - try: - utils.poll_until(self.mysql_is_not_running, - sleep_time=self.RESET_ROOT_SLEEP_INTERVAL, - time_out=self.RESET_ROOT_RETRY_TIMEOUT) - LOG.debug("Database successfully shutdown") - except exception.PollTimeOut: - LOG.debug("Timeout shutting down database " - "- performing killall on mysqld_safe.") - utils.execute_with_timeout("killall", "mysqld_safe", - root_helper="sudo", - run_as_root=True) - self.poll_until_then_raise( - self.mysql_is_not_running, - base.RestoreError("Reset root password failed: " - "mysqld did not stop!")) - - def reset_root_password(self): - """Reset the password of the localhost root account used by Trove - for initial datastore configuration. - """ - - try: - # Do not attempt to delete these files as the 'trove' user. - # The process writing into it may have assumed its ownership. - # Only owners can delete temporary files (restricted deletion). - init_file = tempfile.NamedTemporaryFile(mode='w', delete=False) - operating_system.write_file(init_file.name, - self.RESET_ROOT_MYSQL_COMMANDS) - operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL, - as_root=True) - err_log_file = tempfile.NamedTemporaryFile( - suffix=self._ERROR_LOG_SUFFIX, - delete=False) - self._start_mysqld_safe_with_init_file(init_file, err_log_file) - finally: - init_file.close() - err_log_file.close() - operating_system.remove( - init_file.name, force=True, as_root=True) - operating_system.remove( - err_log_file.name, force=True, as_root=True) - - def _find_first_error_message(self, fp): - if self._is_non_zero_file(fp): - return self._find_first_pattern_match( - fp, self._ERROR_MESSAGE_PATTERN) - return None - - def _is_non_zero_file(self, fp): - file_path = fp.name - return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0) - - def _find_first_pattern_match(self, fp, pattern): - for line in fp: - if pattern.match(line): - return line - return None - - -class MySQLDump(base.RestoreRunner, MySQLRestoreMixin): - """Implementation of Restore Strategy for MySQLDump.""" - __strategy_name__ = 'mysqldump' - base_restore_cmd = 'sudo mysql' - - -class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin): - """Implementation of Restore Strategy for InnoBackupEx.""" - __strategy_name__ = 'innobackupex' - base_restore_cmd = ('sudo xbstream -x -C %(restore_location)s' - ' 2>/tmp/xbstream_extract.log') - base_prepare_cmd = ('sudo innobackupex' - ' --defaults-file=%(restore_location)s/backup-my.cnf' - ' --ibbackup=xtrabackup' - ' --apply-log' - ' %(restore_location)s' - ' 2>/tmp/innoprepare.log') - - def __init__(self, *args, **kwargs): - self._app = None - super(InnoBackupEx, self).__init__(*args, **kwargs) - self.prepare_cmd = self.base_prepare_cmd % kwargs - self.prep_retcode = None - - @property - def app(self): - if self._app is None: - self._app = self._build_app() - return self._app - - def _build_app(self): - return dbaas.MySqlApp(dbaas.MySqlAppStatus.get()) - - def pre_restore(self): - self.app.stop_db() - LOG.debug("Cleaning out restore 
location: %s.", self.restore_location) - operating_system.chmod(self.restore_location, FileMode.SET_FULL, - as_root=True) - utils.clean_out(self.restore_location) - - def _run_prepare(self): - LOG.info("Running innobackupex prepare: %s.", self.prepare_cmd) - self.prep_retcode = utils.execute(self.prepare_cmd, shell=True) - LOG.info("Innobackupex prepare finished successfully.") - - def post_restore(self): - self._run_prepare() - operating_system.chown(self.restore_location, 'mysql', None, - force=True, as_root=True) - self._delete_old_binlogs() - self.reset_root_password() - self.app.start_mysql() - - def _delete_old_binlogs(self): - files = glob.glob(os.path.join(self.restore_location, "ib_logfile*")) - for f in files: - os.unlink(f) - - def check_process(self): - """Check whether xbstream restore is successful.""" - # We first check the restore process exits with 0, however - # xbstream has a bug for creating new files: - # https://jira.percona.com/browse/PXB-1542 - # So we also check the stderr with ignorance of some known - # non-error log lines. Currently we only need to ignore: - # "encryption: using gcrypt x.x.x" - # After PXB-1542 is fixed, we could just check the exit status. - LOG.debug('Checking return code of xbstream restore process.') - return_code = self.process.wait() - if return_code != 0: - LOG.error('xbstream exited with %s', return_code) - return False - - LOG.debug('Checking xbstream restore process stderr output.') - IGNORE_LINES = [ - 'encryption: using gcrypt ', - 'sudo: unable to resolve host ', - ] - with open('/tmp/xbstream_extract.log', 'r') as xbstream_log: - for line in xbstream_log: - # Ignore empty lines - if not line.strip(): - continue - - # Ignore known non-error log lines - check_ignorance = [line.startswith(non_err) - for non_err in IGNORE_LINES] - if any(check_ignorance): - continue - else: - LOG.error('xbstream restore failed with: %s', - line.rstrip('\n')) - return False - - return True - - -class InnoBackupExIncremental(InnoBackupEx): - __strategy_name__ = 'innobackupexincremental' - incremental_prep = ('sudo innobackupex' - ' --defaults-file=%(restore_location)s/backup-my.cnf' - ' --ibbackup=xtrabackup' - ' --apply-log' - ' --redo-only' - ' %(restore_location)s' - ' %(incremental_args)s' - ' 2>/tmp/innoprepare.log') - - def __init__(self, *args, **kwargs): - super(InnoBackupExIncremental, self).__init__(*args, **kwargs) - self.restore_location = kwargs.get('restore_location') - self.content_length = 0 - - def _incremental_restore_cmd(self, incremental_dir): - """Return a command for a restore with a incremental location.""" - args = {'restore_location': incremental_dir} - return (self.decrypt_cmd + - self.unzip_cmd + - (self.base_restore_cmd % args)) - - def _incremental_prepare_cmd(self, incremental_dir): - if incremental_dir is not None: - incremental_arg = '--incremental-dir=%s' % incremental_dir - else: - incremental_arg = '' - - args = { - 'restore_location': self.restore_location, - 'incremental_args': incremental_arg, - } - - return self.incremental_prep % args - - def _incremental_prepare(self, incremental_dir): - prepare_cmd = self._incremental_prepare_cmd(incremental_dir) - LOG.debug("Running innobackupex prepare: %s.", prepare_cmd) - utils.execute(prepare_cmd, shell=True) - LOG.debug("Innobackupex prepare finished successfully.") - - def _incremental_restore(self, location, checksum): - """Recursively apply backups from all parents. 
- - If we are the parent then we restore to the restore_location and - we apply the logs to the restore_location only. - - Otherwise if we are an incremental we restore to a subfolder to - prevent stomping on the full restore data. Then we run apply log - with the '--incremental-dir' flag - """ - metadata = self.storage.load_metadata(location, checksum) - incremental_dir = None - if 'parent_location' in metadata: - LOG.info("Restoring parent: %(parent_location)s" - " checksum: %(parent_checksum)s.", metadata) - parent_location = metadata['parent_location'] - parent_checksum = metadata['parent_checksum'] - # Restore parents recursively so backup are applied sequentially - self._incremental_restore(parent_location, parent_checksum) - # for *this* backup set the incremental_dir - # just use the checksum for the incremental path as it is - # sufficiently unique /var/lib/mysql/ - incremental_dir = os.path.join( - cfg.get_configuration_property('mount_point'), checksum) - operating_system.create_directory(incremental_dir, as_root=True) - command = self._incremental_restore_cmd(incremental_dir) - else: - # The parent (full backup) use the same command from InnobackupEx - # super class and do not set an incremental_dir. - command = self.restore_cmd - - self.content_length += self._unpack(location, checksum, command) - self._incremental_prepare(incremental_dir) - - # Delete unpacked incremental backup metadata - if incremental_dir: - operating_system.remove(incremental_dir, force=True, as_root=True) - - def _run_restore(self): - """Run incremental restore. - - First grab all parents and prepare them with '--redo-only'. After - all backups are restored the super class InnoBackupEx post_restore - method is called to do the final prepare with '--apply-log' - """ - self._incremental_restore(self.location, self.checksum) - return self.content_length diff --git a/trove/guestagent/datastore/experimental/couchdb/__init__.py b/trove/guestagent/utils/__init__.py similarity index 100% rename from trove/guestagent/datastore/experimental/couchdb/__init__.py rename to trove/guestagent/utils/__init__.py diff --git a/trove/guestagent/utils/docker.py b/trove/guestagent/utils/docker.py new file mode 100644 index 0000000000..fe17473132 --- /dev/null +++ b/trove/guestagent/utils/docker.py @@ -0,0 +1,152 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
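
The new trove/guestagent/utils/docker.py module below is a thin wrapper over the docker SDK that the containerized datastores use. As a quick orientation, a usage sketch; the image tag, volume mapping, and environment are illustrative, not values taken from this patch:

    import docker

    from trove.guestagent.utils import docker as docker_util

    client = docker.from_env()
    container = docker_util.start_container(
        client,
        image="mariadb:10.4",  # illustrative image tag
        name="database",
        volumes={"/var/lib/mysql": {"bind": "/var/lib/mysql", "mode": "rw"}},
        network_mode="host",   # ports are ignored in host mode
        environment={"MYSQL_ROOT_PASSWORD": "secret"},  # hypothetical
    )
    print(docker_util.get_container_status(client))  # e.g. "running"

Note that start_container first tries to restart an existing container of that name and only creates a new one when none is found, so the call is safe to repeat across guest agent restarts.
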
+import re + +import docker +from oslo_log import log as logging +from oslo_utils import encodeutils + +from trove.common import cfg + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +ANSI_ESCAPE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]') + + +def stop_container(client, name="database"): + try: + container = client.containers.get(name) + except docker.errors.NotFound: + LOG.warning("Failed to get container %s", name) + return + + container.stop(timeout=CONF.state_change_wait_time) + + +def start_container(client, image, name="database", + restart_policy="unless-stopped", + volumes={}, ports={}, user="", network_mode="host", + environment={}, command=""): + """Start a docker container. + + :param client: docker client obj. + :param image: docker image. + :param name: container name. + :param restart_policy: restart policy. + :param volumes: e.g. + {"/host/trove": {"bind": "/container/trove", "mode": "rw"}} + :param ports: ports is ignored when network_mode="host". e.g. + {"3306/tcp": 3306} + :param user: e.g. "1000.1001" + :param network_mode: One of bridge, none, host + :param environment: Environment variables + :param command: + :return: + """ + try: + container = client.containers.get(name) + container.start() + except docker.errors.NotFound: + LOG.warning("Failed to get container %s", name) + container = client.containers.run( + image, + name=name, + restart_policy={"Name": restart_policy}, + privileged=False, + network_mode=network_mode, + detach=True, + volumes=volumes, + ports=ports, + user=user, + environment=environment, + command=command + ) + + return container + + +def _decode_output(output): + output = encodeutils.safe_decode(output) + output = ANSI_ESCAPE.sub('', output.strip()) + return output.split('\n') + + +def run_container(client, image, name, network_mode="host", volumes={}, + command=""): + """Run command in a container and return the string output list. + + :returns output: The log output. + :returns ret: True if no error occurs, otherwise False. 
+ """ + try: + container = client.containers.get(name) + container.remove(force=True) + except docker.errors.NotFound: + pass + + try: + output = client.containers.run( + image, + name=name, + network_mode=network_mode, + volumes=volumes, + remove=False, + command=command, + ) + except docker.errors.ContainerError as err: + output = err.container.logs() + return _decode_output(output), False + + return _decode_output(output), True + + +def get_container_status(client, name="database"): + try: + container = client.containers.get(name) + # One of created, restarting, running, removing, paused, exited, or + # dead + return container.status + except docker.errors.NotFound: + return "not running" + except Exception: + return "unknown" + + +def run_command(client, command, name="database"): + container = client.containers.get(name) + # output is Bytes type + ret, output = container.exec_run(command) + if ret == 1: + raise Exception('Running command error: %s' % output) + + return output + + +def restart_container(client, name="database"): + container = client.containers.get(name) + container.restart(timeout=CONF.state_change_wait_time) + + +def remove_container(client, name="database"): + try: + container = client.containers.get(name) + container.remove(force=True) + except docker.errors.NotFound: + pass + + +def get_container_logs(client, name='database', tail=50): + container = client.containers.get(name) + output = container.logs(tail=tail) + return _decode_output(output) diff --git a/trove/guestagent/utils/mysql.py b/trove/guestagent/utils/mysql.py new file mode 100644 index 0000000000..d3511fa772 --- /dev/null +++ b/trove/guestagent/utils/mysql.py @@ -0,0 +1,85 @@ +# Copyright 2020 Catalyst Cloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo_log import log as logging +from pymysql import err as pymysql_err +from sqlalchemy import exc +from sqlalchemy import interfaces +from sqlalchemy.sql.expression import text + +from trove.guestagent.common import sql_query + +LOG = logging.getLogger(__name__) +FLUSH = text(sql_query.FLUSH) + + +class SqlClient(object): + """A sqlalchemy wrapper to manage transactions.""" + + def __init__(self, engine, use_flush=True): + self.engine = engine + self.use_flush = use_flush + + def __enter__(self): + self.conn = self.engine.connect() + self.trans = self.conn.begin() + return self.conn + + def __exit__(self, type, value, traceback): + if self.trans: + if type is not None: + self.trans.rollback() + else: + if self.use_flush: + self.conn.execute(FLUSH) + self.trans.commit() + self.conn.close() + + def execute(self, t, **kwargs): + LOG.debug('Execute SQL: %s', t) + try: + return self.conn.execute(t, kwargs) + except Exception: + self.trans.rollback() + self.trans = None + raise + + +class BaseKeepAliveConnection(interfaces.PoolListener): + """ + A connection pool listener that ensures live connections are returned + from the connection pool at checkout. This alleviates the problem of + MySQL connections timing out. 
+ """ + + def checkout(self, dbapi_con, con_record, con_proxy): + """Event triggered when a connection is checked out from the pool.""" + try: + try: + dbapi_con.ping(False) + except TypeError: + dbapi_con.ping() + except dbapi_con.OperationalError as ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + raise exc.DisconnectionError() + else: + raise + # MariaDB seems to timeout the client in a different + # way than MySQL and PXC + except pymysql_err.InternalError as ex: + if "Packet sequence number wrong" in str(ex): + raise exc.DisconnectionError() + elif 'Connection was killed' in str(ex): + raise exc.DisconnectionError() + else: + raise diff --git a/trove/instance/models.py b/trove/instance/models.py index f2c5cbaf0d..b5b9ac9391 100644 --- a/trove/instance/models.py +++ b/trove/instance/models.py @@ -144,12 +144,15 @@ def load_simple_instance_addresses(context, db_info): return addresses = [] + user_ports = [] client = clients.create_neutron_client(context, db_info.region_id) ports = client.list_ports(device_id=db_info.compute_instance_id)['ports'] for port in ports: if 'Management port' not in port['description']: LOG.debug('Found user port %s for instance %s', port['id'], db_info.id) + + user_ports.append(port['id']) for ip in port['fixed_ips']: # TODO(lxkong): IPv6 is not supported if netutils.is_valid_ipv4(ip.get('ip_address')): @@ -163,6 +166,7 @@ def load_simple_instance_addresses(context, db_info): addresses.append( {'address': fip['floating_ip_address'], 'type': 'public'}) + db_info.ports = user_ports db_info.addresses = addresses @@ -221,6 +225,13 @@ class SimpleInstance(object): else: return None + @property + def ports(self): + if hasattr(self.db_info, 'ports'): + return self.db_info.ports + else: + return None + @property def created(self): return self.db_info.created @@ -1273,13 +1284,15 @@ class Instance(BuiltInstance): instance_id = ids instance_name = names root_password = root_passwords + task_api.API(context).create_instance( instance_id, instance_name, flavor, image_id, databases, users, datastore_version.manager, datastore_version.packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type=volume_type, modules=module_list, - locality=locality, access=access) + locality=locality, access=access, + ds_version=datastore_version.name) return SimpleInstance(context, db_info, service_status, root_password, locality=locality) @@ -1366,7 +1379,6 @@ class Instance(BuiltInstance): _resize_resources) def reboot(self): - self.validate_can_perform_action() LOG.info("Rebooting instance %s.", self.id) if self.db_info.cluster_id is not None and not self.context.is_admin: raise exception.ClusterInstanceOperationNotSupported() @@ -1422,6 +1434,7 @@ class Instance(BuiltInstance): if not self.slaves: raise exception.BadRequest(_("Instance %s is not a replica" " source.") % self.id) + service = InstanceServiceStatus.find_by(instance_id=self.id) last_heartbeat_delta = timeutils.utcnow() - service.updated_at agent_expiry_interval = timedelta(seconds=CONF.agent_heartbeat_expiry) @@ -1496,10 +1509,10 @@ class Instance(BuiltInstance): status=status) def attach_configuration(self, configuration_id): - LOG.debug("Attaching configuration to instance: %s", self.id) + LOG.info("Attaching configuration %s to instance: %s", + configuration_id, self.id) if not self.db_info.configuration_id: self._validate_can_perform_assign() - LOG.debug("Attaching configuration: %s", configuration_id) config = Configuration.find(self.context, 
configuration_id, self.db_info.datastore_version_id) self.update_configuration(config) @@ -1519,7 +1532,8 @@ class Instance(BuiltInstance): to RESTART_REQUIRED. """ - LOG.debug("Saving configuration on instance: %s", self.id) + LOG.info("Saving configuration %s on instance: %s", + configuration.configuration_id, self.id) overrides = configuration.get_configuration_overrides() # Always put the instance into RESTART_REQUIRED state after @@ -1543,8 +1557,8 @@ class Instance(BuiltInstance): Apply changes only if ALL values can be applied at once. Return True if the configuration has changed. """ - - LOG.debug("Applying configuration on instance: %s", self.id) + LOG.info("Applying configuration %s on instance: %s", + configuration.configuration_id, self.id) overrides = configuration.get_configuration_overrides() if not configuration.does_configuration_need_restart(): diff --git a/trove/instance/service.py b/trove/instance/service.py index 8f66de445c..ab4c65c0c0 100644 --- a/trove/instance/service.py +++ b/trove/instance/service.py @@ -412,7 +412,6 @@ class InstanceController(wsgi.Controller): def _modify_instance(self, context, req, instance, **kwargs): if 'detach_replica' in kwargs and kwargs['detach_replica']: - LOG.debug("Detaching replica from source.") context.notification = notification.DBaaSInstanceDetach( context, request=req) with StartNotification(context, instance_id=instance.id): diff --git a/trove/taskmanager/api.py b/trove/taskmanager/api.py index f8ddb4df11..025495e6df 100644 --- a/trove/taskmanager/api.py +++ b/trove/taskmanager/api.py @@ -193,7 +193,8 @@ class API(object): availability_zone=None, root_password=None, nics=None, overrides=None, slave_of_id=None, cluster_config=None, volume_type=None, - modules=None, locality=None, access=None): + modules=None, locality=None, access=None, + ds_version=None): LOG.debug("Making async call to create instance %s ", instance_id) version = self.API_BASE_VERSION @@ -214,7 +215,8 @@ class API(object): slave_of_id=slave_of_id, cluster_config=cluster_config, volume_type=volume_type, - modules=modules, locality=locality, access=access) + modules=modules, locality=locality, access=access, + ds_version=ds_version) def create_cluster(self, cluster_id): LOG.debug("Making async call to create cluster %s ", cluster_id) diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py index ce1bcff809..bbec7d0b63 100644 --- a/trove/taskmanager/manager.py +++ b/trove/taskmanager/manager.py @@ -120,13 +120,10 @@ class Manager(periodic_task.PeriodicTasks): # this step took place right after step 4, which causes failures # with MariaDB replications. 
old_master.make_read_only(True) - master_ips = old_master.detach_public_ips() - slave_ips = master_candidate.detach_public_ips() latest_txn_id = old_master.get_latest_txn_id() master_candidate.wait_for_txn(latest_txn_id) master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() - master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) # At this point, should something go wrong, there @@ -151,7 +148,8 @@ class Manager(periodic_task.PeriodicTasks): "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id} - LOG.exception(log_fmt, msg_content) + LOG.error(log_fmt, msg_content) + exception_replicas.append(replica) error_messages += "%s (%s)\n" % ( exc_fmt % msg_content, ex) @@ -159,19 +157,19 @@ class Manager(periodic_task.PeriodicTasks): # dealing with the old master after all the other replicas # has been migrated. old_master.attach_replica(master_candidate) - old_master.attach_public_ips(slave_ips) try: old_master.demote_replication_master() except Exception as ex: log_fmt = "Exception demoting old replica source %s." exc_fmt = _("Exception demoting old replica source %s.") - LOG.exception(log_fmt, old_master.id) + LOG.error(log_fmt, old_master.id) exception_replicas.append(old_master) error_messages += "%s (%s)\n" % ( exc_fmt % old_master.id, ex) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) + if exception_replicas: self._set_task_status(exception_replicas, InstanceTasks.PROMOTION_ERROR) @@ -183,10 +181,15 @@ class Manager(periodic_task.PeriodicTasks): "err": error_messages}) raise ReplicationSlaveAttachError(msg) + LOG.info('Finished to promote %s as master.', instance_id) + with EndNotification(context): + LOG.info('Promoting %s as replication master', instance_id) + master_candidate = BuiltInstanceTasks.load(context, instance_id) old_master = BuiltInstanceTasks.load(context, master_candidate.slave_of_id) + replicas = [] for replica_dbinfo in old_master.slaves: if replica_dbinfo.id == instance_id: @@ -211,6 +214,7 @@ class Manager(periodic_task.PeriodicTasks): return [[repl] + repl.get_last_txn() for repl in replica_models] def _most_current_replica(self, old_master, replica_models): + # last_txns is [instance, master UUID, last txn] last_txns = self._get_replica_txns(replica_models) master_ids = [txn[1] for txn in last_txns if txn[1]] if len(set(master_ids)) > 1: @@ -224,14 +228,11 @@ class Manager(periodic_task.PeriodicTasks): master_candidate = self._most_current_replica(old_master, replica_models) + LOG.info('New master selected: %s', master_candidate.id) - master_ips = old_master.detach_public_ips() - slave_ips = master_candidate.detach_public_ips() master_candidate.detach_replica(old_master, for_failover=True) master_candidate.enable_as_master() - master_candidate.attach_public_ips(master_ips) master_candidate.make_read_only(False) - old_master.attach_public_ips(slave_ips) exception_replicas = [] error_messages = "" @@ -251,10 +252,9 @@ class Manager(periodic_task.PeriodicTasks): "slave": replica.id, "old_master": old_master.id, "new_master": master_candidate.id} - LOG.exception(log_fmt, msg_content) + LOG.error(log_fmt, msg_content) exception_replicas.append(replica) - error_messages += "%s (%s)\n" % ( - exc_fmt % msg_content, ex) + error_messages += "%s (%s)\n" % (exc_fmt % msg_content, ex) self._set_task_status([old_master] + replica_models, InstanceTasks.NONE) @@ -269,6 +269,8 @@ class Manager(periodic_task.PeriodicTasks): "err": error_messages}) raise 
ReplicationSlaveAttachError(msg) + LOG.info('New master enabled: %s', master_candidate.id) + with EndNotification(context): master = BuiltInstanceTasks.load(context, instance_id) replicas = [BuiltInstanceTasks.load(context, dbinfo.id) @@ -314,7 +316,8 @@ class Manager(periodic_task.PeriodicTasks): datastore_manager, packages, volume_size, availability_zone, root_password, nics, overrides, slave_of_id, backup_id, - volume_type, modules): + volume_type, modules, access=None, + ds_version=None): if type(instance_id) in [list]: ids = instance_id @@ -324,7 +327,6 @@ class Manager(periodic_task.PeriodicTasks): root_passwords = [root_password] replica_number = 0 replica_backup_id = backup_id - replica_backup_created = False replicas = [] master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id) @@ -333,52 +335,60 @@ class Manager(periodic_task.PeriodicTasks): LOG.debug("Using scheduler hints %s for creating instance %s", scheduler_hints, instance_id) + # Create backup for master + snapshot = None + try: + instance_tasks = FreshInstanceTasks.load(context, ids[0]) + snapshot = instance_tasks.get_replication_master_snapshot( + context, slave_of_id, flavor, + parent_backup_id=replica_backup_id) + LOG.info('Snapshot info for creating replica of %s: %s', + slave_of_id, snapshot) + except Exception as err: + LOG.error('Failed to get master snapshot info for creating ' + 'replica, error: %s', str(err)) + + if snapshot and snapshot.get('dataset', {}).get('snapshot_id'): + backup_id = snapshot['dataset']['snapshot_id'] + Backup.delete(context, backup_id) + + raise + + # Create replicas using the master backup + replica_backup_id = snapshot['dataset']['snapshot_id'] try: for replica_index in range(0, len(ids)): - try: - replica_number += 1 - LOG.debug("Creating replica %(num)d of %(count)d.", - {'num': replica_number, 'count': len(ids)}) - instance_tasks = FreshInstanceTasks.load( - context, ids[replica_index]) - snapshot = instance_tasks.get_replication_master_snapshot( - context, slave_of_id, flavor, replica_backup_id, - replica_number=replica_number) - LOG.info('Snapshot info for creating replica of %s: %s', - slave_of_id, snapshot) + replica_number += 1 + LOG.info("Creating replica %(num)d of %(count)d.", + {'num': replica_number, 'count': len(ids)}) - replica_backup_id = snapshot['dataset']['snapshot_id'] - replica_backup_created = (replica_backup_id is not None) - - instance_tasks.create_instance( - flavor, image_id, databases, users, datastore_manager, - packages, volume_size, replica_backup_id, - availability_zone, root_passwords[replica_index], - nics, overrides, None, snapshot, volume_type, - modules, scheduler_hints) - - replicas.append(instance_tasks) - except Exception: - # if it's the first replica, then we shouldn't continue - LOG.exception( - "Could not create replica %(num)d of %(count)d.", - {'num': replica_number, 'count': len(ids)}) - if replica_number == 1: - raise + instance_tasks = FreshInstanceTasks.load( + context, ids[replica_index]) + instance_tasks.create_instance( + flavor, image_id, databases, users, datastore_manager, + packages, volume_size, replica_backup_id, + availability_zone, root_passwords[replica_index], + nics, overrides, None, snapshot, volume_type, + modules, scheduler_hints, access=access, + ds_version=ds_version) + replicas.append(instance_tasks) for replica in replicas: replica.wait_for_instance(CONF.restore_usage_timeout, flavor) - + LOG.info('Replica %s created successfully', replica.id) + except Exception as err: + LOG.error('Failed to create 
replica from %s, error: %s', + slave_of_id, str(err)) + raise finally: - if replica_backup_created: - Backup.delete(context, replica_backup_id) + Backup.delete(context, replica_backup_id) def _create_instance(self, context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, locality, - access=None): + access=None, ds_version=None): if slave_of_id: self._create_replication_slave(context, instance_id, name, flavor, image_id, databases, users, @@ -386,7 +396,9 @@ class Manager(periodic_task.PeriodicTasks): volume_size, availability_zone, root_password, nics, overrides, slave_of_id, - backup_id, volume_type, modules) + backup_id, volume_type, modules, + access=access, + ds_version=ds_version) else: if type(instance_id) in [list]: raise AttributeError(_( @@ -406,7 +418,7 @@ class Manager(periodic_task.PeriodicTasks): availability_zone, root_password, nics, overrides, cluster_config, None, volume_type, modules, - scheduler_hints, access=access + scheduler_hints, access=access, ds_version=ds_version ) timeout = (CONF.restore_usage_timeout if backup_id @@ -418,18 +430,23 @@ class Manager(periodic_task.PeriodicTasks): packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, locality, - access=None): - with EndNotification(context, - instance_id=(instance_id[0] - if isinstance(instance_id, list) - else instance_id)): + access=None, ds_version=None): + with EndNotification( + context, + instance_id=( + instance_id[0] + if isinstance(instance_id, list) + else instance_id + ) + ): self._create_instance(context, instance_id, name, flavor, image_id, databases, users, datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, slave_of_id, cluster_config, volume_type, modules, - locality, access=access) + locality, access=access, + ds_version=ds_version) def upgrade(self, context, instance_id, datastore_version_id): instance_tasks = models.BuiltInstanceTasks.load(context, instance_id) diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py index 6eaa5e04c0..0e0abe9956 100755 --- a/trove/taskmanager/models.py +++ b/trove/taskmanager/models.py @@ -14,30 +14,35 @@ import copy import os.path -import time import traceback from cinderclient import exceptions as cinder_exceptions from eventlet import greenthread from eventlet.timeout import Timeout from oslo_log import log as logging -from oslo_utils import netutils from swiftclient.client import ClientException +import time +from trove import rpc from trove.backup import models as bkup_models from trove.backup.models import Backup from trove.backup.models import DBBackup from trove.backup.state import BackupState +from trove.cluster import tasks from trove.cluster.models import Cluster from trove.cluster.models import DBCluster -from trove.cluster import tasks from trove.common import cfg from trove.common import clients +from trove.common import crypto_utils as cu +from trove.common import exception +from trove.common import instance as rd_instance +from trove.common import neutron +from trove.common import template +from trove.common import timeutils +from trove.common import utils from trove.common.clients import create_cinder_client from trove.common.clients import create_dns_client from trove.common.clients import create_guest_client -from trove.common import 
crypto_utils as cu -from trove.common import exception from trove.common.exception import BackupCreationError from trove.common.exception import GuestError from trove.common.exception import GuestTimeout @@ -46,21 +51,15 @@ from trove.common.exception import PollTimeOut from trove.common.exception import TroveError from trove.common.exception import VolumeCreationFailure from trove.common.i18n import _ -from trove.common import instance as rd_instance from trove.common.instance import ServiceStatuses -from trove.common import neutron -from trove.common.notification import ( - DBaaSInstanceRestart, - DBaaSInstanceUpgrade, - EndNotification, - StartNotification, - TroveInstanceCreate, - TroveInstanceModifyVolume, - TroveInstanceModifyFlavor) +from trove.common.notification import DBaaSInstanceRestart +from trove.common.notification import DBaaSInstanceUpgrade +from trove.common.notification import EndNotification +from trove.common.notification import StartNotification +from trove.common.notification import TroveInstanceCreate +from trove.common.notification import TroveInstanceModifyFlavor +from trove.common.notification import TroveInstanceModifyVolume from trove.common.strategies.cluster import strategy -from trove.common import template -from trove.common import timeutils -from trove.common import utils from trove.common.utils import try_recover from trove.extensions.mysql import models as mysql_models from trove.instance import models as inst_models @@ -74,7 +73,6 @@ from trove.instance.tasks import InstanceTasks from trove.module import models as module_models from trove.module import views as module_views from trove.quota.quota import run_with_quotas -from trove import rpc LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -224,8 +222,8 @@ class ClusterTasks(Cluster): ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT]) def _all_instances_acquire_status( - self, instance_ids, cluster_id, shard_id, expected_status, - fast_fail_statuses=None): + self, instance_ids, cluster_id, shard_id, expected_status, + fast_fail_statuses=None): def _is_fast_fail_status(status): return ((fast_fail_statuses is not None) and @@ -239,7 +237,7 @@ class ClusterTasks(Cluster): task_status = DBInstance.find_by( id=instance_id).get_task_status() if (_is_fast_fail_status(status) or - (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): + (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): # if one has failed, no need to continue polling LOG.debug("Instance %(id)s has acquired a fast-fail " "status %(status)s and" @@ -264,7 +262,7 @@ class ClusterTasks(Cluster): task_status = DBInstance.find_by( id=instance_id).get_task_status() if (_is_fast_fail_status(status) or - (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): + (task_status == InstanceTasks.BUILDING_ERROR_SERVER)): failed_instance_ids.append(instance_id) return failed_instance_ids @@ -368,8 +366,8 @@ class ClusterTasks(Cluster): context.notification = ( DBaaSInstanceUpgrade(context, **request_info)) with StartNotification( - context, instance_id=instance.id, - datastore_version_id=datastore_version.id): + context, instance_id=instance.id, + datastore_version_id=datastore_version.id): with EndNotification(context): instance.update_db( datastore_version_id=datastore_version.id, @@ -537,7 +535,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): datastore_manager, packages, volume_size, backup_id, availability_zone, root_password, nics, overrides, cluster_config, snapshot, volume_type, - modules, scheduler_hints, access=None): + 
modules, scheduler_hints, access=None, + ds_version=None): """Create trove instance. It is the caller's responsibility to ensure that @@ -577,7 +576,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): packages, databases, users, backup_info, config.config_contents, root_password, overrides, - cluster_config, snapshot, modules) + cluster_config, snapshot, modules, + ds_version=ds_version) if root_password: self.report_root_enabled() @@ -610,63 +610,56 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): self._log_and_raise(e, log_fmt, exc_fmt, self.id, err) def get_replication_master_snapshot(self, context, slave_of_id, flavor, - backup_id=None, replica_number=1): + parent_backup_id=None): # First check to see if we need to take a backup master = BuiltInstanceTasks.load(context, slave_of_id) backup_required = master.backup_required_for_replication() if backup_required: # if we aren't passed in a backup id, look it up to possibly do # an incremental backup, thus saving time - if not backup_id: + if not parent_backup_id: backup = Backup.get_last_completed( context, slave_of_id, include_incremental=True) if backup: - backup_id = backup.id + parent_backup_id = backup.id else: - LOG.debug('Will skip replication master backup') + LOG.debug('Skip creating replication master backup') snapshot_info = { 'name': "Replication snapshot for %s" % self.id, - 'description': "Backup image used to initialize " - "replication slave", + 'description': "Backup image used to initialize replication slave", 'instance_id': slave_of_id, - 'parent_id': backup_id, + 'parent_id': parent_backup_id, 'tenant_id': self.tenant_id, 'state': BackupState.NEW, 'datastore_version_id': self.datastore_version.id, 'deleted': False, - 'replica_number': replica_number, + 'replica_number': 1, } replica_backup_id = None if backup_required: - # Only do a backup if it's the first replica - if replica_number == 1: - try: - db_info = DBBackup.create(**snapshot_info) - replica_backup_id = db_info.id - except InvalidModelError: - log_fmt = ("Unable to create replication snapshot record " - "for instance: %s") - exc_fmt = _("Unable to create replication snapshot record " - "for instance: %s") - LOG.exception(log_fmt, self.id) - raise BackupCreationError(exc_fmt % self.id) - if backup_id: - # Look up the parent backup info or fail early if not - # found or if the user does not have access to the parent. - _parent = Backup.get_by_id(context, backup_id) - parent = { - 'location': _parent.location, - 'checksum': _parent.checksum, - } - snapshot_info.update({ - 'parent': parent, - }) - else: - # we've been passed in the actual replica backup id, - # so just use it - replica_backup_id = backup_id + try: + db_info = DBBackup.create(**snapshot_info) + replica_backup_id = db_info.id + except InvalidModelError: + log_fmt = ("Unable to create replication snapshot record " + "for instance: %s") + exc_fmt = _("Unable to create replication snapshot record " + "for instance: %s") + LOG.exception(log_fmt, self.id) + raise BackupCreationError(exc_fmt % self.id) + if parent_backup_id: + # Look up the parent backup info or fail early if not + # found or if the user does not have access to the parent. 
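
The hunk above reworks get_replication_master_snapshot so the master backup is taken exactly once (replica_number is now always 1) and can be incremental when a completed parent backup exists. A minimal sketch of that parent-selection logic, assuming only the get_last_completed() call shown above; the function name and the backup_model parameter are stand-ins for illustration:

    def pick_parent_backup(context, master_id, parent_backup_id, backup_model):
        """Prefer an explicit parent id; otherwise fall back to the newest
        completed backup so the replication snapshot can be incremental."""
        if parent_backup_id:
            return parent_backup_id
        last = backup_model.get_last_completed(
            context, master_id, include_incremental=True)
        return last.id if last else None
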
+ _parent = Backup.get_by_id(context, parent_backup_id) + parent = { + 'location': _parent.location, + 'checksum': _parent.checksum, + } + snapshot_info.update({ + 'parent': parent, + }) try: snapshot_info.update({ @@ -701,8 +694,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): # if the delete of the 'bad' backup fails, it'll mask the # create exception, so we trap it here try: - # Only try to delete the backup if it's the first replica - if replica_number == 1 and backup_required: + if backup_required: Backup.delete(context, replica_backup_id) except Exception as e_delete: LOG.error(create_log_fmt, create_fmt_content) @@ -765,8 +757,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): status = service.get_status() if (status == rd_instance.ServiceStatuses.RUNNING or - status == rd_instance.ServiceStatuses.INSTANCE_READY or - status == rd_instance.ServiceStatuses.HEALTHY): + status == rd_instance.ServiceStatuses.INSTANCE_READY or + status == rd_instance.ServiceStatuses.HEALTHY): return True elif status not in [rd_instance.ServiceStatuses.NEW, rd_instance.ServiceStatuses.BUILDING, @@ -981,7 +973,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): packages, databases, users, backup_info=None, config_contents=None, root_password=None, overrides=None, cluster_config=None, snapshot=None, - modules=None): + modules=None, ds_version=None): LOG.debug("Entering guest_prepare") # Now wait for the response from the create to do additional work self.guest.prepare(flavor_ram, packages, databases, users, @@ -992,7 +984,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin): root_password=root_password, overrides=overrides, cluster_config=cluster_config, - snapshot=snapshot, modules=modules) + snapshot=snapshot, modules=modules, + ds_version=ds_version) def _create_dns_entry(self): dns_support = CONF.trove_dns_support @@ -1110,9 +1103,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): return self.guest.backup_required_for_replication() def get_replication_snapshot(self, snapshot_info, flavor): - def _get_replication_snapshot(): - LOG.debug("Calling get_replication_snapshot on %s.", self.id) + LOG.info("Getting replication snapshot for instance %s.", self.id) try: rep_source_config = self._render_replica_source_config(flavor) result = self.guest.get_replication_snapshot( @@ -1121,29 +1113,31 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): LOG.info("Finished getting replication snapshot for " "instance %s", self.id) return result - except Exception: - LOG.exception("Failed to get replication snapshot from %s.", - self.id) + except Exception as err: + LOG.error("Failed to get replication snapshot from %s, " + "error: %s", self.id, str(err)) raise return run_with_quotas(self.context.project_id, {'backups': 1}, _get_replication_snapshot) def detach_replica(self, master, for_failover=False): - LOG.debug("Calling detach_replica on %s", self.id) + LOG.info("Detaching replica %s from %s", self.id, master.id) try: self.guest.detach_replica(for_failover) self.update_db(slave_of_id=None) self.slave_list = None + LOG.info('Replica %s detached', self.id) except (GuestError, GuestTimeout): - LOG.exception("Failed to detach replica %s.", self.id) + LOG.error("Failed to detach replica %s from %s", self.id, + master.id) raise finally: if not for_failover: self.reset_task_status() def attach_replica(self, master): - LOG.debug("Calling attach_replica on %s", 
self.id) + LOG.info("Attaching replica %s to master %s", self.id, master.id) try: replica_info = master.guest.get_replica_context() flavor = self.nova_client.flavors.get(self.flavor_id) @@ -1159,71 +1153,38 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): LOG.debug("Calling make_read_only on %s", self.id) self.guest.make_read_only(read_only) - def _get_floating_ips(self): - """Returns floating ips as a dict indexed by the ip.""" - floating_ips = {} - network_floating_ips = self.neutron_client.list_floatingips() - for ip in network_floating_ips.get('floatingips'): - floating_ips.update( - {ip.get('floating_ip_address'): ip.get('id')}) - LOG.debug("In _get_floating_ips(), returning %s", floating_ips) - return floating_ips - - def detach_public_ips(self): - LOG.info("Begin detach_public_ips for instance %s", self.id) - removed_ips = [] - floating_ips = self._get_floating_ips() - + def get_public_ip(self): + """Get public IP (IP, ID) for the trove instance.""" for item in self.get_visible_ip_addresses(): if item['type'] == 'public': - ip = item['address'] - if ip in floating_ips: - fip_id = floating_ips[ip] - self.neutron_client.update_floatingip( - fip_id, {'floatingip': {'port_id': None}}) - removed_ips.append(fip_id) - return removed_ips + fips = self.neutron_client.list_floatingips( + floating_ip_address=item['address'])['floatingips'] + if fips: + fip_id = fips[0]['id'] + return item['address'], fip_id - def attach_public_ips(self, ips): - LOG.info("Begin attach_public_ips for instance %s", self.id) - server_id = self.db_info.compute_instance_id - - # NOTE(zhaochao): in Nova's addFloatingIp, the new floating ip will - # always be associated with the first IPv4 fixed address of the Nova - # instance, we're doing the same thing here, after add_floating_ip is - # removed from novaclient. 
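
get_public_ip() above replaces the old detach/attach floating-IP helpers: instead of listing every floating IP in the deployment and indexing them by address, it asks Neutron to filter by address and returns the first public match. A standalone sketch of the same lookup, assuming a neutron-like client whose list_floatingips() accepts a floating_ip_address filter as in the hunk; find_floating_ip is an invented name:

    def find_floating_ip(neutron_client, addresses):
        """Return (address, floating-ip id) for the first address Neutron
        knows as a floating IP, or (None, None) if there is no match."""
        for address in addresses:
            fips = neutron_client.list_floatingips(
                floating_ip_address=address).get('floatingips', [])
            if fips:
                return address, fips[0]['id']
        return None, None
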
- server_ports = (self.neutron_client.list_ports(device_id=server_id) - .get('ports')) - fixed_address, port_id = next( - (fixed_ip['ip_address'], port['id']) - for port in server_ports - for fixed_ip in port.get('fixed_ips') - if netutils.is_valid_ipv4(fixed_ip['ip_address'])) - - for fip_id in ips: - self.neutron_client.update_floatingip( - fip_id, {'floatingip': { - 'port_id': port_id, - 'fixed_ip_address': fixed_address}}) + return None, None def enable_as_master(self): - LOG.debug("Calling enable_as_master on %s", self.id) + LOG.info("Enable %s as master", self.id) + flavor = self.nova_client.flavors.get(self.flavor_id) replica_source_config = self._render_replica_source_config(flavor) self.update_db(slave_of_id=None) self.slave_list = None + self.guest.enable_as_master(replica_source_config.config_contents) def get_last_txn(self): - LOG.debug("Calling get_last_txn on %s", self.id) + LOG.info("Getting master UUID and last txn for replica %s", self.id) return self.guest.get_last_txn() def get_latest_txn_id(self): - LOG.debug("Calling get_latest_txn_id on %s", self.id) + LOG.info("Getting latest txn id on %s", self.id) return self.guest.get_latest_txn_id() def wait_for_txn(self, txn): - LOG.debug("Calling wait_for_txn on %s", self.id) + LOG.info("Waiting for txn sync on %s, txn: %s", self.id, txn) if txn: self.guest.wait_for_txn(txn) @@ -1232,18 +1193,18 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): self.guest.cleanup_source_on_replica_detach(replica_info) def demote_replication_master(self): - LOG.debug("Calling demote_replication_master on %s", self.id) + LOG.info("Demoting old replication master %s", self.id) self.guest.demote_replication_master() def reboot(self): try: - LOG.debug("Stopping datastore on instance %s.", self.id) + LOG.debug("Stopping database on instance %s.", self.id) try: self.guest.stop_db() except (exception.GuestError, exception.GuestTimeout) as e: # Acceptable to be here if db was already in crashed state # Also we check guest state before issuing reboot - LOG.debug(str(e)) + LOG.warning(str(e)) LOG.info("Rebooting instance %s.", self.id) self.server.reboot() @@ -1252,14 +1213,12 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): self.refresh_compute_server_info() return self.server_status_matches(['ACTIVE']) - utils.poll_until( - update_server_info, - sleep_time=3, - time_out=CONF.reboot_time_out) + utils.poll_until(update_server_info, sleep_time=3, + time_out=CONF.reboot_time_out, initial_delay=5) + + LOG.info("Starting database on instance %s.", self.id) + self.guest.restart() - # Set the status to PAUSED. The guest agent will reset the status - # when the reboot completes and MySQL is running. - self.set_datastore_status_to_paused() LOG.info("Rebooted instance %s successfully.", self.id) except Exception as e: LOG.error("Failed to reboot instance %(id)s: %(e)s", @@ -1348,7 +1307,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin): # encrypt messages to a guest which potentially doesn't # have the code to handle it. 
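
The reboot() rework above no longer parks the instance in a PAUSED state: it stops the database, reboots the server, polls Nova until the server is ACTIVE (now with an initial delay), and then restarts the database explicitly. A minimal stand-in for the polling helper as it is used there; the real implementation lives in trove.common.utils and raises PollTimeOut rather than TimeoutError:

    import time

    def poll_until(retriever, sleep_time=3, time_out=60, initial_delay=0):
        """Re-evaluate retriever() until it returns a truthy value or
        time_out seconds elapse, optionally waiting before the first try."""
        if initial_delay:
            time.sleep(initial_delay)
        deadline = time.monotonic() + time_out
        while True:
            if retriever():
                return
            if time.monotonic() >= deadline:
                raise TimeoutError('condition not met in %ss' % time_out)
            time.sleep(sleep_time)
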
if CONF.enable_secure_rpc_messaging and ( - self.db_info.encrypted_key is None): + self.db_info.encrypted_key is None): encrypted_key = cu.encode_data(cu.encrypt_data( cu.generate_random_key(), CONF.inst_rpc_key_encr_key)) @@ -1430,6 +1389,7 @@ class BackupTasks(object): @classmethod def delete_backup(cls, context, backup_id): """Delete backup from swift.""" + def _delete(backup): backup.deleted = True backup.deleted_at = timeutils.utcnow() @@ -1491,11 +1451,11 @@ class ModuleTasks(object): for instance_module in instance_modules: instance_id = instance_module.instance_id if (instance_module.md5 != current_md5 or force) and ( - not md5 or md5 == instance_module.md5): + not md5 or md5 == instance_module.md5): instance = BuiltInstanceTasks.load(context, instance_id, needs_server=False) if instance and ( - include_clustered or not instance.cluster_id): + include_clustered or not instance.cluster_id): try: module_models.Modules.validate( modules, instance.datastore.id, @@ -1511,8 +1471,8 @@ class ModuleTasks(object): # Sleep if we've fired off too many in a row. if (batch_size and - not reapply_count % batch_size and - (reapply_count + skipped_count) < total_count): + not reapply_count % batch_size and + (reapply_count + skipped_count) < total_count): LOG.debug("Applied module to %(cnt)d of %(total)d " "instances - sleeping for %(batch)ds", {'cnt': reapply_count, @@ -1593,7 +1553,7 @@ class ResizeVolumeAction(object): @try_recover def _unmount_volume(self): LOG.debug("Unmounting the volume on instance %(id)s", { - 'id': self.instance.id}) + 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.unmount_volume(device_path=device_path, @@ -1605,8 +1565,8 @@ class ResizeVolumeAction(object): @try_recover def _detach_volume(self): LOG.debug("Detach volume %(vol_id)s from instance %(id)s", { - 'vol_id': self.instance.volume_id, - 'id': self.instance.id}) + 'vol_id': self.instance.volume_id, + 'id': self.instance.id}) self.instance.nova_client.volumes.delete_server_volume( self.instance.server.id, self.instance.volume_id) @@ -1614,6 +1574,7 @@ class ResizeVolumeAction(object): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'available' + utils.poll_until(volume_available, sleep_time=2, time_out=CONF.volume_time_out) @@ -1635,6 +1596,7 @@ class ResizeVolumeAction(object): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.status == 'in-use' + utils.poll_until(volume_in_use, sleep_time=2, time_out=CONF.volume_time_out) @@ -1646,7 +1608,7 @@ class ResizeVolumeAction(object): @try_recover def _resize_fs(self): LOG.debug("Resizing the filesystem for instance %(id)s", { - 'id': self.instance.id}) + 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.resize_fs(device_path=device_path, @@ -1658,7 +1620,7 @@ class ResizeVolumeAction(object): @try_recover def _mount_volume(self): LOG.debug("Mount the volume on instance %(id)s", { - 'id': self.instance.id}) + 'id': self.instance.id}) mount_point = self.get_mount_point() device_path = self.get_device_path() self.instance.guest.mount_volume(device_path=device_path, @@ -1685,13 +1647,14 @@ class ResizeVolumeAction(object): self.instance.volume_id) if not volume: msg = (_('Failed to get volume %(vol_id)s') % { - 'vol_id': self.instance.volume_id}) + 'vol_id': self.instance.volume_id}) raise cinder_exceptions.ClientException(msg) def 
volume_is_new_size(): volume = self.instance.volume_client.volumes.get( self.instance.volume_id) return volume.size == self.new_size + utils.poll_until(volume_is_new_size, sleep_time=2, time_out=CONF.volume_time_out) @@ -1720,7 +1683,7 @@ class ResizeVolumeAction(object): def _resize_active_volume(self): LOG.debug("Begin _resize_active_volume for id: %(id)s", { - 'id': self.instance.id}) + 'id': self.instance.id}) self._stop_db() self._unmount_volume(recover_func=self._recover_restart) self._detach_volume(recover_func=self._recover_mount_restart) @@ -1732,7 +1695,7 @@ class ResizeVolumeAction(object): self._mount_volume(recover_func=self._fail) self.instance.restart() LOG.debug("End _resize_active_volume for id: %(id)s", { - 'id': self.instance.id}) + 'id': self.instance.id}) def execute(self): LOG.debug("%(gt)s: Resizing instance %(id)s volume for server " @@ -1792,7 +1755,7 @@ class ResizeActionBase(object): # so we know it's alive. utils.poll_until( self._guest_is_awake, - sleep_time=2, + sleep_time=3, time_out=CONF.resize_time_out) def _assert_nova_status_is_ok(self): @@ -1805,22 +1768,7 @@ class ResizeActionBase(object): raise TroveError(msg) def _assert_datastore_is_ok(self): - # Tell the guest to turn on datastore, and ensure the status becomes - # RUNNING. self._start_datastore() - utils.poll_until( - self._datastore_is_online, - sleep_time=2, - time_out=CONF.resize_time_out) - - def _assert_datastore_is_offline(self): - # Tell the guest to turn off MySQL, and ensure the status becomes - # SHUTDOWN. - self.instance.guest.stop_db(do_not_start_on_reboot=True) - utils.poll_until( - self._datastore_is_offline, - sleep_time=2, - time_out=CONF.resize_time_out) def _assert_processes_are_ok(self): """Checks the procs; if anything is wrong, reverts the operation.""" @@ -1842,7 +1790,7 @@ class ResizeActionBase(object): def _datastore_is_offline(self): self.instance._refresh_datastore_status() return (self.instance.datastore_status_matches( - rd_instance.ServiceStatuses.SHUTDOWN)) + rd_instance.ServiceStatuses.SHUTDOWN)) def _revert_nova_action(self): LOG.debug("Instance %s calling Compute revert resize...", @@ -1853,11 +1801,11 @@ class ResizeActionBase(object): """Initiates the action.""" try: LOG.debug("Instance %s calling stop_db...", self.instance.id) - self._assert_datastore_is_offline() + self.instance.guest.stop_db() self._perform_nova_action() finally: if self.instance.db_info.task_status != ( - inst_models.InstanceTasks.NONE): + inst_models.InstanceTasks.NONE): self.instance.reset_task_status() def _guest_is_awake(self): @@ -1873,12 +1821,11 @@ class ResizeActionBase(object): try: LOG.debug("Initiating nova action") self._initiate_nova_action() - LOG.debug("Waiting for nova action") + LOG.debug("Waiting for nova action to complete") self._wait_for_nova_action() LOG.debug("Asserting nova status is ok") self._assert_nova_status_is_ok() need_to_revert = True - LOG.debug("* * * REVERT BARRIER PASSED * * *") LOG.debug("Asserting nova action success") self._assert_nova_action_was_successful() LOG.debug("Asserting processes are OK") @@ -1903,7 +1850,6 @@ class ResizeActionBase(object): LOG.error("Error resizing instance %s.", self.instance.id) raise - LOG.debug("Recording success") self._record_action_success() LOG.debug("End resize method _perform_nova_action instance: %s", self.instance.id) @@ -1916,7 +1862,7 @@ class ResizeActionBase(object): utils.poll_until( update_server_info, - sleep_time=2, + sleep_time=3, time_out=CONF.resize_time_out) def _wait_for_revert_nova_action(self): diff 
--git a/trove/templates/mariadb/config.template b/trove/templates/mariadb/config.template index aa10164eaf..fe20dc21a6 100644 --- a/trove/templates/mariadb/config.template +++ b/trove/templates/mariadb/config.template @@ -6,7 +6,6 @@ socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] -user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data diff --git a/trove/templates/mariadb/replica.config.template b/trove/templates/mariadb/replica.config.template index 516bcf6cf4..4ae97f48e2 100644 --- a/trove/templates/mariadb/replica.config.template +++ b/trove/templates/mariadb/replica.config.template @@ -1,4 +1,5 @@ [mysqld] log_bin = /var/lib/mysql/data/mariadb-bin.log -relay_log = /var/lib/mysql/data/mariadb-relay-bin.log +binlog_format = MIXED +log_slave_updates = ON read_only = true diff --git a/trove/templates/mariadb/replica_source.config.template b/trove/templates/mariadb/replica_source.config.template index 6df38fca79..67bb3c8301 100644 --- a/trove/templates/mariadb/replica_source.config.template +++ b/trove/templates/mariadb/replica_source.config.template @@ -1,2 +1,4 @@ [mysqld] log_bin = /var/lib/mysql/data/mariadb-bin.log +binlog_format = MIXED +log_slave_updates = ON diff --git a/trove/templates/mysql/config.template b/trove/templates/mysql/config.template index 7f0ef517a7..53e45f8e97 100644 --- a/trove/templates/mysql/config.template +++ b/trove/templates/mysql/config.template @@ -8,7 +8,6 @@ socket = /var/run/mysqld/mysqld.sock nice = 0 [mysqld] -user = mysql port = 3306 basedir = /usr datadir = /var/lib/mysql/data diff --git a/trove/tests/api/backups.py b/trove/tests/api/backups.py index caa39331a3..7ff2a0a23e 100644 --- a/trove/tests/api/backups.py +++ b/trove/tests/api/backups.py @@ -28,8 +28,8 @@ from trove.common.utils import generate_uuid from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import instance_info -from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE +from trove.tests.api.instances import TIMEOUT_INSTANCE_RESTORE from trove.tests.api.instances import WaitForGuestInstallationToFinish from trove.tests.config import CONFIG from trove.tests.util import create_dbaas_client @@ -320,7 +320,7 @@ class WaitForRestoreToFinish(object): assert_equal(instance.volume.get('used', None), None) return False - poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_CREATE, + poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_RESTORE, sleep_time=10) @test diff --git a/trove/tests/api/configurations.py b/trove/tests/api/configurations.py index a961985a7a..a8f540879c 100644 --- a/trove/tests/api/configurations.py +++ b/trove/tests/api/configurations.py @@ -502,7 +502,7 @@ class ListConfigurations(ConfigurationsTestBase): @test(depends_on=[test_waiting_for_instance_in_restart_required]) def test_restart_service_should_return_active(self): - # test that after restarting the instance it becomes active + """test_restart_service_should_return_active""" instance_info.dbaas.instances.restart(instance_info.id) resp, body = instance_info.dbaas.client.last_response assert_equal(resp.status, 202) @@ -513,14 +513,14 @@ class ListConfigurations(ConfigurationsTestBase): if instance.status in CONFIG.running_status: return True else: - assert_equal("REBOOT", instance.status) + assert_true(instance.status in ['REBOOT', 'SHUTDOWN']) return False poll_until(result_is_active) @test(depends_on=[test_restart_service_should_return_active]) @time_out(30) def 
test_get_configuration_details_from_instance_validation(self): - # validate that the configuraiton was applied correctly to the instance + """test_get_configuration_details_from_instance_validation""" inst = instance_info.dbaas.instances.get(instance_info.id) configuration_id = inst.configuration['id'] assert_not_equal(None, inst.configuration['id']) diff --git a/trove/tests/api/instances.py b/trove/tests/api/instances.py index 8bf2c7954a..06e87c8852 100644 --- a/trove/tests/api/instances.py +++ b/trove/tests/api/instances.py @@ -48,6 +48,7 @@ CONF = cfg.CONF FAKE = test_config.values['fake_mode'] TIMEOUT_INSTANCE_CREATE = 60 * 32 +TIMEOUT_INSTANCE_RESTORE = 60 * 60 TIMEOUT_INSTANCE_DELETE = 120 diff --git a/trove/tests/api/instances_actions.py b/trove/tests/api/instances_actions.py index d6b192e7cb..0c0f068002 100644 --- a/trove/tests/api/instances_actions.py +++ b/trove/tests/api/instances_actions.py @@ -158,17 +158,23 @@ class ActionTestBase(object): self.instance_id, ip_address=self.instance_mgmt_address ) - cmd = "sudo ps acux | grep mysqld " \ - "| grep -v mysqld_safe | awk '{print $2}'" + container_exist_cmd = 'sudo docker ps -q' + pid_cmd = "sudo docker inspect database -f '{{.State.Pid}}'" try: - stdout = server.execute(cmd) + server.execute(container_exist_cmd) + except Exception as err: + asserts.fail("Failed to execute command: %s, error: %s" % + (container_exist_cmd, str(err))) + + try: + stdout = server.execute(pid_cmd) return int(stdout) except ValueError: return None - except Exception as e: + except Exception as err: asserts.fail("Failed to execute command: %s, error: %s" % - (cmd, str(e))) + (pid_cmd, str(err))) def log_current_users(self): users = self.dbaas.users.list(self.instance_id) @@ -469,8 +475,7 @@ class ResizeInstanceTest(ActionTestBase): self.wait_for_resize() @test(depends_on=[test_instance_returns_to_active_after_resize, - test_status_changed_to_resize], - groups=["dbaas.usage"]) + test_status_changed_to_resize]) def test_resize_instance_usage_event_sent(self): expected = self._build_expected_msg() expected['old_instance_size'] = self.old_dbaas_flavor.ram @@ -525,18 +530,20 @@ class ResizeInstanceVolumeTest(ActionTestBase): @test @time_out(60) def test_volume_resize(self): + """test_volume_resize""" instance_info.dbaas.instances.resize_volume(instance_info.id, self.new_volume_size) @test(depends_on=[test_volume_resize]) @time_out(300) def test_volume_resize_success(self): + """test_volume_resize_success""" def check_resize_status(): instance = instance_info.dbaas.instances.get(instance_info.id) if instance.status in CONFIG.running_status: return True - elif instance.status == "RESIZE": + elif instance.status in ["RESIZE", "SHUTDOWN"]: return False else: asserts.fail("Status should not be %s" % instance.status) @@ -547,6 +554,7 @@ class ResizeInstanceVolumeTest(ActionTestBase): @test(depends_on=[test_volume_resize_success]) def test_volume_filesystem_resize_success(self): + """test_volume_filesystem_resize_success""" # The get_volume_filesystem_size is a mgmt call through the guestagent # and the volume resize occurs through the fake nova-volume. # Currently the guestagent fakes don't have access to the nova fakes so @@ -560,8 +568,9 @@ class ResizeInstanceVolumeTest(ActionTestBase): # cinder volume but it should round to it. (e.g. 
round(1.9) == 2) asserts.assert_equal(round(new_volume_fs_size), self.new_volume_size) - @test(depends_on=[test_volume_resize_success], groups=["dbaas.usage"]) + @test(depends_on=[test_volume_resize_success]) def test_resize_volume_usage_event_sent(self): + """test_resize_volume_usage_event_sent""" expected = self._build_expected_msg() expected['volume_size'] = self.new_volume_size expected['old_volume_size'] = self.old_volume_size @@ -569,9 +578,9 @@ class ResizeInstanceVolumeTest(ActionTestBase): 'trove.instance.modify_volume', **expected) - @test - @time_out(300) + @test(depends_on=[test_volume_resize_success]) def test_volume_resize_success_databases(self): + """test_volume_resize_success_databases""" databases = instance_info.dbaas.databases.list(instance_info.id) db_list = [] for database in databases: diff --git a/trove/tests/api/instances_delete.py b/trove/tests/api/instances_delete.py index fcd5768725..2c35bbb73e 100644 --- a/trove/tests/api/instances_delete.py +++ b/trove/tests/api/instances_delete.py @@ -27,6 +27,7 @@ from troveclient.compat import exceptions from trove import tests from trove.tests.api.instances import instance_info from trove.tests.config import CONFIG +from trove.tests.api import configurations def do_not_delete_instance(): @@ -90,3 +91,12 @@ class TestDeleteInstance(object): # Delete the datastore dbaas_admin.datastores.delete(datastore.id) + + @test(depends_on=[test_instance_status_deleted_in_db]) + def test_delete_configuration(self): + """Delete configurations created during testing.""" + dbaas_admin = instance_info.dbaas_admin + configs = dbaas_admin.configurations.list() + for config in configs: + if config.name == configurations.CONFIG_NAME: + dbaas_admin.configurations.delete(config.id) diff --git a/trove/tests/api/instances_resize.py b/trove/tests/api/instances_resize.py index 739cc4f4e1..0d00a9cb52 100644 --- a/trove/tests/api/instances_resize.py +++ b/trove/tests/api/instances_resize.py @@ -119,7 +119,7 @@ class ResizeTests(ResizeTestBase): def test_guest_wont_stop_mysql(self): self.guest.stop_db.side_effect = RPCException("Could not stop MySQL!") self.assertRaises(RPCException, self.action.execute) - self.guest.stop_db.assert_called_once_with(do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) @@ -128,7 +128,7 @@ class ResizeTests(ResizeTestBase): self.server.resize.side_effect = BadRequest(400) self.server.status = "ACTIVE" self.assertRaises(BadRequest, self.action.execute) - self.guest.stop_db.assert_called_once_with(do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( @@ -144,8 +144,7 @@ class ResizeTests(ResizeTestBase): expected_calls = [ mock.call(mock.ANY, sleep_time=2, time_out=120)] * 2 self.assertEqual(expected_calls, mock_poll_until.call_args_list) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) @@ -168,8 +167,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - 
self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.guest.reset_configuration.assert_called_once_with( mock.ANY) @@ -196,8 +194,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.update_db.assert_called_once_with( task_status=InstanceTasks.NONE) @@ -220,8 +217,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.guest.restart.assert_called_once() self.instance.update_db.assert_called_once_with( @@ -250,8 +246,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( @@ -284,8 +279,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( @@ -320,8 +314,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.server.confirm_resize.assert_called_once() @@ -351,8 +344,7 @@ class ResizeTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.resize.assert_called_once_with(NEW_FLAVOR_ID) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.guest.reset_configuration.assert_called_once_with( @@ -392,8 +384,7 @@ class MigrateTests(ResizeTestBase): self.assertEqual(expected_calls, mock_poll_until.call_args_list) # Make sure self.poll_until_side_effects is empty 
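
These resize-test changes all have the same cause: stop_db() no longer takes a do_not_start_on_reboot flag (the fake guest further below drops it as well), so the assertions can only pin the number of calls, not the keyword arguments. A self-contained illustration of that assertion style with unittest.mock:

    from unittest import mock

    guest = mock.Mock()
    guest.stop_db()

    # With no keyword arguments left to pin down, the tests just check
    # that the call happened exactly once.
    assert guest.stop_db.call_count == 1
    guest.stop_db.assert_called_once_with()
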
self.assertFalse(self.poll_until_side_effects) - self.guest.stop_db.assert_called_once_with( - do_not_start_on_reboot=True) + self.assertEqual(1, self.guest.stop_db.call_count) self.server.migrate.assert_called_once_with(force_host=None) self.instance.set_datastore_status_to_paused.assert_called_once() self.instance.server.confirm_resize.assert_called_once() diff --git a/trove/tests/api/mgmt/datastore_versions.py b/trove/tests/api/mgmt/datastore_versions.py index 33dc77e39d..6b3bcfc812 100644 --- a/trove/tests/api/mgmt/datastore_versions.py +++ b/trove/tests/api/mgmt/datastore_versions.py @@ -154,6 +154,9 @@ class MgmtDataStoreVersion(object): self.client.mgmt_datastore_versions.delete(self.created_version.id) assert_equal(202, self.client.last_http_code) + # Delete the created datastore as well. + self.client.datastores.delete(self.created_version.datastore_id) + # Lets match the total count of ds_version, # it should get back to original ds_versions = self.client.mgmt_datastore_versions.list() diff --git a/trove/tests/api/replication.py b/trove/tests/api/replication.py index 02fc5b66e7..4dd493074b 100644 --- a/trove/tests/api/replication.py +++ b/trove/tests/api/replication.py @@ -28,8 +28,8 @@ from trove.common.utils import poll_until from trove import tests from trove.tests.api.instances import CheckInstance from trove.tests.api.instances import instance_info -from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE +from trove.tests.api.instances import TIMEOUT_INSTANCE_RESTORE from trove.tests.config import CONFIG from trove.tests.scenario import runners from trove.tests.scenario.runners.test_runners import SkipKnownBug @@ -49,13 +49,16 @@ backup_count = None def _get_user_count(server_info): - cmd = ('mysql -BNq -e \\\'select count\\(*\\) from mysql.user' - ' where user like \\\"slave_%\\\"\\\'') + cmd = ( + 'docker exec -e MYSQL_PWD=$(sudo cat /opt/trove-guestagent/root.cnf | ' + 'grep password | awk "{print \$3}") database mysql -uroot -N -e ' + '"select count(*) from mysql.user where user like \\"slave_%\\""' + ) server = create_server_connection(server_info.id) try: stdout = server.execute(cmd) - return int(stdout) + return int(stdout.rstrip()) except Exception as e: fail("Failed to execute command: %s, error: %s" % (cmd, str(e))) @@ -63,8 +66,13 @@ def _get_user_count(server_info): def slave_is_running(running=True): def check_slave_is_running(): server = create_server_connection(slave_instance.id) - cmd = ("mysqladmin extended-status " - "| awk '/Slave_running/{print $4}'") + cmd = ( + 'docker exec -e MYSQL_PWD=$(sudo cat ' + '/opt/trove-guestagent/root.cnf | grep password ' + '| awk "{print \$3}") database mysql -uroot -N -e ' + '"SELECT SERVICE_STATE FROM ' + 'performance_schema.replication_connection_status"' + ) try: stdout = server.execute(cmd) @@ -73,7 +81,7 @@ def slave_is_running(running=True): fail("Failed to execute command %s, error: %s" % (cmd, str(e))) - expected = b"ON" if running else b"OFF" + expected = b"ON" if running else b"" return stdout == expected return check_slave_is_running @@ -172,7 +180,7 @@ class WaitForCreateSlaveToFinish(object): """Wait until the instance is created and set up as slave.""" @test - @time_out(TIMEOUT_INSTANCE_CREATE) + @time_out(TIMEOUT_INSTANCE_RESTORE) def test_slave_created(self): """Wait for replica to be created.""" poll_until(lambda: instance_is_active(slave_instance.id)) @@ -187,13 +195,12 @@ class VerifySlave(object): def find_database(): databases = 
instance_info.dbaas.databases.list(slave_instance.id) - return (database_to_find - in [d.name for d in databases]) + return (database_to_find in [d.name for d in databases]) return find_database @test - @time_out(20 * 60) + @time_out(10 * 60) def test_correctly_started_replication(self): """test_correctly_started_replication""" poll_until(slave_is_running()) @@ -207,7 +214,12 @@ class VerifySlave(object): @test(depends_on=[test_correctly_started_replication]) def test_slave_is_read_only(self): """test_slave_is_read_only""" - cmd = "mysql -BNq -e \\\'select @@read_only\\\'" + cmd = ( + 'docker exec -e MYSQL_PWD=$(sudo cat ' + '/opt/trove-guestagent/root.cnf | grep password | ' + 'awk "{print \$3}") database mysql -uroot -NBq -e ' + '"select @@read_only"' + ) server = create_server_connection(slave_instance.id) try: @@ -329,7 +341,7 @@ class TestReplicationFailover(object): self._third_slave.id = create_slave() poll_until(lambda: instance_is_active(self._third_slave.id)) poll_until(slave_is_running()) - sleep(30) + sleep(15) validate_master(instance_info, [slave_instance, self._third_slave]) validate_slave(instance_info, self._third_slave) @@ -349,7 +361,7 @@ class TestReplicationFailover(object): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") - cmd = "sudo service trove-guestagent stop" + cmd = "sudo systemctl stop guest-agent.service" server = create_server_connection(self._third_slave.id) try: @@ -366,7 +378,7 @@ class TestReplicationFailover(object): if CONFIG.fake_mode: raise SkipTest("eject_replica_source not supported in fake mode") - sleep(90) + sleep(70) instance_info.dbaas.instances.eject_replica_source(self._third_slave) assert_equal(202, instance_info.dbaas.last_http_code) poll_until(lambda: instance_is_active(self._third_slave.id)) @@ -407,7 +419,12 @@ class DetachReplica(object): # wait until replica is no longer read only def check_not_read_only(): - cmd = "mysql -BNq -e \\\'select @@read_only\\\'" + cmd = ( + 'docker exec -e MYSQL_PWD=$(sudo cat ' + '/opt/trove-guestagent/root.cnf | grep password | ' + 'awk "{print \$3}") database mysql -uroot -NBq -e ' + '"select @@read_only"' + ) server = create_server_connection(slave_instance.id) try: diff --git a/trove/tests/fakes/guestagent.py b/trove/tests/fakes/guestagent.py index aa7f9cf57c..72e30cbf07 100644 --- a/trove/tests/fakes/guestagent.py +++ b/trove/tests/fakes/guestagent.py @@ -269,7 +269,7 @@ class FakeGuest(object): time.sleep(2) self._set_task_status('HEALTHY') - def stop_db(self, do_not_start_on_reboot=False): + def stop_db(self): self._set_task_status('SHUTDOWN') def get_volume_info(self): diff --git a/trove/tests/scenario/groups/backup_group.py b/trove/tests/scenario/groups/backup_group.py index 4cd7f2eaa1..7e693ae683 100644 --- a/trove/tests/scenario/groups/backup_group.py +++ b/trove/tests/scenario/groups/backup_group.py @@ -232,22 +232,6 @@ class BackupInstCreateGroup(TestGroup): @test(depends_on_classes=[BackupInstCreateGroup], - groups=[GROUP, groups.BACKUP_INC_INST, - groups.BACKUP_INC_INST_CREATE]) -class BackupIncInstCreateGroup(TestGroup): - """Test Backup Incremental Instance Create functionality.""" - - def __init__(self): - super(BackupIncInstCreateGroup, self).__init__( - BackupRunnerFactory.instance()) - - @test - def restore_from_inc_1_backup(self): - """Check that restoring an instance from inc 1 backup starts.""" - self.test_runner.run_restore_from_inc_1_backup() - - -@test(depends_on_classes=[BackupIncInstCreateGroup], groups=[GROUP, groups.BACKUP_INST, 
groups.BACKUP_INST_CREATE_WAIT]) class BackupInstCreateWaitGroup(TestGroup): """Test Backup Instance Create completes.""" @@ -273,6 +257,52 @@ class BackupInstCreateWaitGroup(TestGroup): @test(depends_on_classes=[BackupInstCreateWaitGroup], + groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE]) +class BackupInstDeleteGroup(TestGroup): + """Test Backup Instance Delete functionality.""" + + def __init__(self): + super(BackupInstDeleteGroup, self).__init__( + BackupRunnerFactory.instance()) + + @test + def delete_restored_instance(self): + """Test deleting the restored instance.""" + self.test_runner.run_delete_restored_instance() + + +@test(depends_on_classes=[BackupInstDeleteGroup], + groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE_WAIT]) +class BackupInstDeleteWaitGroup(TestGroup): + """Test Backup Instance Delete completes.""" + + def __init__(self): + super(BackupInstDeleteWaitGroup, self).__init__( + BackupRunnerFactory.instance()) + + @test + def wait_for_restored_instance_delete(self): + """Wait until deleting the restored instance completes.""" + self.test_runner.run_wait_for_restored_instance_delete() + + +@test(depends_on_classes=[BackupInstDeleteWaitGroup], + groups=[GROUP, groups.BACKUP_INC_INST, + groups.BACKUP_INC_INST_CREATE]) +class BackupIncInstCreateGroup(TestGroup): + """Test Backup Incremental Instance Create functionality.""" + + def __init__(self): + super(BackupIncInstCreateGroup, self).__init__( + BackupRunnerFactory.instance()) + + @test + def restore_from_inc_1_backup(self): + """Check that restoring an instance from inc 1 backup starts.""" + self.test_runner.run_restore_from_inc_1_backup() + + +@test(depends_on_classes=[BackupIncInstCreateGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_CREATE_WAIT]) class BackupIncInstCreateWaitGroup(TestGroup): @@ -299,21 +329,6 @@ class BackupIncInstCreateWaitGroup(TestGroup): @test(depends_on_classes=[BackupIncInstCreateWaitGroup], - groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE]) -class BackupInstDeleteGroup(TestGroup): - """Test Backup Instance Delete functionality.""" - - def __init__(self): - super(BackupInstDeleteGroup, self).__init__( - BackupRunnerFactory.instance()) - - @test - def delete_restored_instance(self): - """Test deleting the restored instance.""" - self.test_runner.run_delete_restored_instance() - - -@test(depends_on_classes=[BackupInstDeleteGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_DELETE]) class BackupIncInstDeleteGroup(TestGroup): @@ -330,21 +345,6 @@ class BackupIncInstDeleteGroup(TestGroup): @test(depends_on_classes=[BackupIncInstDeleteGroup], - groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE_WAIT]) -class BackupInstDeleteWaitGroup(TestGroup): - """Test Backup Instance Delete completes.""" - - def __init__(self): - super(BackupInstDeleteWaitGroup, self).__init__( - BackupRunnerFactory.instance()) - - @test - def wait_for_restored_instance_delete(self): - """Wait until deleting the restored instance completes.""" - self.test_runner.run_wait_for_restored_instance_delete() - - -@test(depends_on_classes=[BackupInstDeleteWaitGroup], groups=[GROUP, groups.BACKUP_INC_INST, groups.BACKUP_INC_INST_DELETE_WAIT]) class BackupIncInstDeleteWaitGroup(TestGroup): diff --git a/trove/tests/scenario/runners/backup_runners.py b/trove/tests/scenario/runners/backup_runners.py index c381041103..8992d5dcd5 100644 --- a/trove/tests/scenario/runners/backup_runners.py +++ b/trove/tests/scenario/runners/backup_runners.py @@ -26,7 
+26,7 @@ from trove.tests.scenario.runners.test_runners import TestRunner class BackupRunner(TestRunner): def __init__(self): - self.TIMEOUT_BACKUP_CREATE = 60 * 30 + self.TIMEOUT_BACKUP_CREATE = 60 * 60 self.TIMEOUT_BACKUP_DELETE = 120 super(BackupRunner, self).__init__(timeout=self.TIMEOUT_BACKUP_CREATE) @@ -357,7 +357,7 @@ class BackupRunner(TestRunner): self.assert_verify_backup_data(self.restore_inc_1_host, DataType.tiny) def run_verify_databases_in_restored_inc_1_instance(self): - self.assert_verify_backup_databases(self.restore_instance_id, + self.assert_verify_backup_databases(self.restore_inc_1_instance_id, self.databases_before_backup) def assert_verify_backup_databases(self, instance_id, expected_databases): diff --git a/trove/tests/scenario/runners/replication_runners.py b/trove/tests/scenario/runners/replication_runners.py index 97d2d249d7..a9115fed1e 100644 --- a/trove/tests/scenario/runners/replication_runners.py +++ b/trove/tests/scenario/runners/replication_runners.py @@ -468,6 +468,4 @@ class PerconaReplicationRunner(MysqlReplicationRunner): class MariadbReplicationRunner(MysqlReplicationRunner): - - def _get_expected_binlog_format(self): - return 'STATEMENT' + pass diff --git a/trove/tests/scenario/runners/test_runners.py b/trove/tests/scenario/runners/test_runners.py index 9786f766ff..350eeaf10b 100644 --- a/trove/tests/scenario/runners/test_runners.py +++ b/trove/tests/scenario/runners/test_runners.py @@ -327,7 +327,7 @@ class TestRunner(object): instance_info = InstanceTestInfo() report = CONFIG.get_report() - def __init__(self, sleep_time=10, timeout=1200): + def __init__(self, sleep_time=10, timeout=1800): self.def_sleep_time = sleep_time self.def_timeout = timeout diff --git a/trove/tests/unittests/backup/test_backupagent.py b/trove/tests/unittests/backup/test_backupagent.py deleted file mode 100644 index 53fa1ecce1..0000000000 --- a/trove/tests/unittests/backup/test_backupagent.py +++ /dev/null @@ -1,580 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
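
The deleted trove/tests/unittests/backup/test_backupagent.py below exercised the old in-guest backup agent, whose role moves to the standalone backup container (backup/main.py plus the drivers and storage packages in the file list). The new driver interface is not shown in this section, so the following is only a rough sketch of the run-a-command-and-stream-to-storage pattern those tests covered; StreamingBackup and the storage call are invented names:

    import subprocess

    class StreamingBackup:
        """Hypothetical runner: spawn a backup command and expose its
        stdout so a storage backend can upload it as a stream."""

        def __init__(self, cmd):
            self.cmd = cmd

        def __enter__(self):
            self.proc = subprocess.Popen(
                self.cmd, shell=True, stdout=subprocess.PIPE)
            return self.proc.stdout

        def __exit__(self, *exc_info):
            self.proc.stdout.close()
            self.proc.wait()

    # Usage sketch:
    # with StreamingBackup('mysqldump --all-databases | gzip') as stream:
    #     storage.save('backup.gz', stream)  # storage: e.g. a Swift wrapper
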
- -import hashlib -import os -from unittest import mock - -from mock import Mock, MagicMock, patch, ANY, DEFAULT, call -from oslo_utils import netutils -from webob.exc import HTTPNotFound - -from trove.backup.state import BackupState -from trove.common.context import TroveContext -from trove.common.strategies.storage.base import Storage -from trove.common import utils -from trove.conductor import api as conductor_api -from trove.guestagent.backup import backupagent -from trove.guestagent.common import configuration -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.datastore.experimental.redis.service import RedisApp -from trove.guestagent.strategies.backup.base import BackupRunner -from trove.guestagent.strategies.backup.base import UnknownBackupType -from trove.guestagent.strategies.backup.experimental import couchbase_impl -from trove.guestagent.strategies.backup.experimental import db2_impl -from trove.guestagent.strategies.backup.experimental import mongo_impl -from trove.guestagent.strategies.backup.experimental import redis_impl -from trove.guestagent.strategies.backup import mysql_impl -from trove.guestagent.strategies.backup.mysql_impl import MySqlApp -from trove.guestagent.strategies.restore.base import RestoreRunner -from trove.tests.unittests import trove_testtools - - -def create_fake_data(): - from random import choice - from string import ascii_letters - - return ''.join([choice(ascii_letters) for _ in range(1024)]) - - -class MockBackup(BackupRunner): - """Create a large temporary file to 'backup' with subprocess.""" - - backup_type = 'mock_backup' - - def __init__(self, *args, **kwargs): - self.data = create_fake_data() - self.cmd = 'echo %s' % self.data - super(MockBackup, self).__init__(*args, **kwargs) - - def cmd(self): - return self.cmd - - -class MockCheckProcessBackup(MockBackup): - """Backup runner that fails confirming the process.""" - - def check_process(self): - return False - - -class MockLossyBackup(MockBackup): - """Fake Incomplete writes to swift.""" - - def read(self, *args): - results = super(MockLossyBackup, self).read(*args) - if results: - # strip a few chars from the stream - return results[20:] - - -class MockSwift(object): - """Store files in String.""" - - def __init__(self, *args, **kwargs): - self.store = '' - self.containers = [] - self.container = "database_backups" - self.url = 'http://mockswift/v1' - self.etag = hashlib.md5() - - def put_container(self, container): - if container not in self.containers: - self.containers.append(container) - return None - - def put_object(self, container, obj, contents, **kwargs): - if container not in self.containers: - raise HTTPNotFound - while True: - if not hasattr(contents, 'read'): - break - content = contents.read(2 ** 16) - if not content: - break - self.store += content - self.etag.update(self.store) - return self.etag.hexdigest() - - def save(self, filename, stream, metadata=None): - location = '%s/%s/%s' % (self.url, self.container, filename) - return True, 'w00t', 'fake-checksum', location - - def load(self, context, storage_url, container, filename, backup_checksum): - pass - - def load_metadata(self, location, checksum): - return {} - - def save_metadata(self, location, metadata): - pass - - -class MockStorage(Storage): - - def __call__(self, *args, **kwargs): - return self - - def load(self, location, backup_checksum): - pass - - def save(self, filename, stream, metadata=None): - pass - - def load_metadata(self, location, checksum): - return {} - - 
def save_metadata(self, location, metadata={}): - pass - - def is_enabled(self): - return True - - -class MockRestoreRunner(RestoreRunner): - - def __init__(self, storage, **kwargs): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - def restore(self): - pass - - def is_zipped(self): - return False - - -class MockStats(object): - f_blocks = 1024 ** 2 - f_bsize = 4096 - f_bfree = 512 * 1024 - - -class BackupAgentTest(trove_testtools.TestCase): - - def setUp(self): - super(BackupAgentTest, self).setUp() - self.patch_ope = patch.multiple('os.path', - exists=DEFAULT) - self.mock_ope = self.patch_ope.start() - self.addCleanup(self.patch_ope.stop) - self.patch_pc = patch('trove.guestagent.datastore.service.' - 'BaseDbStatus.prepare_completed') - self.mock_pc = self.patch_pc.start() - self.mock_pc.__get__ = Mock(return_value=True) - self.addCleanup(self.patch_pc.stop) - self.get_auth_pwd_patch = patch.object( - MySqlApp, 'get_auth_password', MagicMock(return_value='123')) - self.get_auth_pwd_mock = self.get_auth_pwd_patch.start() - self.addCleanup(self.get_auth_pwd_patch.stop) - self.get_ss_patch = patch.object( - backupagent, 'get_storage_strategy', - MagicMock(return_value=MockSwift)) - self.get_ss_mock = self.get_ss_patch.start() - self.addCleanup(self.get_ss_patch.stop) - self.statvfs_patch = patch.object( - os, 'statvfs', MagicMock(return_value=MockStats)) - self.statvfs_mock = self.statvfs_patch.start() - self.addCleanup(self.statvfs_patch.stop) - self.orig_utils_execute_with_timeout = utils.execute_with_timeout - self.orig_os_get_ip_address = netutils.get_my_ipv4 - - def tearDown(self): - super(BackupAgentTest, self).tearDown() - utils.execute_with_timeout = self.orig_utils_execute_with_timeout - netutils.get_my_ipv4 = self.orig_os_get_ip_address - - def test_backup_impl_MySQLDump(self): - """This test is for - guestagent/strategies/backup/mysql_impl - """ - mysql_dump = mysql_impl.MySQLDump( - 'abc', extra_opts='') - self.assertIsNotNone(mysql_dump.cmd) - str_mysql_dump_cmd = ('mysqldump' - ' --all-databases' - ' %(extra_opts)s' - ' --opt' - ' --password=123' - ' -u os_admin' - ' 2>/tmp/mysqldump.log' - ' | gzip |' - ' openssl enc -aes-256-cbc -salt ' - '-pass pass:default_aes_cbc_key') - self.assertEqual(str_mysql_dump_cmd, mysql_dump.cmd) - self.assertIsNotNone(mysql_dump.manifest) - self.assertEqual('abc.gz.enc', mysql_dump.manifest) - - @mock.patch.object( - MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') - def test_backup_impl_InnoBackupEx(self, mock_datadir): - """This test is for - guestagent/strategies/backup/mysql_impl - """ - inno_backup_ex = mysql_impl.InnoBackupEx('innobackupex', extra_opts='') - self.assertIsNotNone(inno_backup_ex.cmd) - str_innobackup_cmd = ('sudo innobackupex' - ' --stream=xbstream' - ' %(extra_opts)s' - ' --user=os_admin --password=123' - ' --host=localhost' - ' --socket=/var/run/mysqld/mysqld.sock' - ' /var/lib/mysql/data 2>/tmp/innobackupex.log' - ' | gzip |' - ' openssl enc -aes-256-cbc -salt ' - '-pass pass:default_aes_cbc_key') - self.assertEqual(str_innobackup_cmd, inno_backup_ex.cmd) - self.assertIsNotNone(inno_backup_ex.manifest) - str_innobackup_manifest = 'innobackupex.xbstream.gz.enc' - self.assertEqual(str_innobackup_manifest, inno_backup_ex.manifest) - - def test_backup_impl_CbBackup(self): - netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") - utils.execute_with_timeout = Mock(return_value=None) - cbbackup = couchbase_impl.CbBackup('cbbackup', extra_opts='') - 
self.assertIsNotNone(cbbackup) - str_cbbackup_cmd = ("tar cpPf - /tmp/backups | " - "gzip | openssl enc -aes-256-cbc -salt -pass " - "pass:default_aes_cbc_key") - self.assertEqual(str_cbbackup_cmd, cbbackup.cmd) - self.assertIsNotNone(cbbackup.manifest) - self.assertIn('gz.enc', cbbackup.manifest) - - @mock.patch.object(db2_impl.DB2Backup, 'list_dbnames', - return_value=['testdb1', 'testdb2']) - def test_backup_impl_DB2Backup(self, _): - netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") - db2_backup = db2_impl.DB2Backup('db2backup', extra_opts='') - self.assertIsNotNone(db2_backup) - str_db2_backup_cmd = ("sudo tar cPf - /home/db2inst1/db2inst1/backup " - "| gzip | openssl enc -aes-256-cbc -salt -pass " - "pass:default_aes_cbc_key") - self.assertEqual(str_db2_backup_cmd, db2_backup.cmd) - self.assertIsNotNone(db2_backup.manifest) - self.assertIn('gz.enc', db2_backup.manifest) - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def test_backup_impl_MongoDump(self, _): - netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") - utils.execute_with_timeout = Mock(return_value=None) - mongodump = mongo_impl.MongoDump('mongodump', extra_opts='') - self.assertIsNotNone(mongodump) - str_mongodump_cmd = ("sudo tar cPf - /var/lib/mongodb/dump | " - "gzip | openssl enc -aes-256-cbc -salt -pass " - "pass:default_aes_cbc_key") - self.assertEqual(str_mongodump_cmd, mongodump.cmd) - self.assertIsNotNone(mongodump.manifest) - self.assertIn('gz.enc', mongodump.manifest) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_backup_impl_RedisBackup(self, *mocks): - netutils.get_my_ipv4 = Mock(return_value="1.1.1.1") - redis_backup = redis_impl.RedisBackup('redisbackup', extra_opts='') - self.assertIsNotNone(redis_backup) - str_redis_backup_cmd = ("sudo cat /var/lib/redis/dump.rdb | " - "gzip | openssl enc -aes-256-cbc -salt -pass " - "pass:default_aes_cbc_key") - self.assertEqual(str_redis_backup_cmd, redis_backup.cmd) - self.assertIsNotNone(redis_backup.manifest) - self.assertIn('gz.enc', redis_backup.manifest) - - def test_backup_base(self): - """This test is for - guestagent/strategies/backup/base - """ - BackupRunner.cmd = "%s" - backup_runner = BackupRunner('sample', cmd='echo command') - if backup_runner.is_zipped: - self.assertEqual('.gz', backup_runner.zip_manifest) - self.assertIsNotNone(backup_runner.zip_manifest) - self.assertIsNotNone(backup_runner.zip_cmd) - self.assertEqual(' | gzip', backup_runner.zip_cmd) - else: - self.assertIsNone(backup_runner.zip_manifest) - self.assertIsNone(backup_runner.zip_cmd) - self.assertEqual('BackupRunner', backup_runner.backup_type) - - @patch('os.killpg') - def test_backup_runner_exits_with_exception(self, mock_kill_pg): - """This test is for - guestagent/strategies/backup/base, - ensures that when backup runner exits with an exception, - all child processes are also killed. 
- """ - BackupRunner.cmd = "%s" - backup_runner = BackupRunner('sample', cmd='echo command') - - def test_backup_runner_reraise_exception(): - mock_func = mock.Mock(side_effect=RuntimeError) - - with backup_runner: - mock_func() - - self.assertRaises(RuntimeError, - test_backup_runner_reraise_exception) - self.assertTrue(mock_kill_pg.called) - - @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) - @patch.object(conductor_api.API, 'update_backup', - Mock(return_value=Mock())) - def test_execute_backup(self): - """This test should ensure backup agent - ensures that backup and storage is not running - resolves backup instance - starts backup - starts storage - reports status - """ - agent = backupagent.BackupAgent() - backup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - 'datastore': 'mysql', - 'datastore_version': '5.5' - } - agent.execute_backup(context=None, backup_info=backup_info, - runner=MockBackup) - - conductor_api.API.update_backup.assert_has_calls([ - call( - ANY, - backup_id=backup_info['id'], - sent=ANY, - size=ANY, - state=BackupState.BUILDING - ), - call( - ANY, - backup_id=backup_info['id'], - checksum='fake-checksum', - location=ANY, - note='w00t', - sent=ANY, - size=ANY, - backup_type=MockBackup.backup_type, - state=BackupState.COMPLETED, - success=True - ) - ]) - - @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) - @patch.object(conductor_api.API, 'update_backup', - Mock(return_value=Mock())) - @patch('trove.guestagent.backup.backupagent.LOG') - def test_execute_bad_process_backup(self, mock_logging): - agent = backupagent.BackupAgent() - backup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - 'datastore': 'mysql', - 'datastore_version': '5.5' - } - - self.assertRaises(backupagent.BackupError, agent.execute_backup, - context=None, backup_info=backup_info, - runner=MockCheckProcessBackup) - - conductor_api.API.update_backup.assert_has_calls([ - call( - ANY, - backup_id=backup_info['id'], - sent=ANY, - size=ANY, - state=BackupState.BUILDING - ), - call( - ANY, - backup_id=backup_info['id'], - checksum='fake-checksum', - location=ANY, - note='w00t', - sent=ANY, - size=ANY, - backup_type=MockCheckProcessBackup.backup_type, - state=BackupState.FAILED, - success=True - ) - ]) - - @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) - @patch.object(conductor_api.API, 'update_backup', - Mock(return_value=Mock())) - @patch('trove.guestagent.backup.backupagent.LOG') - def test_execute_lossy_backup(self, mock_logging): - """This test verifies that incomplete writes to swift will fail.""" - with patch.object(MockSwift, 'save', - return_value=(False, 'Error', 'y', 'z')): - - agent = backupagent.BackupAgent() - - backup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - 'datastore': 'mysql', - 'datastore_version': '5.5' - } - - self.assertRaises(backupagent.BackupError, agent.execute_backup, - context=None, backup_info=backup_info, - runner=MockLossyBackup) - - conductor_api.API.update_backup.assert_has_calls([ - call(ANY, - backup_id=backup_info['id'], - sent=ANY, - size=ANY, - state=BackupState.BUILDING - ), - call( - ANY, - backup_id=backup_info['id'], - checksum='y', - location='z', - note='Error', - sent=ANY, - size=ANY, - backup_type=MockLossyBackup.backup_type, - state=BackupState.FAILED, - success=False - )] - ) - - def 
test_execute_restore(self): - """This test should ensure backup agent - resolves backup instance - determines backup/restore type - transfers/downloads data and invokes the restore module - reports status - """ - with patch.object(backupagent, 'get_storage_strategy', - return_value=MockStorage): - - with patch.object(backupagent, 'get_restore_strategy', - return_value=MockRestoreRunner): - - agent = backupagent.BackupAgent() - - bkup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - } - agent.execute_restore(TroveContext(), - bkup_info, - '/var/lib/mysql/data') - - @patch('trove.guestagent.backup.backupagent.LOG') - def test_restore_unknown(self, mock_logging): - with patch.object(backupagent, 'get_restore_strategy', - side_effect=ImportError): - - agent = backupagent.BackupAgent() - - bkup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'foo', - 'checksum': 'fake-checksum', - } - - self.assertRaises(UnknownBackupType, agent.execute_restore, - context=None, backup_info=bkup_info, - restore_location='/var/lib/mysql/data') - - @patch.object(MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') - @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) - @patch.object(MockSwift, 'load_metadata', return_value={'lsn': '54321'}) - @patch.object(MockSwift, 'save') - @patch.object(backupagent, 'get_storage_strategy', return_value=MockSwift) - @patch('trove.guestagent.backup.backupagent.LOG') - def test_backup_incremental_metadata(self, mock_logging, - get_storage_strategy_mock, - save_mock, - load_metadata_mock, - get_datadir_mock): - meta = { - 'lsn': '12345', - 'parent_location': 'fake', - 'parent_checksum': 'md5', - } - with patch.multiple(mysql_impl.InnoBackupExIncremental, - metadata=MagicMock(return_value=meta), - _run=MagicMock(return_value=True), - __exit__=MagicMock(return_value=True)): - agent = backupagent.BackupAgent() - - expected_metadata = {'datastore': 'mysql', - 'datastore_version': 'bo.gus'} - bkup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - 'parent': {'location': 'fake', 'checksum': 'md5'}} - bkup_info.update(expected_metadata) - - agent.execute_backup(TroveContext(), - bkup_info, - '/var/lib/mysql/data') - - save_mock.assert_called_once_with( - ANY, ANY, metadata=expected_metadata) - - @patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock())) - @patch('trove.guestagent.backup.backupagent.LOG') - def test_backup_incremental_bad_metadata(self, mock_logging): - with patch.object(backupagent, 'get_storage_strategy', - return_value=MockSwift): - - agent = backupagent.BackupAgent() - - bkup_info = {'id': '123', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - 'parent': {'location': 'fake', 'checksum': 'md5'} - } - - self.assertRaises( - AttributeError, - agent.execute_backup, TroveContext(), bkup_info, 'location') - - def test_backup_mysqldump_check_process(self): - mysql_dump = mysql_impl.MySQLDump( - 'abc', extra_opts='') - - str_will_be_true = 'Warning: Using a password ' \ - 'on the command line interface can be insecure.' - str_will_be_false = 'ERROR: mysqldump command did not succeed.' 
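# The three mocked-open blocks below pin down the log-scan rule: an empty
# mysqldump log, or one containing only the known password warning, counts
# as success; anything else fails. A hedged sketch of that rule (the log
# path and strings come from the assertions; the function itself is
# illustrative, not the MySQLDump.check_process implementation):
def mysqldump_succeeded(log_path='/tmp/mysqldump.log'):
    allowed = ('', 'Warning: Using a password on the command line '
                   'interface can be insecure.')
    with open(log_path) as log_file:
        return log_file.read().strip() in allowed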
- - with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', - mock.mock_open(read_data='')): - self.assertTrue(mysql_dump.check_process()) - with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', - mock.mock_open(read_data=str_will_be_true)): - self.assertTrue(mysql_dump.check_process()) - with mock.patch('trove.guestagent.strategies.backup.mysql_impl.open', - mock.mock_open(read_data=str_will_be_false)): - self.assertFalse(mysql_dump.check_process()) diff --git a/trove/tests/unittests/backup/test_storage.py b/trove/tests/unittests/backup/test_storage.py deleted file mode 100644 index 1ace34b9c3..0000000000 --- a/trove/tests/unittests/backup/test_storage.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright 2013 Rackspace Development Company, L.P. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import hashlib - -from mock import Mock, MagicMock, patch - -from trove.common.strategies.storage import swift -from trove.common.strategies.storage.swift import StreamReader -from trove.common.strategies.storage.swift \ - import SwiftDownloadIntegrityError -from trove.common.strategies.storage.swift import SwiftStorage -from trove.tests.fakes.swift import FakeSwiftConnection -from trove.tests.unittests.backup.test_backupagent \ - import MockBackup as MockBackupRunner -from trove.tests.unittests import trove_testtools - - -class SwiftStorageSaveChecksumTests(trove_testtools.TestCase): - """SwiftStorage.save is used to save a backup to Swift.""" - - def setUp(self): - super(SwiftStorageSaveChecksumTests, self).setUp() - self.max_file_size = swift.MAX_FILE_SIZE - swift.MAX_FILE_SIZE = 128 - - def tearDown(self): - swift.MAX_FILE_SIZE = self.max_file_size - super(SwiftStorageSaveChecksumTests, self).tearDown() - - def test_swift_small_file_checksum_save(self): - """This tests that SwiftStorage.save returns the swift checksum - for small files. - """ - context = trove_testtools.TroveTestContext(self) - backup_id = '123' - user = 'user' - password = 'password' - - swift.MAX_FILE_SIZE = 2 * (1024 ** 3) - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - storage_strategy = SwiftStorage(context) - - with MockBackupRunner(filename=backup_id, - user=user, - password=password) as runner: - (success, - note, - checksum, - location) = storage_strategy.save(runner.manifest, runner) - - self.assertTrue(success, "The backup should have been successful.") - self.assertIsNotNone(note, "A note should have been returned.") - self.assertEqual('http://mockswift/v1/database_backups/123.gz.enc', - location, - "Incorrect swift location was returned.") - - def test_swift_checksum_save(self): - """This tests that SwiftStorage.save returns the swift checksum for - large files. 
- """ - context = trove_testtools.TroveTestContext(self) - backup_id = '123' - user = 'user' - password = 'password' - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - storage_strategy = SwiftStorage(context) - - with MockBackupRunner(filename=backup_id, - user=user, - password=password) as runner: - (success, - note, - checksum, - location) = storage_strategy.save(runner.manifest, runner) - - self.assertTrue(success, "The backup should have been successful.") - self.assertIsNotNone(note, "A note should have been returned.") - self.assertEqual('http://mockswift/v1/database_backups/123.gz.enc', - location, - "Incorrect swift location was returned.") - - @patch('trove.common.strategies.storage.swift.LOG') - def test_swift_segment_checksum_etag_mismatch(self, mock_logging): - """This tests that when etag doesn't match segment uploaded checksum - False is returned and None for checksum and location - """ - context = trove_testtools.TroveTestContext(self) - # this backup_id will trigger fake swift client with calculate_etag - # enabled to spit out a bad etag when a segment object is uploaded - backup_id = 'bad_segment_etag_123' - user = 'user' - password = 'password' - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - storage_strategy = SwiftStorage(context) - - with MockBackupRunner(filename=backup_id, - user=user, - password=password) as runner: - (success, - note, - checksum, - location) = storage_strategy.save(runner.manifest, runner) - - self.assertFalse(success, "The backup should have failed!") - self.assertTrue(note.startswith("Error saving data to Swift!")) - self.assertIsNone(checksum, - "Swift checksum should be None for failed backup.") - self.assertEqual('http://mockswift/v1/database_backups/' - 'bad_segment_etag_123.gz.enc', - location, - "Incorrect swift location was returned.") - - @patch('trove.common.strategies.storage.swift.LOG') - def test_swift_checksum_etag_mismatch(self, mock_logging): - """This tests that when etag doesn't match swift checksum False is - returned and None for checksum and location - """ - context = trove_testtools.TroveTestContext(self) - # this backup_id will trigger fake swift client with calculate_etag - # enabled to spit out a bad etag when a segment object is uploaded - backup_id = 'bad_manifest_etag_123' - user = 'user' - password = 'password' - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - storage_strategy = SwiftStorage(context) - - with MockBackupRunner(filename=backup_id, - user=user, - password=password) as runner: - (success, - note, - checksum, - location) = storage_strategy.save(runner.manifest, runner) - - self.assertFalse(success, "The backup should have failed!") - self.assertTrue(note.startswith("Error saving data to Swift!")) - self.assertIsNone(checksum, - "Swift checksum should be None for failed backup.") - self.assertEqual('http://mockswift/v1/database_backups/' - 'bad_manifest_etag_123.gz.enc', - location, - "Incorrect swift location was returned.") - - -class SwiftStorageUtils(trove_testtools.TestCase): - - def setUp(self): - super(SwiftStorageUtils, self).setUp() - self.context = trove_testtools.TroveTestContext(self) - self.swift_client = FakeSwiftConnection() - self.create_swift_client_patch = patch.object( - swift, 'create_swift_client', - MagicMock(return_value=self.swift_client)) - 
self.create_swift_client_mock = self.create_swift_client_patch.start() - self.addCleanup(self.create_swift_client_patch.stop) - self.swift = SwiftStorage(self.context) - - def tearDown(self): - super(SwiftStorageUtils, self).tearDown() - - def test_explode_location(self): - location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' - url, container, filename = self.swift._explodeLocation(location) - self.assertEqual('http://mockswift.com/v1/545433', url) - self.assertEqual('backups', container) - self.assertEqual('mybackup.tar', filename) - - def test_validate_checksum_good(self): - match = self.swift._verify_checksum('"my-good-etag"', 'my-good-etag') - self.assertTrue(match) - - @patch('trove.common.strategies.storage.swift.LOG') - def test_verify_checksum_bad(self, mock_logging): - self.assertRaises(SwiftDownloadIntegrityError, - self.swift._verify_checksum, - '"THE-GOOD-THE-BAD"', - 'AND-THE-UGLY') - - -class SwiftStorageLoad(trove_testtools.TestCase): - """SwiftStorage.load is used to return SwiftDownloadStream which is used - to download a backup object from Swift - """ - - def setUp(self): - super(SwiftStorageLoad, self).setUp() - - def tearDown(self): - super(SwiftStorageLoad, self).tearDown() - - def test_run_verify_checksum(self): - """This tests that swift download cmd runs if original backup checksum - matches swift object etag - """ - - context = trove_testtools.TroveTestContext(self) - location = "/backup/location/123" - backup_checksum = "fake-md5-sum" - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - - storage_strategy = SwiftStorage(context) - download_stream = storage_strategy.load(location, backup_checksum) - self.assertIsNotNone(download_stream) - - @patch('trove.common.strategies.storage.swift.LOG') - def test_run_verify_checksum_mismatch(self, mock_logging): - """This tests that SwiftDownloadIntegrityError is raised and swift - download cmd does not run when original backup checksum - does not match swift object etag - """ - - context = trove_testtools.TroveTestContext(self) - location = "/backup/location/123" - backup_checksum = "checksum_different_then_fake_swift_etag" - - swift_client = FakeSwiftConnection() - with patch.object(swift, 'create_swift_client', - return_value=swift_client): - storage_strategy = SwiftStorage(context) - - self.assertRaises(SwiftDownloadIntegrityError, - storage_strategy.load, - location, - backup_checksum) - - -class MockBackupStream(MockBackupRunner): - - def read(self, chunk_size): - return b'X' * chunk_size - - -class StreamReaderTests(trove_testtools.TestCase): - - def setUp(self): - super(StreamReaderTests, self).setUp() - self.runner = MockBackupStream(filename='123.xbstream.enc.gz', - user='user', - password='password') - self.stream = StreamReader(self.runner, - self.runner.manifest, - max_file_size=100) - - def test_base_filename(self): - self.assertEqual('123', self.stream.base_filename) - - def test_base_filename_no_extension(self): - stream_reader = StreamReader(self.runner, 'foo') - self.assertEqual('foo', stream_reader.base_filename) - - def test_segment(self): - self.assertEqual('123_00000000', self.stream.segment) - - def test_end_of_file(self): - self.assertFalse(self.stream.end_of_file) - - def test_end_of_segment(self): - self.assertFalse(self.stream.end_of_segment) - - def test_segment_almost_complete(self): - self.stream.segment_length = 98 - results = self.stream.read(2) - self.assertEqual(b'XX', results) - self.assertEqual('123_00000000', 
self.stream.segment, - "The Segment should still be the same") - self.assertEqual(100, self.stream.segment_length) - checksum = hashlib.md5(b'XX') - checksum = checksum.hexdigest() - segment_checksum = self.stream.segment_checksum.hexdigest() - self.assertEqual(checksum, segment_checksum, - "Segment checksum did not match") - - def test_segment_complete(self): - self.stream.segment_length = 99 - results = self.stream.read(2) - self.assertEqual('', results, "Results should be empty.") - self.assertEqual('123_00000001', self.stream.segment) - - def test_stream_complete(self): - results = self.stream.read(0) - self.assertEqual('', results, "Results should be empty.") - self.assertTrue(self.stream.end_of_file) - - -class SwiftMetadataTests(trove_testtools.TestCase): - - def setUp(self): - super(SwiftMetadataTests, self).setUp() - self.swift_client = FakeSwiftConnection() - self.context = trove_testtools.TroveTestContext(self) - self.create_swift_client_patch = patch.object( - swift, 'create_swift_client', - MagicMock(return_value=self.swift_client)) - self.create_swift_client_mock = self.create_swift_client_patch.start() - self.addCleanup(self.create_swift_client_patch.stop) - self.swift = SwiftStorage(self.context) - - def tearDown(self): - super(SwiftMetadataTests, self).tearDown() - - def test__get_attr(self): - normal_header = self.swift._get_attr('content-type') - self.assertEqual('content_type', normal_header) - meta_header = self.swift._get_attr('x-object-meta-foo') - self.assertEqual('foo', meta_header) - meta_header_two = self.swift._get_attr('x-object-meta-foo-bar') - self.assertEqual('foo_bar', meta_header_two) - - def test__set_attr(self): - meta_header = self.swift._set_attr('foo') - self.assertEqual('X-Object-Meta-foo', meta_header) - meta_header_two = self.swift._set_attr('foo_bar') - self.assertEqual('X-Object-Meta-foo-bar', meta_header_two) - - def test_load_metadata(self): - location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' - headers = { - 'etag': '"fake-md5-sum"', - 'x-object-meta-lsn': '1234567' - } - with patch.object(self.swift_client, 'head_object', - return_value=headers): - metadata = self.swift.load_metadata(location, 'fake-md5-sum') - self.assertEqual({'lsn': '1234567'}, metadata) - - def test_save_metadata(self): - location = 'http://mockswift.com/v1/545433/backups/mybackup.tar' - metadata = {'lsn': '1234567'} - self.swift_client.post_object = Mock() - - self.swift.save_metadata(location, metadata=metadata) - - headers = { - 'X-Object-Meta-lsn': '1234567' - } - self.swift_client.post_object.assert_called_with( - 'backups', 'mybackup.tar', headers=headers) diff --git a/trove/tests/unittests/guestagent/__init__.py b/trove/tests/unittests/guestagent/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py b/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py deleted file mode 100644 index 2b68ba1dde..0000000000 --- a/trove/tests/unittests/guestagent/test_agent_heartbeats_models.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from mock import Mock -from mock import patch -import uuid - -from trove.common import exception -from trove.guestagent.models import AgentHeartBeat -from trove.tests.unittests import trove_testtools -from trove.tests.unittests.util import util - - -class AgentHeartBeatTest(trove_testtools.TestCase): - - def setUp(self): - super(AgentHeartBeatTest, self).setUp() - util.init_db() - - def tearDown(self): - super(AgentHeartBeatTest, self).tearDown() - - def test_create(self): - """ - Test the creation of a new agent heartbeat record - """ - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id) - self.assertIsNotNone(heartbeat) - - self.assertIsNotNone(heartbeat.id) - self.assertIsNotNone(heartbeat.instance_id) - self.assertEqual(instance_id, - heartbeat.instance_id) - self.assertIsNotNone(heartbeat.updated_at) - self.assertIsNone(heartbeat.guest_agent_version) - - def test_create_with_version(self): - """ - Test the creation of a new agent heartbeat record w/ guest version - """ - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id, - guest_agent_version="1.2.3") - self.assertIsNotNone(heartbeat) - - self.assertIsNotNone(heartbeat.id) - self.assertIsNotNone(heartbeat.instance_id) - self.assertEqual(instance_id, - heartbeat.instance_id) - self.assertIsNotNone(heartbeat.updated_at) - self.assertIsNotNone(heartbeat.guest_agent_version) - self.assertEqual("1.2.3", heartbeat.guest_agent_version) - - def test_create_invalid_model_error(self): - """ - Test the creation failure of a new agent heartbeat record - """ - instance = Mock() - instance.errors = {} - instance.is_valid = Mock(return_value=False) - with patch.object(AgentHeartBeat, 'save', return_value=instance): - self.assertRaises(exception.InvalidModelError, - AgentHeartBeat.create) - - def test_save_invalid_model_error(self): - """ - Test the save failure of an agent heartbeat record - """ - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id) - with patch.object(AgentHeartBeat, 'is_valid', return_value=False): - self.assertRaises(exception.InvalidModelError, heartbeat.save) - - def test_find_by_instance_id(self): - """ - Test to retrieve a guest agents by its id - """ - # create a unique record - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id, guest_agent_version="1.2.3") - self.assertIsNotNone(heartbeat) - self.assertIsNotNone(heartbeat.id) - self.assertIsNotNone(heartbeat.instance_id) - self.assertEqual(instance_id, heartbeat.instance_id) - self.assertIsNotNone(heartbeat.updated_at) - self.assertIsNotNone(heartbeat.guest_agent_version) - self.assertEqual("1.2.3", heartbeat.guest_agent_version) - - # retrieve the record - heartbeat_found = AgentHeartBeat.find_by_instance_id( - instance_id=instance_id) - self.assertIsNotNone(heartbeat_found) - self.assertEqual(heartbeat.id, heartbeat_found.id) - self.assertEqual(heartbeat.instance_id, heartbeat_found.instance_id) - self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at) - 
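# The *_none and *_not_found tests below both probe the same contract:
# AgentHeartBeat.find_by_instance_id raises ModelNotFoundError when no
# heartbeat row matches. A hedged usage sketch of that contract (the
# helper name is illustrative; both imports appear in this test module):
from trove.common import exception
from trove.guestagent.models import AgentHeartBeat

def heartbeat_or_none(instance_id):
    try:
        return AgentHeartBeat.find_by_instance_id(instance_id=instance_id)
    except exception.ModelNotFoundError:
        return None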
self.assertEqual(heartbeat.guest_agent_version, - heartbeat_found.guest_agent_version) - - def test_find_by_instance_id_none(self): - """ - Test to retrieve a guest agents when id is None - """ - heartbeat_found = None - exception_raised = False - try: - heartbeat_found = AgentHeartBeat.find_by_instance_id( - instance_id=None) - except exception.ModelNotFoundError: - exception_raised = True - - self.assertIsNone(heartbeat_found) - self.assertTrue(exception_raised) - - @patch('trove.guestagent.models.LOG') - def test_find_by_instance_id_not_found(self, mock_logging): - """ - Test to retrieve a guest agents when id is not found - """ - instance_id = str(uuid.uuid4()) - heartbeat_found = None - exception_raised = False - try: - heartbeat_found = AgentHeartBeat.find_by_instance_id( - instance_id=instance_id) - except exception.ModelNotFoundError: - exception_raised = True - - self.assertIsNone(heartbeat_found) - self.assertTrue(exception_raised) - - def test_find_all_by_version(self): - """ - Test to retrieve all guest agents with a particular version - """ - # create some unique records with the same version - version = str(uuid.uuid4()) - - for x in range(5): - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id, - guest_agent_version=version, - deleted=0) - self.assertIsNotNone(heartbeat) - - # get all guests by version - heartbeats = AgentHeartBeat.find_all_by_version(version) - self.assertIsNotNone(heartbeats) - self.assertEqual(5, heartbeats.count()) - - def test_find_all_by_version_none(self): - """ - Test to retrieve all guest agents with a None version - """ - heartbeats = None - exception_raised = False - try: - heartbeats = AgentHeartBeat.find_all_by_version(None) - except exception.ModelNotFoundError: - exception_raised = True - - self.assertIsNone(heartbeats) - self.assertTrue(exception_raised) - - def test_find_all_by_version_not_found(self): - """ - Test to retrieve all guest agents with a non-existing version - """ - version = str(uuid.uuid4()) - exception_raised = False - heartbeats = None - try: - heartbeats = AgentHeartBeat.find_all_by_version(version) - except exception.ModelNotFoundError: - exception_raised = True - - self.assertIsNone(heartbeats) - self.assertTrue(exception_raised) - - def test_update_heartbeat(self): - """ - Test to show the upgrade scenario that will be used by conductor - """ - # create a unique record - instance_id = str(uuid.uuid4()) - heartbeat = AgentHeartBeat.create( - instance_id=instance_id, guest_agent_version="1.2.3") - self.assertIsNotNone(heartbeat) - self.assertIsNotNone(heartbeat.id) - self.assertIsNotNone(heartbeat.instance_id) - self.assertEqual(instance_id, heartbeat.instance_id) - self.assertIsNotNone(heartbeat.updated_at) - self.assertIsNotNone(heartbeat.guest_agent_version) - self.assertEqual("1.2.3", heartbeat.guest_agent_version) - - # retrieve the record - heartbeat_found = AgentHeartBeat.find_by_instance_id( - instance_id=instance_id) - self.assertIsNotNone(heartbeat_found) - self.assertEqual(heartbeat.id, heartbeat_found.id) - self.assertEqual(heartbeat.instance_id, heartbeat_found.instance_id) - self.assertEqual(heartbeat.updated_at, heartbeat_found.updated_at) - self.assertEqual(heartbeat.guest_agent_version, - heartbeat_found.guest_agent_version) - - # update - AgentHeartBeat().update(id=heartbeat_found.id, - instance_id=instance_id, - guest_agent_version="1.2.3") - - # retrieve the record - updated_heartbeat = AgentHeartBeat.find_by_instance_id( - instance_id=instance_id) - 
self.assertIsNotNone(updated_heartbeat) - self.assertEqual(heartbeat.id, updated_heartbeat.id) - self.assertEqual(heartbeat.instance_id, updated_heartbeat.instance_id) - self.assertEqual(heartbeat.guest_agent_version, - updated_heartbeat.guest_agent_version) - - self.assertEqual(heartbeat.updated_at, updated_heartbeat.updated_at) diff --git a/trove/tests/unittests/guestagent/test_api.py b/trove/tests/unittests/guestagent/test_api.py deleted file mode 100644 index 4e2b700915..0000000000 --- a/trove/tests/unittests/guestagent/test_api.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from eventlet import Timeout -from unittest import mock - -import oslo_messaging as messaging -from oslo_messaging.rpc.client import RemoteError -from testtools.matchers import Is - -from trove.common.clients import guest_client -import trove.common.context as context -from trove.common import exception -from trove.guestagent import api -from trove import rpc -from trove.tests.unittests import trove_testtools - -REPLICATION_SNAPSHOT = {'master': {'id': '123', 'host': '192.168.0.1', - 'port': 3306}, - 'dataset': {}, - 'binlog_position': 'binpos'} - -RPC_API_VERSION = '1.0' - - -def _mock_call_pwd_change(cmd, version=None, users=None): - if users == 'dummy': - return True - else: - raise BaseException("Test Failed") - - -def _mock_call(cmd, timeout, version=None, username=None, hostname=None, - database=None, databases=None): - # To check get_user, list_access, grant_access, revoke_access in cmd. 
- if cmd in ('get_user', 'list_access', 'grant_access', 'revoke_access'): - return True - else: - raise BaseException("Test Failed") - - -class ApiTest(trove_testtools.TestCase): - @mock.patch.object(rpc, 'get_client') - @mock.patch('trove.instance.models.get_instance_encryption_key', - return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08') - def setUp(self, mock_get_encryption_key, *args): - super(ApiTest, self).setUp() - self.context = context.TroveContext() - self.guest = api.API(self.context, 0) - self.guest._cast = _mock_call_pwd_change - self.guest._call = _mock_call - self.api = api.API(self.context, "instance-id-x23d2d") - self._mock_rpc_client() - mock_get_encryption_key.assert_called() - - def test_change_passwords(self): - self.assertIsNone(self.guest.change_passwords("dummy")) - - def test_get_user(self): - self.assertTrue(self.guest.get_user("dummyname", "dummyhost")) - - def test_list_access(self): - self.assertTrue(self.guest.list_access("dummyname", "dummyhost")) - - def test_grant_access(self): - self.assertTrue(self.guest.grant_access("dumname", "dumhost", "dumdb")) - - def test_revoke_access(self): - self.assertTrue(self.guest.revoke_access("dumname", "dumhost", - "dumdb")) - - def test_get_routing_key(self): - self.assertEqual('guestagent.instance-id-x23d2d', - self.api._get_routing_key()) - - def test_update_attributes(self): - self.api.update_attributes('test_user', '%', {'name': 'new_user'}) - - self._verify_rpc_prepare_before_cast() - self._verify_cast('update_attributes', username='test_user', - hostname='%', user_attrs={'name': 'new_user'}) - - def test_create_user(self): - self.api.create_user('test_user') - - self._verify_rpc_prepare_before_cast() - self._verify_cast('create_user', users='test_user') - - @mock.patch('trove.guestagent.api.LOG') - def test_api_cast_exception(self, mock_logging): - self.call_context.cast.side_effect = IOError('host down') - self.assertRaises(exception.GuestError, self.api.create_user, - 'test_user') - - @mock.patch('trove.guestagent.api.LOG') - def test_api_call_exception(self, mock_logging): - self.call_context.call.side_effect = IOError('host_down') - self.assertRaises(exception.GuestError, self.api.list_users) - - def test_api_call_timeout(self): - self.call_context.call.side_effect = Timeout() - self.assertRaises(exception.GuestTimeout, self.api.restart) - - @mock.patch('trove.guestagent.api.LOG') - def test_api_cast_remote_error(self, mock_logging): - self.call_context.cast.side_effect = RemoteError('Error') - self.assertRaises(exception.GuestError, self.api.delete_database, - 'test_db') - - @mock.patch('trove.guestagent.api.LOG') - def test_api_call_remote_error(self, mock_logging): - self.call_context.call.side_effect = RemoteError('Error') - self.assertRaises(exception.GuestError, self.api.stop_db) - - def test_list_users(self): - exp_resp = ['user1', 'user2', 'user3'] - self.call_context.call.return_value = exp_resp - - resp = self.api.list_users() - - self._verify_rpc_prepare_before_call() - self._verify_call('list_users', limit=None, marker=None, - include_marker=False) - self.assertEqual(exp_resp, resp) - - def test_delete_user(self): - self.api.delete_user('test_user') - - self._verify_rpc_prepare_before_cast() - self._verify_cast('delete_user', user='test_user') - - def test_create_database(self): - databases = ['db1', 'db2', 'db3'] - self.api.create_database(databases) - - self._verify_rpc_prepare_before_cast() - self.call_context.cast.assert_called_once_with( - self.context, "create_database", databases=databases) 
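# The pattern verified throughout ApiTest: the guest API prepares a
# versioned oslo.messaging client, then uses fire-and-forget cast() for
# mutating operations and blocking call() (with a timeout) for queries,
# matching the prepare()/cast()/call() expectations in the _verify_*
# helpers. A hedged sketch of that shape (illustrative, not the
# trove.guestagent.api implementation; RPC_API_VERSION is the '1.0'
# constant defined above):
def cast_to_guest(client, context, method_name, **kwargs):
    # Fire-and-forget: no reply is expected from the guest.
    prepared = client.prepare(version=RPC_API_VERSION)
    prepared.cast(context, method_name, **kwargs)

def call_guest(client, context, method_name, timeout_sec, **kwargs):
    # Blocking: waits up to timeout_sec for the guest's reply.
    prepared = client.prepare(version=RPC_API_VERSION, timeout=timeout_sec)
    return prepared.call(context, method_name, **kwargs)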
- - def test_list_databases(self): - exp_resp = ['db1', 'db2', 'db3'] - self.call_context.call.return_value = exp_resp - - resp = self.api.list_databases( - limit=1, marker=2, include_marker=False) - - self._verify_rpc_prepare_before_call() - self._verify_call("list_databases", limit=1, marker=2, - include_marker=False) - self.assertEqual(exp_resp, resp) - - def test_delete_database(self): - self.api.delete_database('test_database_name') - - self._verify_rpc_prepare_before_cast() - self._verify_cast("delete_database", database='test_database_name') - - def test_enable_root(self): - self.call_context.call.return_value = True - - resp = self.api.enable_root() - - self._verify_rpc_prepare_before_call() - self._verify_call('enable_root') - self.assertThat(resp, Is(True)) - - def test_enable_root_with_password(self): - self.call_context.call.return_value = True - - resp = self.api.enable_root_with_password() - - self._verify_rpc_prepare_before_call() - self._verify_call('enable_root_with_password', root_password=None) - self.assertThat(resp, Is(True)) - - def test_disable_root(self): - self.call_context.call.return_value = True - - resp = self.api.disable_root() - - self._verify_rpc_prepare_before_call() - self._verify_call('disable_root') - self.assertThat(resp, Is(True)) - - def test_is_root_enabled(self): - self.call_context.call.return_value = False - - resp = self.api.is_root_enabled() - - self._verify_rpc_prepare_before_call() - self._verify_call('is_root_enabled') - self.assertThat(resp, Is(False)) - - def test_get_hwinfo(self): - self.call_context.call.return_value = '[blah]' - - resp = self.api.get_hwinfo() - - self._verify_rpc_prepare_before_call() - self._verify_call('get_hwinfo') - self.assertThat(resp, Is('[blah]')) - - def test_rpc_ping(self): - # execute - self.api.rpc_ping() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('rpc_ping') - - def test_get_diagnostics(self): - self.call_context.call.return_value = '[all good]' - - resp = self.api.get_diagnostics() - - self._verify_rpc_prepare_before_call() - self._verify_call('get_diagnostics') - self.assertThat(resp, Is('[all good]')) - - def test_restart(self): - self.api.restart() - - self._verify_rpc_prepare_before_call() - self._verify_call('restart') - - def test_start_db_with_conf_changes(self): - self.api.start_db_with_conf_changes(None) - - self._verify_rpc_prepare_before_call() - self._verify_call('start_db_with_conf_changes', config_contents=None) - - def test_reset_configuration(self): - # execute - self.api.reset_configuration({'config_contents': 'some junk'}) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('reset_configuration', - configuration={'config_contents': 'some junk'}) - - def test_stop_db(self): - self.api.stop_db(do_not_start_on_reboot=False) - - self._verify_rpc_prepare_before_call() - self._verify_call('stop_db', do_not_start_on_reboot=False) - - def test_get_volume_info(self): - exp_resp = {'fake': 'resp'} - self.call_context.call.return_value = exp_resp - - resp = self.api.get_volume_info() - - self._verify_rpc_prepare_before_call() - self._verify_call('get_filesystem_stats', fs_path=None) - self.assertThat(resp, Is(exp_resp)) - - def test_update_guest(self): - self.api.update_guest() - - self._verify_rpc_prepare_before_call() - self._verify_call('update_guest') - - def test_create_backup(self): - self.api.create_backup({'id': '123'}) - - self._verify_rpc_prepare_before_cast() - self._verify_cast('create_backup', backup_info={'id': '123'}) - - def 
test_unmount_volume(self): - # execute - self.api.unmount_volume('/dev/vdb', '/var/lib/mysql') - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('unmount_volume', device_path='/dev/vdb', - mount_point='/var/lib/mysql') - - def test_mount_volume(self): - # execute - self.api.mount_volume('/dev/vdb', '/var/lib/mysql') - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('mount_volume', device_path='/dev/vdb', - mount_point='/var/lib/mysql') - - def test_resize_fs(self): - # execute - self.api.resize_fs('/dev/vdb', '/var/lib/mysql') - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('resize_fs', device_path='/dev/vdb', - mount_point='/var/lib/mysql') - - def test_update_overrides(self): - self.api.update_overrides('123') - - self._verify_rpc_prepare_before_call() - self._verify_call('update_overrides', overrides='123', remove=False) - - def test_apply_overrides(self): - self.api.apply_overrides('123') - - self._verify_rpc_prepare_before_call() - self._verify_call('apply_overrides', overrides='123') - - def test_get_replication_snapshot(self): - # execute - self.api.get_replication_snapshot({}) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('get_replication_snapshot', snapshot_info={}, - replica_source_config=None) - - def test_attach_replication_slave(self): - # execute - self.api.attach_replication_slave(REPLICATION_SNAPSHOT) - # verify - self._verify_rpc_prepare_before_cast() - self._verify_cast('attach_replication_slave', - snapshot=REPLICATION_SNAPSHOT, slave_config=None) - - def test_detach_replica(self): - # execute - self.api.detach_replica() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('detach_replica', for_failover=False) - - def test_get_replica_context(self): - # execute - self.api.get_replica_context() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('get_replica_context') - - def test_attach_replica(self): - # execute - self.api.attach_replica(REPLICATION_SNAPSHOT, slave_config=None) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('attach_replica', - replica_info=REPLICATION_SNAPSHOT, slave_config=None) - - def test_make_read_only(self): - # execute - self.api.make_read_only(True) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('make_read_only', read_only=True) - - def test_enable_as_master(self): - # execute - self.api.enable_as_master({}) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('enable_as_master', replica_source_config={}) - - def test_get_txn_count(self): - # execute - self.api.get_txn_count() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('get_txn_count') - - def test_get_last_txn(self): - # execute - self.api.get_last_txn() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('get_last_txn') - - def test_get_latest_txn_id(self): - # execute - self.api.get_latest_txn_id() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('get_latest_txn_id') - - def test_wait_for_txn(self): - # execute - self.api.wait_for_txn("") - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('wait_for_txn', txn="") - - def test_cleanup_source_on_replica_detach(self): - # execute - self.api.cleanup_source_on_replica_detach({'replication_user': - 'test_user'}) - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('cleanup_source_on_replica_detach', - replica_info={'replication_user': 
'test_user'}) - - def test_demote_replication_master(self): - # execute - self.api.demote_replication_master() - # verify - self._verify_rpc_prepare_before_call() - self._verify_call('demote_replication_master') - - @mock.patch.object(messaging, 'Target') - @mock.patch.object(rpc, 'get_server') - def test_prepare(self, *args): - self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', - '/mnt/opt', None, 'cont', '1-2-3-4', - 'override', {'id': '2-3-4-5'}) - - self._verify_rpc_prepare_before_cast() - self._verify_cast( - 'prepare', packages=['package1'], databases='db1', - memory_mb='2048', users='user1', device_path='/dev/vdt', - mount_point='/mnt/opt', backup_info=None, - config_contents='cont', root_password='1-2-3-4', - overrides='override', cluster_config={'id': '2-3-4-5'}, - snapshot=None, modules=None) - - @mock.patch.object(messaging, 'Target') - @mock.patch.object(rpc, 'get_server') - def test_prepare_with_backup(self, *args): - backup = {'id': 'backup_id_123'} - self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', - '/mnt/opt', backup, 'cont', '1-2-3-4', - 'overrides', {"id": "2-3-4-5"}, modules=None) - - self._verify_rpc_prepare_before_cast() - self._verify_cast( - 'prepare', packages=['package1'], databases='db1', - memory_mb='2048', users='user1', device_path='/dev/vdt', - mount_point='/mnt/opt', backup_info=backup, - config_contents='cont', root_password='1-2-3-4', - overrides='overrides', cluster_config={'id': '2-3-4-5'}, - snapshot=None, modules=None) - - @mock.patch.object(messaging, 'Target') - @mock.patch.object(rpc, 'get_server') - def test_prepare_with_modules(self, *args): - modules = [{'id': 'mod_id'}] - self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt', - '/mnt/opt', None, 'cont', '1-2-3-4', - 'overrides', {"id": "2-3-4-5"}, modules=modules) - - self._verify_rpc_prepare_before_cast() - self._verify_cast( - 'prepare', packages=['package1'], databases='db1', - memory_mb='2048', users='user1', device_path='/dev/vdt', - mount_point='/mnt/opt', backup_info=None, - config_contents='cont', root_password='1-2-3-4', - overrides='overrides', cluster_config={'id': '2-3-4-5'}, - snapshot=None, modules=modules) - - def test_upgrade(self): - instance_version = "v1.0.1" - location = "http://swift/trove-guestagent-v1.0.1.tar.gz" - # execute - self.api.upgrade(instance_version, location) - # verify - self._verify_rpc_prepare_before_cast() - self._verify_cast( - 'upgrade', instance_version=instance_version, - location=location, metadata=None) - - def _verify_rpc_prepare_before_call(self): - self.api.client.prepare.assert_called_once_with( - version=RPC_API_VERSION, timeout=mock.ANY) - - def _verify_rpc_prepare_before_cast(self): - self.api.client.prepare.assert_called_once_with( - version=RPC_API_VERSION) - - def _verify_cast(self, *args, **kwargs): - self.call_context.cast.assert_called_once_with(self.context, *args, - **kwargs) - - def _verify_call(self, *args, **kwargs): - self.call_context.call.assert_called_once_with(self.context, *args, - **kwargs) - - def _mock_rpc_client(self): - self.call_context = mock.Mock() - self.api.client.prepare = mock.Mock(return_value=self.call_context) - self.call_context.call = mock.Mock() - self.call_context.cast = mock.Mock() - - -class ApiStrategyTest(trove_testtools.TestCase): - - @mock.patch('trove.guestagent.api.API.__init__', - mock.Mock(return_value=None)) - def test_guest_client_mongodb(self): - client = guest_client(mock.Mock(), mock.Mock(), 'mongodb') - self.assertFalse(hasattr(client, 
'add_config_servers2')) - self.assertTrue(callable(client.add_config_servers)) - - @mock.patch('trove.guestagent.api.API.__init__', - mock.Mock(return_value=None)) - def test_guest_client_vertica(self): - client = guest_client(mock.Mock(), mock.Mock(), 'vertica') - self.assertFalse(hasattr(client, 'get_public_keys2')) - self.assertTrue(callable(client.get_public_keys)) diff --git a/trove/tests/unittests/guestagent/test_backups.py b/trove/tests/unittests/guestagent/test_backups.py deleted file mode 100644 index d6f63777ab..0000000000 --- a/trove/tests/unittests/guestagent/test_backups.py +++ /dev/null @@ -1,983 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from unittest import mock -from unittest.mock import ANY -from unittest.mock import call -from unittest.mock import DEFAULT -from unittest.mock import Mock -from unittest.mock import patch - -from testtools.testcase import ExpectedException - -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import configuration -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.db2 import ( - service as db2_service) -from trove.guestagent.datastore.experimental.redis.service import RedisApp -from trove.guestagent.strategies.backup import base as backupBase -from trove.guestagent.strategies.backup.experimental import db2_impl -from trove.guestagent.strategies.backup.experimental.postgresql_impl \ - import PgBaseBackupUtil -from trove.guestagent.strategies.backup.mysql_impl import MySqlApp -from trove.guestagent.strategies.restore import base as restoreBase -from trove.guestagent.strategies.restore.mysql_impl import MySQLRestoreMixin -from trove.tests.unittests import trove_testtools - -BACKUP_XTRA_CLS = ("trove.guestagent.strategies.backup." - "mysql_impl.InnoBackupEx") -RESTORE_XTRA_CLS = ("trove.guestagent.strategies.restore." - "mysql_impl.InnoBackupEx") -BACKUP_XTRA_INCR_CLS = ("trove.guestagent.strategies.backup." - "mysql_impl.InnoBackupExIncremental") -RESTORE_XTRA_INCR_CLS = ("trove.guestagent.strategies.restore." - "mysql_impl.InnoBackupExIncremental") -BACKUP_SQLDUMP_CLS = ("trove.guestagent.strategies.backup." - "mysql_impl.MySQLDump") -RESTORE_SQLDUMP_CLS = ("trove.guestagent.strategies.restore." - "mysql_impl.MySQLDump") -BACKUP_CBBACKUP_CLS = ("trove.guestagent.strategies.backup." - "experimental.couchbase_impl.CbBackup") -RESTORE_CBBACKUP_CLS = ("trove.guestagent.strategies.restore." - "experimental.couchbase_impl.CbBackup") -BACKUP_MONGODUMP_CLS = ("trove.guestagent.strategies.backup." - "experimental.mongo_impl.MongoDump") -RESTORE_MONGODUMP_CLS = ("trove.guestagent.strategies.restore." - "experimental.mongo_impl.MongoDump") -BACKUP_REDIS_CLS = ("trove.guestagent.strategies.backup." - "experimental.redis_impl.RedisBackup") -RESTORE_REDIS_CLS = ("trove.guestagent.strategies.restore." 
- "experimental.redis_impl.RedisBackup") -BACKUP_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.backup." - "experimental.cassandra_impl.NodetoolSnapshot") -RESTORE_NODETOOLSNAPSHOT_CLS = ("trove.guestagent.strategies.restore." - "experimental.cassandra_impl.NodetoolSnapshot") -BACKUP_DB2_CLS = ("trove.guestagent.strategies.backup." - "experimental.db2_impl.DB2Backup") -RESTORE_DB2_CLS = ("trove.guestagent.strategies.restore." - "experimental.db2_impl.DB2Backup") -BACKUP_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.backup." - "experimental.couchdb_impl.CouchDBBackup") -RESTORE_COUCHDB_BACKUP_CLS = ("trove.guestagent.strategies.restore." - "experimental.couchdb_impl.CouchDBBackup") - -PIPE = " | " -ZIP = "gzip" -UNZIP = "gzip -d -c" -ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key" -DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key" -XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s" - " --user=os_admin --password=password --host=localhost" - " --socket=/var/run/mysqld/mysqld.sock" - " /var/lib/mysql/data 2>/tmp/innobackupex.log") -XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''} -XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'} -XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream' - ' --incremental --incremental-lsn=%(lsn)s' - ' %(extra_opts)s' - ' --user=os_admin --password=password --host=localhost' - ' --socket=/var/run/mysqld/mysqld.sock' - ' /var/lib/mysql/data' - ' 2>/tmp/innobackupex.log') -SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s " - "--opt --password=password -u os_admin" - " 2>/tmp/mysqldump.log") -SQLDUMP_BACKUP = SQLDUMP_BACKUP_RAW % {'extra_opts': ''} -SQLDUMP_BACKUP_EXTRA_OPTS = (SQLDUMP_BACKUP_RAW % - {'extra_opts': '--events --routines --triggers'}) -XTRA_RESTORE_RAW = ("sudo xbstream -x -C %(restore_location)s" - " 2>/tmp/xbstream_extract.log") -XTRA_RESTORE = XTRA_RESTORE_RAW % {'restore_location': '/var/lib/mysql/data'} -XTRA_INCR_PREPARE = ("sudo innobackupex" - " --defaults-file=/var/lib/mysql/data/backup-my.cnf" - " --ibbackup=xtrabackup" - " --apply-log" - " --redo-only" - " /var/lib/mysql/data" - " %(incr)s" - " 2>/tmp/innoprepare.log") -SQLDUMP_RESTORE = "sudo mysql" -PREPARE = ("sudo innobackupex" - " --defaults-file=/var/lib/mysql/data/backup-my.cnf" - " --ibbackup=xtrabackup" - " --apply-log" - " /var/lib/mysql/data" - " 2>/tmp/innoprepare.log") -CRYPTO_KEY = "default_aes_cbc_key" - -CBBACKUP_CMD = "tar cpPf - /tmp/backups" -CBBACKUP_RESTORE = "sudo tar xpPf -" - -MONGODUMP_CMD = "sudo tar cPf - /var/lib/mongodb/dump" -MONGODUMP_RESTORE = "sudo tar xPf -" - -REDISBACKUP_CMD = "sudo cat /var/lib/redis/dump.rdb" -REDISBACKUP_RESTORE = "tee /var/lib/redis/dump.rdb" - -DB2BACKUP_CMD = "sudo tar cPf - /home/db2inst1/db2inst1/backup" -DB2BACKUP_RESTORE = "sudo tar xPf -" - -COUCHDB_BACKUP_CMD = "sudo tar cpPf - /var/lib/couchdb" -COUCHDB_RESTORE_CMD = "sudo tar xPf -" - - -class GuestAgentBackupTest(trove_testtools.TestCase): - - def setUp(self): - super(GuestAgentBackupTest, self).setUp() - self.patch_pc = patch('trove.guestagent.datastore.service.' 
- 'BaseDbStatus.prepare_completed') - self.mock_pc = self.patch_pc.start() - self.mock_pc.__get__ = Mock(return_value=True) - self.addCleanup(self.patch_pc.stop) - self.get_auth_pwd_patch = patch.object( - MySqlApp, 'get_auth_password', mock.Mock(return_value='password')) - self.get_auth_pwd_mock = self.get_auth_pwd_patch.start() - self.addCleanup(self.get_auth_pwd_patch.stop) - - self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout') - self.exec_timeout_mock = self.exec_timeout_patch.start() - self.addCleanup(self.exec_timeout_patch.stop) - - self.get_data_dir_patch = patch.object( - MySqlApp, 'get_data_dir', return_value='/var/lib/mysql/data') - self.get_datadir_mock = self.get_data_dir_patch.start() - self.addCleanup(self.get_data_dir_patch.stop) - - backupBase.BackupRunner.is_zipped = True - backupBase.BackupRunner.is_encrypted = True - restoreBase.RestoreRunner.is_zipped = True - restoreBase.RestoreRunner.is_encrypted = True - - def tearDown(self): - super(GuestAgentBackupTest, self).tearDown() - - def test_backup_decrypted_xtrabackup_command(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_XTRA_CLS) - bkup = RunnerClass(12345, extra_opts="") - self.assertEqual(XTRA_BACKUP + PIPE + ZIP, bkup.command) - self.assertEqual("12345.xbstream.gz", bkup.manifest) - - def test_backup_decrypted_xtrabackup_with_extra_opts_command(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_XTRA_CLS) - bkup = RunnerClass(12345, extra_opts="--no-lock") - self.assertEqual(XTRA_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command) - self.assertEqual("12345.xbstream.gz", bkup.manifest) - - def test_backup_encrypted_xtrabackup_command(self): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_XTRA_CLS) - bkup = RunnerClass(12345, extra_opts="") - self.assertEqual(XTRA_BACKUP + PIPE + ZIP + PIPE + ENCRYPT, - bkup.command) - self.assertEqual("12345.xbstream.gz.enc", bkup.manifest) - - def test_backup_xtrabackup_incremental(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) - opts = {'lsn': '54321', 'extra_opts': ''} - expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP - bkup = RunnerClass(12345, extra_opts="", lsn="54321") - self.assertEqual(expected, bkup.command) - self.assertEqual("12345.xbstream.gz", bkup.manifest) - - def test_backup_xtrabackup_incremental_with_extra_opts_command(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) - opts = {'lsn': '54321', 'extra_opts': '--no-lock'} - expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP - bkup = RunnerClass(12345, extra_opts="--no-lock", lsn="54321") - self.assertEqual(expected, bkup.command) - self.assertEqual("12345.xbstream.gz", bkup.manifest) - - def test_backup_xtrabackup_incremental_encrypted(self): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_XTRA_INCR_CLS) - opts = {'lsn': '54321', 'extra_opts': ''} - expected = (XTRA_BACKUP_INCR % opts) + PIPE + ZIP + PIPE + ENCRYPT - bkup = RunnerClass(12345, extra_opts="", lsn="54321") - self.assertEqual(expected, bkup.command) - self.assertEqual("12345.xbstream.gz.enc", bkup.manifest) - - def test_backup_decrypted_mysqldump_command(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS) - bkup = RunnerClass(12345, extra_opts="") - self.assertEqual(SQLDUMP_BACKUP + 
PIPE + ZIP, bkup.command) - self.assertEqual("12345.gz", bkup.manifest) - - def test_backup_decrypted_mysqldump_with_extra_opts_command(self): - backupBase.BackupRunner.is_encrypted = False - RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS) - bkup = RunnerClass(12345, extra_opts="--events --routines --triggers") - self.assertEqual(SQLDUMP_BACKUP_EXTRA_OPTS + PIPE + ZIP, bkup.command) - self.assertEqual("12345.gz", bkup.manifest) - - def test_backup_encrypted_mysqldump_command(self): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_SQLDUMP_CLS) - bkup = RunnerClass(12345, user="user", - password="password", extra_opts="") - self.assertEqual(SQLDUMP_BACKUP + PIPE + ZIP + PIPE + ENCRYPT, - bkup.command) - self.assertEqual("12345.gz.enc", bkup.manifest) - - def test_restore_decrypted_xtrabackup_command(self): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_XTRA_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="md5") - self.assertEqual(UNZIP + PIPE + XTRA_RESTORE, restr.restore_cmd) - self.assertEqual(PREPARE, restr.prepare_cmd) - - def test_restore_encrypted_xtrabackup_command(self): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_XTRA_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="md5") - self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE, - restr.restore_cmd) - self.assertEqual(PREPARE, restr.prepare_cmd) - - def test_restore_xtrabackup_incremental_prepare_command(self): - RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="m5d") - # Final prepare command (same as normal xtrabackup) - self.assertEqual(PREPARE, restr.prepare_cmd) - # Incremental backup prepare command - expected = XTRA_INCR_PREPARE % {'incr': '--incremental-dir=/foo/bar/'} - observed = restr._incremental_prepare_cmd('/foo/bar/') - self.assertEqual(expected, observed) - # Full backup prepare command - expected = XTRA_INCR_PREPARE % {'incr': ''} - observed = restr._incremental_prepare_cmd(None) - self.assertEqual(expected, observed) - - def test_restore_decrypted_xtrabackup_incremental_command(self): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="m5d") - # Full restore command - expected = UNZIP + PIPE + XTRA_RESTORE - self.assertEqual(expected, restr.restore_cmd) - # Incremental backup restore command - opts = {'restore_location': '/foo/bar/'} - expected = UNZIP + PIPE + (XTRA_RESTORE_RAW % opts) - observed = restr._incremental_restore_cmd('/foo/bar/') - self.assertEqual(expected, observed) - - def test_restore_encrypted_xtrabackup_incremental_command(self): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_XTRA_INCR_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="md5") - # Full restore command - expected = DECRYPT + PIPE + UNZIP + PIPE + XTRA_RESTORE - self.assertEqual(expected, restr.restore_cmd) - # Incremental backup restore command - opts = {'restore_location': '/foo/bar/'} - expected = DECRYPT + PIPE + UNZIP + PIPE + (XTRA_RESTORE_RAW % opts) - observed = 
restr._incremental_restore_cmd('/foo/bar/') - self.assertEqual(expected, observed) - - def test_restore_decrypted_mysqldump_command(self): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="md5") - self.assertEqual(UNZIP + PIPE + SQLDUMP_RESTORE, restr.restore_cmd) - - def test_restore_encrypted_mysqldump_command(self): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_SQLDUMP_CLS) - restr = RunnerClass(None, restore_location="/var/lib/mysql/data", - location="filename", checksum="md5") - self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + SQLDUMP_RESTORE, - restr.restore_cmd) - - def test_backup_encrypted_cbbackup_command(self): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS) - utils.execute_with_timeout = mock.Mock(return_value=None) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual( - CBBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) - self.assertIn("gz.enc", bkp.manifest) - - def test_backup_not_encrypted_cbbackup_command(self): - backupBase.BackupRunner.is_encrypted = False - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_CBBACKUP_CLS) - utils.execute_with_timeout = mock.Mock(return_value=None) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual(CBBACKUP_CMD + PIPE + ZIP, bkp.command) - self.assertIn("gz", bkp.manifest) - - def test_restore_decrypted_cbbackup_command(self): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(UNZIP + PIPE + CBBACKUP_RESTORE, restr.restore_cmd) - - def test_restore_encrypted_cbbackup_command(self): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_CBBACKUP_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + CBBACKUP_RESTORE, - restr.restore_cmd) - - @patch.multiple('trove.guestagent.common.operating_system', - chmod=DEFAULT, remove=DEFAULT) - def test_reset_root_password_on_mysql_restore(self, chmod, remove): - with patch.object(MySQLRestoreMixin, - '_start_mysqld_safe_with_init_file', - return_value=True): - inst = MySQLRestoreMixin() - inst.reset_root_password() - - chmod.assert_called_once_with( - ANY, operating_system.FileMode.ADD_READ_ALL, as_root=True) - - # Make sure the temporary files got deleted as root - # (see bug/1423759). 
- remove.assert_has_calls(2 * [call(ANY, force=True, as_root=True)]) - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def test_backup_encrypted_mongodump_command(self, _): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS) - utils.execute_with_timeout = mock.Mock(return_value=None) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual( - MONGODUMP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) - self.assertIn("gz.enc", bkp.manifest) - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def test_backup_not_encrypted_mongodump_command(self, _): - backupBase.BackupRunner.is_encrypted = False - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS) - utils.execute_with_timeout = mock.Mock(return_value=None) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual(MONGODUMP_CMD + PIPE + ZIP, bkp.command) - self.assertIn("gz", bkp.manifest) - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def test_restore_decrypted_mongodump_command(self, _): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, UNZIP + PIPE + MONGODUMP_RESTORE) - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def test_restore_encrypted_mongodump_command(self, _): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, - DECRYPT + PIPE + UNZIP + PIPE + MONGODUMP_RESTORE) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_backup_encrypted_redisbackup_command(self, *mocks): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_REDIS_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual( - REDISBACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) - self.assertIn("gz.enc", bkp.manifest) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_backup_not_encrypted_redisbackup_command(self, *mocks): - backupBase.BackupRunner.is_encrypted = False - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_REDIS_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual(REDISBACKUP_CMD + PIPE + ZIP, bkp.command) - self.assertIn("gz", bkp.manifest) - - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(operating_system, 'chown') - @patch.object(operating_system, 'create_directory') - @patch.object(RedisApp, 'get_config_command_name', - 
Mock(return_value='fakeconfig')) - def test_restore_decrypted_redisbackup_command(self, *mocks): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_REDIS_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, UNZIP + PIPE + REDISBACKUP_RESTORE) - - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(operating_system, 'chown') - @patch.object(operating_system, 'create_directory') - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_restore_encrypted_redisbackup_command(self, *mocks): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_REDIS_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, - DECRYPT + PIPE + UNZIP + PIPE + REDISBACKUP_RESTORE) - - @patch.object(utils, 'execute_with_timeout') - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') - @patch.object(db2_service.DB2App, 'process_default_dbm_config') - @patch.object(db2_impl.DB2Backup, 'list_dbnames') - def test_backup_encrypted_db2backup_command(self, *mock, **kwargs): - backupBase.BackupRunner.is_encrypted = True - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_DB2_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual( - DB2BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) - self.assertIn("gz.enc", bkp.manifest) - - @patch.object(utils, 'execute_with_timeout') - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') - @patch.object(db2_service.DB2App, 'process_default_dbm_config') - @patch.object(db2_impl.DB2Backup, 'list_dbnames') - def test_backup_not_encrypted_db2backup_command(self, *mock, **kwargs): - backupBase.BackupRunner.is_encrypted = False - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_DB2_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual(DB2BACKUP_CMD + PIPE + ZIP, bkp.command) - self.assertIn("gz", bkp.manifest) - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') - @patch.object(db2_service.DB2App, 'process_default_dbm_config') - def test_restore_decrypted_db2backup_command(self, *args, **kwargs): - restoreBase.RestoreRunner.is_zipped = True - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_DB2_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, UNZIP + PIPE + DB2BACKUP_RESTORE) - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') -
@patch.object(db2_service.DB2App, 'process_default_dbm_config') - def test_restore_encrypted_db2backup_command(self, *args, **kwargs): - restoreBase.RestoreRunner.is_zipped = True - restoreBase.RestoreRunner.is_encrypted = True - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_DB2_CLS) - restr = RunnerClass(None, restore_location="/tmp", - location="filename", checksum="md5") - self.assertEqual(restr.restore_cmd, - DECRYPT + PIPE + UNZIP + PIPE + DB2BACKUP_RESTORE) - - def test_backup_encrypted_couchdbbackup_command(self): - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual( - COUCHDB_BACKUP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command) - self.assertIn("gz.enc", bkp.manifest) - - def test_backup_not_encrypted_couchdbbackup_command(self): - backupBase.BackupRunner.is_encrypted = False - backupBase.BackupRunner.encrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS) - bkp = RunnerClass(12345) - self.assertIsNotNone(bkp) - self.assertEqual(COUCHDB_BACKUP_CMD + PIPE + ZIP, bkp.command) - self.assertIn("gz", bkp.manifest) - - def test_restore_decrypted_couchdbbackup_command(self): - restoreBase.RestoreRunner.is_encrypted = False - RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS) - restr = RunnerClass(None, restore_location="/var/lib/couchdb", - location="filename", checksum="md5") - self.assertEqual(UNZIP + PIPE + COUCHDB_RESTORE_CMD, restr.restore_cmd) - - def test_restore_encrypted_couchdbbackup_command(self): - restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY - RunnerClass = utils.import_class(RESTORE_COUCHDB_BACKUP_CLS) - restr = RunnerClass(None, restore_location="/var/lib/couchdb", - location="filename", checksum="md5") - self.assertEqual(DECRYPT + PIPE + UNZIP + PIPE + COUCHDB_RESTORE_CMD, - restr.restore_cmd) - - -class MongodbBackupTests(trove_testtools.TestCase): - - def setUp(self): - super(MongodbBackupTests, self).setUp() - self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout', - return_value=('0', '')) - self.exec_timeout_mock = self.exec_timeout_patch.start() - self.addCleanup(self.exec_timeout_patch.stop) - - self.init_overrides_dir_patch = patch.object( - ImportOverrideStrategy, '_initialize_import_directory') - self.init_overrides_dir_mock = self.init_overrides_dir_patch.start() - self.addCleanup(self.init_overrides_dir_patch.stop) - - self.backup_runner = utils.import_class(BACKUP_MONGODUMP_CLS) - - self.backup_runner_patch = patch.multiple( - self.backup_runner, _run=DEFAULT, - _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) - self.backup_runner_mocks = self.backup_runner_patch.start() - self.addCleanup(self.backup_runner_patch.stop) - - def tearDown(self): - super(MongodbBackupTests, self).tearDown() - - def test_backup_success(self): - with self.backup_runner(12345): - pass - - self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - self.backup_runner_mocks['_run'].assert_called_once_with() - self.backup_runner_mocks['_run_post_backup'].assert_called_once_with() - - def test_backup_failed_due_to_run_backup(self): - self.backup_runner_mocks['_run'].configure_mock( - side_effect=exception.TroveError('test') - ) - with ExpectedException(exception.TroveError, 'test'): - with self.backup_runner(12345): - pass - self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() -
self.backup_runner_mocks['_run'].assert_called_once_with() - self.assertEqual( - 0, self.backup_runner_mocks['_run_post_backup'].call_count) - - -class MongodbRestoreTests(trove_testtools.TestCase): - - @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def setUp(self, _): - super(MongodbRestoreTests, self).setUp() - - self.patch_ope = patch('os.path.expanduser', - return_value='/tmp/mongo') - self.mock_ope = self.patch_ope.start() - self.addCleanup(self.patch_ope.stop) - self.restore_runner = utils.import_class( - RESTORE_MONGODUMP_CLS)('swift', location='http://some.where', - checksum='True_checksum', - restore_location='/var/lib/somewhere') - - def tearDown(self): - super(MongodbRestoreTests, self).tearDown() - - def test_restore_success(self): - expected_content_length = 123 - self.restore_runner._run_restore = mock.Mock( - return_value=expected_content_length) - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner.post_restore = mock.Mock() - actual_content_length = self.restore_runner.restore() - self.assertEqual( - expected_content_length, actual_content_length) - - def test_restore_failed_due_to_pre_restore(self): - self.restore_runner.post_restore = mock.Mock() - self.restore_runner.pre_restore = mock.Mock( - side_effect=exception.ProcessExecutionError('Error')) - self.restore_runner._run_restore = mock.Mock() - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - def test_restore_failed_due_to_run_restore(self): - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner._run_restore = mock.Mock( - side_effect=exception.ProcessExecutionError('Error')) - self.restore_runner.post_restore = mock.Mock() - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - -class RedisBackupTests(trove_testtools.TestCase): - - def setUp(self): - super(RedisBackupTests, self).setUp() - self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout', - return_value=('0', '')) - self.exec_timeout_patch.start() - self.addCleanup(self.exec_timeout_patch.stop) - self.conf_man_patch = patch.object( - configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - self.conf_man_patch.start() - self.addCleanup(self.conf_man_patch.stop) - - self.backup_runner = utils.import_class(BACKUP_REDIS_CLS) - self.backup_runner_patch = patch.multiple( - self.backup_runner, _run=DEFAULT, - _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) - self.backup_runner_mocks = self.backup_runner_patch.start() - self.addCleanup(self.backup_runner_patch.stop) - - def tearDown(self): - super(RedisBackupTests, self).tearDown() - - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_backup_success(self): - with self.backup_runner(12345): - pass - - self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - self.backup_runner_mocks['_run'].assert_called_once_with() - self.backup_runner_mocks['_run_post_backup'].assert_called_once_with() - - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_backup_failed_due_to_run_backup(self): - self.backup_runner_mocks['_run'].configure_mock( - side_effect=exception.TroveError('test') - ) - with ExpectedException(exception.TroveError, 'test'): - with self.backup_runner(12345): - pass - self.backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - 
self.backup_runner_mocks['_run'].assert_called_once_with() - self.assertEqual( - 0, self.backup_runner_mocks['_run_post_backup'].call_count) - - -class RedisRestoreTests(trove_testtools.TestCase): - @patch.object(RedisApp, 'get_config_command_name', - Mock(return_value='fakeconfig')) - def setUp(self): - super(RedisRestoreTests, self).setUp() - self.conf_man_patch = patch.object( - configuration.ConfigurationManager, 'parse_configuration', - mock.Mock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - self.conf_man_patch.start() - self.addCleanup(self.conf_man_patch.stop) - self.os_patch = patch.multiple(operating_system, - chown=DEFAULT, - create_directory=DEFAULT) - self.os_patch.start() - self.addCleanup(self.os_patch.stop) - - self.restore_runner = utils.import_class( - RESTORE_REDIS_CLS)('swift', location='http://some.where', - checksum='True_checksum', - restore_location='/var/lib/somewhere') - self.restore_runner_patch = patch.multiple( - self.restore_runner, _run_restore=DEFAULT, - pre_restore=DEFAULT, post_restore=DEFAULT) - self.restore_runner_mocks = self.restore_runner_patch.start() - self.expected_content_length = 123 - self.restore_runner._run_restore = mock.Mock( - return_value=self.expected_content_length) - self.addCleanup(self.restore_runner_patch.stop) - - def tearDown(self): - super(RedisRestoreTests, self).tearDown() - - def test_restore_success(self): - actual_content_length = self.restore_runner.restore() - self.assertEqual( - self.expected_content_length, actual_content_length) - - def test_restore_failed_due_to_pre_restore(self): - self.restore_runner_mocks['pre_restore'].side_effect = ( - exception.ProcessExecutionError('Error')) - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - def test_restore_failed_due_to_run_restore(self): - self.restore_runner._run_restore.side_effect = ( - exception.ProcessExecutionError('Error')) - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - -class PostgresqlBackupTests(trove_testtools.TestCase): - - def setUp(self): - super(PostgresqlBackupTests, self).setUp() - self.bkutil = PgBaseBackupUtil() - - self.b1 = ['000000010000000000000003', - '000000010000000000000004', - '000000010000000000000005', - '000000010000000000000006', - '000000010000000000000006.00000168.backup'] - - self.b2 = ['000000010000000000000007', - '000000010000000000000008', - '000000010000000000000009', - '000000010000000000000010', - '000000010000000000000009.0008A168.backup'] - - def tearDown(self): - super(PostgresqlBackupTests, self).tearDown() - - def test_check_most_recent_backup(self): - - with patch.object(os, 'listdir', return_value=self.b1): - mrb = self.bkutil.most_recent_backup_file() - self.assertEqual(mrb, self.b1[4]) - mrbfile = self.bkutil.most_recent_backup_wal() - self.assertEqual(mrbfile, self.b1[3]) - - with patch.object(os, 'listdir', return_value=self.b1 + self.b2): - mrb = self.bkutil.most_recent_backup_file() - self.assertEqual(mrb, self.b2[4]) - mrbfile = self.bkutil.most_recent_backup_wal() - self.assertEqual(mrbfile, self.b2[2]) - - def test_check_most_recent_wal_list(self): - - with patch.object(os, 'listdir', return_value=self.b1): - logs = self.bkutil.log_files_since_last_backup() - self.assertEqual(logs, [self.b1[3]]) - - with patch.object(os, 'listdir', return_value=self.b2): - logs = self.bkutil.log_files_since_last_backup() - self.assertEqual(logs, [self.b2[2], self.b2[3]]) - - with patch.object(os, 'listdir', return_value=self.b1 + 
self.b2): - logs = self.bkutil.log_files_since_last_backup() - self.assertEqual(logs, [self.b2[2], self.b2[3]]) - - -class DB2BackupTests(trove_testtools.TestCase): - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') - @patch.object(db2_service.DB2App, 'process_default_dbm_config') - def setUp(self, *args, **kwargs): - super(DB2BackupTests, self).setUp() - self.exec_timeout_patch = patch.object(utils, 'execute_with_timeout') - self.exec_timeout_patch.start() - self.exec_list_database = patch.object(db2_impl.DB2Backup, - 'list_dbnames') - self.exec_list_database.start() - self.backup_runner = utils.import_class(BACKUP_DB2_CLS) - self.backup_runner_patch = patch.multiple( - self.backup_runner, _run=DEFAULT, - _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) - - def tearDown(self): - super(DB2BackupTests, self).tearDown() - self.backup_runner_patch.stop() - self.exec_list_database.stop() - self.exec_timeout_patch.stop() - - def test_backup_success(self): - backup_runner_mocks = self.backup_runner_patch.start() - with self.backup_runner(12345): - pass - - backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - backup_runner_mocks['_run'].assert_called_once_with() - backup_runner_mocks['_run_post_backup'].assert_called_once_with() - - def test_backup_failed_due_to_run_backup(self): - backup_runner_mocks = self.backup_runner_patch.start() - backup_runner_mocks['_run'].configure_mock( - side_effect=exception.TroveError('test')) - with ExpectedException(exception.TroveError, 'test'): - with self.backup_runner(12345): - pass - backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - backup_runner_mocks['_run'].assert_called_once_with() - self.assertEqual(0, backup_runner_mocks['_run_post_backup'].call_count) - - -class DB2RestoreTests(trove_testtools.TestCase): - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2_service, 'run_command') - @patch.object(db2_service.DB2App, 'process_default_dbm_config') - def setUp(self, *args, **kwargs): - super(DB2RestoreTests, self).setUp() - - self.restore_runner = utils.import_class( - RESTORE_DB2_CLS)('swift', location='http://some.where', - checksum='True_checksum', - restore_location='/var/lib/somewhere') - - def tearDown(self): - super(DB2RestoreTests, self).tearDown() - - def test_restore_success(self): - expected_content_length = 123 - self.restore_runner._run_restore = mock.Mock( - return_value=expected_content_length) - self.restore_runner.post_restore = mock.Mock() - actual_content_length = self.restore_runner.restore() - self.assertEqual( - expected_content_length, actual_content_length) - - def test_restore_failed_due_to_run_restore(self): - self.restore_runner._run_restore = mock.Mock( - side_effect=exception.ProcessExecutionError('Error')) - self.restore_runner.post_restore = mock.Mock() - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - -class CouchDBBackupTests(trove_testtools.TestCase): - - def setUp(self): - super(CouchDBBackupTests, self).setUp() - self.backup_runner = utils.import_class(BACKUP_COUCHDB_BACKUP_CLS) - self.backup_runner_patch = patch.multiple( - self.backup_runner, _run=DEFAULT, - _run_pre_backup=DEFAULT, _run_post_backup=DEFAULT) - - def tearDown(self): - 
super(CouchDBBackupTests, self).tearDown() - self.backup_runner_patch.stop() - - def test_backup_success(self): - backup_runner_mocks = self.backup_runner_patch.start() - with self.backup_runner(12345): - pass - - backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - backup_runner_mocks['_run'].assert_called_once_with() - backup_runner_mocks['_run_post_backup'].assert_called_once_with() - - def test_backup_failed_due_to_run_backup(self): - backup_runner_mocks = self.backup_runner_patch.start() - backup_runner_mocks['_run'].configure_mock( - side_effect=exception.TroveError('test') - ) - with ExpectedException(exception.TroveError, 'test'): - with self.backup_runner(12345): - pass - - backup_runner_mocks['_run_pre_backup'].assert_called_once_with() - backup_runner_mocks['_run'].assert_called_once_with() - self.assertEqual(0, backup_runner_mocks['_run_post_backup'].call_count) - - -class CouchDBRestoreTests(trove_testtools.TestCase): - - def setUp(self): - super(CouchDBRestoreTests, self).setUp() - - self.restore_runner = utils.import_class( - RESTORE_COUCHDB_BACKUP_CLS)( - 'swift', location='http://some.where', - checksum='True_checksum', - restore_location='/tmp/somewhere') - - def tearDown(self): - super(CouchDBRestoreTests, self).tearDown() - - def test_restore_success(self): - expected_content_length = 123 - self.restore_runner._run_restore = mock.Mock( - return_value=expected_content_length) - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner.post_restore = mock.Mock() - actual_content_length = self.restore_runner.restore() - self.assertEqual( - expected_content_length, actual_content_length) - - def test_restore_failed_due_to_run_restore(self): - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner._run_restore = mock.Mock( - side_effect=exception.ProcessExecutionError('Error')) - self.restore_runner.post_restore = mock.Mock() - self.assertRaises(exception.ProcessExecutionError, - self.restore_runner.restore) - - -class MySQLRestoreTests(trove_testtools.TestCase): - - def setUp(self): - super(MySQLRestoreTests, self).setUp() - - self.restore_runner = utils.import_class( - RESTORE_XTRA_CLS)( - 'swift', location='http://some.where', - checksum='True_checksum', - restore_location='/tmp/somewhere') - - def tearDown(self): - super(MySQLRestoreTests, self).tearDown() - - def test_restore_success(self): - expected_content_length = 123 - self.restore_runner._run_restore = mock.Mock( - return_value=expected_content_length) - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner.post_restore = mock.Mock() - actual_content_length = self.restore_runner.restore() - self.assertEqual( - expected_content_length, actual_content_length) - - def test_restore_failed_due_to_run_restore(self): - self.restore_runner.pre_restore = mock.Mock() - self.restore_runner._run_restore = mock.Mock( - side_effect=restoreBase.RestoreError('Error')) - self.restore_runner.post_restore = mock.Mock() - self.assertRaises(restoreBase.RestoreError, - self.restore_runner.restore) diff --git a/trove/tests/unittests/guestagent/test_cassandra_manager.py b/trove/tests/unittests/guestagent/test_cassandra_manager.py deleted file mode 100644 index 9e646d31ee..0000000000 --- a/trove/tests/unittests/guestagent/test_cassandra_manager.py +++ /dev/null @@ -1,812 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import random -import string - -from mock import ANY -from mock import call -from mock import DEFAULT -from mock import MagicMock -from mock import Mock -from mock import NonCallableMagicMock -from mock import patch -from oslo_utils import netutils -from testtools import ExpectedException - -from trove.common.db.cassandra import models -from trove.common import exception -from trove.common.instance import ServiceStatuses -from trove.guestagent import backup -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.cassandra import ( - manager as cass_manager) -from trove.guestagent.datastore.experimental.cassandra import ( - service as cass_service) -from trove.guestagent import pkg -from trove.guestagent import volume -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest -from trove.tests.unittests import trove_testtools - - -class GuestAgentCassandraDBManagerTest(DatastoreManagerTest): - - __MOUNT_POINT = '/var/lib/cassandra' - - __N_GAK = '_get_available_keyspaces' - __N_GLU = '_get_listed_users' - __N_BU = '_build_user' - __N_RU = '_rename_user' - __N_AUP = '_alter_user_password' - __N_CAU = 'trove.common.db.cassandra.models.CassandraUser' - __N_CU = '_create_user' - __N_GFA = '_grant_full_access_on_keyspace' - __N_DU = '_drop_user' - - __ACCESS_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT') - __CREATE_DB_FORMAT = ( - "CREATE KEYSPACE \"{}\" WITH REPLICATION = " - "{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};" - ) - __DROP_DB_FORMAT = "DROP KEYSPACE \"{}\";" - __CREATE_USR_FORMAT = "CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;" - __ALTER_USR_FORMAT = "ALTER USER '{}' WITH PASSWORD %s;" - __DROP_USR_FORMAT = "DROP USER '{}';" - __GRANT_FORMAT = "GRANT {} ON KEYSPACE \"{}\" TO '{}';" - __REVOKE_FORMAT = "REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';" - __LIST_PERMISSIONS_FORMAT = "LIST ALL PERMISSIONS NORECURSIVE;" - __LIST_PERMISSIONS_OF_FORMAT = "LIST ALL PERMISSIONS OF '{}' NORECURSIVE;" - __LIST_DB_FORMAT = "SELECT * FROM system.schema_keyspaces;" - __LIST_USR_FORMAT = "LIST USERS;" - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def setUp(self, *args, **kwargs): - super(GuestAgentCassandraDBManagerTest, self).setUp('cassandra') - - conn_patcher = patch.multiple(cass_service.CassandraConnection, - _connect=DEFAULT, - is_active=Mock(return_value=True)) - self.addCleanup(conn_patcher.stop) - conn_patcher.start() - - self.real_status = cass_service.CassandraAppStatus.set_status - - class FakeInstanceServiceStatus(object): - status = ServiceStatuses.NEW - - def save(self): - pass - - cass_service.CassandraAppStatus.set_status = MagicMock( - return_value=FakeInstanceServiceStatus()) - self.context = trove_testtools.TroveTestContext(self) - self.manager = cass_manager.Manager() - self.manager._app = cass_service.CassandraApp() - self.manager._admin = cass_service.CassandraAdmin( 
- models.CassandraUser('Test')) - self.admin = self.manager._admin - self.admin._CassandraAdmin__client = MagicMock() - self.conn = self.admin._CassandraAdmin__client - self.pkg = cass_service.packager - self.origin_os_path_exists = os.path.exists - self.origin_format = volume.VolumeDevice.format - self.origin_migrate_data = volume.VolumeDevice.migrate_data - self.origin_mount = volume.VolumeDevice.mount - self.origin_mount_points = volume.VolumeDevice.mount_points - self.origin_stop_db = cass_service.CassandraApp.stop_db - self.origin_start_db = cass_service.CassandraApp.start_db - self.origin_install_db = cass_service.CassandraApp._install_db - self.original_get_ip = netutils.get_my_ipv4 - self.orig_make_host_reachable = ( - cass_service.CassandraApp.apply_initial_guestagent_configuration) - - def tearDown(self): - super(GuestAgentCassandraDBManagerTest, self).tearDown() - cass_service.packager = self.pkg - os.path.exists = self.origin_os_path_exists - volume.VolumeDevice.format = self.origin_format - volume.VolumeDevice.migrate_data = self.origin_migrate_data - volume.VolumeDevice.mount = self.origin_mount - volume.VolumeDevice.mount_points = self.origin_mount_points - cass_service.CassandraApp.stop_db = self.origin_stop_db - cass_service.CassandraApp.start_db = self.origin_start_db - cass_service.CassandraApp._install_db = self.origin_install_db - netutils.get_my_ipv4 = self.original_get_ip - cass_service.CassandraApp.apply_initial_guestagent_configuration = ( - self.orig_make_host_reachable) - cass_service.CassandraAppStatus.set_status = self.real_status - - def test_update_status(self): - mock_status = MagicMock() - mock_status.is_installed = True - mock_status._is_restarting = False - self.manager._app.status = mock_status - - self.manager.update_status(self.context) - - self.assertTrue(mock_status.set_status.called) - - def test_prepare_pkg(self): - self._prepare_dynamic(['cassandra']) - - def test_prepare_no_pkg(self): - self._prepare_dynamic([]) - - def test_prepare_db_not_installed(self): - self._prepare_dynamic([], is_db_installed=False) - - def test_prepare_db_not_installed_no_package(self): - self._prepare_dynamic([], - is_db_installed=False) - - @patch.object(backup, 'restore') - def test_prepare_db_restore(self, restore): - backup_info = {'id': 'backup_id', - 'instance_id': 'fake-instance-id', - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum'} - - self._prepare_dynamic(['cassandra'], is_db_installed=False, - backup_info=backup_info) - restore.assert_called_once_with( - self.context, backup_info, self.__MOUNT_POINT) - - @patch.multiple(operating_system, enable_service_on_boot=DEFAULT, - disable_service_on_boot=DEFAULT) - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_superuser_password_reset( - self, _, enable_service_on_boot, disable_service_on_boot): - fake_status = MagicMock() - fake_status.is_running = False - - test_app = cass_service.CassandraApp() - test_app.status = fake_status - with patch.multiple( - test_app, - start_db=DEFAULT, - stop_db=DEFAULT, - restart=DEFAULT, - _CassandraApp__disable_remote_access=DEFAULT, - _CassandraApp__enable_remote_access=DEFAULT, - _CassandraApp__disable_authentication=DEFAULT, - _CassandraApp__enable_authentication=DEFAULT, - _CassandraApp__reset_user_password_to_default=DEFAULT, - secure=DEFAULT) as calls: - - test_app._reset_admin_password() - - disable_service_on_boot.assert_called_once_with( - test_app.service_candidates) - calls[
'_CassandraApp__disable_remote_access' - ].assert_called_once_with() - calls[ - '_CassandraApp__disable_authentication' - ].assert_called_once_with() - calls['start_db'].assert_called_once_with(update_db=False, - enable_on_boot=False), - calls[ - '_CassandraApp__enable_authentication' - ].assert_called_once_with() - - pw_reset_mock = calls[ - '_CassandraApp__reset_user_password_to_default' - ] - pw_reset_mock.assert_called_once_with(test_app._ADMIN_USER) - calls['secure'].assert_called_once_with( - update_user=pw_reset_mock.return_value) - calls['restart'].assert_called_once_with() - calls['stop_db'].assert_called_once_with() - calls[ - '_CassandraApp__enable_remote_access' - ].assert_called_once_with() - enable_service_on_boot.assert_called_once_with( - test_app.service_candidates) - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_change_cluster_name(self, _): - fake_status = MagicMock() - fake_status.is_running = True - - test_app = cass_service.CassandraApp() - test_app.status = fake_status - with patch.multiple( - test_app, - start_db=DEFAULT, - stop_db=DEFAULT, - restart=DEFAULT, - _update_cluster_name_property=DEFAULT, - _CassandraApp__reset_cluster_name=DEFAULT) as calls: - - sample_name = NonCallableMagicMock() - test_app.change_cluster_name(sample_name) - calls['_CassandraApp__reset_cluster_name'].assert_called_once_with( - sample_name) - calls['_update_cluster_name_property'].assert_called_once_with( - sample_name) - calls['restart'].assert_called_once_with() - - @patch.object(cass_service, 'CONF', DEFAULT) - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_apply_post_restore_updates(self, _, conf_mock): - fake_status = MagicMock() - fake_status.is_running = False - - test_app = cass_service.CassandraApp() - test_app.status = fake_status - with patch.multiple( - test_app, - start_db=DEFAULT, - stop_db=DEFAULT, - _update_cluster_name_property=DEFAULT, - _reset_admin_password=DEFAULT, - change_cluster_name=DEFAULT) as calls: - backup_info = {'instance_id': 'old_id'} - conf_mock.guest_id = 'new_id' - test_app._apply_post_restore_updates(backup_info) - calls['_update_cluster_name_property'].assert_called_once_with( - 'old_id') - calls['_reset_admin_password'].assert_called_once_with() - calls['start_db'].assert_called_once_with(update_db=False) - calls['change_cluster_name'].assert_called_once_with('new_id') - calls['stop_db'].assert_called_once_with() - - def _prepare_dynamic(self, packages, - config_content='MockContent', device_path='/dev/vdb', - is_db_installed=True, backup_info=None, - is_root_enabled=False, - overrides=None): - - mock_status = MagicMock() - mock_app = MagicMock() - mock_app.status = mock_status - self.manager._app = mock_app - - mock_status.begin_install = MagicMock(return_value=None) - mock_app.install_if_needed = MagicMock(return_value=None) - mock_app.init_storage_structure = MagicMock(return_value=None) - mock_app.write_config = MagicMock(return_value=None) - mock_app.apply_initial_guestagent_configuration = MagicMock( - return_value=None) - mock_app.restart = MagicMock(return_value=None) - mock_app.start_db = MagicMock(return_value=None) - mock_app.stop_db = MagicMock(return_value=None) - mock_app._remove_system_tables = MagicMock(return_value=None) - os.path.exists = MagicMock(return_value=True) - volume.VolumeDevice.format = MagicMock(return_value=None) - volume.VolumeDevice.migrate_data = MagicMock(return_value=None) - volume.VolumeDevice.mount = MagicMock(return_value=None) - 
volume.VolumeDevice.mount_points = MagicMock(return_value=[]) - - with patch.object(pkg.Package, 'pkg_is_installed', - return_value=is_db_installed): - # invocation - self.manager.prepare(context=self.context, packages=packages, - config_contents=config_content, - databases=None, - memory_mb='2048', users=None, - device_path=device_path, - mount_point=self.__MOUNT_POINT, - backup_info=backup_info, - overrides=None, - cluster_config=None) - - # verification/assertion - mock_status.begin_install.assert_any_call() - mock_app.install_if_needed.assert_any_call(packages) - mock_app._remove_system_tables.assert_any_call() - mock_app.init_storage_structure.assert_any_call('/var/lib/cassandra') - mock_app.apply_initial_guestagent_configuration.assert_any_call( - cluster_name=None) - mock_app.start_db.assert_any_call(update_db=False) - mock_app.stop_db.assert_any_call() - if backup_info: - mock_app._apply_post_restore_updates.assert_called_once_with( - backup_info) - - def test_keyspace_validation(self): - valid_name = self._get_random_name(32) - db = models.CassandraSchema(valid_name) - self.assertEqual(valid_name, db.name) - with ExpectedException(ValueError): - models.CassandraSchema(self._get_random_name(33)) - - def test_user_validation(self): - valid_name = self._get_random_name(65535) - usr = models.CassandraUser(valid_name, 'password') - self.assertEqual(valid_name, usr.name) - self.assertEqual('password', usr.password) - with ExpectedException(ValueError): - models.CassandraUser(self._get_random_name(65536)) - - @classmethod - def _serialize_collection(cls, *collection): - return [item.serialize() for item in collection] - - @classmethod - def _get_random_name(cls, size, - chars=string.ascii_letters + string.digits): - return ''.join(random.choice(chars) for _ in range(size)) - - def test_create_database(self): - db1 = models.CassandraSchema('db1') - db2 = models.CassandraSchema('db2') - db3 = models.CassandraSchema(self._get_random_name(32)) - - self.manager.create_database(self.context, - self._serialize_collection(db1, db2, db3)) - self.conn.execute.assert_has_calls([ - call(self.__CREATE_DB_FORMAT, (db1.name,)), - call(self.__CREATE_DB_FORMAT, (db2.name,)), - call(self.__CREATE_DB_FORMAT, (db3.name,)) - ]) - - def test_delete_database(self): - db = models.CassandraSchema(self._get_random_name(32)) - self.manager.delete_database(self.context, db.serialize()) - self.conn.execute.assert_called_once_with( - self.__DROP_DB_FORMAT, (db.name,)) - - def test_create_user(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2', '') - usr3 = models.CassandraUser(self._get_random_name(1025), 'password') - - self.manager.create_user(self.context, - self._serialize_collection(usr1, usr2, usr3)) - self.conn.execute.assert_has_calls([ - call(self.__CREATE_USR_FORMAT, (usr1.name,), (usr1.password,)), - call(self.__CREATE_USR_FORMAT, (usr2.name,), (usr2.password,)), - call(self.__CREATE_USR_FORMAT, (usr3.name,), (usr3.password,)) - ]) - - def test_delete_user(self): - usr = models.CassandraUser(self._get_random_name(1025), 'password') - self.manager.delete_user(self.context, usr.serialize()) - self.conn.execute.assert_called_once_with( - self.__DROP_USR_FORMAT, (usr.name,)) - - def test_change_passwords(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2', '') - usr3 = models.CassandraUser(self._get_random_name(1025), 'password') - - self.manager.change_passwords(self.context, self._serialize_collection( - usr1, usr2, usr3)) - 
self.conn.execute.assert_has_calls([ - call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)), - call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)), - call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,)) - ]) - - def test_alter_user_password(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2', '') - usr3 = models.CassandraUser(self._get_random_name(1025), 'password') - - self.admin.alter_user_password(usr1) - self.admin.alter_user_password(usr2) - self.admin.alter_user_password(usr3) - self.conn.execute.assert_has_calls([ - call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)), - call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)), - call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,)) - ]) - - def test_grant_access(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr1', 'password') - db1 = models.CassandraSchema('db1') - db2 = models.CassandraSchema('db2') - db3 = models.CassandraSchema('db3') - - self.manager.grant_access(self.context, usr1.name, None, [db1.name, - db2.name]) - self.manager.grant_access(self.context, usr2.name, None, [db3.name]) - - expected = [] - for modifier in self.__ACCESS_MODIFIERS: - expected.append(call(self.__GRANT_FORMAT, - (modifier, db1.name, usr1.name))) - expected.append(call(self.__GRANT_FORMAT, - (modifier, db3.name, usr2.name))) - - self.conn.execute.assert_has_calls( - expected, - any_order=True) - - def test_revoke_access(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr1', 'password') - db1 = models.CassandraSchema('db1') - db2 = models.CassandraSchema('db2') - - self.manager.revoke_access(self.context, usr1.name, None, db1.name) - self.manager.revoke_access(self.context, usr2.name, None, db2.name) - self.conn.execute.assert_has_calls([ - call(self.__REVOKE_FORMAT, (db1.name, usr1.name)), - call(self.__REVOKE_FORMAT, (db2.name, usr2.name)) - ]) - - def test_get_available_keyspaces(self): - self.manager.list_databases(self.context) - self.conn.execute.assert_called_once_with( - self.__LIST_DB_FORMAT) - - def test_list_databases(self): - db1 = models.CassandraSchema('db1') - db2 = models.CassandraSchema('db2') - db3 = models.CassandraSchema(self._get_random_name(32)) - - with patch.object(self.admin, self.__N_GAK, return_value={db1, db2, - db3}): - found = self.manager.list_databases(self.context) - self.assertEqual(2, len(found)) - self.assertEqual(3, len(found[0])) - self.assertIsNone(found[1]) - self.assertIn(db1.serialize(), found[0]) - self.assertIn(db2.serialize(), found[0]) - self.assertIn(db3.serialize(), found[0]) - - with patch.object(self.admin, self.__N_GAK, return_value=set()): - found = self.manager.list_databases(self.context) - self.assertEqual(([], None), found) - - def test_get_acl(self): - r0 = NonCallableMagicMock(username='user1', resource='', - permission='SELECT') - r1 = NonCallableMagicMock(username='user2', resource='', - permission='SELECT') - r2 = NonCallableMagicMock(username='user2', resource='', - permission='SELECT') - r3 = NonCallableMagicMock(username='user2', resource='', - permission='ALTER') - r4 = NonCallableMagicMock(username='user3', resource='
', - permission='SELECT') - r5 = NonCallableMagicMock(username='user3', resource='', - permission='ALTER') - r6 = NonCallableMagicMock(username='user3', resource='', - permission='') - r7 = NonCallableMagicMock(username='user3', resource='', - permission='') - r8 = NonCallableMagicMock(username='user3', resource='', - permission='DELETE') - r9 = NonCallableMagicMock(username='user4', resource='', - permission='UPDATE') - r10 = NonCallableMagicMock(username='user4', resource='', - permission='DELETE') - - available_ks = {models.CassandraSchema('ks1'), - models.CassandraSchema('ks2'), - models.CassandraSchema('ks3')} - - mock_result_set = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r9, r9, r10] - execute_mock = MagicMock(return_value=mock_result_set) - mock_client = MagicMock(execute=execute_mock) - - with patch.object(self.admin, - self.__N_GAK, return_value=available_ks) as gak_mock: - acl = self.admin._get_acl(mock_client) - execute_mock.assert_called_once_with( - self.__LIST_PERMISSIONS_FORMAT) - gak_mock.assert_called_once_with(mock_client) - - self.assertEqual({'user1': {'ks1': {'SELECT'}, - 'ks2': {'SELECT'}, - 'ks3': {'SELECT'}}, - 'user2': {'ks1': {'SELECT'}, - 'ks2': {'SELECT', 'ALTER'}}, - 'user3': {'ks1': {'DELETE'}}, - 'user4': {'ks1': {'UPDATE', 'DELETE'}, - 'ks2': {'UPDATE'}, - 'ks3': {'UPDATE'}} - }, - acl) - - mock_result_set = [r1, r2, r3] - execute_mock = MagicMock(return_value=mock_result_set) - mock_client = MagicMock(execute=execute_mock) - - with patch.object(self.admin, - self.__N_GAK, return_value=available_ks) as gak_mock: - acl = self.admin._get_acl(mock_client, username='user2') - execute_mock.assert_called_once_with( - self.__LIST_PERMISSIONS_OF_FORMAT.format('user2')) - gak_mock.assert_not_called() - - self.assertEqual({'user2': {'ks1': {'SELECT'}, - 'ks2': {'SELECT', 'ALTER'}}}, acl) - - mock_result_set = [] - execute_mock = MagicMock(return_value=mock_result_set) - mock_client = MagicMock(execute=execute_mock) - - with patch.object(self.admin, - self.__N_GAK, return_value=available_ks) as gak_mock: - acl = self.admin._get_acl(mock_client, username='nonexisting') - execute_mock.assert_called_once_with( - self.__LIST_PERMISSIONS_OF_FORMAT.format('nonexisting')) - gak_mock.assert_not_called() - - self.assertEqual({}, acl) - - def test_get_listed_users(self): - usr1 = models.CassandraUser(self._get_random_name(1025)) - usr2 = models.CassandraUser(self._get_random_name(1025)) - usr3 = models.CassandraUser(self._get_random_name(1025)) - db1 = models.CassandraSchema('db1') - db2 = models.CassandraSchema('db2') - usr1.databases.append(db1.serialize()) - usr3.databases.append(db2.serialize()) - - rv_1 = NonCallableMagicMock() - rv_1.configure_mock(name=usr1.name, super=False) - rv_2 = NonCallableMagicMock() - rv_2.configure_mock(name=usr2.name, super=False) - rv_3 = NonCallableMagicMock() - rv_3.configure_mock(name=usr3.name, super=True) - - with patch.object(self.conn, 'execute', return_value=iter( - [rv_1, rv_2, rv_3])): - with patch.object(self.admin, '_get_acl', - return_value={usr1.name: {db1.name: {'SELECT'}, - db2.name: {}}, - usr3.name: {db2.name: {'SELECT'}}} - ): - usrs = self.manager.list_users(self.context) - self.conn.execute.assert_has_calls([ - call(self.__LIST_USR_FORMAT), - ], any_order=True) - self.assertIn(usr1.serialize(), usrs[0]) - self.assertIn(usr2.serialize(), usrs[0]) - self.assertIn(usr3.serialize(), usrs[0]) - - def test_list_access(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2') - usr3 = 
models.CassandraUser(self._get_random_name(1025), 'password') - db1 = models.CassandraSchema('db1').serialize() - db2 = models.CassandraSchema('db2').serialize() - usr2.databases.append(db1) - usr3.databases.append(db1) - usr3.databases.append(db2) - - with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, - usr3}): - usr1_dbs = self.manager.list_access(self.context, usr1.name, None) - usr2_dbs = self.manager.list_access(self.context, usr2.name, None) - usr3_dbs = self.manager.list_access(self.context, usr3.name, None) - self.assertEqual([], usr1_dbs) - self.assertEqual([db1], usr2_dbs) - self.assertEqual([db1, db2], usr3_dbs) - - with patch.object(self.admin, self.__N_GLU, return_value=set()): - with ExpectedException(exception.UserNotFound): - self.manager.list_access(self.context, usr3.name, None) - - def test_list_users(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2') - usr3 = models.CassandraUser(self._get_random_name(1025), 'password') - - with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, - usr3}): - found = self.manager.list_users(self.context) - self.assertEqual(2, len(found)) - self.assertEqual(3, len(found[0])) - self.assertIsNone(found[1]) - self.assertIn(usr1.serialize(), found[0]) - self.assertIn(usr2.serialize(), found[0]) - self.assertIn(usr3.serialize(), found[0]) - - with patch.object(self.admin, self.__N_GLU, return_value=set()): - self.assertEqual(([], None), self.manager.list_users(self.context)) - - def test_get_user(self): - usr1 = models.CassandraUser('usr1') - usr2 = models.CassandraUser('usr2') - usr3 = models.CassandraUser(self._get_random_name(1025), 'password') - - with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2, - usr3}): - found = self.manager.get_user(self.context, usr2.name, None) - self.assertEqual(usr2.serialize(), found) - - with patch.object(self.admin, self.__N_GLU, return_value=set()): - self.assertIsNone( - self.manager.get_user(self.context, usr2.name, None)) - - @patch.object(cass_service.CassandraAdmin, '_deserialize_keyspace', - side_effect=lambda p1: p1) - def test_rename_user(self, ks_deserializer): - usr = models.CassandraUser('usr') - db1 = models.CassandraSchema('db1').serialize() - db2 = models.CassandraSchema('db2').serialize() - usr.databases.append(db1) - usr.databases.append(db2) - - new_user = models.CassandraUser('new_user') - with patch(self.__N_CAU, return_value=new_user): - with patch.object(self.admin, self.__N_BU, return_value=usr): - with patch.object(self.admin, self.__N_CU) as create: - with patch.object(self.admin, self.__N_GFA) as grant: - with patch.object(self.admin, self.__N_DU) as drop: - usr_attrs = {'name': 'user', 'password': 'trove'} - self.manager.update_attributes(self.context, - usr.name, None, - usr_attrs) - create.assert_called_once_with(ANY, new_user) - grant.assert_has_calls([call(ANY, db1, ANY), - call(ANY, db2, ANY)]) - drop.assert_called_once_with(ANY, usr) - - def test_update_attributes(self): - usr = models.CassandraUser('usr', 'pwd') - - with patch.object(self.admin, self.__N_BU, return_value=usr): - usr_attrs = {'name': usr.name, 'password': usr.password} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - self.assertEqual(0, rename.call_count) - self.assertEqual(0, alter.call_count) - - usr_attrs = {'name': 'user', 'password': 'password'} - with patch.object(self.admin, 
self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - rename.assert_called_once_with(ANY, usr, usr_attrs['name'], - usr_attrs['password']) - self.assertEqual(0, alter.call_count) - - usr_attrs = {'name': 'user', 'password': usr.password} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - rename.assert_called_once_with(ANY, usr, usr_attrs['name'], - usr_attrs['password']) - self.assertEqual(0, alter.call_count) - - usr_attrs = {'name': 'user'} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - with ExpectedException( - exception.UnprocessableEntity, "Updating username " - "requires specifying a password as well."): - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - self.assertEqual(0, rename.call_count) - self.assertEqual(0, alter.call_count) - - usr_attrs = {'name': usr.name, 'password': 'password'} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - alter.assert_called_once_with(ANY, usr) - self.assertEqual(0, rename.call_count) - - usr_attrs = {'password': usr.password} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - self.assertEqual(0, rename.call_count) - self.assertEqual(0, alter.call_count) - - usr_attrs = {'password': 'trove'} - with patch.object(self.admin, self.__N_RU) as rename: - with patch.object(self.admin, self.__N_AUP) as alter: - self.manager.update_attributes(self.context, usr.name, - None, usr_attrs) - alter.assert_called_once_with(ANY, usr) - self.assertEqual(0, rename.call_count) - - def test_update_overrides(self): - cfg_mgr_mock = MagicMock() - self.manager._app.configuration_manager = cfg_mgr_mock - overrides = NonCallableMagicMock() - self.manager.update_overrides(Mock(), overrides) - cfg_mgr_mock.apply_user_override.assert_called_once_with(overrides) - cfg_mgr_mock.remove_user_override.assert_not_called() - - def test_remove_overrides(self): - cfg_mgr_mock = MagicMock() - self.manager._app.configuration_manager = cfg_mgr_mock - self.manager.update_overrides(Mock(), {}, remove=True) - cfg_mgr_mock.remove_user_override.assert_called_once_with() - cfg_mgr_mock.apply_user_override.assert_not_called() - - def test_apply_overrides(self): - self.assertIsNone( - self.manager.apply_overrides(Mock(), NonCallableMagicMock())) - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - @patch.object(cass_service.CassandraApp, '_run_nodetool_command') - def test_drain(self, command_runner_mock, _): - self.manager._app.drain() - command_runner_mock.assert_called_once_with('drain') - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - @patch.object(cass_service.CassandraApp, '_run_nodetool_command') - def test_upgrade_sstables(self, command_runner_mock, _): - self.manager._app.upgrade_sstables() - command_runner_mock.assert_called_once_with('upgradesstables') - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_enable_root(self, _): - with patch.object(self.manager._app, 'is_root_enabled', - 
return_value=False): - with patch.object(cass_service.CassandraAdmin, - '_create_superuser') as create_mock: - self.manager.enable_root(self.context) - create_mock.assert_called_once_with(ANY) - - with patch.object(self.manager._app, 'is_root_enabled', - return_value=True): - with patch.object(cass_service.CassandraAdmin, - 'alter_user_password') as alter_mock: - self.manager.enable_root(self.context) - alter_mock.assert_called_once_with(ANY) - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_is_root_enabled(self, _): - trove_admin = Mock() - trove_admin.configure_mock(name=self.manager._app._ADMIN_USER) - other_admin = Mock() - other_admin.configure_mock(name='someuser') - - with patch.object(cass_service.CassandraAdmin, - 'list_superusers', return_value=[]): - self.assertFalse(self.manager.is_root_enabled(self.context)) - - with patch.object(cass_service.CassandraAdmin, - 'list_superusers', return_value=[trove_admin]): - self.assertFalse(self.manager.is_root_enabled(self.context)) - - with patch.object(cass_service.CassandraAdmin, - 'list_superusers', return_value=[other_admin]): - self.assertTrue(self.manager.is_root_enabled(self.context)) - - with patch.object(cass_service.CassandraAdmin, - 'list_superusers', - return_value=[trove_admin, other_admin]): - self.assertTrue(self.manager.is_root_enabled(self.context)) - - def test_guest_log_enable(self): - self._assert_guest_log_enable(False, 'INFO') - self._assert_guest_log_enable(True, 'OFF') - - def _assert_guest_log_enable(self, disable, expected_level): - with patch.multiple( - self.manager._app, - logback_conf_manager=DEFAULT, - _run_nodetool_command=DEFAULT - ) as app_mocks: - self.assertFalse(self.manager.guest_log_enable( - Mock(), Mock(), disable)) - - (app_mocks['logback_conf_manager'].apply_system_override. - assert_called_once_with( - {'configuration': {'root': {'@level': expected_level}}})) - app_mocks['_run_nodetool_command'].assert_called_once_with( - 'setlogginglevel', 'root', expected_level) diff --git a/trove/tests/unittests/guestagent/test_configuration.py b/trove/tests/unittests/guestagent/test_configuration.py deleted file mode 100644 index c20989230a..0000000000 --- a/trove/tests/unittests/guestagent/test_configuration.py +++ /dev/null @@ -1,460 +0,0 @@ -# Copyright 2015 Tesora Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import getpass -from mock import call -from mock import DEFAULT -from mock import MagicMock -from mock import Mock -from mock import patch -import os -import tempfile -from trove.common.stream_codecs import IniCodec -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common.configuration import OneFileOverrideStrategy -from trove.guestagent.common import operating_system -from trove.guestagent.common.operating_system import FileMode -from trove.tests.unittests import trove_testtools - - -class TestConfigurationManager(trove_testtools.TestCase): - - @patch.multiple('trove.guestagent.common.operating_system', - read_file=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - def test_read_write_configuration(self, read_file, write_file, - chown, chmod): - sample_path = Mock() - sample_owner = Mock() - sample_group = Mock() - sample_codec = MagicMock() - sample_requires_root = Mock() - sample_strategy = MagicMock() - sample_strategy.configure = Mock() - sample_strategy.parse_updates = Mock(return_value={}) - - manager = ConfigurationManager( - sample_path, sample_owner, sample_group, sample_codec, - requires_root=sample_requires_root, - override_strategy=sample_strategy) - - manager.parse_configuration() - read_file.assert_called_with(sample_path, codec=sample_codec, - as_root=sample_requires_root) - - with patch.object(manager, 'parse_configuration', - return_value={'key1': 'v1', 'key2': 'v2'}): - self.assertEqual('v1', manager.get_value('key1')) - self.assertIsNone(manager.get_value('key3')) - - sample_contents = Mock() - manager.save_configuration(sample_contents) - write_file.assert_called_with( - sample_path, sample_contents, as_root=sample_requires_root) - - chown.assert_called_with(sample_path, sample_owner, sample_group, - as_root=sample_requires_root) - chmod.assert_called_with( - sample_path, FileMode.ADD_READ_ALL, as_root=sample_requires_root) - - sample_data = {} - manager.apply_system_override(sample_data) - manager.apply_user_override(sample_data) - manager.apply_system_override(sample_data, change_id='sys1') - manager.apply_user_override(sample_data, change_id='usr1') - manager.apply_system_override(sample_data, change_id='sys2', - pre_user=True) - sample_strategy.apply.assert_has_calls([ - call(manager.SYSTEM_POST_USER_GROUP, - manager.DEFAULT_CHANGE_ID, sample_data), - call(manager.USER_GROUP, manager.DEFAULT_CHANGE_ID, sample_data), - call(manager.SYSTEM_POST_USER_GROUP, - 'sys1', sample_data), - call(manager.USER_GROUP, 'usr1', sample_data), - call(manager.SYSTEM_PRE_USER_GROUP, - 'sys2', sample_data), - ]) - - -class TestConfigurationOverrideStrategy(trove_testtools.TestCase): - - def setUp(self): - trove_testtools.TestCase.setUp(self) - self._temp_files_paths = [] - self.chmod_patch = patch.object( - operating_system, 'chmod', - MagicMock(return_value=None)) - self.chmod_patch_mock = self.chmod_patch.start() - self.addCleanup(self.chmod_patch.stop) - - def tearDown(self): - trove_testtools.TestCase.tearDown(self) - - # Remove temporary files in LIFO order. - while self._temp_files_paths: - try: - os.remove(self._temp_files_paths.pop()) - except Exception: - pass # Do not fail in cleanup. - - def _create_temp_dir(self): - path = tempfile.mkdtemp() - self._temp_files_paths.append(path) - return path - - def test_import_override_strategy(self): - - # Data structures representing overrides.
- # ('change id', 'values', 'expected import index', - # 'expected final import data') - - # Distinct IDs within each group mean that there is one file for each - # override. - user_overrides_v1 = ('id1', - {'Section_1': {'name': 'sqrt(2)', - 'value': '1.4142'}}, - 1, - {'Section_1': {'name': 'sqrt(2)', - 'value': 1.4142}} - ) - - user_overrides_v2 = ('id2', - {'Section_1': {'is_number': False}}, - 2, - {'Section_1': {'is_number': False}} - ) - - system_overrides_v1 = ('id1', - {'Section_1': {'name': 'e', - 'value': 2.7183}}, - 1, - {'Section_1': {'name': 'e', - 'value': 2.7183}} - ) - - system_overrides_v2 = ('id2', - {'Section_2': {'is_number': True}}, - 2, - {'Section_2': {'is_number': True}} - ) - - self._test_import_override_strategy( - [system_overrides_v1, system_overrides_v2], - [user_overrides_v1, user_overrides_v2], True) - - # Same IDs within a group mean that the overrides get written into a - # single file. - user_overrides_v1 = ('id1', - {'Section_1': {'name': 'sqrt(2)', - 'value': 1.4142}}, - 1, - {'Section_1': {'name': 'sqrt(2)', - 'is_number': False, - 'value': 1.4142}} - ) - - user_overrides_v2 = ('id1', - {'Section_1': {'is_number': False}}, - 1, - {'Section_1': {'name': 'sqrt(2)', - 'is_number': False, - 'value': 1.4142}} - ) - - system_overrides_v1 = ('id1', - {'Section_1': {'name': 'e', - 'value': 2.7183}}, - 1, - {'Section_1': {'name': 'e', - 'value': 2.7183}, - 'Section_2': {'is_number': True}} - ) - - system_overrides_v2 = ('id1', - {'Section_2': {'is_number': True}}, - 1, - {'Section_1': {'name': 'e', - 'value': 2.7183}, - 'Section_2': {'is_number': True}} - ) - - self._test_import_override_strategy( - [system_overrides_v1, system_overrides_v2], - [user_overrides_v1, user_overrides_v2], False) - - @patch.multiple(operating_system, chmod=Mock(), chown=Mock()) - def _test_import_override_strategy( - self, system_overrides, user_overrides, test_multi_rev): - base_config_contents = {'Section_1': {'name': 'pi', - 'is_number': True, - 'value': 3.1415} - } - - codec = IniCodec() - current_user = getpass.getuser() - revision_dir = self._create_temp_dir() - - with tempfile.NamedTemporaryFile() as base_config: - - # Write initial config contents. - operating_system.write_file( - base_config.name, base_config_contents, codec) - - strategy = ImportOverrideStrategy(revision_dir, 'ext') - strategy.configure( - base_config.name, current_user, current_user, codec, False) - - self._assert_import_override_strategy( - strategy, system_overrides, user_overrides, test_multi_rev) - - def _assert_import_override_strategy( - self, strategy, system_overrides, user_overrides, test_multi_rev): - - def import_path_builder( - root, group_name, change_id, file_index, file_ext): - return os.path.join( - root, '%s-%03d-%s.%s' - % (group_name, file_index, change_id, file_ext)) - - # Apply and remove overrides sequentially. - ########################################## - - # Apply the overrides and verify the files as they are created. - self._apply_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - self._apply_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - - # Verify the files again after applying all overrides. - self._assert_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - self._assert_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - - # Remove the overrides and verify the files are gone. 
- self._remove_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - self._remove_import_overrides( - strategy, 'system', user_overrides, import_path_builder) - - # Remove a whole group. - ########################################## - - # Apply overrides first. - self._apply_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - self._apply_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - - # Remove all user overrides and verify the files are gone. - self._remove_import_overrides( - strategy, 'user', None, import_path_builder) - - # Assert that the system files are still there intact. - self._assert_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - - # Remove all system overrides and verify the files are gone. - self._remove_import_overrides( - strategy, 'system', None, import_path_builder) - - if test_multi_rev: - - # Remove at the end (only if we have multiple revision files). - ########################################## - - # Apply overrides first. - self._apply_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - self._apply_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - - # Remove the last user and system overrides. - self._remove_import_overrides( - strategy, 'user', [user_overrides[-1]], import_path_builder) - self._remove_import_overrides( - strategy, 'system', [system_overrides[-1]], - import_path_builder) - - # Assert that the first overrides are still there intact. - self._assert_import_overrides( - strategy, 'user', [user_overrides[0]], import_path_builder) - self._assert_import_overrides( - strategy, 'system', [system_overrides[0]], import_path_builder) - - # Re-apply all overrides. - self._apply_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - self._apply_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - - # This should overwrite the existing files and resume counting from - # their indices. - self._assert_import_overrides( - strategy, 'user', user_overrides, import_path_builder) - self._assert_import_overrides( - strategy, 'system', system_overrides, import_path_builder) - - def _apply_import_overrides( - self, strategy, group_name, overrides, path_builder): - # Apply the overrides and immediately check the file and its contents. - for change_id, contents, index, _ in overrides: - strategy.apply(group_name, change_id, contents) - expected_path = path_builder( - strategy._revision_dir, group_name, change_id, index, - strategy._revision_ext) - self._assert_file_exists(expected_path, True) - - def _remove_import_overrides( - self, strategy, group_name, overrides, path_builder): - if overrides: - # Remove the overrides and immediately check the file was removed. - for change_id, _, index, _ in overrides: - strategy.remove(group_name, change_id) - expected_path = path_builder( - strategy._revision_dir, group_name, change_id, index, - strategy._revision_ext) - self._assert_file_exists(expected_path, False) - else: - # Remove the entire group. - strategy.remove(group_name) - found = operating_system.list_files_in_directory( - strategy._revision_dir, pattern='^%s-.+$' % group_name) - self.assertEqual(set(), found, "Some import files from group '%s' " - "were not removed." 
% group_name) - - def _assert_import_overrides( - self, strategy, group_name, overrides, path_builder): - # Check all override files and their contents. - for change_id, _, index, expected in overrides: - expected_path = path_builder( - strategy._revision_dir, group_name, change_id, index, - strategy._revision_ext) - self._assert_file_exists(expected_path, True) - # Assert that the file contents match the expected data. - imported = operating_system.read_file( - expected_path, codec=strategy._codec) - self.assertEqual(expected, imported) - - def _assert_file_exists(self, file_path, exists): - if exists: - self.assertTrue(os.path.exists(file_path), - "Revision import '%s' does not exist." - % file_path) - else: - self.assertFalse(os.path.exists(file_path), - "Revision import '%s' was not removed." - % file_path) - - def test_get_value(self): - revision_dir = self._create_temp_dir() - self._assert_get_value(ImportOverrideStrategy(revision_dir, 'ext')) - self._assert_get_value(OneFileOverrideStrategy(revision_dir)) - - @patch.multiple(operating_system, chmod=Mock(), chown=Mock()) - def _assert_get_value(self, override_strategy): - base_config_contents = {'Section_1': {'name': 'pi', - 'is_number': True, - 'value': 3.1415} - } - - config_overrides_v1a = {'Section_1': {'name': 'sqrt(2)', - 'value': 1.4142} - } - - config_overrides_v2 = {'Section_1': {'name': 'e', - 'value': 2.7183}, - 'Section_2': {'foo': 'bar'} - } - - config_overrides_v1b = {'Section_1': {'name': 'sqrt(4)', - 'value': 2.0} - } - - codec = IniCodec() - current_user = getpass.getuser() - - with tempfile.NamedTemporaryFile() as base_config: - - # Write initial config contents. - operating_system.write_file( - base_config.name, base_config_contents, codec) - - manager = ConfigurationManager( - base_config.name, current_user, current_user, codec, - requires_root=False, override_strategy=override_strategy) - - # Test default value. - self.assertIsNone(manager.get_value('Section_2')) - self.assertEqual('foo', manager.get_value('Section_2', 'foo')) - - # Test value before applying overrides. - self.assertEqual('pi', manager.get_value('Section_1')['name']) - self.assertEqual(3.1415, manager.get_value('Section_1')['value']) - - # Test value after applying overrides. - manager.apply_user_override(config_overrides_v1a, change_id='id1') - self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name']) - self.assertEqual(1.4142, manager.get_value('Section_1')['value']) - manager.apply_user_override(config_overrides_v2, change_id='id2') - self.assertEqual('e', manager.get_value('Section_1')['name']) - self.assertEqual(2.7183, manager.get_value('Section_1')['value']) - self.assertEqual('bar', manager.get_value('Section_2')['foo']) - - # Edits to change 'id1' become visible only after removing - # change 'id2', which overrides 'id1'. - manager.apply_user_override(config_overrides_v1b, change_id='id1') - self.assertEqual('e', manager.get_value('Section_1')['name']) - self.assertEqual(2.7183, manager.get_value('Section_1')['value']) - - # Test value after removing overrides. - - # The edited values from change 'id1' should be visible after - # removing 'id2'. - manager.remove_user_override(change_id='id2') - self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) - self.assertEqual(2.0, manager.get_value('Section_1')['value']) - - # Back to the base.
- manager.remove_user_override(change_id='id1') - self.assertEqual('pi', manager.get_value('Section_1')['name']) - self.assertEqual(3.1415, manager.get_value('Section_1')['value']) - self.assertIsNone(manager.get_value('Section_2')) - - # Test system overrides. - manager.apply_system_override( - config_overrides_v1b, change_id='id1') - self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) - self.assertEqual(2.0, manager.get_value('Section_1')['value']) - - # The system values should take precedence over the user - # override. - manager.apply_user_override( - config_overrides_v1a, change_id='id1') - self.assertEqual('sqrt(4)', manager.get_value('Section_1')['name']) - self.assertEqual(2.0, manager.get_value('Section_1')['value']) - - # The user values should become visible only after removing the - # system change. - manager.remove_system_override(change_id='id1') - self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name']) - self.assertEqual(1.4142, manager.get_value('Section_1')['value']) - - # Back to the base. - manager.remove_user_override(change_id='id1') - self.assertEqual('pi', manager.get_value('Section_1')['name']) - self.assertEqual(3.1415, manager.get_value('Section_1')['value']) - self.assertIsNone(manager.get_value('Section_2')) diff --git a/trove/tests/unittests/guestagent/test_couchbase_manager.py b/trove/tests/unittests/guestagent/test_couchbase_manager.py deleted file mode 100644 index 5aaa51166a..0000000000 --- a/trove/tests/unittests/guestagent/test_couchbase_manager.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import stat -import tempfile -from unittest import mock - -from mock import DEFAULT -from mock import MagicMock -from mock import Mock -from mock import patch -from oslo_utils import netutils - -from trove.common import utils -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.couchbase import ( - manager as couch_manager) -from trove.guestagent.datastore.experimental.couchbase import ( - service as couch_service) -from trove.guestagent import volume -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest - - -class GuestAgentCouchbaseManagerTest(DatastoreManagerTest): - - def setUp(self): - super(GuestAgentCouchbaseManagerTest, self).setUp('couchbase') - self.manager = couch_manager.Manager() - self.packages = 'couchbase-server' - app_patcher = patch.multiple( - couch_service.CouchbaseApp, - stop_db=DEFAULT, start_db=DEFAULT, restart=DEFAULT) - self.addCleanup(app_patcher.stop) - app_patcher.start() - - netutils_patcher = patch.object(netutils, 'get_my_ipv4') - self.addCleanup(netutils_patcher.stop) - netutils_patcher.start() - - def tearDown(self): - super(GuestAgentCouchbaseManagerTest, self).tearDown() - - def test_update_status(self): - mock_status = MagicMock() - mock_status.is_installed = True - mock_status._is_restarting = False - self.manager.appStatus = mock_status - self.manager.update_status(self.context) - self.assertTrue(mock_status.set_status.called) - - def test_prepare_device_path_true(self): - self._prepare_dynamic() - - def test_prepare_from_backup(self): - self._prepare_dynamic(backup_id='backup_id_123abc') - - @patch.multiple(couch_service.CouchbaseApp, - install_if_needed=DEFAULT, - start_db_with_conf_changes=DEFAULT, - initial_setup=DEFAULT) - @patch.multiple(volume.VolumeDevice, - format=DEFAULT, - mount=DEFAULT, - mount_points=Mock(return_value=[])) - @patch.object(backup, 'restore') - def _prepare_dynamic(self, device_path='/dev/vdb', backup_id=None, - *mocks, **kwmocks): - - # covering all outcomes is starting to cause trouble here - backup_info = {'id': backup_id, - 'location': 'fake-location', - 'type': 'CbBackup', - 'checksum': 'fake-checksum'} if backup_id else None - - mock_status = MagicMock() - mock_status.begin_install = MagicMock(return_value=None) - self.manager.appStatus = mock_status - - instance_ram = 2048 - mount_point = '/var/lib/couchbase' - - self.manager.prepare(self.context, self.packages, None, - instance_ram, None, device_path=device_path, - mount_point=mount_point, - backup_info=backup_info, - overrides=None, - cluster_config=None) - - # verification/assertion - mock_status.begin_install.assert_any_call() - kwmocks['install_if_needed'].assert_any_call(self.packages) - if backup_info: - backup.restore.assert_any_call(self.context, - backup_info, - mount_point) - - def test_restart(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couch_service.CouchbaseApp.restart = MagicMock(return_value=None) - # invocation - self.manager.restart(self.context) - # verification/assertion - couch_service.CouchbaseApp.restart.assert_any_call() - - def test_stop_db(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couch_service.CouchbaseApp.stop_db = MagicMock(return_value=None) - # invocation - self.manager.stop_db(self.context) - # verification/assertion - couch_service.CouchbaseApp.stop_db.assert_any_call( - do_not_start_on_reboot=False) - - def __fake_mkstemp(self): - self.tempfd, self.tempname = self.original_mkstemp() - 
return self.tempfd, self.tempname - - def __fake_mkstemp_raise(self): - raise OSError(11, 'Resource temporarily unavailable') - - def __cleanup_tempfile(self): - if self.tempname: - os.unlink(self.tempname) - - @mock.patch.object(utils, 'execute_with_timeout', - Mock(return_value=('0', ''))) - def test_write_password_to_file1(self): - self.original_mkstemp = tempfile.mkstemp - self.tempname = None - - with mock.patch.object(tempfile, - 'mkstemp', - self.__fake_mkstemp): - self.addCleanup(self.__cleanup_tempfile) - - rootaccess = couch_service.CouchbaseRootAccess() - rootaccess.write_password_to_file('mypassword') - - filepermissions = os.stat(self.tempname).st_mode - self.assertEqual(stat.S_IRUSR, filepermissions & 0o777) - - @mock.patch.object(utils, 'execute_with_timeout', - Mock(return_value=('0', ''))) - @mock.patch( - 'trove.guestagent.datastore.experimental.couchbase.service.LOG') - def test_write_password_to_file2(self, mock_logging): - self.original_mkstemp = tempfile.mkstemp - self.tempname = None - - with mock.patch.object(tempfile, - 'mkstemp', - self.__fake_mkstemp_raise): - - rootaccess = couch_service.CouchbaseRootAccess() - - self.assertRaises(RuntimeError, - rootaccess.write_password_to_file, - 'mypassword') diff --git a/trove/tests/unittests/guestagent/test_couchdb_manager.py b/trove/tests/unittests/guestagent/test_couchdb_manager.py deleted file mode 100644 index dce6fd0803..0000000000 --- a/trove/tests/unittests/guestagent/test_couchdb_manager.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright 2015 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from mock import MagicMock -from mock import patch -from oslo_utils import netutils -from testtools.matchers import Is, Equals, Not - -from trove.common.instance import ServiceStatuses -from trove.guestagent import backup -from trove.guestagent.datastore.experimental.couchdb import ( - manager as couchdb_manager) -from trove.guestagent.datastore.experimental.couchdb import ( - service as couchdb_service) -from trove.guestagent import pkg -from trove.guestagent import volume -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest - - -class GuestAgentCouchDBManagerTest(DatastoreManagerTest): - - def setUp(self): - super(GuestAgentCouchDBManagerTest, self).setUp('couchdb') - self.real_status = couchdb_service.CouchDBAppStatus.set_status - - class FakeInstanceServiceStatus(object): - status = ServiceStatuses.NEW - - def save(self): - pass - - couchdb_service.CouchDBAppStatus.set_status = MagicMock( - return_value=FakeInstanceServiceStatus()) - self.manager = couchdb_manager.Manager() - self.pkg = couchdb_service.packager - self.real_db_app_status = couchdb_service.CouchDBAppStatus - self.origin_os_path_exists = os.path.exists - self.origin_format = volume.VolumeDevice.format - self.origin_migrate_data = volume.VolumeDevice.migrate_data - self.origin_mount = volume.VolumeDevice.mount - self.origin_mount_points = volume.VolumeDevice.mount_points - self.origin_stop_db = couchdb_service.CouchDBApp.stop_db - self.origin_start_db = couchdb_service.CouchDBApp.start_db - self.original_get_ip = netutils.get_my_ipv4 - self.orig_make_host_reachable = ( - couchdb_service.CouchDBApp.make_host_reachable) - self.orig_backup_restore = backup.restore - self.orig_create_users = couchdb_service.CouchDBAdmin.create_user - self.orig_delete_user = couchdb_service.CouchDBAdmin.delete_user - self.orig_list_users = couchdb_service.CouchDBAdmin.list_users - self.orig_get_user = couchdb_service.CouchDBAdmin.get_user - self.orig_grant_access = couchdb_service.CouchDBAdmin.grant_access - self.orig_revoke_access = couchdb_service.CouchDBAdmin.revoke_access - self.orig_list_access = couchdb_service.CouchDBAdmin.list_access - self.orig_enable_root = couchdb_service.CouchDBAdmin.enable_root - self.orig_is_root_enabled = ( - couchdb_service.CouchDBAdmin.is_root_enabled) - self.orig_create_databases = ( - couchdb_service.CouchDBAdmin.create_database) - self.orig_list_databases = couchdb_service.CouchDBAdmin.list_databases - self.orig_delete_database = ( - couchdb_service.CouchDBAdmin.delete_database) - - def tearDown(self): - super(GuestAgentCouchDBManagerTest, self).tearDown() - couchdb_service.packager = self.pkg - couchdb_service.CouchDBAppStatus.set_status = self.real_db_app_status - os.path.exists = self.origin_os_path_exists - volume.VolumeDevice.format = self.origin_format - volume.VolumeDevice.migrate_data = self.origin_migrate_data - volume.VolumeDevice.mount = self.origin_mount - volume.VolumeDevice.mount_points = self.origin_mount_points - couchdb_service.CouchDBApp.stop_db = self.origin_stop_db - couchdb_service.CouchDBApp.start_db = self.origin_start_db - netutils.get_my_ipv4 = self.original_get_ip - couchdb_service.CouchDBApp.make_host_reachable = ( - self.orig_make_host_reachable) - backup.restore = self.orig_backup_restore - couchdb_service.CouchDBAdmin.create_user = self.orig_create_users - couchdb_service.CouchDBAdmin.delete_user = self.orig_delete_user - couchdb_service.CouchDBAdmin.list_users = self.orig_list_users - couchdb_service.CouchDBAdmin.get_user 
= self.orig_get_user - couchdb_service.CouchDBAdmin.grant_access = self.orig_grant_access - couchdb_service.CouchDBAdmin.revoke_access = self.orig_revoke_access - couchdb_service.CouchDBAdmin.list_access = self.orig_list_access - couchdb_service.CouchDBAdmin.enable_root = self.orig_enable_root - couchdb_service.CouchDBAdmin.is_root_enabled = ( - self.orig_is_root_enabled) - couchdb_service.CouchDBAdmin.create_database = ( - self.orig_create_databases) - couchdb_service.CouchDBAdmin.list_databases = self.orig_list_databases - couchdb_service.CouchDBAdmin.delete_database = ( - self.orig_delete_database) - - def test_update_status(self): - mock_status = MagicMock() - mock_status.is_installed = True - mock_status._is_restarting = False - self.manager.appStatus = mock_status - self.manager.update_status(self.context) - self.assertTrue(mock_status.set_status.called) - - def _prepare_dynamic(self, packages=None, databases=None, - config_content=None, device_path='/dev/vdb', - is_db_installed=True, backup_id=None, - overrides=None): - mock_status = MagicMock() - mock_app = MagicMock() - self.manager.appStatus = mock_status - self.manager.app = mock_app - mount_point = '/var/lib/couchdb' - - mock_status.begin_install = MagicMock(return_value=None) - mock_app.install_if_needed = MagicMock(return_value=None) - mock_app.make_host_reachable = MagicMock(return_value=None) - mock_app.restart = MagicMock(return_value=None) - mock_app.start_db = MagicMock(return_value=None) - mock_app.stop_db = MagicMock(return_value=None) - os.path.exists = MagicMock(return_value=True) - volume.VolumeDevice.format = MagicMock(return_value=None) - volume.VolumeDevice.migrate_data = MagicMock(return_value=None) - volume.VolumeDevice.mount = MagicMock(return_value=None) - volume.VolumeDevice.mount_points = MagicMock(return_value=[]) - backup.restore = MagicMock(return_value=None) - - backup_info = {'id': backup_id, - 'location': 'fake-location', - 'type': 'CouchDBBackup', - 'checksum': 'fake-checksum'} if backup_id else None - - couchdb_service.CouchDBAdmin.create_database = MagicMock( - return_value=None) - couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) - - with patch.object(pkg.Package, 'pkg_is_installed', - return_value=MagicMock( - return_value=is_db_installed)): - self.manager.prepare(context=self.context, packages=packages, - config_contents=config_content, - databases=databases, - memory_mb='2048', users=None, - device_path=device_path, - mount_point=mount_point, - backup_info=backup_info, - overrides=None, - cluster_config=None) - # verification/assertion - mock_status.begin_install.assert_any_call() - mock_app.install_if_needed.assert_any_call(packages) - mock_app.make_host_reachable.assert_any_call() - mock_app.change_permissions.assert_any_call() - if backup_id: - backup.restore.assert_any_call(self.context, - backup_info, - mount_point) - - def test_prepare_pkg(self): - self._prepare_dynamic(['couchdb']) - - def test_prepare_no_pkg(self): - self._prepare_dynamic([]) - - def test_prepare_from_backup(self): - self._prepare_dynamic(['couchdb'], backup_id='123abc456') - - def test_prepare_database(self): - self._prepare_dynamic(databases=['db1']) - - def test_restart(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - with patch.object(couchdb_service.CouchDBApp, 'restart', - return_value=None): - # invocation - self.manager.restart(self.context) - # verification/assertion - couchdb_service.CouchDBApp.restart.assert_any_call() - - def test_stop_db(self): - mock_status 
= MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBApp.stop_db = MagicMock(return_value=None) - # invocation - self.manager.stop_db(self.context) - # verification/assertion - couchdb_service.CouchDBApp.stop_db.assert_any_call( - do_not_start_on_reboot=False) - - def test_reset_configuration(self): - try: - configuration = {'config_contents': 'some junk'} - self.manager.reset_configuration(self.context, configuration) - except Exception: - self.fail("reset_configuration raised exception unexpectedly.") - - def test_rpc_ping(self): - output = self.manager.rpc_ping(self.context) - self.assertTrue(output) - - def test_create_user(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None) - self.manager.create_user(self.context, ['user1']) - couchdb_service.CouchDBAdmin.create_user.assert_any_call(['user1']) - - def test_delete_user(self): - user = ['user1'] - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.delete_user = MagicMock(return_value=None) - self.manager.delete_user(self.context, user) - couchdb_service.CouchDBAdmin.delete_user.assert_any_call(user) - - def test_list_users(self): - couchdb_service.CouchDBAdmin.list_users = MagicMock( - return_value=['user1']) - users = self.manager.list_users(self.context) - self.assertThat(users, Equals(['user1'])) - couchdb_service.CouchDBAdmin.list_users.assert_any_call( - None, None, False) - - def test_get_user(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.get_user = MagicMock( - return_value=['user1']) - self.manager.get_user(self.context, 'user1', None) - couchdb_service.CouchDBAdmin.get_user.assert_any_call( - 'user1', None) - - def test_grant_access(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.grant_access = MagicMock( - return_value=None) - self.manager.grant_access(self.context, 'user1', None, ['db1']) - couchdb_service.CouchDBAdmin.grant_access.assert_any_call( - 'user1', ['db1']) - - def test_revoke_access(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.revoke_access = MagicMock( - return_value=None) - self.manager.revoke_access(self.context, 'user1', None, ['db1']) - couchdb_service.CouchDBAdmin.revoke_access.assert_any_call( - 'user1', ['db1']) - - def test_list_access(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.list_access = MagicMock( - return_value=['user1']) - self.manager.list_access(self.context, 'user1', None) - couchdb_service.CouchDBAdmin.list_access.assert_any_call( - 'user1', None) - - def test_enable_root(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.enable_root = MagicMock( - return_value=True) - result = self.manager.enable_root(self.context) - self.assertThat(result, Equals(True)) - - def test_is_root_enabled(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.is_root_enabled = MagicMock( - return_value=True) - result = self.manager.is_root_enabled(self.context) - self.assertThat(result, Equals(True)) - - def test_create_databases(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.create_database = MagicMock( - return_value=None) - 
self.manager.create_database(self.context, ['db1']) - couchdb_service.CouchDBAdmin.create_database.assert_any_call(['db1']) - - def test_delete_database(self): - databases = ['db1'] - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.delete_database = MagicMock( - return_value=None) - self.manager.delete_database(self.context, databases) - couchdb_service.CouchDBAdmin.delete_database.assert_any_call( - databases) - - def test_list_databases(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - couchdb_service.CouchDBAdmin.list_databases = MagicMock( - return_value=['database1']) - databases = self.manager.list_databases(self.context) - self.assertThat(databases, Not(Is(None))) - self.assertThat(databases, Equals(['database1'])) - couchdb_service.CouchDBAdmin.list_databases.assert_any_call( - None, None, False) diff --git a/trove/tests/unittests/guestagent/test_datastore_manager.py b/trove/tests/unittests/guestagent/test_datastore_manager.py deleted file mode 100644 index 054a623be4..0000000000 --- a/trove/tests/unittests/guestagent/test_datastore_manager.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 Tesora Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from trove.tests.unittests import trove_testtools - - -class DatastoreManagerTest(trove_testtools.TestCase): - - def setUp(self, manager_name): - super(DatastoreManagerTest, self).setUp() - self.patch_datastore_manager(manager_name) - self.context = trove_testtools.TroveTestContext(self) diff --git a/trove/tests/unittests/guestagent/test_dbaas.py b/trove/tests/unittests/guestagent/test_dbaas.py deleted file mode 100644 index ee12274056..0000000000 --- a/trove/tests/unittests/guestagent/test_dbaas.py +++ /dev/null @@ -1,3721 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import os -import subprocess -import tempfile -import time -from uuid import uuid4 - -from mock import ANY -from mock import call -from mock import DEFAULT -from mock import MagicMock -from mock import Mock -from mock import patch -from mock import PropertyMock -from oslo_utils import netutils -from six.moves import configparser -import sqlalchemy - -from trove.common import cfg -from trove.common import context as trove_context -from trove.common.db.models import DatastoreUser -from trove.common.db.mysql import models as mysql_models -from trove.common.exception import BadRequest -from trove.common.exception import GuestError -from trove.common.exception import PollTimeOut -from trove.common.exception import ProcessExecutionError -from trove.common import instance as rd_instance -from trove.common import utils -from trove.conductor import api as conductor_api -from trove.guestagent.common.configuration import ConfigurationManager -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.cassandra import ( - service as cass_service) -from trove.guestagent.datastore.experimental.couchbase import ( - service as couchservice) -from trove.guestagent.datastore.experimental.couchdb import ( - service as couchdb_service) -from trove.guestagent.datastore.experimental.db2 import ( - service as db2service) -from trove.guestagent.datastore.experimental.mariadb import ( - service as mariadb_service) -from trove.guestagent.datastore.experimental.mongodb import ( - service as mongo_service) -from trove.guestagent.datastore.experimental.mongodb import ( - system as mongo_system) -from trove.guestagent.datastore.experimental.postgresql import ( - service as pg_service) -from trove.guestagent.datastore.experimental.pxc import ( - service as pxc_service) -from trove.guestagent.datastore.experimental.redis import service as rservice -from trove.guestagent.datastore.experimental.redis.service import RedisApp -from trove.guestagent.datastore.experimental.redis import system as RedisSystem -from trove.guestagent.datastore.experimental.vertica import ( - system as vertica_system) -from trove.guestagent.datastore.experimental.vertica.service import ( - VerticaAppStatus) -from trove.guestagent.datastore.experimental.vertica.service import VerticaApp -import trove.guestagent.datastore.mysql.service as dbaas -from trove.guestagent.datastore.mysql.service import KeepAliveConnection -from trove.guestagent.datastore.mysql.service import MySqlAdmin -from trove.guestagent.datastore.mysql.service import MySqlApp -from trove.guestagent.datastore.mysql.service import MySqlAppStatus -from trove.guestagent.datastore.mysql.service import MySqlRootAccess -import trove.guestagent.datastore.mysql_common.service as mysql_common_service -import trove.guestagent.datastore.service as base_datastore_service -from trove.guestagent.datastore.service import BaseDbStatus -from trove.guestagent import dbaas as dbaas_sr -from trove.guestagent.dbaas import get_filesystem_volume_stats -from trove.guestagent import pkg -from trove.guestagent.volume import VolumeDevice -from trove.instance.models import InstanceServiceStatus -from trove.tests.unittests import trove_testtools -from trove.tests.unittests.util import util - -CONF = cfg.CONF - - -""" -Unit tests for the classes and functions in dbaas.py. 
-""" - -FAKE_DB = {"_name": "testDB", "_character_set": "latin2", - "_collate": "latin2_general_ci"} -FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2", - "_collate": "latin2_general_ci"} -FAKE_USER = [{"_name": "random", "_password": "guesswhat", - "_host": "%", "_databases": [FAKE_DB]}] - - -class FakeTime(object): - COUNTER = 0 - - @classmethod - def time(cls): - cls.COUNTER += 1 - return cls.COUNTER - - -def faketime(*args, **kwargs): - return FakeTime.time() - - -class FakeAppStatus(BaseDbStatus): - - def __init__(self, id, status): - self.id = id - self.status = status - self.next_fake_status = status - self._prepare_completed = None - self.start_db_service = MagicMock() - self.stop_db_service = MagicMock() - self.restart_db_service = MagicMock() - - def _get_actual_db_status(self): - return self.next_fake_status - - def set_next_status(self, next_status): - self.next_fake_status = next_status - - def _is_query_router(self): - return False - - -class DbaasTest(trove_testtools.TestCase): - - def setUp(self): - super(DbaasTest, self).setUp() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_utils_execute = mysql_common_service.utils.execute - - def tearDown(self): - super(DbaasTest, self).tearDown() - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - mysql_common_service.utils.execute = self.orig_utils_execute - - @patch.object(operating_system, 'remove') - def test_clear_expired_password(self, mock_remove): - secret_content = ("# The random password set for the " - "root user at Wed May 14 14:06:38 2014 " - "(local time): somepassword") - with patch.object(mysql_common_service.utils, 'execute', - return_value=(secret_content, None)): - mysql_common_service.clear_expired_password() - self.assertEqual(3, mysql_common_service.utils.execute.call_count) - self.assertEqual(1, mock_remove.call_count) - - @patch.object(operating_system, 'remove') - def test_no_secret_content_clear_expired_password(self, mock_remove): - with patch.object(mysql_common_service.utils, 'execute', - return_value=('', None)): - mysql_common_service.clear_expired_password() - self.assertEqual(2, mysql_common_service.utils.execute.call_count) - mock_remove.assert_not_called() - - @patch.object(operating_system, 'remove') - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_fail_password_update_content_clear_expired_password(self, - mock_logging, - mock_remove): - secret_content = ("# The random password set for the " - "root user at Wed May 14 14:06:38 2014 " - "(local time): somepassword") - with patch.object(mysql_common_service.utils, 'execute', - side_effect=[(secret_content, None), - ProcessExecutionError]): - mysql_common_service.clear_expired_password() - self.assertEqual(2, mysql_common_service.utils.execute.call_count) - mock_remove.assert_not_called() - - @patch.object(operating_system, 'remove') - @patch.object(mysql_common_service.utils, 'execute', - side_effect=[ProcessExecutionError, (None, None)]) - def test_fail_retrieve_secret_content_clear_expired_password(self, - mock_execute, - mock_remove): - mysql_common_service.clear_expired_password() - self.assertEqual(2, mock_execute.call_count) - mock_remove.assert_not_called() - - @patch.object(operating_system, 'read_file', - return_value={'client': - {'password': 'some password'}}) - @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager, - 'get_value', - return_value=MagicMock({'get': 'some password'})) - def 
test_get_auth_password(self, get_cnf_mock, read_file_mock): - password = MySqlApp.get_auth_password() - read_file_mock.assert_called_once_with(MySqlApp.get_client_auth_file(), - codec=MySqlApp.CFG_CODEC) - self.assertEqual("some password", password) - - @patch.object(mysql_common_service.BaseMySqlApp.configuration_manager, - 'get_value', - side_effect=RuntimeError('Error')) - @patch.object(operating_system, 'read_file', - side_effect=RuntimeError('read_file error')) - def test_get_auth_password_error(self, _, get_cnf_mock): - self.assertRaisesRegex(RuntimeError, "read_file error", - MySqlApp.get_auth_password) - - def test_service_discovery(self): - with patch.object(os.path, 'isfile', return_value=True): - mysql_service = mysql_common_service.operating_system.\ - service_discovery(["mysql"]) - self.assertIsNotNone(mysql_service['cmd_start']) - self.assertIsNotNone(mysql_service['cmd_enable']) - - def test_load_mysqld_options(self): - - output = "mysqld would've been started with these args:\n"\ - "--user=mysql --port=3306 --basedir=/usr "\ - "--tmpdir=/tmp --skip-external-locking" - - with patch.object(os.path, 'isfile', return_value=True): - mysql_common_service.utils.execute = Mock( - return_value=(output, None)) - options = mysql_common_service.load_mysqld_options() - - self.assertEqual(5, len(options)) - self.assertEqual(["mysql"], options["user"]) - self.assertEqual(["3306"], options["port"]) - self.assertEqual(["/usr"], options["basedir"]) - self.assertEqual(["/tmp"], options["tmpdir"]) - self.assertIn("skip-external-locking", options) - - def test_load_mysqld_options_contains_plugin_loads_options(self): - output = ("mysqld would've been started with these args:\n" - "--plugin-load=blackhole=ha_blackhole.so " - "--plugin-load=federated=ha_federated.so") - - with patch.object(os.path, 'isfile', return_value=True): - mysql_common_service.utils.execute = Mock( - return_value=(output, None)) - options = mysql_common_service.load_mysqld_options() - - self.assertEqual(1, len(options)) - self.assertEqual(["blackhole=ha_blackhole.so", - "federated=ha_federated.so"], - options["plugin-load"]) - - @patch.object(os.path, 'isfile', return_value=True) - def test_load_mysqld_options_error(self, mock_exists): - - mysql_common_service.utils.execute = Mock( - side_effect=ProcessExecutionError()) - - self.assertFalse(mysql_common_service.load_mysqld_options()) - - -class ResultSetStub(object): - - def __init__(self, rows): - self._rows = rows - - def __iter__(self): - return self._rows.__iter__() - - @property - def rowcount(self): - return len(self._rows) - - def __repr__(self): - return self._rows.__repr__() - - -class BaseAppTest(object): - """A wrapper to inhibit the base test methods from executing during a - normal test run. 
- """ - - class AppTestCase(trove_testtools.TestCase): - - def setUp(self, fake_id, manager_name): - super(BaseAppTest.AppTestCase, self).setUp() - self.patch_datastore_manager(manager_name) - self.FAKE_ID = fake_id - util.init_db() - InstanceServiceStatus.create( - instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - - def tearDown(self): - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - super(BaseAppTest.AppTestCase, self).tearDown() - - @abc.abstractproperty - def appStatus(self): - pass - - @abc.abstractproperty - def expected_state_change_timeout(self): - pass - - @abc.abstractproperty - def expected_service_candidates(self): - pass - - def test_start_db(self): - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.RUNNING) - self.app.start_db() - self.appStatus.start_db_service.assert_called_once_with( - self.expected_service_candidates, - self.expected_state_change_timeout, - enable_on_boot=True, update_db=False) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - def test_stop_db(self): - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.SHUTDOWN) - self.app.stop_db() - self.appStatus.stop_db_service.assert_called_once_with( - self.expected_service_candidates, - self.expected_state_change_timeout, - disable_on_boot=False, update_db=False) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - def test_restart_db(self): - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.app.restart() - self.appStatus.restart_db_service.assert_called_once_with( - self.expected_service_candidates, - self.expected_state_change_timeout) - - def assert_reported_status(self, expected_status): - service_status = InstanceServiceStatus.find_by( - instance_id=self.FAKE_ID) - self.assertEqual(expected_status, service_status.status) - - -class MySqlAdminMockTest(trove_testtools.TestCase): - - def setUp(self): - super(MySqlAdminMockTest, self).setUp() - mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT, - configuration_manager=DEFAULT) - self.addCleanup(mysql_app_patcher.stop) - mysql_app_patcher.start() - create_engine_patcher = patch.object(sqlalchemy, 'create_engine') - self.addCleanup(create_engine_patcher.stop) - create_engine_patcher.start() - exec_timeout_patcher = patch.object(utils, 'execute_with_timeout') - self.addCleanup(exec_timeout_patcher.stop) - exec_timeout_patcher.start() - - self.mock_cli_ctx_mgr = Mock() - self.mock_client = MagicMock() - self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) - self.mock_cli_ctx_mgr.__exit__ = Mock() - - local_client_patcher = patch.object(dbaas.MySqlAdmin, - 'local_sql_client', - return_value=self.mock_cli_ctx_mgr) - self.addCleanup(local_client_patcher.stop) - local_client_patcher.start() - - def tearDown(self): - super(MySqlAdminMockTest, self).tearDown() - - @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', - Mock(return_value='some_password')) - def test_list_databases(self): - with patch.object(self.mock_client, 'execute', - return_value=ResultSetStub( - [('db1', 'utf8', 'utf8_bin'), - ('db2', 'utf8', 'utf8_bin'), - ('db3', 'utf8', 'utf8_bin')])): - databases, 
next_marker = MySqlAdmin().list_databases(limit=10) - - self.assertIsNone(next_marker) - self.assertEqual(3, len(databases)) - - -class MySqlAdminTest(trove_testtools.TestCase): - - def setUp(self): - - super(MySqlAdminTest, self).setUp() - - self.orig_get_engine = dbaas.get_engine - self.mock_cli_ctx_mgr = Mock() - self.mock_client = MagicMock() - self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) - self.mock_cli_ctx_mgr.__exit__ = Mock() - - local_client_patcher = patch.object(dbaas.MySqlAdmin, - 'local_sql_client', - return_value=self.mock_cli_ctx_mgr) - self.addCleanup(local_client_patcher.stop) - local_client_patcher.start() - - self.orig_MySQLUser_is_valid_user_name = ( - mysql_models.MySQLUser._is_valid_user_name) - dbaas.get_engine = MagicMock(name='get_engine') - - # trove.guestagent.common.configuration import ConfigurationManager - dbaas.orig_configuration_manager = dbaas.MySqlApp.configuration_manager - dbaas.MySqlApp.configuration_manager = Mock() - dbaas.orig_get_auth_password = dbaas.MySqlApp.get_auth_password - dbaas.MySqlApp.get_auth_password = Mock(return_value='root_pwd') - self.orig_configuration_manager = \ - mysql_common_service.BaseMySqlApp.configuration_manager - mysql_common_service.BaseMySqlApp.configuration_manager = Mock() - - self.mySqlAdmin = MySqlAdmin() - - def tearDown(self): - dbaas.get_engine = self.orig_get_engine - mysql_models.MySQLUser._is_valid_user_name = ( - self.orig_MySQLUser_is_valid_user_name) - dbaas.MySqlApp.configuration_manager = \ - dbaas.orig_configuration_manager - dbaas.MySqlApp.get_auth_password = \ - dbaas.orig_get_auth_password - mysql_common_service.BaseMySqlApp.configuration_manager = \ - self.orig_configuration_manager - super(MySqlAdminTest, self).tearDown() - - def test__associate_dbs(self): - db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"}, - {"grantee": "'test_user'@'%'", "table_schema": "db2"}, - {"grantee": "'test_user'@'%'", "table_schema": "db3"}, - {"grantee": "'test_user'@'%'", "table_schema": "db4"}, - {"grantee": "'test_user1'@'%'", "table_schema": "db1"}, - {"grantee": "'test_user1'@'%'", "table_schema": "db3"}] - user = DatastoreUser(name='test_user', host='%') - expected = ("SELECT grantee, table_schema FROM " - "information_schema.SCHEMA_PRIVILEGES WHERE privilege_type" - " != 'USAGE' GROUP BY grantee, table_schema;") - - with patch.object(self.mock_client, 'execute', - return_value=db_result) as mock_execute: - self.mySqlAdmin._associate_dbs(user) - self.assertEqual(4, len(user.databases)) - self._assert_execute_call(expected, mock_execute) - - def _assert_execute_call(self, expected_query, execute_mock, call_idx=0): - args, _ = execute_mock.call_args_list[call_idx] - self.assertTrue(execute_mock.called, - "The client object was not called.") - self.assertEqual(expected_query, args[0].text, - "Queries are not the same.") - - def test_change_passwords(self): - user = [{"name": "test_user", "host": "%", "password": "password"}] - expected = ("SET PASSWORD FOR 'test_user'@'%' = PASSWORD('password');") - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.change_passwords(user) - self._assert_execute_call(expected, mock_execute) - - def test_update_attributes_password(self): - expected = ("SET PASSWORD FOR 'test_user'@'%' = PASSWORD('password');") - user = MagicMock() - user.name = "test_user" - user.host = "%" - user_attrs = {"password": "password"} - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, 
'_get_user', return_value=user): - self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) - self._assert_execute_call(expected, mock_execute) - - def test_update_attributes_name(self): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user_attrs = {"name": "new_name"} - expected = ("RENAME USER 'test_user'@'%' TO 'new_name'@'%';") - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) - self._assert_execute_call(expected, mock_execute) - - def test_update_attributes_host(self): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user_attrs = {"host": "new_host"} - expected = ("RENAME USER 'test_user'@'%' TO 'test_user'@'new_host';") - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - self.mySqlAdmin.update_attributes('test_user', '%', user_attrs) - self._assert_execute_call(expected, mock_execute) - - def test_create_database(self): - databases = [] - databases.append(FAKE_DB) - expected = ("CREATE DATABASE IF NOT EXISTS " - "`testDB` CHARACTER SET = 'latin2' " - "COLLATE = 'latin2_general_ci';") - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.create_database(databases) - self._assert_execute_call(expected, mock_execute) - - def test_create_database_more_than_1(self): - databases = [] - databases.append(FAKE_DB) - databases.append(FAKE_DB_2) - expected_1 = ("CREATE DATABASE IF NOT EXISTS " - "`testDB` CHARACTER SET = 'latin2' " - "COLLATE = 'latin2_general_ci';") - expected_2 = ("CREATE DATABASE IF NOT EXISTS " - "`testDB2` CHARACTER SET = 'latin2' " - "COLLATE = 'latin2_general_ci';") - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.create_database(databases) - self._assert_execute_call(expected_1, mock_execute, call_idx=0) - self._assert_execute_call(expected_2, mock_execute, call_idx=1) - - def test_create_database_no_db(self): - databases = [] - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.create_database(databases) - mock_execute.assert_not_called() - - def test_delete_database(self): - database = {"_name": "testDB"} - expected = "DROP DATABASE `testDB`;" - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.delete_database(database) - self._assert_execute_call(expected, mock_execute) - - def test_delete_user(self): - user = {"_name": "testUser", "_host": None} - expected = "DROP USER `testUser`@`%`;" - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.delete_user(user) - self._assert_execute_call(expected, mock_execute) - - def test_create_user(self): - access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO " - "`random`@`%`;") - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.create_user(FAKE_USER) - mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'), - user='random', host='%') - self._assert_execute_call(access_grants_expected, - mock_execute, call_idx=1) - - @patch.object(mysql_common_service.BaseMySqlApp, 'get_auth_password', - Mock(return_value='some_password')) - def test_list_databases(self): - expected = ("SELECT schema_name as name," - " default_character_set_name as charset," - " default_collation_name as collation" - " FROM information_schema.schemata WHERE" - " 
schema_name NOT IN ('" + - "', '".join(cfg.get_ignored_dbs()) + - "')" - " ORDER BY schema_name ASC;" - ) - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_databases() - self._assert_execute_call(expected, mock_execute) - - def test_list_databases_with_limit(self): - limit = 2 - expected = ("SELECT schema_name as name," - " default_character_set_name as charset," - " default_collation_name as collation" - " FROM information_schema.schemata WHERE" - " schema_name NOT IN ('" + - "', '".join(cfg.get_ignored_dbs()) + "')" - " ORDER BY schema_name ASC LIMIT " + str(limit + 1) + ";" - ) - with patch.object(self.mock_client, 'execute') as mock_execute: - mock_execute.return_value.rowcount = 0 - self.mySqlAdmin.list_databases(limit) - self._assert_execute_call(expected, mock_execute) - - def test_list_databases_with_marker(self): - marker = "aMarker" - expected = ("SELECT schema_name as name," - " default_character_set_name as charset," - " default_collation_name as collation" - " FROM information_schema.schemata WHERE" - " schema_name NOT IN ('" + - "', '".join(cfg.get_ignored_dbs()) + "')" - " AND schema_name > '" + marker + "'" - " ORDER BY schema_name ASC;" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_databases(marker=marker) - self._assert_execute_call(expected, mock_execute) - - def test_list_databases_with_include_marker(self): - marker = "aMarker" - expected = ("SELECT schema_name as name," - " default_character_set_name as charset," - " default_collation_name as collation" - " FROM information_schema.schemata WHERE" - " schema_name NOT IN ('" + - "', '".join(cfg.get_ignored_dbs()) + "')" - " AND schema_name >= '" + marker + "'" - " ORDER BY schema_name ASC;" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_databases(marker=marker, include_marker=True) - self._assert_execute_call(expected, mock_execute) - - def test_list_users(self): - expected = ("SELECT User, Host, Marker FROM" - " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" - " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" - " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" - " ORDER BY Marker;" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_users() - self._assert_execute_call(expected, mock_execute) - - def test_list_users_with_limit(self): - limit = 2 - expected = ("SELECT User, Host, Marker FROM" - " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" - " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" - " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" - " ORDER BY Marker" - " LIMIT " + str(limit + 1) + ";" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - mock_execute.return_value.rowcount = 0 - self.mySqlAdmin.list_users(limit) - self._assert_execute_call(expected, mock_execute) - - def test_list_users_with_marker(self): - marker = "aMarker" - expected = ("SELECT User, Host, Marker FROM" - " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" - " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" - " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" - " AND Marker > '" + marker + "'" - " ORDER BY Marker;" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_users(marker=marker) - self._assert_execute_call(expected, mock_execute) - - def test_list_users_with_include_marker(self): - marker = "aMarker" - expected = 
("SELECT User, Host, Marker FROM" - " (SELECT User, Host, CONCAT(User, '@', Host) as Marker" - " FROM mysql.user ORDER BY User, Host) as innerquery WHERE" - " Host != 'localhost' AND User NOT IN ('os_admin', 'root')" - " AND Marker >= '" + marker + "'" - " ORDER BY Marker;" - ) - - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.list_users(marker=marker, include_marker=True) - self._assert_execute_call(expected, mock_execute) - - @patch.object(dbaas.MySqlAdmin, '_associate_dbs') - def test_get_user(self, mock_associate_dbs): - """ - Unit tests for mySqlAdmin.get_user. - This test case checks if the sql query formed by the get_user method - is correct or not by checking with expected query. - """ - username = "user1" - hostname = "%" - user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}] - expected = ("SELECT User, Host FROM mysql.user " - "WHERE Host != 'localhost' AND User = 'user1' " - "AND Host = '%' ORDER BY User, Host;") - - with patch.object(self.mock_client, 'execute') as mock_execute: - fa_mock = Mock(return_value=user) - mock_execute.return_value = Mock() - mock_execute.return_value.fetchall = fa_mock - self.mySqlAdmin.get_user(username, hostname) - self.assertEqual(1, mock_associate_dbs.call_count) - self._assert_execute_call(expected, mock_execute) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_fail_get_user(self, *args): - username = "os_admin" - hostname = "host" - self.assertRaisesRegex(BadRequest, "Username os_admin is not valid", - self.mySqlAdmin.get_user, username, hostname) - - def test_grant_access(self): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user.password = 'some_password' - databases = ['db1'] - expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` " - "IDENTIFIED BY PASSWORD 'some_password';") - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - self.mySqlAdmin.grant_access('test_user', '%', databases) - self._assert_execute_call(expected, mock_execute) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_fail_grant_access(self, *args): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user.password = 'some_password' - databases = ['mysql'] - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - self.mySqlAdmin.grant_access('test_user', '%', databases) - # since mysql is not a database to be provided access to, - # testing that executed was not called in grant access. 
- mock_execute.assert_not_called() - - def test_is_root_enabled(self): - expected = ("SELECT User FROM mysql.user WHERE " - "User = 'root' AND Host != 'localhost';") - - with patch.object(dbaas.MySqlRootAccess, 'local_sql_client', - return_value=self.mock_cli_ctx_mgr): - with patch.object(self.mock_client, 'execute') as mock_execute: - self.mySqlAdmin.is_root_enabled() - self._assert_execute_call(expected, mock_execute) - - def test_revoke_access(self): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user.password = 'some_password' - databases = ['db1'] - expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;") - with patch.object(self.mock_client, 'execute') as mock_execute: - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - self.mySqlAdmin.revoke_access('test_usr', '%', databases) - self._assert_execute_call(expected, mock_execute) - - def test_list_access(self): - user = MagicMock() - user.name = "test_user" - user.host = "%" - user.databases = ['db1', 'db2'] - with patch.object(self.mock_client, 'execute'): - with patch.object(self.mySqlAdmin, '_get_user', return_value=user): - databases = self.mySqlAdmin.list_access('test_usr', '%') - self.assertEqual(2, len(databases), - "List access queries are not the same") - - -class MySqlAppTest(trove_testtools.TestCase): - - def setUp(self): - conductor_cli_patcher = patch.object(conductor_api.API, 'get_client') - self.addCleanup(conductor_cli_patcher.stop) - conductor_cli_patcher.start() - super(MySqlAppTest, self).setUp() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - self.orig_unlink = os.unlink - self.orig_service_discovery = operating_system.service_discovery - mysql_app_patcher = patch.multiple(mysql_common_service.BaseMySqlApp, - get_engine=DEFAULT, - get_auth_password=DEFAULT, - configuration_manager=DEFAULT) - self.addCleanup(mysql_app_patcher.stop) - mysql_app_patcher.start() - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.mySqlApp = MySqlApp(self.appStatus) - mysql_service = {'cmd_start': Mock(), - 'cmd_stop': Mock(), - 'cmd_enable': Mock(), - 'cmd_disable': Mock(), - 'cmd_bootstrap_galera_cluster': Mock(), - 'bin': Mock()} - operating_system.service_discovery = Mock( - return_value=mysql_service) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - os.unlink = Mock() - self.mock_client = Mock() - self.mock_execute = Mock() - self.mock_client.__enter__ = Mock() - self.mock_client.__exit__ = Mock() - self.mock_client.__enter__.return_value.execute = self.mock_execute - self.orig_create_engine = sqlalchemy.create_engine - - def tearDown(self): - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - os.unlink = self.orig_unlink - operating_system.service_discovery = self.orig_service_discovery - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - sqlalchemy.create_engine = self.orig_create_engine - super(MySqlAppTest, self).tearDown() - - def assert_reported_status(self, expected_status): - service_status = InstanceServiceStatus.find_by( - instance_id=self.FAKE_ID) - self.assertEqual(expected_status, service_status.status) - - def mysql_starts_successfully(self): - def 
start(update_db=False): - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.RUNNING) - - self.mySqlApp.start_mysql.side_effect = start - - def mysql_starts_unsuccessfully(self): - def start(): - raise RuntimeError("MySQL failed to start!") - - self.mySqlApp.start_mysql.side_effect = start - - def mysql_stops_successfully(self): - def stop(): - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.SHUTDOWN) - - self.mySqlApp.stop_db.side_effect = stop - - def mysql_stops_unsuccessfully(self): - def stop(): - raise RuntimeError("MySQL failed to stop!") - - self.mySqlApp.stop_db.side_effect = stop - - def test_stop_mysql(self): - - mysql_common_service.utils.execute_with_timeout = Mock() - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.SHUTDOWN) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.stop_db() - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - def test_stop_mysql_with_db_update(self): - - mysql_common_service.utils.execute_with_timeout = Mock() - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.SHUTDOWN) - self.patch_conf_property('guest_id', self.FAKE_ID) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - with patch.object(conductor_api.API, 'heartbeat') as patch_hb: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.stop_db(True) - patch_hb.assert_called_once_with( - self.FAKE_ID, - {'service_status': - rd_instance.ServiceStatuses.SHUTDOWN.description}, - sent=ANY) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test_stop_mysql_do_not_start_on_reboot(self, mock_execute): - - self.appStatus.set_next_status( - rd_instance.ServiceStatuses.SHUTDOWN) - self.patch_conf_property('guest_id', self.FAKE_ID) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - with patch.object(conductor_api.API, 'heartbeat') as patch_hb: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.stop_db(True, True) - patch_hb.assert_called_once_with( - self.FAKE_ID, - {'service_status': - rd_instance.ServiceStatuses.SHUTDOWN.description}, - sent=ANY) - self.assertEqual(2, mock_execute.call_count) - - @patch('trove.guestagent.datastore.service.LOG') - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_stop_mysql_error(self, *args): - mysql_common_service.utils.execute_with_timeout = Mock() - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - self.mySqlApp.state_change_wait_time = 1 - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.assertRaises(RuntimeError, self.mySqlApp.stop_db) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - @patch.object(operating_system, 'service_discovery', - side_effect=KeyError('error')) - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test_stop_mysql_key_error(self, mock_execute, mock_service, - mock_logging): - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.assertRaisesRegex(RuntimeError, 'Service is not discovered.', - self.mySqlApp.stop_db) - self.assertEqual(0, mock_execute.call_count) - - def test_restart_is_successful(self): - - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.stop_db = Mock() - self.mysql_stops_successfully() - self.mysql_starts_successfully() - self.patch_conf_property('guest_id', self.FAKE_ID) - - with 
patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - with patch.object(conductor_api.API, 'heartbeat') as patch_hb: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.restart() - - self.assertTrue(self.mySqlApp.stop_db.called) - self.assertTrue(self.mySqlApp.start_mysql.called) - patch_hb.assert_called_once_with( - self.FAKE_ID, - {'service_status': - rd_instance.ServiceStatuses.RUNNING.description}, - sent=ANY) - - def test_restart_mysql_wont_start_up(self): - - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.stop_db = Mock() - self.mysql_stops_unsuccessfully() - self.mysql_starts_unsuccessfully() - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.assertRaises(RuntimeError, self.mySqlApp.restart) - - self.assertTrue(self.mySqlApp.stop_db.called) - self.assertFalse(self.mySqlApp.start_mysql.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - @patch.object(dbaas.MySqlApp, 'get_data_dir', return_value='some path') - def test_wipe_ib_logfiles_error(self, get_datadir_mock, mock_logging): - - mocked = Mock(side_effect=ProcessExecutionError('Error')) - mysql_common_service.utils.execute_with_timeout = mocked - - self.assertRaises(ProcessExecutionError, - self.mySqlApp.wipe_ib_logfiles) - - def test_start_mysql(self): - - mysql_common_service.utils.execute_with_timeout = Mock() - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - self.mySqlApp._enable_mysql_on_boot = Mock() - self.mySqlApp.start_mysql() - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - def test_start_mysql_with_db_update(self): - - mysql_common_service.utils.execute_with_timeout = Mock() - self.mySqlApp._enable_mysql_on_boot = Mock() - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - self.patch_conf_property('guest_id', self.FAKE_ID) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - with patch.object(conductor_api.API, 'heartbeat') as patch_hb: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.start_mysql(update_db=True) - patch_hb.assert_called_once_with( - self.FAKE_ID, - {'service_status': - rd_instance.ServiceStatuses.RUNNING.description}, - sent=ANY) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - @patch('trove.guestagent.datastore.service.LOG') - def test_start_mysql_runs_forever(self, *args): - - mysql_common_service.utils.execute_with_timeout = Mock() - self.mySqlApp._enable_mysql_on_boot = Mock() - self.mySqlApp.state_change_wait_time = 1 - self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN) - self.patch_conf_property('guest_id', self.FAKE_ID) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - with patch.object(conductor_api.API, 'heartbeat') as patch_hb: - patch_pc.__get__ = Mock(return_value=True) - self.assertRaises(RuntimeError, self.mySqlApp.start_mysql) - patch_hb.assert_called_once_with( - self.FAKE_ID, - {'service_status': - rd_instance.ServiceStatuses.SHUTDOWN.description}, - sent=ANY) - - @patch('trove.guestagent.datastore.service.LOG') - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_start_mysql_error(self, *args): - - self.mySqlApp._enable_mysql_on_boot = Mock() - mocked = Mock(side_effect=ProcessExecutionError('Error')) - mysql_common_service.utils.execute_with_timeout = mocked - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - 
patch_pc.__get__ = Mock(return_value=True) - self.assertRaises(RuntimeError, self.mySqlApp.start_mysql) - - def test_start_db_with_conf_changes(self): - self.mySqlApp.start_mysql = Mock() - self.mysql_starts_successfully() - - self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN - with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset: - configuration = 'some junk' - self.mySqlApp.start_db_with_conf_changes(configuration) - cfg_reset.assert_called_once_with(configuration) - - self.assertTrue(self.mySqlApp.start_mysql.called) - self.assertEqual(rd_instance.ServiceStatuses.RUNNING, - self.appStatus._get_actual_db_status()) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_start_db_with_conf_changes_mysql_is_running(self, *args): - self.mySqlApp.start_mysql = Mock() - - self.appStatus.status = rd_instance.ServiceStatuses.RUNNING - self.assertRaises(RuntimeError, - self.mySqlApp.start_db_with_conf_changes, - Mock()) - - def test_configuration_reset(self): - with patch.object(self.mySqlApp, '_reset_configuration') as cfg_reset: - configuration = {'config_contents': 'some junk'} - self.mySqlApp.reset_configuration(configuration=configuration) - cfg_reset.assert_called_once_with('some junk') - - @patch.object(dbaas.MySqlApp, - 'get_auth_password', return_value='some_password') - def test_reset_configuration(self, auth_pwd_mock): - save_cfg_mock = Mock() - save_auth_mock = Mock() - wipe_ib_mock = Mock() - - configuration = {'config_contents': 'some junk'} - - self.mySqlApp.configuration_manager.save_configuration = save_cfg_mock - self.mySqlApp._save_authentication_properties = save_auth_mock - self.mySqlApp.wipe_ib_logfiles = wipe_ib_mock - self.mySqlApp.reset_configuration(configuration=configuration) - - save_cfg_mock.assert_called_once_with('some junk') - save_auth_mock.assert_called_once_with( - auth_pwd_mock.return_value) - - wipe_ib_mock.assert_called_once_with() - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test__enable_mysql_on_boot(self, mock_execute): - mysql_service = \ - mysql_common_service.operating_system.service_discovery(["mysql"]) - self.mySqlApp._enable_mysql_on_boot() - self.assertEqual(1, mock_execute.call_count) - mock_execute.assert_called_with(mysql_service['cmd_enable'], - shell=True) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - @patch.object(operating_system, 'service_discovery', - side_effect=KeyError('error')) - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service, - mock_logging): - self.assertRaisesRegex(RuntimeError, 'Service is not discovered.', - self.mySqlApp._enable_mysql_on_boot) - self.assertEqual(0, mock_execute.call_count) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test__disable_mysql_on_boot(self, mock_execute): - mysql_service = \ - mysql_common_service.operating_system.service_discovery(["mysql"]) - self.mySqlApp._disable_mysql_on_boot() - self.assertEqual(1, mock_execute.call_count) - mock_execute.assert_called_with(mysql_service['cmd_disable'], - shell=True) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - @patch.object(operating_system, 'service_discovery', - side_effect=KeyError('error')) - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service, - mock_logging): - self.assertRaisesRegex(RuntimeError, 'Service is not 
discovered.', - self.mySqlApp._disable_mysql_on_boot) - self.assertEqual(0, mock_execute.call_count) - - def test_update_overrides(self): - override_value = {'key': 'value'} - with patch.object(self.mySqlApp.configuration_manager, - 'apply_user_override') as apply_usr_mock: - self.mySqlApp.update_overrides(override_value) - apply_usr_mock.assert_called_once_with({'mysqld': override_value}) - - def test_remove_override(self): - with patch.object(self.mySqlApp.configuration_manager, - 'remove_user_override') as remove_usr_mock: - self.mySqlApp.remove_overrides() - remove_usr_mock.assert_called_once_with() - - def test_write_replication_source_overrides(self): - with patch.object(self.mySqlApp.configuration_manager, - 'apply_system_override') as apply_sys_mock: - self.mySqlApp.write_replication_source_overrides('something') - apply_sys_mock.assert_called_once_with( - 'something', mysql_common_service.CNF_MASTER) - - def test_write_replication_replica_overrides(self): - with patch.object(self.mySqlApp.configuration_manager, - 'apply_system_override') as apply_sys_mock: - self.mySqlApp.write_replication_replica_overrides('something') - apply_sys_mock.assert_called_once_with( - 'something', mysql_common_service.CNF_SLAVE) - - def test_remove_replication_source_overrides(self): - with patch.object(self.mySqlApp.configuration_manager, - 'remove_system_override') as remove_sys_mock: - self.mySqlApp.remove_replication_source_overrides() - remove_sys_mock.assert_called_once_with( - mysql_common_service.CNF_MASTER) - - def test_remove_replication_replica_overrides(self): - with patch.object(self.mySqlApp.configuration_manager, - 'remove_system_override') as remove_sys_mock: - self.mySqlApp.remove_replication_replica_overrides() - remove_sys_mock.assert_called_once_with( - mysql_common_service.CNF_SLAVE) - - def test_exists_replication_source_overrides(self): - with patch.object(self.mySqlApp.configuration_manager, - 'has_system_override', - return_value=Mock()) as exists_mock: - self.assertEqual( - exists_mock.return_value, - self.mySqlApp.exists_replication_source_overrides()) - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_grant_replication_privilege(self, *args): - replication_user = {'name': 'testUSr', 'password': 'somePwd'} - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.grant_replication_privilege(replication_user) - args, _ = self.mock_execute.call_args_list[0] - expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` " - "IDENTIFIED BY 'somePwd';") - self.assertEqual(expected, args[0].text, - "Replication grant statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_get_port(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.get_port() - args, _ = self.mock_execute.call_args_list[0] - expected = ("SELECT @@port") - self.assertEqual(expected, args[0], - "Port queries are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_get_binlog_position(self, *args): - result = {'File': 'mysql-bin.003', 'Position': '73'} - self.mock_execute.return_value.first = Mock(return_value=result) - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - found_result = self.mySqlApp.get_binlog_position() - - self.assertEqual(result['File'], found_result['log_file']) - 
self.assertEqual(result['Position'], found_result['position']) - - args, _ = self.mock_execute.call_args_list[0] - expected = ("SHOW MASTER STATUS") - self.assertEqual(expected, args[0], - "Master status queries are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_execute_on_client(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.execute_on_client('show tables') - args, _ = self.mock_execute.call_args_list[0] - expected = ("show tables") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - @patch.object(dbaas.MySqlApp, '_wait_for_slave_status') - def test_start_slave(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.start_slave() - args, _ = self.mock_execute.call_args_list[0] - expected = ("START SLAVE") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - @patch.object(dbaas.MySqlApp, '_wait_for_slave_status') - def test_stop_slave_with_failover(self, *args): - self.mock_execute.return_value.first = Mock( - return_value={'Master_User': 'root'}) - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - result = self.mySqlApp.stop_slave(True) - self.assertEqual('root', result['replication_user']) - - expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"] - self.assertEqual(len(expected), len(self.mock_execute.call_args_list)) - for i in range(len(self.mock_execute.call_args_list)): - args, _ = self.mock_execute.call_args_list[i] - self.assertEqual(expected[i], args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - @patch.object(dbaas.MySqlApp, '_wait_for_slave_status') - def test_stop_slave_without_failover(self, *args): - self.mock_execute.return_value.first = Mock( - return_value={'Master_User': 'root'}) - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - result = self.mySqlApp.stop_slave(False) - self.assertEqual('root', result['replication_user']) - - expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL", - "DROP USER root"] - self.assertEqual(len(expected), len(self.mock_execute.call_args_list)) - for i in range(len(self.mock_execute.call_args_list)): - args, _ = self.mock_execute.call_args_list[i] - self.assertEqual(expected[i], args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_stop_master(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.stop_master() - args, _ = self.mock_execute.call_args_list[0] - expected = ("RESET MASTER") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test__wait_for_slave_status(self, *args): - mock_client = Mock() - mock_client.execute = Mock() - result = ['Slave_running', 'on'] - mock_client.execute.return_value.first = Mock(return_value=result) - self.mySqlApp._wait_for_slave_status('ON', mock_client, 5) - args, _ = mock_client.execute.call_args_list[0] - expected = ("SHOW GLOBAL STATUS like 
'slave_running'") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - @patch.object(utils, 'poll_until', side_effect=PollTimeOut) - def test_fail__wait_for_slave_status(self, *args): - self.assertRaisesRegex(RuntimeError, - "Replication is not on after 5 seconds.", - self.mySqlApp._wait_for_slave_status, 'ON', - Mock(), 5) - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test__get_slave_status(self, *args): - self.mock_execute.return_value.first = Mock(return_value='some_thing') - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - result = self.mySqlApp._get_slave_status() - self.assertEqual('some_thing', result) - args, _ = self.mock_execute.call_args_list[0] - expected = ("SHOW SLAVE STATUS") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_get_latest_txn_id(self, *args): - self.mock_execute.return_value.first = Mock(return_value=['some_thing'] - ) - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - result = self.mySqlApp.get_latest_txn_id() - self.assertEqual('some_thing', result) - args, _ = self.mock_execute.call_args_list[0] - expected = ("SELECT @@global.gtid_executed") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_wait_for_txn(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.wait_for_txn('abcd') - args, _ = self.mock_execute.call_args_list[0] - expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_get_txn_count(self, *args): - self.mock_execute.return_value.first = Mock( - return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1']) - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - result = self.mySqlApp.get_txn_count() - self.assertEqual(1, result) - args, _ = self.mock_execute.call_args_list[0] - expected = ("SELECT @@global.gtid_executed") - self.assertEqual(expected, args[0], - "Sql statements are not the same") - - @patch.multiple(pkg.Package, pkg_is_installed=Mock(return_value=False), - pkg_install=DEFAULT) - def test_install(self, pkg_install): - self.mySqlApp._install_mysql = Mock() - utils.execute_with_timeout = Mock() - self.mySqlApp._clear_mysql_config = Mock() - self.mySqlApp._create_mysql_confd_dir = Mock() - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.install_if_needed(["package"]) - self.assertTrue(pkg_install.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(operating_system, 'write_file') - def test_save_authentication_properties(self, write_file_mock): - self.mySqlApp._save_authentication_properties("some_password") - write_file_mock.assert_called_once_with( - MySqlApp.get_client_auth_file(), - {'client': {'host': 'localhost', - 'password': 'some_password', - 'user': mysql_common_service.ADMIN_USER_NAME}}, - codec=MySqlApp.CFG_CODEC) - - @patch.object(utils, 'generate_random_password', - return_value='some_password') - @patch.object(mysql_common_service, 
'clear_expired_password') - def test_secure(self, clear_pwd_mock, auth_pwd_mock): - - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.stop_db = Mock() - self.mySqlApp._reset_configuration = Mock() - self.mySqlApp._apply_user_overrides = Mock() - self.mysql_stops_successfully() - self.mysql_starts_successfully() - sqlalchemy.create_engine = Mock() - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.mySqlApp.secure('contents') - - self.assertTrue(self.mySqlApp.stop_db.called) - self.mySqlApp._reset_configuration.assert_has_calls( - [call('contents', auth_pwd_mock.return_value)]) - - self.assertTrue(self.mySqlApp.start_mysql.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(dbaas, 'get_engine') - @patch.object(utils, 'generate_random_password', - return_value='some_password') - @patch.object(operating_system, 'write_file') - def test_secure_root(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.secure_root() - update_root_password, _ = self.mock_execute.call_args_list[0] - update_expected = ("SET PASSWORD FOR 'root'@'localhost' = " - "PASSWORD('some_password');") - - remove_root, _ = self.mock_execute.call_args_list[1] - remove_expected = ("DELETE FROM mysql.user WHERE " - "User = 'root' AND Host != 'localhost';") - - self.assertEqual(update_expected, update_root_password[0].text, - "Update root password queries are not the same") - self.assertEqual(remove_expected, remove_root[0].text, - "Remove root queries are not the same") - - @patch.object(operating_system, 'create_directory') - def test__create_mysql_confd_dir(self, mkdir_mock): - self.mySqlApp._create_mysql_confd_dir() - mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True) - - @patch.object(operating_system, 'move') - def test__clear_mysql_config(self, mock_move): - self.mySqlApp._clear_mysql_config() - self.assertEqual(3, mock_move.call_count) - - @patch.object(operating_system, 'move', side_effect=ProcessExecutionError) - def test_exception__clear_mysql_config(self, mock_move): - self.mySqlApp._clear_mysql_config() - # The call count must match the normal case: each exception is - # swallowed so the flow moves on to the next file move. 
- self.assertEqual(3, mock_move.call_count) - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_apply_overrides(self, *args): - overrides = {'sort_buffer_size': 1000000} - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.apply_overrides(overrides) - args, _ = self.mock_execute.call_args_list[0] - expected = ("SET GLOBAL sort_buffer_size=1000000") - self.assertEqual(expected, args[0].text, - "Set global statements are not the same") - - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_make_read_only(self, *args): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp.make_read_only('ON') - args, _ = self.mock_execute.call_args_list[0] - expected = ("set global read_only = ON") - self.assertEqual(expected, args[0].text, - "Set read_only statements are not the same") - - @patch.multiple(pkg.Package, pkg_is_installed=Mock(return_value=False), - pkg_install=Mock( - side_effect=pkg.PkgPackageStateError("Install error"))) - def test_install_install_error(self): - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.stop_db = Mock() - self.mySqlApp._clear_mysql_config = Mock() - self.mySqlApp._create_mysql_confd_dir = Mock() - - self.assertRaises(pkg.PkgPackageStateError, - self.mySqlApp.install_if_needed, ["package"]) - - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(mysql_common_service, 'clear_expired_password') - def test_secure_write_conf_error(self, clear_pwd_mock): - - self.mySqlApp.start_mysql = Mock() - self.mySqlApp.stop_db = Mock() - self.mySqlApp._reset_configuration = Mock( - side_effect=IOError("Could not write file")) - self.mySqlApp._apply_user_overrides = Mock() - self.mysql_stops_successfully() - self.mysql_starts_successfully() - sqlalchemy.create_engine = Mock() - - self.assertRaises(IOError, self.mySqlApp.secure, "foo") - - self.assertTrue(self.mySqlApp.stop_db.called) - self.assertFalse(self.mySqlApp.start_mysql.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(dbaas.MySqlApp, '_save_authentication_properties') - @patch.object(dbaas, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test_reset_admin_password(self, mock_engine, mock_save_auth): - with patch.object(dbaas.MySqlApp, 'local_sql_client', - return_value=self.mock_client): - self.mySqlApp._create_admin_user = Mock() - self.mySqlApp.reset_admin_password("newpassword") - self.assertEqual(1, self.mySqlApp._create_admin_user.call_count) - mock_save_auth.assert_called_once_with("newpassword") - - -class TextClauseMatcher(object): - - def __init__(self, text): - self.text = text - - def __repr__(self): - return "TextClause(%s)" % self.text - - def __eq__(self, arg): - return self.text in arg.text - - -class MySqlAppMockTest(trove_testtools.TestCase): - - def setUp(self): - super(MySqlAppMockTest, self).setUp() - self.orig_utils_execute_with_timeout = utils.execute_with_timeout - create_engine_patcher = patch.object(sqlalchemy, 'create_engine') - self.addCleanup(create_engine_patcher.stop) - create_engine_patcher.start() - - self.mock_cli_ctx_mgr = Mock() - self.mock_client = MagicMock() - self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) - self.mock_cli_ctx_mgr.__exit__ = Mock() - - local_client_patcher = patch.object(dbaas.MySqlApp, - 'local_sql_client', - return_value=self.mock_cli_ctx_mgr) - 
self.addCleanup(local_client_patcher.stop) - local_client_patcher.start() - - def tearDown(self): - utils.execute_with_timeout = self.orig_utils_execute_with_timeout - super(MySqlAppMockTest, self).tearDown() - - @patch('trove.guestagent.common.configuration.ConfigurationManager' - '.refresh_cache') - @patch.object(mysql_common_service, 'clear_expired_password') - @patch.object(utils, 'generate_random_password', - return_value='some_password') - def test_secure_keep_root(self, auth_pwd_mock, clear_pwd_mock, _): - with patch.object(self.mock_client, - 'execute', return_value=None) as mock_execute: - utils.execute_with_timeout = MagicMock(return_value=None) - # skip writing the file for now - with patch.object(os.path, 'isfile', return_value=False): - mock_status = MagicMock() - mock_status.wait_for_real_status_to_change_to = MagicMock( - return_value=True) - app = MySqlApp(mock_status) - app._reset_configuration = MagicMock() - app.start_mysql = MagicMock(return_value=None) - app.stop_db = MagicMock(return_value=None) - app.secure('foo') - reset_config_calls = [call('foo', auth_pwd_mock.return_value)] - app._reset_configuration.assert_has_calls(reset_config_calls) - self.assertTrue(mock_execute.called) - - @patch('trove.guestagent.common.configuration.ConfigurationManager' - '.refresh_cache') - @patch.object(mysql_common_service, 'clear_expired_password') - @patch.object(mysql_common_service.BaseMySqlApp, - 'get_auth_password', return_value='some_password') - def test_secure_with_mycnf_error(self, *args): - with patch.object(self.mock_client, - 'execute', return_value=None) as mock_execute: - with patch.object(operating_system, 'service_discovery', - return_value={'cmd_stop': 'service mysql stop'}): - utils.execute_with_timeout = MagicMock(return_value=None) - # skip writing the file for now - with patch.object(dbaas.MySqlApp, '_reset_configuration', - side_effect=RuntimeError('Error')): - mock_status = MagicMock() - mock_status.wait_for_real_status_to_change_to = MagicMock( - return_value=True) - mysql_common_service.clear_expired_password = \ - MagicMock(return_value=None) - app = MySqlApp(mock_status) - mysql_common_service.clear_expired_password = \ - MagicMock(return_value=None) - self.assertRaises(RuntimeError, app.secure, None) - self.assertTrue(mock_execute.called) - # At least called twice - self.assertGreaterEqual(mock_execute.call_count, 2) - (mock_status.wait_for_real_status_to_change_to. 
- assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN, - app.state_change_wait_time, False)) - - -class MySqlRootStatusTest(trove_testtools.TestCase): - - def setUp(self): - super(MySqlRootStatusTest, self).setUp() - self.orig_utils_execute_with_timeout = utils.execute_with_timeout - create_engine_patcher = patch.object(sqlalchemy, 'create_engine') - self.addCleanup(create_engine_patcher.stop) - create_engine_patcher.start() - mysql_app_patcher = patch.multiple(MySqlApp, get_engine=DEFAULT, - configuration_manager=DEFAULT) - self.addCleanup(mysql_app_patcher.stop) - mysql_app_patcher.start() - - self.mock_cli_ctx_mgr = Mock() - self.mock_client = MagicMock() - self.mock_cli_ctx_mgr.__enter__ = Mock(return_value=self.mock_client) - self.mock_cli_ctx_mgr.__exit__ = Mock() - - local_client_patcher = patch.object(dbaas.MySqlRootAccess, - 'local_sql_client', - return_value=self.mock_cli_ctx_mgr) - self.addCleanup(local_client_patcher.stop) - local_client_patcher.start() - - def tearDown(self): - utils.execute_with_timeout = self.orig_utils_execute_with_timeout - super(MySqlRootStatusTest, self).tearDown() - - @patch.object(mysql_common_service.BaseMySqlApp, - 'get_auth_password', return_value='some_password') - def test_root_is_enabled(self, auth_pwd_mock): - mock_rs = MagicMock() - mock_rs.rowcount = 1 - with patch.object(self.mock_client, 'execute', return_value=mock_rs): - self.assertTrue(MySqlRootAccess().is_root_enabled()) - - @patch.object(mysql_common_service.BaseMySqlApp, - 'get_auth_password', return_value='some_password') - def test_root_is_not_enabled(self, auth_pwd_mock): - mock_rs = MagicMock() - mock_rs.rowcount = 0 - with patch.object(self.mock_client, 'execute', return_value=mock_rs): - self.assertFalse(MySqlRootAccess().is_root_enabled()) - - @patch.object(mysql_common_service, 'clear_expired_password') - @patch.object(mysql_common_service.BaseMySqlApp, - 'get_auth_password', return_value='some_password') - def test_enable_root(self, auth_pwd_mock, clear_pwd_mock): - with patch.object(self.mock_client, - 'execute', return_value=None) as mock_execute: - # invocation - user_ser = MySqlRootAccess().enable_root() - # verification - self.assertIsNotNone(user_ser) - mock_execute.assert_any_call(TextClauseMatcher('CREATE USER'), - user='root', host='%') - mock_execute.assert_any_call(TextClauseMatcher( - 'GRANT ALL PRIVILEGES ON *.*')) - mock_execute.assert_any_call(TextClauseMatcher( - 'SET PASSWORD')) - - @patch.object(MySqlRootAccess, 'enable_root') - def test_root_disable(self, enable_root_mock): - # invocation - MySqlRootAccess().disable_root() - # verification - enable_root_mock.assert_called_once_with(root_password=None) - - -class MockStats(object): - f_blocks = 1024 ** 2 - f_bsize = 4096 - f_bfree = 512 * 1024 - - -class InterrogatorTest(trove_testtools.TestCase): - - def tearDown(self): - super(InterrogatorTest, self).tearDown() - - def test_get_filesystem_volume_stats(self): - with patch.object(os, 'statvfs', return_value=MockStats): - result = get_filesystem_volume_stats('/some/path/') - - self.assertEqual(4096, result['block_size']) - self.assertEqual(1048576, result['total_blocks']) - self.assertEqual(524288, result['free_blocks']) - self.assertEqual(4.0, result['total']) - self.assertEqual(2147483648, result['free']) - self.assertEqual(2.0, result['used']) - - @patch('trove.guestagent.dbaas.LOG') - def test_get_filesystem_volume_stats_error(self, *args): - with patch.object(os, 'statvfs', side_effect=OSError): - self.assertRaises( - RuntimeError, - 
get_filesystem_volume_stats, '/nonexistent/path') - - -class ServiceRegistryTest(trove_testtools.TestCase): - - def setUp(self): - super(ServiceRegistryTest, self).setUp() - - def tearDown(self): - super(ServiceRegistryTest, self).tearDown() - - def test_datastore_registry_with_extra_manager(self): - datastore_registry_ext_test = { - 'test': 'trove.guestagent.datastore.test.manager.Manager', - } - with patch.object(dbaas_sr, 'get_custom_managers', - return_value=datastore_registry_ext_test): - test_dict = dbaas_sr.datastore_registry() - self.assertEqual(datastore_registry_ext_test.get('test', None), - test_dict.get('test')) - self.assertEqual('trove.guestagent.datastore.mysql.' - 'manager.Manager', - test_dict.get('mysql')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'percona.manager.Manager', - test_dict.get('percona')) - self.assertEqual('trove.guestagent.datastore.experimental.redis.' - 'manager.Manager', - test_dict.get('redis')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'cassandra.manager.Manager', - test_dict.get('cassandra')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'couchbase.manager.Manager', - test_dict.get('couchbase')) - self.assertEqual('trove.guestagent.datastore.experimental.mongodb.' - 'manager.Manager', - test_dict.get('mongodb')) - self.assertEqual('trove.guestagent.datastore.experimental.couchdb.' - 'manager.Manager', - test_dict.get('couchdb')) - self.assertEqual('trove.guestagent.datastore.experimental.db2.' - 'manager.Manager', - test_dict.get('db2')) - - def test_datastore_registry_with_existing_manager(self): - datastore_registry_ext_test = { - 'mysql': 'trove.guestagent.datastore.mysql.' - 'manager.Manager123', - } - with patch.object(dbaas_sr, 'get_custom_managers', - return_value=datastore_registry_ext_test): - test_dict = dbaas_sr.datastore_registry() - self.assertEqual('trove.guestagent.datastore.mysql.' - 'manager.Manager123', - test_dict.get('mysql')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'percona.manager.Manager', - test_dict.get('percona')) - self.assertEqual('trove.guestagent.datastore.experimental.redis.' - 'manager.Manager', - test_dict.get('redis')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'cassandra.manager.Manager', - test_dict.get('cassandra')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'couchbase.manager.Manager', - test_dict.get('couchbase')) - self.assertEqual('trove.guestagent.datastore.experimental.mongodb.' - 'manager.Manager', - test_dict.get('mongodb')) - self.assertEqual('trove.guestagent.datastore.experimental.couchdb.' - 'manager.Manager', - test_dict.get('couchdb')) - self.assertEqual('trove.guestagent.datastore.experimental.vertica.' - 'manager.Manager', - test_dict.get('vertica')) - self.assertEqual('trove.guestagent.datastore.experimental.db2.' - 'manager.Manager', - test_dict.get('db2')) - self.assertEqual('trove.guestagent.datastore.experimental.mariadb.' - 'manager.Manager', - test_dict.get('mariadb')) - - def test_datastore_registry_with_blank_dict(self): - datastore_registry_ext_test = dict() - with patch.object(dbaas_sr, 'get_custom_managers', - return_value=datastore_registry_ext_test): - test_dict = dbaas_sr.datastore_registry() - self.assertEqual('trove.guestagent.datastore.mysql.' - 'manager.Manager', - test_dict.get('mysql')) - self.assertEqual('trove.guestagent.datastore.experimental.' 
- 'percona.manager.Manager', - test_dict.get('percona')) - self.assertEqual('trove.guestagent.datastore.experimental.redis.' - 'manager.Manager', - test_dict.get('redis')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'cassandra.manager.Manager', - test_dict.get('cassandra')) - self.assertEqual('trove.guestagent.datastore.experimental.' - 'couchbase.manager.Manager', - test_dict.get('couchbase')) - self.assertEqual('trove.guestagent.datastore.experimental.mongodb.' - 'manager.Manager', - test_dict.get('mongodb')) - self.assertEqual('trove.guestagent.datastore.experimental.couchdb.' - 'manager.Manager', - test_dict.get('couchdb')) - self.assertEqual('trove.guestagent.datastore.experimental.vertica.' - 'manager.Manager', - test_dict.get('vertica')) - self.assertEqual('trove.guestagent.datastore.experimental.db2.' - 'manager.Manager', - test_dict.get('db2')) - self.assertEqual('trove.guestagent.datastore.experimental.mariadb.' - 'manager.Manager', - test_dict.get('mariadb')) - - -class KeepAliveConnectionTest(trove_testtools.TestCase): - - class OperationalError(Exception): - - def __init__(self, value): - self.args = [value] - - def __str__(self): - return repr(self.value) - - def setUp(self): - super(KeepAliveConnectionTest, self).setUp() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_LOG_err = dbaas.LOG - - def tearDown(self): - super(KeepAliveConnectionTest, self).tearDown() - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - dbaas.LOG = self.orig_LOG_err - - def test_checkout_type_error(self): - - dbapi_con = Mock() - dbapi_con.ping = Mock(side_effect=TypeError("Type Error")) - - self.keepAliveConn = KeepAliveConnection() - self.assertRaises(TypeError, self.keepAliveConn.checkout, - dbapi_con, Mock(), Mock()) - - def test_checkout_disconnection_error(self): - - dbapi_con = Mock() - dbapi_con.OperationalError = self.OperationalError - dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013)) - - self.keepAliveConn = KeepAliveConnection() - self.assertRaises(sqlalchemy.exc.DisconnectionError, - self.keepAliveConn.checkout, - dbapi_con, Mock(), Mock()) - - def test_checkout_operation_error(self): - - dbapi_con = Mock() - dbapi_con.OperationalError = self.OperationalError - dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234)) - - self.keepAliveConn = KeepAliveConnection() - self.assertRaises(self.OperationalError, self.keepAliveConn.checkout, - dbapi_con, Mock(), Mock()) - - -class BaseDbStatusTest(trove_testtools.TestCase): - - def setUp(self): - super(BaseDbStatusTest, self).setUp() - util.init_db() - self.orig_dbaas_time_sleep = time.sleep - self.orig_time_time = time.time - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - dbaas.CONF.guest_id = self.FAKE_ID - patcher_log = patch.object(base_datastore_service, 'LOG') - patcher_context = patch.object(trove_context, 'TroveContext') - patcher_api = patch.object(conductor_api, 'API') - patcher_log.start() - patcher_context.start() - patcher_api.start() - self.addCleanup(patcher_log.stop) - self.addCleanup(patcher_context.stop) - self.addCleanup(patcher_api.stop) - - def tearDown(self): - time.sleep = self.orig_dbaas_time_sleep - time.time = self.orig_time_time - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - dbaas.CONF.guest_id = None - super(BaseDbStatusTest, self).tearDown() - - 
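For context on the keep-alive behaviour that KeepAliveConnectionTest above exercises: on every pool checkout the raw DB-API connection is pinged, a TypeError triggers one retry with the argument-less ping form, and MySQL "connection lost" errors are translated into DisconnectionError so SQLAlchemy retires the stale connection instead of handing it out. Below is a minimal, self-contained sketch of that pattern; it is not Trove's verbatim implementation, and the error codes 2006/2013 (the usual MySQL "server has gone away" and "lost connection during query" codes) are assumptions about typical drivers.

    import sqlalchemy.exc


    class KeepAliveConnectionSketch(object):
        """Pool checkout hook that pings the raw DB-API connection."""

        def checkout(self, dbapi_con, con_record, con_proxy):
            try:
                # Most MySQL drivers accept a "reconnect" flag.
                dbapi_con.ping(False)
            except TypeError:
                # Older drivers take no argument; retry the bare form.
                # With a Mock whose side_effect always raises, this retry
                # fails too, so the TypeError propagates, which is what
                # test_checkout_type_error asserts.
                dbapi_con.ping()
            except dbapi_con.OperationalError as ex:
                if ex.args[0] in (2006, 2013):
                    # The connection is gone; DisconnectionError tells the
                    # pool to discard it and check out a fresh one.
                    raise sqlalchemy.exc.DisconnectionError()
                # Any other operational error (e.g. code 1234 in the test
                # above) is a real failure and is re-raised as-is.
                raise

The three tests above drive exactly these paths: the TypeError branch, the disconnect-code translation, and the re-raise of unrelated operational errors.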
@patch.object(operating_system, 'write_file') - def test_begin_install(self, mock_write_file): - base_db_status = BaseDbStatus() - - base_db_status.begin_install() - - self.assertEqual(rd_instance.ServiceStatuses.BUILDING, - base_db_status.status) - - def test_begin_restart(self): - base_db_status = BaseDbStatus() - base_db_status.restart_mode = False - - base_db_status.begin_restart() - - self.assertTrue(base_db_status.restart_mode) - - def test_end_restart(self): - base_db_status = BaseDbStatus() - base_db_status._get_actual_db_status = Mock( - return_value=rd_instance.ServiceStatuses.SHUTDOWN) - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - base_db_status.end_restart() - - self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, - base_db_status.status) - self.assertFalse(base_db_status.restart_mode) - - def test_is_installed(self): - base_db_status = BaseDbStatus() - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - self.assertTrue(base_db_status.is_installed) - - def test_is_installed_failed(self): - base_db_status = BaseDbStatus() - - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=False) - self.assertFalse(base_db_status.is_installed) - - def test_is_restarting(self): - base_db_status = BaseDbStatus() - base_db_status.restart_mode = True - - self.assertTrue(base_db_status._is_restarting) - - def test_is_running(self): - base_db_status = BaseDbStatus() - base_db_status.status = rd_instance.ServiceStatuses.RUNNING - - self.assertTrue(base_db_status.is_running) - - def test_is_running_not(self): - base_db_status = BaseDbStatus() - base_db_status.status = rd_instance.ServiceStatuses.SHUTDOWN - - self.assertFalse(base_db_status.is_running) - - def test_wait_for_real_status_to_change_to(self): - base_db_status = BaseDbStatus() - base_db_status._get_actual_db_status = Mock( - return_value=rd_instance.ServiceStatuses.RUNNING) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - - self.assertTrue(base_db_status. - wait_for_real_status_to_change_to - (rd_instance.ServiceStatuses.RUNNING, 10)) - - def test_wait_for_real_status_to_change_to_timeout(self): - base_db_status = BaseDbStatus() - base_db_status._get_actual_db_status = Mock( - return_value=rd_instance.ServiceStatuses.RUNNING) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - - self.assertFalse(base_db_status. 
- wait_for_real_status_to_change_to - (rd_instance.ServiceStatuses.SHUTDOWN, 10)) - - def _test_set_status(self, initial_status, new_status, - expected_status, install_done=False, force=False): - base_db_status = BaseDbStatus() - base_db_status.status = initial_status - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=install_done) - base_db_status.set_status(new_status, force=force) - - self.assertEqual(expected_status, - base_db_status.status) - - def test_set_status_force_heartbeat(self): - self._test_set_status(rd_instance.ServiceStatuses.BUILDING, - rd_instance.ServiceStatuses.RUNNING, - rd_instance.ServiceStatuses.RUNNING, - force=True) - - def test_set_status_skip_heartbeat_with_building(self): - self._test_set_status(rd_instance.ServiceStatuses.BUILDING, - rd_instance.ServiceStatuses.RUNNING, - rd_instance.ServiceStatuses.BUILDING) - - def test_set_status_skip_heartbeat_with_new(self): - self._test_set_status(rd_instance.ServiceStatuses.NEW, - rd_instance.ServiceStatuses.RUNNING, - rd_instance.ServiceStatuses.NEW) - - def test_set_status_to_failed(self): - self._test_set_status(rd_instance.ServiceStatuses.BUILDING, - rd_instance.ServiceStatuses.FAILED, - rd_instance.ServiceStatuses.FAILED, - force=True) - - def test_set_status_to_build_pending(self): - self._test_set_status(rd_instance.ServiceStatuses.BUILDING, - rd_instance.ServiceStatuses.INSTANCE_READY, - rd_instance.ServiceStatuses.INSTANCE_READY, - force=True) - - def test_set_status_to_shutdown(self): - self._test_set_status(rd_instance.ServiceStatuses.RUNNING, - rd_instance.ServiceStatuses.SHUTDOWN, - rd_instance.ServiceStatuses.SHUTDOWN, - install_done=True) - - def test_wait_for_database_service_status(self): - status = BaseDbStatus() - expected_status = rd_instance.ServiceStatuses.RUNNING - timeout = 10 - update_db = False - - # Test a successful call. - with patch.multiple( - status, - wait_for_real_status_to_change_to=Mock(return_value=True), - cleanup_stalled_db_services=DEFAULT): - self.assertTrue( - status._wait_for_database_service_status( - expected_status, timeout, update_db)) - status.wait_for_real_status_to_change_to.assert_called_once_with( - expected_status, timeout, update_db) - self.assertFalse(status.cleanup_stalled_db_services.called) - - # Test a failing call. - with patch.multiple( - status, - wait_for_real_status_to_change_to=Mock(return_value=False), - cleanup_stalled_db_services=DEFAULT): - self.assertFalse( - status._wait_for_database_service_status( - expected_status, timeout, update_db)) - status.wait_for_real_status_to_change_to.assert_called_once_with( - expected_status, timeout, update_db) - status.cleanup_stalled_db_services.assert_called_once_with() - - # Test a failing call with an error raised from the cleanup code. - # No exception should propagate out of the cleanup block. - with patch.multiple( - status, - wait_for_real_status_to_change_to=Mock(return_value=False), - cleanup_stalled_db_services=Mock( - side_effect=Exception("Error in cleanup."))): - self.assertFalse( - status._wait_for_database_service_status( - expected_status, timeout, update_db)) - status.wait_for_real_status_to_change_to.assert_called_once_with( - expected_status, timeout, update_db) - status.cleanup_stalled_db_services.assert_called_once_with() - - def test_start_db_service(self): - status = BaseDbStatus() - service_candidates = ['name1', 'name2'] - - # Test a successful call with setting auto-start enabled. 
- with patch.object( - status, '_wait_for_database_service_status', - return_value=True) as service_call: - with patch.multiple(operating_system, start_service=DEFAULT, - enable_service_on_boot=DEFAULT) as os_cmd: - status.start_db_service( - service_candidates, 10, enable_on_boot=True) - service_call.assert_called_once_with( - rd_instance.ServiceStatuses.RUNNING, 10, False) - os_cmd['start_service'].assert_called_once_with( - service_candidates, timeout=10) - os_cmd['enable_service_on_boot'].assert_called_once_with( - service_candidates) - - # Test a successful call without auto-start. - with patch.object( - status, '_wait_for_database_service_status', - return_value=True) as service_call: - with patch.multiple(operating_system, start_service=DEFAULT, - enable_service_on_boot=DEFAULT) as os_cmd: - status.start_db_service( - service_candidates, 10, enable_on_boot=False) - service_call.assert_called_once_with( - rd_instance.ServiceStatuses.RUNNING, 10, False) - os_cmd['start_service'].assert_called_once_with( - service_candidates, timeout=10) - self.assertFalse(os_cmd['enable_service_on_boot'].called) - - # Test a failing call. - # The auto-start setting should not get updated if the service call - # fails. - with patch.object( - status, '_wait_for_database_service_status', - return_value=False) as service_call: - with patch.multiple(operating_system, start_service=DEFAULT, - enable_service_on_boot=DEFAULT) as os_cmd: - self.assertRaisesRegex( - RuntimeError, "Database failed to start.", - status.start_db_service, - service_candidates, 10, enable_on_boot=True) - os_cmd['start_service'].assert_called_once_with( - service_candidates, timeout=10) - self.assertFalse(os_cmd['enable_service_on_boot'].called) - - def test_stop_db_service(self): - status = BaseDbStatus() - service_candidates = ['name1', 'name2'] - - # Test a successful call with setting auto-start disabled. - with patch.object( - status, '_wait_for_database_service_status', - return_value=True) as service_call: - with patch.multiple(operating_system, stop_service=DEFAULT, - disable_service_on_boot=DEFAULT) as os_cmd: - status.stop_db_service( - service_candidates, 10, disable_on_boot=True) - service_call.assert_called_once_with( - rd_instance.ServiceStatuses.SHUTDOWN, 10, False) - os_cmd['stop_service'].assert_called_once_with( - service_candidates, timeout=10) - os_cmd['disable_service_on_boot'].assert_called_once_with( - service_candidates) - - # Test a successful call without auto-start. - with patch.object( - status, '_wait_for_database_service_status', - return_value=True) as service_call: - with patch.multiple(operating_system, stop_service=DEFAULT, - disable_service_on_boot=DEFAULT) as os_cmd: - status.stop_db_service( - service_candidates, 10, disable_on_boot=False) - service_call.assert_called_once_with( - rd_instance.ServiceStatuses.SHUTDOWN, 10, False) - os_cmd['stop_service'].assert_called_once_with( - service_candidates, timeout=10) - self.assertFalse(os_cmd['disable_service_on_boot'].called) - - # Test a failing call. - # The auto-start setting should not get updated if the service call - # fails. 
- with patch.object( - status, '_wait_for_database_service_status', - return_value=False) as service_call: - with patch.multiple(operating_system, stop_service=DEFAULT, - disable_service_on_boot=DEFAULT) as os_cmd: - self.assertRaisesRegex( - RuntimeError, "Database failed to stop.", - status.stop_db_service, - service_candidates, 10, disable_on_boot=True) - os_cmd['stop_service'].assert_called_once_with( - service_candidates, timeout=10) - self.assertFalse(os_cmd['disable_service_on_boot'].called) - - def test_restart_db_service(self): - status = BaseDbStatus() - service_candidates = ['name1', 'name2'] - - # Test the restart flow (stop followed by start). - # Assert that the auto-start setting does not get changed and the - # Trove instance status updates are suppressed during restart. - with patch.multiple( - status, start_db_service=DEFAULT, stop_db_service=DEFAULT, - begin_restart=DEFAULT, end_restart=DEFAULT): - status.restart_db_service(service_candidates, 10) - status.begin_restart.assert_called_once_with() - status.stop_db_service.assert_called_once_with( - service_candidates, 10, disable_on_boot=False, update_db=False) - status.start_db_service.assert_called_once_with( - service_candidates, 10, enable_on_boot=False, update_db=False) - status.end_restart.assert_called_once_with() - - # Test a failing call. - # Assert the status heartbeat gets re-enabled. - with patch.multiple( - status, start_db_service=Mock( - side_effect=Exception("Error in database start.")), - stop_db_service=DEFAULT, begin_restart=DEFAULT, - end_restart=DEFAULT): - self.assertRaisesRegex( - RuntimeError, "Database restart failed.", - status.restart_db_service, service_candidates, 10) - status.begin_restart.assert_called_once_with() - status.end_restart.assert_called_once_with() - - -class MySqlAppStatusTest(trove_testtools.TestCase): - - def setUp(self): - super(MySqlAppStatusTest, self).setUp() - util.init_db() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_load_mysqld_options = \ - mysql_common_service.load_mysqld_options - self.orig_mysql_common_service_os_path_exists = \ - mysql_common_service.os.path.exists - self.orig_dbaas_time_sleep = time.sleep - self.orig_time_time = time.time - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - dbaas.CONF.guest_id = self.FAKE_ID - - def tearDown(self): - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - mysql_common_service.load_mysqld_options = \ - self.orig_load_mysqld_options - mysql_common_service.os.path.exists = \ - self.orig_mysql_common_service_os_path_exists - time.sleep = self.orig_dbaas_time_sleep - time.time = self.orig_time_time - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - dbaas.CONF.guest_id = None - super(MySqlAppStatusTest, self).tearDown() - - def test_get_actual_db_status(self): - mysql_common_service.utils.execute_with_timeout = \ - Mock(return_value=("111", None)) - - self.mySqlAppStatus = MySqlAppStatus.get() - status = self.mySqlAppStatus._get_actual_db_status() - - self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status) - - @patch.object(utils, 'execute_with_timeout', - side_effect=ProcessExecutionError()) - @patch.object(os.path, 'exists', return_value=True) - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_get_actual_db_status_error_crashed(self, mock_logging, - mock_exists, - mock_execute): - 
mysql_common_service.load_mysqld_options = Mock(return_value={}) - self.mySqlAppStatus = MySqlAppStatus.get() - status = self.mySqlAppStatus._get_actual_db_status() - self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status) - - @patch('trove.guestagent.datastore.mysql_common.service.LOG') - def test_get_actual_db_status_error_shutdown(self, *args): - - mocked = Mock(side_effect=ProcessExecutionError()) - mysql_common_service.utils.execute_with_timeout = mocked - mysql_common_service.load_mysqld_options = Mock(return_value={}) - mysql_common_service.os.path.exists = Mock(return_value=False) - - self.mySqlAppStatus = MySqlAppStatus.get() - status = self.mySqlAppStatus._get_actual_db_status() - - self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status) - - -class TestRedisApp(BaseAppTest.AppTestCase): - - def setUp(self): - super(TestRedisApp, self).setUp(str(uuid4()), 'redis') - self.orig_os_path_eu = os.path.expanduser - os.path.expanduser = Mock(return_value='/tmp/.file') - - with patch.object(RedisApp, '_build_admin_client'): - with patch.object(ImportOverrideStrategy, - '_initialize_import_directory'): - self.redis = RedisApp(state_change_wait_time=0) - self.redis.status = FakeAppStatus( - self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - - self.orig_os_path_isfile = os.path.isfile - self.orig_utils_execute_with_timeout = utils.execute_with_timeout - utils.execute_with_timeout = Mock() - - @property - def app(self): - return self.redis - - @property - def appStatus(self): - return self.redis.status - - @property - def expected_state_change_timeout(self): - return self.redis.state_change_wait_time - - @property - def expected_service_candidates(self): - return RedisSystem.SERVICE_CANDIDATES - - def tearDown(self): - os.path.isfile = self.orig_os_path_isfile - os.path.expanduser = self.orig_os_path_eu - utils.execute_with_timeout = self.orig_utils_execute_with_timeout - super(TestRedisApp, self).tearDown() - - def test_install_if_needed_installed(self): - with patch.object(pkg.Package, 'pkg_is_installed', return_value=True): - with patch.object(RedisApp, '_install_redis', return_value=None): - self.app.install_if_needed('bar') - pkg.Package.pkg_is_installed.assert_any_call('bar') - self.assertEqual(0, RedisApp._install_redis.call_count) - - def test_install_if_needed_not_installed(self): - with patch.object(pkg.Package, 'pkg_is_installed', return_value=False): - with patch.object(RedisApp, '_install_redis', return_value=None): - self.app.install_if_needed('asdf') - pkg.Package.pkg_is_installed.assert_any_call('asdf') - RedisApp._install_redis.assert_any_call('asdf') - - def test_install_redis(self): - with patch.object(utils, 'execute_with_timeout', - return_value=('0', '')): - with patch.object(pkg.Package, 'pkg_install', return_value=None): - with patch.object(RedisApp, 'start_db', return_value=None): - self.app._install_redis('redis') - pkg.Package.pkg_install.assert_any_call('redis', {}, 1200) - RedisApp.start_db.assert_any_call() - self.assertTrue(utils.execute_with_timeout.called) - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test_service_cleanup(self, exec_mock): - rservice.RedisAppStatus(Mock()).cleanup_stalled_db_services() - exec_mock.assert_called_once_with('pkill', '-9', 'redis-server', - run_as_root=True, root_helper='sudo') - - -class CassandraDBAppTest(BaseAppTest.AppTestCase): - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def 
setUp(self, mock_logging, _): - super(CassandraDBAppTest, self).setUp(str(uuid4()), 'cassandra') - self.sleep = time.sleep - self.orig_time_time = time.time - self.pkg_version = cass_service.packager.pkg_version - self.pkg = cass_service.packager - util.init_db() - self.cassandra = cass_service.CassandraApp() - self.cassandra.status = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.orig_unlink = os.unlink - - @property - def app(self): - return self.cassandra - - @property - def appStatus(self): - return self.cassandra.status - - @property - def expected_state_change_timeout(self): - return self.cassandra.state_change_wait_time - - @property - def expected_service_candidates(self): - return self.cassandra.service_candidates - - def tearDown(self): - time.sleep = self.sleep - time.time = self.orig_time_time - cass_service.packager.pkg_version = self.pkg_version - cass_service.packager = self.pkg - super(CassandraDBAppTest, self).tearDown() - - def assert_reported_status(self, expected_status): - service_status = InstanceServiceStatus.find_by( - instance_id=self.FAKE_ID) - self.assertEqual(expected_status, service_status.status) - - @patch.object(utils, 'execute_with_timeout') - def test_service_cleanup(self, exec_mock): - cass_service.CassandraAppStatus(Mock()).cleanup_stalled_db_services() - exec_mock.assert_called_once_with(self.cassandra.CASSANDRA_KILL_CMD, - shell=True) - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_install(self, _): - - self.cassandra._install_db = Mock() - self.pkg.pkg_is_installed = Mock(return_value=False) - self.cassandra.install_if_needed(['cassandra']) - self.assertTrue(self.cassandra._install_db.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch('trove.guestagent.datastore.experimental.cassandra.service.LOG') - def test_install_install_error(self, _): - - self.cassandra.start_db = Mock() - self.cassandra.stop_db = Mock() - self.pkg.pkg_is_installed = Mock(return_value=False) - self.cassandra._install_db = Mock( - side_effect=pkg.PkgPackageStateError("Install error")) - - self.assertRaises(pkg.PkgPackageStateError, - self.cassandra.install_if_needed, - ['cassandra=1.2.10']) - - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - -class CouchbaseAppTest(BaseAppTest.AppTestCase): - - def fake_couchbase_service_discovery(self, candidates): - return { - 'cmd_start': 'start', - 'cmd_stop': 'stop', - 'cmd_enable': 'enable', - 'cmd_disable': 'disable' - } - - def setUp(self): - super(CouchbaseAppTest, self).setUp(str(uuid4()), 'couchbase') - self.orig_utils_execute_with_timeout = ( - couchservice.utils.execute_with_timeout) - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - self.orig_service_discovery = operating_system.service_discovery - self.orig_get_ip = netutils.get_my_ipv4 - operating_system.service_discovery = ( - self.fake_couchbase_service_discovery) - netutils.get_my_ipv4 = Mock() - status = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.couchbaseApp = couchservice.CouchbaseApp(status) - dbaas.CONF.guest_id = self.FAKE_ID - - @property - def app(self): - return self.couchbaseApp - - @property - def appStatus(self): - return self.couchbaseApp.status - - @property - def expected_state_change_timeout(self): - return self.couchbaseApp.state_change_wait_time - - @property - def expected_service_candidates(self): - return 
couchservice.system.SERVICE_CANDIDATES - - @patch.object(utils, 'execute_with_timeout') - def test_service_cleanup(self, exec_mock): - couchservice.CouchbaseAppStatus().cleanup_stalled_db_services() - exec_mock.assert_called_once_with(couchservice.system.cmd_kill) - - def tearDown(self): - couchservice.utils.execute_with_timeout = ( - self.orig_utils_execute_with_timeout) - netutils.get_my_ipv4 = self.orig_get_ip - operating_system.service_discovery = self.orig_service_discovery - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - dbaas.CONF.guest_id = None - super(CouchbaseAppTest, self).tearDown() - - def test_install_when_couchbase_installed(self): - couchservice.packager.pkg_is_installed = Mock(return_value=True) - couchservice.utils.execute_with_timeout = Mock() - - self.couchbaseApp.install_if_needed(["package"]) - self.assertTrue(couchservice.packager.pkg_is_installed.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - -class CouchDBAppTest(BaseAppTest.AppTestCase): - - def fake_couchdb_service_discovery(self, candidates): - return { - 'cmd_start': 'start', - 'cmd_stop': 'stop', - 'cmd_enable': 'enable', - 'cmd_disable': 'disable' - } - - def setUp(self): - super(CouchDBAppTest, self).setUp(str(uuid4()), 'couchdb') - self.orig_utils_execute_with_timeout = ( - couchdb_service.utils.execute_with_timeout) - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - self.orig_service_discovery = operating_system.service_discovery - self.orig_get_ip = netutils.get_my_ipv4 - operating_system.service_discovery = ( - self.fake_couchdb_service_discovery) - netutils.get_my_ipv4 = Mock() - util.init_db() - status = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.couchdbApp = couchdb_service.CouchDBApp(status) - dbaas.CONF.guest_id = self.FAKE_ID - - @property - def app(self): - return self.couchdbApp - - @property - def appStatus(self): - return self.couchdbApp.status - - @property - def expected_state_change_timeout(self): - return self.couchdbApp.state_change_wait_time - - @property - def expected_service_candidates(self): - return couchdb_service.system.SERVICE_CANDIDATES - - def tearDown(self): - couchdb_service.utils.execute_with_timeout = ( - self.orig_utils_execute_with_timeout) - netutils.get_my_ipv4 = self.orig_get_ip - operating_system.service_discovery = self.orig_service_discovery - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - dbaas.CONF.guest_id = None - super(CouchDBAppTest, self).tearDown() - - def test_install_when_couchdb_installed(self): - couchdb_service.packager.pkg_is_installed = Mock(return_value=True) - couchdb_service.utils.execute_with_timeout = Mock() - - self.couchdbApp.install_if_needed(["package"]) - self.assertTrue(couchdb_service.packager.pkg_is_installed.called) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - -class MongoDBAppTest(BaseAppTest.AppTestCase): - - def fake_mongodb_service_discovery(self, candidates): - return { - 'cmd_start': 'start', - 'cmd_stop': 'stop', - 'cmd_enable': 'enable', - 'cmd_disable': 'disable' - } - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def setUp(self, _): - super(MongoDBAppTest, self).setUp(str(uuid4()), 'mongodb') - self.orig_utils_execute_with_timeout = (mongo_service. 
- utils.execute_with_timeout) - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - self.orig_packager = mongo_system.PACKAGER - self.orig_service_discovery = operating_system.service_discovery - self.orig_os_unlink = os.unlink - self.orig_os_path_eu = os.path.expanduser - os.path.expanduser = Mock(return_value='/tmp/.file') - - operating_system.service_discovery = ( - self.fake_mongodb_service_discovery) - util.init_db() - - self.mongoDbApp = mongo_service.MongoDBApp() - self.mongoDbApp.status = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - os.unlink = Mock() - - @property - def app(self): - return self.mongoDbApp - - @property - def appStatus(self): - return self.mongoDbApp.status - - @property - def expected_state_change_timeout(self): - return self.mongoDbApp.state_change_wait_time - - @property - def expected_service_candidates(self): - return mongo_system.MONGOD_SERVICE_CANDIDATES - - @patch.object(utils, 'execute_with_timeout') - def test_service_cleanup(self, exec_mock): - self.appStatus.cleanup_stalled_db_services() -# def cleanup_stalled_db_services(self): -# out, err = utils.execute_with_timeout(system.FIND_PID, shell=True) -# pid = "".join(out.split(" ")[1:2]) -# utils.execute_with_timeout(system.MONGODB_KILL % pid, shell=True) - - def tearDown(self): - mongo_service.utils.execute_with_timeout = ( - self.orig_utils_execute_with_timeout) - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - mongo_system.PACKAGER = self.orig_packager - operating_system.service_discovery = self.orig_service_discovery - os.unlink = self.orig_os_unlink - os.path.expanduser = self.orig_os_path_eu - super(MongoDBAppTest, self).tearDown() - - def test_start_db_with_conf_changes_db_is_running(self): - self.mongoDbApp.start_db = Mock() - self.mongoDbApp.status.status = rd_instance.ServiceStatuses.RUNNING - self.assertRaises(RuntimeError, - self.mongoDbApp.start_db_with_conf_changes, - Mock()) - - def test_install_when_db_installed(self): - packager_mock = MagicMock() - packager_mock.pkg_is_installed = MagicMock(return_value=True) - mongo_system.PACKAGER = packager_mock - self.mongoDbApp.install_if_needed(['package']) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - def test_install_when_db_not_installed(self): - packager_mock = MagicMock() - packager_mock.pkg_is_installed = MagicMock(return_value=False) - mongo_system.PACKAGER = packager_mock - self.mongoDbApp.install_if_needed(['package']) - packager_mock.pkg_install.assert_any_call(ANY, {}, ANY) - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - -class VerticaAppStatusTest(trove_testtools.TestCase): - - def setUp(self): - super(VerticaAppStatusTest, self).setUp() - util.init_db() - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - - def tearDown(self): - - super(VerticaAppStatusTest, self).tearDown() - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - - def test_get_actual_db_status(self): - self.verticaAppStatus = VerticaAppStatus() - with patch.object(vertica_system, 'shell_execute', - MagicMock(return_value=['db_srvr', None])): - status = self.verticaAppStatus._get_actual_db_status() - self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status) - - def test_get_actual_db_status_shutdown(self): - self.verticaAppStatus = 
VerticaAppStatus() - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[['', None], - ['db_srvr', None]])): - status = self.verticaAppStatus._get_actual_db_status() - self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_get_actual_db_status_error_crashed(self, *args): - self.verticaAppStatus = VerticaAppStatus() - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=ProcessExecutionError('problem' - ))): - status = self.verticaAppStatus._get_actual_db_status() - self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status) - - -class VerticaAppTest(trove_testtools.TestCase): - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - def setUp(self, *args, **kwargs): - super(VerticaAppTest, self).setUp() - self.FAKE_ID = 1000 - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.app = VerticaApp(self.appStatus) - self.setread = VolumeDevice.set_readahead_size - self.Popen = subprocess.Popen - - vertica_system_patcher = patch.multiple( - vertica_system, - shell_execute=MagicMock(return_value=('', '')), - exec_vsql_command=MagicMock(return_value=('', ''))) - self.addCleanup(vertica_system_patcher.stop) - vertica_system_patcher.start() - - VolumeDevice.set_readahead_size = Mock() - subprocess.Popen = Mock() - self.test_config = configparser.ConfigParser() - self.test_config.add_section('credentials') - self.test_config.set('credentials', - 'dbadmin_password', 'some_password') - - def tearDown(self): - self.app = None - VolumeDevice.set_readahead_size = self.setread - subprocess.Popen = self.Popen - super(VerticaAppTest, self).tearDown() - - def test_enable_root_is_root_not_enabled(self): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(self.app, 'is_root_enabled', return_value=False): - with patch.object(vertica_system, 'exec_vsql_command', - MagicMock(side_effect=[['', ''], - ['', ''], - ['', '']])): - self.app.enable_root('root_password') - create_user_arguments = ( - vertica_system.exec_vsql_command.call_args_list[0]) - expected_create_user_cmd = ( - vertica_system.CREATE_USER % ('root', - 'root_password')) - create_user_arguments.assert_called_with( - 'some_password', expected_create_user_cmd) - - grant_role_arguments = ( - vertica_system.exec_vsql_command.call_args_list[1]) - expected_grant_role_cmd = ( - vertica_system.GRANT_TO_USER % ('pseudosuperuser', - 'root')) - grant_role_arguments.assert_called_with( - 'some_password', expected_grant_role_cmd) - - enable_user_arguments = ( - vertica_system.exec_vsql_command.call_args_list[2]) - expected_enable_user_cmd = ( - vertica_system.ENABLE_FOR_USER % ('root', - 'pseudosuperuser' - )) - enable_user_arguments.assert_called_with( - 'some_password', expected_enable_user_cmd) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_enable_root_is_root_not_enabled_failed(self, *args): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(self.app, 'is_root_enabled', return_value=False): - with patch.object(vertica_system, 'exec_vsql_command', - MagicMock(side_effect=[ - ['', vertica_system.VSqlError( - 'ERROR 123: Test' - )]])): - self.assertRaises(RuntimeError, self.app.enable_root, - 'root_password') - - 
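The enable_root checks above pull entries out of exec_vsql_command.call_args_list and then invoke assert_called_with on them; attribute access on a recorded call object, however, only builds a new call object, so an explicit equality comparison against unittest.mock.call is the unambiguous way to express the same verification. A minimal, self-contained sketch of that pattern (the SQL strings are illustrative stand-ins, not the real Vertica constants):

```python
from unittest.mock import MagicMock, call

# Stand-in for the patched vertica_system.exec_vsql_command.
exec_vsql_command = MagicMock(return_value=('', ''))

# Simulate two of the statements enable_root would issue.
exec_vsql_command('some_password', 'CREATE USER root')
exec_vsql_command('some_password', 'GRANT pseudosuperuser TO root')

# Compare each recorded call against an expected call object; a mismatch
# raises AssertionError instead of passing vacuously.
assert exec_vsql_command.call_args_list[0] == call(
    'some_password', 'CREATE USER root')
assert exec_vsql_command.call_args_list[1] == call(
    'some_password', 'GRANT pseudosuperuser TO root')
```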
@patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_enable_root_is_root_enabled(self, *args): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(self.app, 'is_root_enabled', return_value=True): - with patch.object(vertica_system, 'exec_vsql_command', - MagicMock(side_effect=[['', '']])): - self.app.enable_root('root_password') - alter_user_password_arguments = ( - vertica_system.exec_vsql_command.call_args_list[0]) - expected_alter_user_cmd = ( - vertica_system.ALTER_USER_PASSWORD % ('root', - 'root_password' - )) - alter_user_password_arguments.assert_called_with( - 'some_password', expected_alter_user_cmd) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_enable_root_is_root_enabled_failed(self, *arg): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(self.app, 'is_root_enabled', return_value=True): - with patch.object(vertica_system, 'exec_vsql_command', - MagicMock(side_effect=[ - ['', vertica_system.VSqlError( - 'ERROR 123: Test' - )]])): - self.assertRaises(RuntimeError, self.app.enable_root, - 'root_password') - - def test_is_root_enable(self): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[['', '']])): - self.app.is_root_enabled() - user_exists_args = ( - vertica_system.shell_execute.call_args_list[0]) - expected_user_exists_cmd = vertica_system.USER_EXISTS % ( - 'some_password', 'root') - user_exists_args.assert_called_with(expected_user_exists_cmd, - 'dbadmin') - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_is_root_enable_failed(self, *args): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[ - ['', ProcessExecutionError]])): - self.assertRaises(RuntimeError, self.app.is_root_enabled) - - def test_install_if_needed_installed(self): - with patch.object(pkg.Package, 'pkg_is_installed', return_value=True): - with patch.object(pkg.Package, 'pkg_install', return_value=None): - self.app.install_if_needed('vertica') - pkg.Package.pkg_is_installed.assert_any_call('vertica') - self.assertEqual(0, pkg.Package.pkg_install.call_count) - - def test_install_if_needed_not_installed(self): - with patch.object(pkg.Package, 'pkg_is_installed', return_value=False): - with patch.object(pkg.Package, 'pkg_install', return_value=None): - self.app.install_if_needed('vertica') - pkg.Package.pkg_is_installed.assert_any_call('vertica') - self.assertEqual(1, pkg.Package.pkg_install.call_count) - - def test_prepare_for_install_vertica(self): - self.app.prepare_for_install_vertica() - arguments = vertica_system.shell_execute.call_args_list[0] - self.assertEqual(1, VolumeDevice.set_readahead_size.call_count) - expected_command = ( - "VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin " - "VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python" - " -m vertica.local_coerce") - arguments.assert_called_with(expected_command) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_failure_prepare_for_install_vertica(self, *args): - with patch.object(vertica_system, 'shell_execute', - side_effect=ProcessExecutionError('Error')): - self.assertRaises(ProcessExecutionError, - self.app.prepare_for_install_vertica) - - def test_install_vertica(self): - with patch.object(self.app, 
'write_config', - return_value=None): - self.app.install_vertica(members='10.0.0.2') - arguments = vertica_system.shell_execute.call_args_list[0] - expected_command = ( - vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica')) - arguments.assert_called_with(expected_command) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_failure_install_vertica(self, *args): - with patch.object(vertica_system, 'shell_execute', - side_effect=ProcessExecutionError('some exception')): - self.assertRaisesRegex(RuntimeError, 'install_vertica failed.', - self.app.install_vertica, - members='10.0.0.2') - - def test_create_db(self): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - self.app.create_db(members='10.0.0.2') - arguments = vertica_system.shell_execute.call_args_list[0] - expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr', - '/var/lib/vertica', - '/var/lib/vertica', - 'some_password')) - arguments.assert_called_with(expected_command, 'dbadmin') - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_failure_create_db(self, *args): - with patch.object(self.app, 'read_config', - side_effect=RuntimeError('Error')): - self.assertRaisesRegex(RuntimeError, - 'Vertica database create failed.', - self.app.create_db) - # Because of an exception in read_config there was no shell execution. - self.assertEqual(0, vertica_system.shell_execute.call_count) - - def test_vertica_write_config(self): - temp_file_handle = tempfile.NamedTemporaryFile("w", delete=False) - mock_mkstemp = MagicMock(return_value=(temp_file_handle)) - mock_unlink = Mock(return_value=0) - self.app.write_config(config=self.test_config, - temp_function=mock_mkstemp, - unlink_function=mock_unlink) - - arguments = vertica_system.shell_execute.call_args_list[0] - expected_command = ( - ("install -o root -g root -m 644 %(source)s %(target)s" - ) % {'source': temp_file_handle.name, - 'target': vertica_system.VERTICA_CONF}) - arguments.assert_called_with(expected_command) - self.assertEqual(1, mock_mkstemp.call_count) - - configuration_data = configparser.ConfigParser() - configuration_data.read(temp_file_handle.name) - self.assertEqual( - self.test_config.get('credentials', 'dbadmin_password'), - configuration_data.get('credentials', 'dbadmin_password')) - self.assertEqual(1, mock_unlink.call_count) - # delete the temporary_config_file - os.unlink(temp_file_handle.name) - - def test_vertica_error_in_write_config_verify_unlink(self): - mock_unlink = Mock(return_value=0) - temp_file_handle = tempfile.NamedTemporaryFile("w", delete=False) - mock_mkstemp = MagicMock(return_value=temp_file_handle) - - with patch.object(vertica_system, 'shell_execute', - side_effect=ProcessExecutionError('some exception')): - self.assertRaises(ProcessExecutionError, - self.app.write_config, - config=self.test_config, - temp_function=mock_mkstemp, - unlink_function=mock_unlink) - - self.assertEqual(1, mock_unlink.call_count) - - # delete the temporary_config_file - os.unlink(temp_file_handle.name) - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - def test_restart(self, *args, **kwargs): - mock_status = MagicMock() - app = VerticaApp(mock_status) - mock_status.begin_restart = MagicMock(return_value=None) - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - with 
patch.object(VerticaApp, 'stop_db', return_value=None): - with patch.object(VerticaApp, 'start_db', return_value=None): - mock_status.end_restart = MagicMock( - return_value=None) - app.restart() - mock_status.begin_restart.assert_any_call() - VerticaApp.stop_db.assert_any_call() - VerticaApp.start_db.assert_any_call() - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - def test_start_db(self, *args, **kwargs): - mock_status = MagicMock() - type(mock_status)._is_restarting = PropertyMock(return_value=False) - app = VerticaApp(mock_status) - with patch.object(app, '_enable_db_on_boot', return_value=None): - with patch.object(app, 'read_config', - return_value=self.test_config): - mock_status.end_restart = MagicMock( - return_value=None) - app.start_db() - agent_start, db_start = subprocess.Popen.call_args_list - agent_expected_command = [ - 'sudo', 'su', '-', 'root', '-c', - (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')] - db_expected_cmd = [ - 'sudo', 'su', '-', 'dbadmin', '-c', - (vertica_system.START_DB % ('db_srvr', 'some_password'))] - self.assertTrue(mock_status.end_restart.called) - agent_start.assert_called_with(agent_expected_command) - db_start.assert_called_with(db_expected_cmd) - - def test_start_db_failure(self): - with patch.object(self.app, '_enable_db_on_boot', - side_effect=RuntimeError()): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - self.assertRaises(RuntimeError, self.app.start_db) - - def test_stop_db(self): - type(self.appStatus)._is_restarting = PropertyMock(return_value=False) - with patch.object(self.app, '_disable_db_on_boot', return_value=None): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[['', ''], - ['db_srvr', None], - ['', '']])): - self.appStatus.wait_for_real_status_to_change_to = \ - MagicMock(return_value=True) - self.appStatus.end_restart = MagicMock( - return_value=None) - self.app.stop_db() - - self.assertEqual( - 3, vertica_system.shell_execute.call_count) - # There are 3 shell executions: - # a) stop the vertica-agent service - # b) check database status - # c) stop_db - # We are matching that the 3rd command called was stop_db. - arguments = vertica_system.shell_execute.call_args_list[2] - expected_cmd = (vertica_system.STOP_DB % ('db_srvr', - 'some_password')) - self.assertTrue(self.appStatus. - wait_for_real_status_to_change_to.called) - arguments.assert_called_with(expected_cmd, 'dbadmin') - - def test_stop_db_do_not_start_on_reboot(self): - type(self.appStatus)._is_restarting = PropertyMock(return_value=True) - with patch.object(self.app, '_disable_db_on_boot', return_value=None): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[['', ''], - ['db_srvr', None], - ['', '']])): - self.app.stop_db(do_not_start_on_reboot=True) - - self.assertEqual( - 3, vertica_system.shell_execute.call_count) - self.app._disable_db_on_boot.assert_any_call() - - def test_stop_db_database_not_running(self): - with patch.object(self.app, '_disable_db_on_boot', return_value=None): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - self.app.stop_db() - # Since the database stop command does not get executed, - # only 2 shell calls are made. 
- self.assertEqual( - 2, vertica_system.shell_execute.call_count) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_stop_db_failure(self, *args): - type(self.appStatus)._is_restarting = PropertyMock(return_value=False) - with patch.object(self.app, '_disable_db_on_boot', return_value=None): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with patch.object(vertica_system, 'shell_execute', - MagicMock(side_effect=[['', ''], - ['db_srvr', None], - ['', '']])): - self.appStatus.wait_for_real_status_to_change_to = \ - MagicMock(return_value=None) - self.appStatus.end_restart = MagicMock( - return_value=None) - self.assertRaises(RuntimeError, self.app.stop_db) - - def test_export_conf_to_members(self): - self.app._export_conf_to_members(members=['member1', 'member2']) - self.assertEqual(2, vertica_system.shell_execute.call_count) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_fail__export_conf_to_members(self, *args): - # app = VerticaApp(MagicMock()) - with patch.object(vertica_system, 'shell_execute', - side_effect=ProcessExecutionError('Error')): - self.assertRaises(ProcessExecutionError, - self.app._export_conf_to_members, - ['member1', 'member2']) - - def test_authorize_public_keys(self): - user = 'test_user' - keys = ['test_key@machine1', 'test_key@machine2'] - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - self.app.authorize_public_keys(user=user, public_keys=keys) - self.assertEqual(2, vertica_system.shell_execute.call_count) - vertica_system.shell_execute.assert_any_call( - 'cat ' + '/home/' + user + '/.ssh/authorized_keys') - - def test_authorize_public_keys_authorized_file_not_exists(self): - user = 'test_user' - keys = ['test_key@machine1', 'test_key@machine2'] - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - with patch.object( - vertica_system, 'shell_execute', - MagicMock(side_effect=[ProcessExecutionError('Some Error'), - ['', '']])): - self.app.authorize_public_keys(user=user, public_keys=keys) - self.assertEqual(2, vertica_system.shell_execute.call_count) - vertica_system.shell_execute.assert_any_call( - 'cat ' + '/home/' + user + '/.ssh/authorized_keys') - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_fail_authorize_public_keys(self, *args): - user = 'test_user' - keys = ['test_key@machine1', 'test_key@machine2'] - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - with patch.object( - vertica_system, 'shell_execute', - MagicMock(side_effect=[ProcessExecutionError('Some Error'), - ProcessExecutionError('Some Error') - ])): - self.assertRaises(ProcessExecutionError, - self.app.authorize_public_keys, user, keys) - - def test_get_public_keys(self): - user = 'test_user' - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - self.app.get_public_keys(user=user) - self.assertEqual(2, vertica_system.shell_execute.call_count) - vertica_system.shell_execute.assert_any_call( - (vertica_system.SSH_KEY_GEN % ('/home/' + user)), user) - vertica_system.shell_execute.assert_any_call( - 'cat ' + '/home/' + user + '/.ssh/id_rsa.pub') - - def test_get_public_keys_if_key_exists(self): - user = 'test_user' - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - with patch.object( - vertica_system, 'shell_execute', - MagicMock(side_effect=[ProcessExecutionError('Some Error'), - ['some_key', None]])): - key = 
self.app.get_public_keys(user=user) - self.assertEqual(2, vertica_system.shell_execute.call_count) - self.assertEqual('some_key', key) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_fail_get_public_keys(self, *args): - user = 'test_user' - with patch.object(os.path, 'expanduser', - return_value=('/home/' + user)): - with patch.object( - vertica_system, 'shell_execute', - MagicMock(side_effect=[ProcessExecutionError('Some Error'), - ProcessExecutionError('Some Error') - ])): - self.assertRaises(ProcessExecutionError, - self.app.get_public_keys, user) - - def test_install_cluster(self): - with patch.object(self.app, 'read_config', - return_value=self.test_config): - self.app.install_cluster(members=['member1', 'member2']) - # Verifying the number of shell calls, - # as command has already been tested in preceding tests - self.assertEqual(5, vertica_system.shell_execute.call_count) - - def test__enable_db_on_boot(self): - self.app._enable_db_on_boot() - - restart_policy, agent_enable = subprocess.Popen.call_args_list - expected_restart_policy = [ - 'sudo', 'su', '-', 'dbadmin', '-c', - (vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))] - expected_agent_enable = [ - 'sudo', 'su', '-', 'root', '-c', - (vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')] - - self.assertEqual(2, subprocess.Popen.call_count) - restart_policy.assert_called_with(expected_restart_policy) - agent_enable.assert_called_with(expected_agent_enable) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_failure__enable_db_on_boot(self, *args): - with patch.object(subprocess, 'Popen', side_effect=OSError): - self.assertRaisesRegex(RuntimeError, - 'Could not enable database on boot.', - self.app._enable_db_on_boot) - - def test__disable_db_on_boot(self): - self.app._disable_db_on_boot() - - restart_policy, agent_disable = ( - vertica_system.shell_execute.call_args_list) - expected_restart_policy = ( - vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never')) - expected_agent_disable = ( - vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable') - - self.assertEqual(2, vertica_system.shell_execute.call_count) - restart_policy.assert_called_with(expected_restart_policy, 'dbadmin') - agent_disable.assert_called_with(expected_agent_disable, 'root') - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_failure__disable_db_on_boot(self, *args): - with patch.object(vertica_system, 'shell_execute', - side_effect=ProcessExecutionError('Error')): - self.assertRaisesRegex(RuntimeError, - 'Could not disable database on boot.', - self.app._disable_db_on_boot) - - def test_read_config(self): - with patch.object(configparser, 'ConfigParser', - return_value=self.test_config): - test_config = self.app.read_config() - self.assertEqual('some_password', - test_config.get('credentials', 'dbadmin_password') - ) - - @patch('trove.guestagent.datastore.experimental.vertica.service.LOG') - def test_fail_read_config(self, *args): - with patch.object(configparser.ConfigParser, 'read', - side_effect=configparser.Error()): - self.assertRaises(RuntimeError, self.app.read_config) - - @patch.object(ConfigurationManager, 'save_configuration') - def test_start_db_with_conf_changes(self, save_cfg): - type(self.appStatus)._is_restarting = PropertyMock(return_value=False) - type(self.appStatus).is_running = PropertyMock(return_value=False) - with patch.object(self.app, 'read_config', - return_value=self.test_config): - with 
patch.object(self.appStatus, 'end_restart') as end_restart: - config = 'tst_cfg_contents' - self.app.start_db_with_conf_changes(config) - save_cfg.assert_called_once_with(config) - end_restart.assert_any_call() - - -class DB2AppTest(trove_testtools.TestCase): - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2service, 'run_command') - @patch.object(db2service.DB2App, 'process_default_dbm_config') - def setUp(self, *args, **kwargs): - super(DB2AppTest, self).setUp() - self.orig_utils_execute_with_timeout = ( - db2service.utils.execute_with_timeout) - util.init_db() - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.db2App = db2service.DB2App(self.appStatus) - self.db2App.init_config() - dbaas.CONF.guest_id = self.FAKE_ID - - def tearDown(self): - db2service.utils.execute_with_timeout = ( - self.orig_utils_execute_with_timeout) - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - dbaas.CONF.guest_id = None - self.db2App = None - super(DB2AppTest, self).tearDown() - - def assert_reported_status(self, expected_status): - service_status = InstanceServiceStatus.find_by( - instance_id=self.FAKE_ID) - self.assertEqual(expected_status, service_status.status) - - def test_stop_db(self): - db2service.utils.execute_with_timeout = MagicMock(return_value=None) - self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN) - self.db2App.stop_db() - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT, - chown=DEFAULT, chmod=DEFAULT) - @patch.object(db2service, 'run_command') - @patch.object(db2service.DB2App, 'process_default_dbm_config') - def test_restart_server(self, *args, **kwargs): - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - mock_status = MagicMock(return_value=None) - app = db2service.DB2App(mock_status) - mock_status.begin_restart = MagicMock(return_value=None) - app.stop_db = MagicMock(return_value=None) - app.start_db = MagicMock(return_value=None) - with patch.object(BaseDbStatus, 'prepare_completed') as patch_pc: - patch_pc.__get__ = Mock(return_value=True) - app.restart() - - self.assertTrue(mock_status.begin_restart.called) - self.assertTrue(app.stop_db.called) - self.assertTrue(app.start_db.called) - - def test_start_db(self): - db2service.utils.execute_with_timeout = MagicMock(return_value=None) - self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING) - with patch.object(self.db2App, '_enable_db_on_boot', - return_value=None): - self.db2App.start_db() - self.assert_reported_status(rd_instance.ServiceStatuses.NEW) - - @patch.object(ConfigurationManager, 'save_configuration') - @patch.object(db2service.DB2App, 'start_db') - def test_start_db_with_conf_changes(self, start_db, save_cfg): - config = {'DIAGSIZE': '10'} - self.db2App.start_db_with_conf_changes(config) - start_db.assert_called_once_with(True) - save_cfg.assert_called_once_with(config) - - @patch.object(ConfigurationManager, 'apply_user_override') - @patch.object(db2service.DB2App, '_apply_config') - def test_update_overrides(self, apply_user_override, apply_config): - overrides = {'DIAGSIZE': 50} - context = 
MagicMock() - self.db2App.update_overrides(context, overrides) - apply_user_override.assert_called_once_with(overrides) - apply_config.assert_called_once_with(overrides) - - @patch.object(ConfigurationManager, 'get_user_override') - @patch.object(ConfigurationManager, 'remove_user_override') - @patch.object(db2service.DB2App, '_reset_config') - def test_remove_overrides(self, reset_config, remove_user_override, - get_user_override): - overrides = {'DIAGSIZE': 50} - get_user_override.return_value = overrides - self.db2App.remove_overrides() - get_user_override.assert_called_once() - reset_config.assert_called_once_with(overrides) - remove_user_override.assert_called_once() - - -class DB2AdminTest(trove_testtools.TestCase): - - def setUp(self): - super(DB2AdminTest, self).setUp() - self.db2Admin = db2service.DB2Admin() - self.orig_utils_execute_with_timeout = ( - db2service.utils.execute_with_timeout) - - def tearDown(self): - db2service.utils.execute_with_timeout = ( - self.orig_utils_execute_with_timeout) - super(DB2AdminTest, self).tearDown() - - @patch('trove.guestagent.datastore.experimental.db2.service.LOG') - def test_delete_database(self, *args): - with patch.object( - db2service, 'run_command', - MagicMock( - return_value=None, - side_effect=ProcessExecutionError('Error'))): - self.assertRaises(GuestError, - self.db2Admin.delete_database, - FAKE_DB) - self.assertTrue(db2service.run_command.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 drop database testDB" - self.assertEqual(expected, args[0], - "Delete database queries are not the same") - - @patch('trove.guestagent.datastore.experimental.db2.service.LOG') - def test_list_databases(self, *args): - with patch.object(db2service, 'run_command', MagicMock( - side_effect=ProcessExecutionError('Error'))): - self.db2Admin.list_databases() - self.assertTrue(db2service.run_command.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 list database directory " \ - "| grep -B6 -i indirect | grep 'Database name' | " \ - "sed 's/.*= //'" - self.assertEqual(expected, args[0], - "List database queries are not the same") - - def test_create_users(self): - with patch.object(db2service, 'run_command', MagicMock( - return_value=None)): - db2service.utils.execute_with_timeout = MagicMock( - return_value=None) - self.db2Admin.create_user(FAKE_USER) - self.assertTrue(db2service.utils.execute_with_timeout.called) - self.assertTrue(db2service.run_command.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 connect to testDB; " \ - "db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \ - "ON DATABASE TO USER random; db2 connect reset" - self.assertEqual( - expected, args[0], - "Granting database access queries are not the same") - self.assertEqual(1, db2service.run_command.call_count) - - def test_delete_users_with_db(self): - with patch.object(db2service, 'run_command', - MagicMock(return_value=None)): - with patch.object(db2service.DB2Admin, 'list_access', - MagicMock(return_value=None)): - utils.execute_with_timeout = MagicMock(return_value=None) - self.db2Admin.delete_user(FAKE_USER[0]) - self.assertTrue(db2service.run_command.called) - self.assertTrue(db2service.utils.execute_with_timeout.called) - self.assertFalse(db2service.DB2Admin.list_access.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 connect to testDB; " \ - "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \ - "ON DATABASE FROM USER random; db2 connect reset" - 
self.assertEqual( - expected, args[0], - "Revoke database access queries are not the same") - self.assertEqual(1, db2service.run_command.call_count) - - def test_delete_users_without_db(self): - FAKE_USER.append( - {"_name": "random2", "_password": "guesswhat", "_host": '%', - "_databases": []}) - with patch.object(db2service, 'run_command', - MagicMock(return_value=None)): - with patch.object(db2service.DB2Admin, 'list_access', - MagicMock(return_value=[FAKE_DB])): - utils.execute_with_timeout = MagicMock(return_value=None) - self.db2Admin.delete_user(FAKE_USER[1]) - self.assertTrue(db2service.run_command.called) - self.assertTrue(db2service.DB2Admin.list_access.called) - self.assertTrue( - db2service.utils.execute_with_timeout.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 connect to testDB; " \ - "db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \ - "DATAACCESS ON DATABASE FROM USER random2; " \ - "db2 connect reset" - self.assertEqual( - expected, args[0], - "Revoke database access queries are not the same") - self.assertEqual(1, db2service.run_command.call_count) - FAKE_USER.pop() - - def test_list_users(self): - databases = [] - databases.append(FAKE_DB) - with patch.object(db2service, 'run_command', MagicMock( - side_effect=ProcessExecutionError('Error'))): - with patch.object(self.db2Admin, "list_databases", - MagicMock(return_value=(databases, None))): - self.db2Admin.list_users() - self.assertTrue(db2service.run_command.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 +o connect to testDB; " \ - "db2 -x select grantee, dataaccessauth " \ - "from sysibm.sysdbauth; db2 connect reset" - self.assertEqual(expected, args[0], - "List user queries are not the same") - - def test_get_user(self): - databases = [] - databases.append(FAKE_DB) - with patch.object(db2service, 'run_command', MagicMock( - side_effect=ProcessExecutionError('Error'))): - with patch.object(self.db2Admin, "list_databases", - MagicMock(return_value=(databases, None))): - self.db2Admin._get_user('random', None) - self.assertTrue(db2service.run_command.called) - args, _ = db2service.run_command.call_args_list[0] - expected = "db2 +o connect to testDB; " \ - "db2 -x select grantee, dataaccessauth " \ - "from sysibm.sysdbauth; db2 connect reset" - self.assertEqual(args[0], expected, - "Get user queries are not the same") - - -class PXCAppTest(trove_testtools.TestCase): - - def setUp(self): - super(PXCAppTest, self).setUp() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - self.orig_unlink = os.unlink - self.orig_get_auth_password = \ - mysql_common_service.BaseMySqlApp.get_auth_password - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.PXCApp = pxc_service.PXCApp(self.appStatus) - mysql_service = patch.object( - pxc_service.PXCApp, 'mysql_service', - PropertyMock(return_value={ - 'cmd_start': Mock(), - 'cmd_stop': Mock(), - 'cmd_enable': Mock(), - 'cmd_disable': Mock(), - 'cmd_bootstrap_galera_cluster': Mock(), - 'bin': Mock() - })) - mysql_service.start() - self.addCleanup(mysql_service.stop) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - os.unlink = Mock() - mysql_common_service.BaseMySqlApp.get_auth_password = Mock() - self.mock_client = Mock() - 
self.mock_execute = Mock() - self.mock_client.__enter__ = Mock() - self.mock_client.__exit__ = Mock() - self.mock_client.__enter__.return_value.execute = self.mock_execute - self.orig_configuration_manager = \ - mysql_common_service.BaseMySqlApp.configuration_manager - mysql_common_service.BaseMySqlApp.configuration_manager = Mock() - self.orig_create_engine = sqlalchemy.create_engine - - def tearDown(self): - self.PXCApp = None - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - os.unlink = self.orig_unlink - mysql_common_service.BaseMySqlApp.get_auth_password = \ - self.orig_get_auth_password - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - mysql_common_service.BaseMySqlApp.configuration_manager = \ - self.orig_configuration_manager - sqlalchemy.create_engine = self.orig_create_engine - super(PXCAppTest, self).tearDown() - - @patch.object(pxc_service.PXCApp, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test__grant_cluster_replication_privilege(self, mock_engine): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - with patch.object(pxc_service.PXCApp, 'local_sql_client', - return_value=self.mock_client): - self.PXCApp._grant_cluster_replication_privilege(repl_user) - args, _ = self.mock_execute.call_args_list[0] - expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " - "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") - self.assertEqual(expected, args[0].text, - "Sql statements are not the same") - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test__bootstrap_cluster(self, mock_execute): - pxc_service_cmds = self.PXCApp.mysql_service - self.PXCApp._bootstrap_cluster(timeout=20) - self.assertEqual(1, mock_execute.call_count) - mock_execute.assert_called_with( - pxc_service_cmds['cmd_bootstrap_galera_cluster'], - shell=True, - timeout=20) - - def test_install_cluster(self): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - apply_mock = Mock() - self.PXCApp.configuration_manager.apply_system_override = apply_mock - self.PXCApp.stop_db = Mock() - self.PXCApp._grant_cluster_replication_privilege = Mock() - self.PXCApp.wipe_ib_logfiles = Mock() - self.PXCApp.start_mysql = Mock() - self.PXCApp.install_cluster(repl_user, "something") - self.assertEqual(1, self.PXCApp.stop_db.call_count) - self.assertEqual( - 1, self.PXCApp._grant_cluster_replication_privilege.call_count) - self.assertEqual(1, apply_mock.call_count) - self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) - self.assertEqual(1, self.PXCApp.start_mysql.call_count) - - def test_install_cluster_with_bootstrap(self): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - apply_mock = Mock() - self.PXCApp.configuration_manager.apply_system_override = apply_mock - self.PXCApp.stop_db = Mock() - self.PXCApp._grant_cluster_replication_privilege = Mock() - self.PXCApp.wipe_ib_logfiles = Mock() - self.PXCApp._bootstrap_cluster = Mock() - self.PXCApp.install_cluster(repl_user, "something", bootstrap=True) - self.assertEqual(1, self.PXCApp.stop_db.call_count) - self.assertEqual( - 1, self.PXCApp._grant_cluster_replication_privilege.call_count) - self.assertEqual(1, self.PXCApp.wipe_ib_logfiles.call_count) - self.assertEqual(1, apply_mock.call_count) - self.assertEqual(1, self.PXCApp._bootstrap_cluster.call_count) - - -class 
MariaDBAppTest(trove_testtools.TestCase): - - def setUp(self): - super(MariaDBAppTest, self).setUp() - self.orig_utils_execute_with_timeout = \ - mysql_common_service.utils.execute_with_timeout - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - self.orig_unlink = os.unlink - self.orig_get_auth_password = \ - mysql_common_service.BaseMySqlApp.get_auth_password - self.FAKE_ID = str(uuid4()) - InstanceServiceStatus.create(instance_id=self.FAKE_ID, - status=rd_instance.ServiceStatuses.NEW) - self.appStatus = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - self.MariaDBApp = mariadb_service.MariaDBApp(self.appStatus) - mysql_service = patch.object( - mariadb_service.MariaDBApp, 'mysql_service', - PropertyMock(return_value={ - 'cmd_start': Mock(), - 'cmd_stop': Mock(), - 'cmd_enable': Mock(), - 'cmd_disable': Mock(), - 'cmd_bootstrap_galera_cluster': Mock(), - 'bin': Mock() - })) - mysql_service.start() - self.addCleanup(mysql_service.stop) - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - os.unlink = Mock() - mysql_common_service.BaseMySqlApp.get_auth_password = Mock() - self.mock_client = Mock() - self.mock_execute = Mock() - self.mock_client.__enter__ = Mock() - self.mock_client.__exit__ = Mock() - self.mock_client.__enter__.return_value.execute = self.mock_execute - self.orig_configuration_manager = \ - mysql_common_service.BaseMySqlApp.configuration_manager - mysql_common_service.BaseMySqlApp.configuration_manager = Mock() - self.orig_create_engine = sqlalchemy.create_engine - - def tearDown(self): - self.MariaDBApp = None - mysql_common_service.utils.execute_with_timeout = \ - self.orig_utils_execute_with_timeout - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - os.unlink = self.orig_unlink - mysql_common_service.BaseMySqlApp.get_auth_password = \ - self.orig_get_auth_password - InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete() - mysql_common_service.BaseMySqlApp.configuration_manager = \ - self.orig_configuration_manager - sqlalchemy.create_engine = self.orig_create_engine - super(MariaDBAppTest, self).tearDown() - - @patch.object(mariadb_service.MariaDBApp, 'get_engine', - return_value=MagicMock(name='get_engine')) - def test__grant_cluster_replication_privilege(self, mock_engine): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - with patch.object(mariadb_service.MariaDBApp, 'local_sql_client', - return_value=self.mock_client): - self.MariaDBApp._grant_cluster_replication_privilege(repl_user) - args, _ = self.mock_execute.call_args_list[0] - expected = ("GRANT LOCK TABLES, RELOAD, REPLICATION CLIENT ON *.* " - "TO `test-user`@`%` IDENTIFIED BY 'test-user-password';") - self.assertEqual(expected, args[0].text, - "Sql statements are not the same") - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - def test__bootstrap_cluster(self, mock_execute): - mariadb_service_cmds = self.MariaDBApp.mysql_service - self.MariaDBApp._bootstrap_cluster(timeout=20) - self.assertEqual(1, mock_execute.call_count) - mock_execute.assert_called_with( - mariadb_service_cmds['cmd_bootstrap_galera_cluster'], - shell=True, - timeout=20) - - def test_install_cluster(self): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - apply_mock = Mock() - self.MariaDBApp.configuration_manager.apply_system_override = \ - apply_mock - self.MariaDBApp.stop_db = Mock() - self.MariaDBApp._grant_cluster_replication_privilege = Mock() - 
self.MariaDBApp.wipe_ib_logfiles = Mock() - self.MariaDBApp.start_mysql = Mock() - self.MariaDBApp.install_cluster(repl_user, "something") - self.assertEqual(1, self.MariaDBApp.stop_db.call_count) - self.assertEqual( - 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) - self.assertEqual(1, apply_mock.call_count) - self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) - self.assertEqual(1, self.MariaDBApp.start_mysql.call_count) - - def test_install_cluster_with_bootstrap(self): - repl_user = { - 'name': 'test-user', - 'password': 'test-user-password', - } - apply_mock = Mock() - self.MariaDBApp.configuration_manager.apply_system_override = \ - apply_mock - self.MariaDBApp.stop_db = Mock() - self.MariaDBApp._grant_cluster_replication_privilege = Mock() - self.MariaDBApp.wipe_ib_logfiles = Mock() - self.MariaDBApp._bootstrap_cluster = Mock() - self.MariaDBApp.install_cluster(repl_user, "something", bootstrap=True) - self.assertEqual(1, self.MariaDBApp.stop_db.call_count) - self.assertEqual( - 1, self.MariaDBApp._grant_cluster_replication_privilege.call_count) - self.assertEqual(1, self.MariaDBApp.wipe_ib_logfiles.call_count) - self.assertEqual(1, apply_mock.call_count) - self.assertEqual(1, self.MariaDBApp._bootstrap_cluster.call_count) - - -class PostgresAppTest(BaseAppTest.AppTestCase): - - @patch.object(utils, 'execute_with_timeout', return_value=('0', '')) - @patch.object(pg_service.PgSqlApp, '_find_config_file', return_value='') - @patch.object(pg_service.PgSqlApp, - 'pgsql_extra_bin_dir', PropertyMock(return_value='')) - def setUp(self, mock_cfg, mock_exec): - super(PostgresAppTest, self).setUp(str(uuid4()), 'postgresql') - self.orig_time_sleep = time.sleep - self.orig_time_time = time.time - time.sleep = Mock() - time.time = Mock(side_effect=faketime) - self.postgres = pg_service.PgSqlApp() - self.postgres.status = FakeAppStatus(self.FAKE_ID, - rd_instance.ServiceStatuses.NEW) - - @property - def app(self): - return self.postgres - - @property - def appStatus(self): - return self.postgres.status - - @property - def expected_state_change_timeout(self): - return CONF.state_change_wait_time - - @property - def expected_service_candidates(self): - return self.postgres.service_candidates - - def tearDown(self): - time.sleep = self.orig_time_sleep - time.time = self.orig_time_time - super(PostgresAppTest, self).tearDown() diff --git a/trove/tests/unittests/guestagent/test_galera_cluster_api.py b/trove/tests/unittests/guestagent/test_galera_cluster_api.py deleted file mode 100644 index e37a549a85..0000000000 --- a/trove/tests/unittests/guestagent/test_galera_cluster_api.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2015 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
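One pattern worth calling out from the PXCAppTest and MariaDBAppTest fixtures above: they wire __enter__/__exit__ onto a plain Mock by hand so the fake SQL client can be used as a context manager. MagicMock ships with the context-manager protocol preconfigured, so the same fixture can be written more compactly; a minimal sketch of that alternative (illustrative only, not the Trove fixture, and the GRANT string is a stand-in):

```python
from unittest.mock import MagicMock

# MagicMock preconfigures __enter__/__exit__; no manual wiring is needed.
client = MagicMock()
client.__enter__.return_value.execute.return_value = ('', '')

# Code under test typically does: with local_sql_client(engine) as conn: ...
with client as conn:
    result = conn.execute(
        "GRANT REPLICATION CLIENT ON *.* TO `test-user`@`%`;")

assert result == ('', '')
client.__exit__.assert_called_once()  # the context manager was closed
```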
- -from eventlet import Timeout -from unittest import mock - -import trove.common.context as context -from trove.common import exception -from trove.common.rpc.version import RPC_API_VERSION -from trove.common.strategies.cluster.experimental.galera_common.guestagent \ - import GaleraCommonGuestAgentStrategy -from trove import rpc -from trove.tests.unittests import trove_testtools - - -def _mock_call(cmd, timeout, version=None, user=None, - public_keys=None, members=None): - # To check get_public_keys, authorize_public_keys, - # install_cluster, cluster_complete in cmd. - if cmd in ('get_public_keys', 'authorize_public_keys', - 'install_cluster', 'cluster_complete'): - return True - else: - raise BaseException("Test Failed") - - -class ApiTest(trove_testtools.TestCase): - @mock.patch.object(rpc, 'get_client') - @mock.patch('trove.instance.models.get_instance_encryption_key', - return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08') - def setUp(self, mock_get_encryption_key, *args): - super(ApiTest, self).setUp() - cluster_guest_api = (GaleraCommonGuestAgentStrategy() - .guest_client_class) - self.context = context.TroveContext() - self.guest = cluster_guest_api(self.context, 0) - self.guest._call = _mock_call - self.api = cluster_guest_api(self.context, "instance-id-x23d2d") - self._mock_rpc_client() - mock_get_encryption_key.assert_called() - - def test_get_routing_key(self): - self.assertEqual('guestagent.instance-id-x23d2d', - self.api._get_routing_key()) - - @mock.patch('trove.guestagent.api.LOG') - def test_api_cast_exception(self, mock_logging): - self.call_context.cast.side_effect = IOError('host down') - self.assertRaises(exception.GuestError, self.api.create_user, - 'test_user') - - @mock.patch('trove.guestagent.api.LOG') - def test_api_call_exception(self, mock_logging): - self.call_context.call.side_effect = IOError('host_down') - self.assertRaises(exception.GuestError, self.api.list_users) - - def test_api_call_timeout(self): - self.call_context.call.side_effect = Timeout() - self.assertRaises(exception.GuestTimeout, self.api.restart) - - def _verify_rpc_prepare_before_call(self): - self.api.client.prepare.assert_called_once_with( - version=RPC_API_VERSION, timeout=mock.ANY) - - def _verify_rpc_prepare_before_cast(self): - self.api.client.prepare.assert_called_once_with( - version=RPC_API_VERSION) - - def _verify_cast(self, *args, **kwargs): - self.call_context.cast.assert_called_once_with(self.context, *args, - **kwargs) - - def _verify_call(self, *args, **kwargs): - self.call_context.call.assert_called_once_with(self.context, *args, - **kwargs) - - def _mock_rpc_client(self): - self.call_context = mock.Mock() - self.api.client.prepare = mock.Mock(return_value=self.call_context) - self.call_context.call = mock.Mock() - self.call_context.cast = mock.Mock() - - def test_install_cluster(self): - exp_resp = None - self.call_context.call.return_value = exp_resp - - resp = self.api.install_cluster( - replication_user="repuser", - cluster_configuration="cluster-configuration", - bootstrap=False) - - self._verify_rpc_prepare_before_call() - self._verify_call('install_cluster', replication_user="repuser", - cluster_configuration="cluster-configuration", - bootstrap=False) - self.assertEqual(exp_resp, resp) - - def test_reset_admin_password(self): - exp_resp = None - self.call_context.call.return_value = exp_resp - - resp = self.api.reset_admin_password( - admin_password="admin_password") - - self._verify_rpc_prepare_before_call() - self._verify_call('reset_admin_password', - 
admin_password="admin_password") - self.assertEqual(exp_resp, resp) - - def test_cluster_complete(self): - exp_resp = None - self.call_context.call.return_value = exp_resp - - resp = self.api.cluster_complete() - - self._verify_rpc_prepare_before_call() - self._verify_call('cluster_complete') - self.assertEqual(exp_resp, resp) - - def test_get_cluster_context(self): - exp_resp = None - self.call_context.call.return_value = exp_resp - - resp = self.api.get_cluster_context() - - self._verify_rpc_prepare_before_call() - self._verify_call('get_cluster_context') - self.assertEqual(exp_resp, resp) - - def test_write_cluster_configuration_overrides(self): - exp_resp = None - self.call_context.call.return_value = exp_resp - - resp = self.api.write_cluster_configuration_overrides( - cluster_configuration="cluster-configuration") - - self._verify_rpc_prepare_before_call() - self._verify_call('write_cluster_configuration_overrides', - cluster_configuration="cluster-configuration",) - self.assertEqual(exp_resp, resp) diff --git a/trove/tests/unittests/guestagent/test_galera_manager.py b/trove/tests/unittests/guestagent/test_galera_manager.py deleted file mode 100644 index 487ba7d975..0000000000 --- a/trove/tests/unittests/guestagent/test_galera_manager.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright [2015] Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from mock import MagicMock -from mock import patch - -from trove.common.context import TroveContext -from trove.guestagent.datastore.galera_common import manager as galera_manager -from trove.guestagent.datastore.galera_common import service as galera_service -from trove.guestagent.datastore.mysql_common import service as mysql_service -from trove.tests.unittests import trove_testtools - - -class GaleraTestApp(galera_service.GaleraApp): - - def __init__(self, status): - super(GaleraTestApp, self).__init__( - status, mysql_service.BaseLocalSqlClient, - mysql_service.BaseKeepAliveConnection) - - @property - def cluster_configuration(self): - return self.configuration_manager.get_value('mysqld') - - -class GaleraTestRootAccess(mysql_service.BaseMySqlRootAccess): - - def __init__(self): - super(GaleraTestRootAccess, self).__init__( - mysql_service.BaseLocalSqlClient, - GaleraTestApp(mysql_service.BaseMySqlAppStatus.get())) - - -class GaleraTestAdmin(mysql_service.BaseMySqlAdmin): - def __init__(self): - super(GaleraTestAdmin, self).__init__( - mysql_service.BaseLocalSqlClient, GaleraTestRootAccess(), - GaleraTestApp) - - -class GuestAgentManagerTest(trove_testtools.TestCase): - - def setUp(self): - super(GuestAgentManagerTest, self).setUp() - self.manager = galera_manager.GaleraManager( - GaleraTestApp, mysql_service.BaseMySqlAppStatus, - GaleraTestAdmin) - self.context = TroveContext() - patcher_rs = patch( - 'trove.guestagent.strategies.replication.get_instance') - patcher_rs.start() - self.addCleanup(patcher_rs.stop) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(galera_service.GaleraApp, 'install_cluster', - new_callable=MagicMock) - def test_install_cluster(self, install_cluster, app_status_get): - install_cluster.return_value = MagicMock() - app_status_get.return_value = None - - replication_user = "repuser" - configuration = "configuration" - bootstrap = True - self.manager.install_cluster(self.context, replication_user, - configuration, bootstrap) - app_status_get.assert_any_call() - install_cluster.assert_called_with( - replication_user, configuration, bootstrap) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(galera_service.GaleraApp, 'reset_admin_password', - new_callable=MagicMock) - def test_reset_admin_password(self, reset_admin_password, app_status_get): - reset_admin_password.return_value = None - app_status_get.return_value = MagicMock() - - admin_password = "password" - self.manager.reset_admin_password(self.context, admin_password) - app_status_get.assert_any_call() - reset_admin_password.assert_called_with(admin_password) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(galera_service.GaleraApp, 'get_cluster_context') - def test_get_cluster_context(self, get_cluster_ctxt, app_status_get): - get_cluster_ctxt.return_value = {'cluster': 'info'} - self.manager.get_cluster_context(self.context) - app_status_get.assert_any_call() - get_cluster_ctxt.assert_any_call() - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(galera_service.GaleraApp, - 'write_cluster_configuration_overrides') - def test_write_cluster_configuration_overrides(self, conf_overries, - app_status_get): - cluster_configuration = "cluster_configuration" - self.manager.write_cluster_configuration_overrides( - self.context, cluster_configuration) - app_status_get.assert_any_call() - 
conf_overries.assert_called_with(cluster_configuration) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(mysql_service.BaseMySqlAdmin, 'enable_root') - def test_enable_root_with_password(self, reset_admin_pwd, - app_status_get): - admin_password = "password" - self.manager.enable_root_with_password(self.context, admin_password) - reset_admin_pwd.assert_called_with(admin_password) diff --git a/trove/tests/unittests/guestagent/test_guestagent_utils.py b/trove/tests/unittests/guestagent/test_guestagent_utils.py deleted file mode 100644 index 3c599dc274..0000000000 --- a/trove/tests/unittests/guestagent/test_guestagent_utils.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright 2015 Tesora Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mock import Mock -from mock import patch - -from trove.common import pagination -from trove.guestagent.common import guestagent_utils -from trove.tests.unittests import trove_testtools - - -class TestGuestagentUtils(trove_testtools.TestCase): - - def test_update_dict(self): - data = [{ - 'dict': {}, 'update': {}, 'expected': {}, - }, { - 'dict': None, 'update': {}, 'expected': {}, - }, { - 'dict': {}, 'update': None, 'expected': {}, - }, { - 'dict': {}, 'update': None, 'expected': {}, - }, { - 'dict': None, 'update': {'name': 'Tom'}, - 'expected': {'name': 'Tom'}, - }, { - 'dict': {}, 'update': {'name': 'Tom'}, - 'expected': {'name': 'Tom'}, - }, { - 'dict': {'name': 'Tom'}, 'update': {}, - 'expected': {'name': 'Tom'}, - }, { - 'dict': {'key1': 'value1', - 'dict1': {'key1': 'value1', 'key2': 'value2'}}, - 'update': {'key1': 'value1+', - 'key2': 'value2', - 'dict1': {'key3': 'value3'}}, - 'expected': {'key1': 'value1+', - 'key2': 'value2', - 'dict1': {'key1': 'value1', 'key2': 'value2', - 'key3': 'value3'}}, - }, { - 'dict': {'d1': {'d2': {'d3': {'k1': 'v1'}}}}, - 'update': {'d1': {'d2': {'d3': {'k2': 'v2'}}}}, - 'expected': {'d1': {'d2': {'d3': {'k1': 'v1', 'k2': 'v2'}}}}, - }, { - 'dict': {'timeout': 0, 'save': [[900, 1], [300, 10]]}, - 'update': {'save': [[300, 20], [60, 10000]]}, - 'expected': {'timeout': 0, - 'save': [[300, 20], [60, 10000]]}, - }, { - 'dict': {'rpc_address': '0.0.0.0', - 'broadcast_rpc_address': '0.0.0.0', - 'listen_address': '0.0.0.0', - 'seed_provider': [{ - 'class_name': - 'org.apache.cassandra.locator.SimpleSeedProvider', - 'parameters': [{'seeds': '0.0.0.0'}]}] - }, - 'update': {'rpc_address': '127.0.0.1', - 'seed_provider': {'parameters': { - 'seeds': '127.0.0.1'}} - }, - 'expected': {'rpc_address': '127.0.0.1', - 'broadcast_rpc_address': '0.0.0.0', - 'listen_address': '0.0.0.0', - 'seed_provider': [{ - 'class_name': - 'org.apache.cassandra.locator.SimpleSeedProvider', - 'parameters': [{'seeds': '127.0.0.1'}]}] - }, - }, { - 'dict': {'rpc_address': '127.0.0.1', - 'broadcast_rpc_address': '0.0.0.0', - 'listen_address': '0.0.0.0', - 'seed_provider': [{ - 'class_name': - 'org.apache.cassandra.locator.SimpleSeedProvider', - 'parameters': [{'seeds': 
'0.0.0.0'}]}] - }, - 'update': {'seed_provider': - [{'class_name': - 'org.apache.cassandra.locator.SimpleSeedProvider'}] - }, - 'expected': {'rpc_address': '127.0.0.1', - 'broadcast_rpc_address': '0.0.0.0', - 'listen_address': '0.0.0.0', - 'seed_provider': [{ - 'class_name': - 'org.apache.cassandra.locator.SimpleSeedProvider' - }]}, - }] - count = 0 - for record in data: - count += 1 - target = record['dict'] - update = record['update'] - expected = record['expected'] - result = guestagent_utils.update_dict(update, target) - msg = 'Unexpected result for test %s' % str(count) - self.assertEqual(expected, result, msg) - - def test_build_file_path(self): - self.assertEqual( - 'base_dir/base_name', - guestagent_utils.build_file_path('base_dir', 'base_name')) - - self.assertEqual( - 'base_dir/base_name.ext1', - guestagent_utils.build_file_path('base_dir', 'base_name', 'ext1')) - - self.assertEqual( - 'base_dir/base_name.ext1.ext2', - guestagent_utils.build_file_path( - 'base_dir', 'base_name', 'ext1', 'ext2')) - - def test_flatten_expand_dict(self): - self._assert_flatten_expand_dict({}, {}) - self._assert_flatten_expand_dict({'ns1': 1}, {'ns1': 1}) - self._assert_flatten_expand_dict( - {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}, - {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}) - - def _assert_flatten_expand_dict(self, nested_dict, flattened_dict): - self.assertEqual( - flattened_dict, guestagent_utils.flatten_dict(nested_dict)) - self.assertEqual( - nested_dict, guestagent_utils.expand_dict(flattened_dict)) - - def test_to_bytes(self): - self.assertEqual('1024', guestagent_utils.to_bytes('1024')) - self.assertEqual(1048576, guestagent_utils.to_bytes('1024K')) - self.assertEqual(1073741824, guestagent_utils.to_bytes('1024M')) - self.assertEqual(1099511627776, guestagent_utils.to_bytes('1024G')) - self.assertEqual('1024T', guestagent_utils.to_bytes('1024T')) - self.assertEqual(1024, guestagent_utils.to_bytes(1024)) - self.assertEqual('Hello!', guestagent_utils.to_bytes('Hello!')) - self.assertEqual('', guestagent_utils.to_bytes('')) - self.assertIsNone(guestagent_utils.to_bytes(None)) - - @patch.object(pagination, 'paginate_object_list') - def test_paginate_list(self, paginate_obj_mock): - limit = Mock() - marker = Mock() - include_marker = Mock() - test_list = [Mock(), Mock(), Mock()] - guestagent_utils.paginate_list( - test_list, - limit=limit, marker=marker, include_marker=include_marker) - paginate_obj_mock.assert_called_once_with( - test_list, 'name', - limit=limit, marker=marker, include_marker=include_marker) - - def test_serialize_list(self): - test_list = [Mock(), Mock(), Mock()] - with patch.object(guestagent_utils, 'paginate_list', - return_value=(test_list[:2], test_list[-2]) - ) as paginate_lst_mock: - _, next_name = guestagent_utils.serialize_list(test_list) - paginate_lst_mock.assert_called_once_with( - test_list, - limit=None, marker=None, include_marker=False) - for item in paginate_lst_mock.return_value[0]: - item.serialize.assert_called_once_with() - self.assertEqual(paginate_lst_mock.return_value[1], next_name) diff --git a/trove/tests/unittests/guestagent/test_manager.py b/trove/tests/unittests/guestagent/test_manager.py deleted file mode 100644 index d34edb6ba2..0000000000 --- a/trove/tests/unittests/guestagent/test_manager.py +++ /dev/null @@ -1,556 +0,0 @@ -# Copyright 2015 Tesora Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import getpass -import os - -from mock import ANY -from mock import DEFAULT -from mock import MagicMock -from mock import Mock -from mock import patch -from oslo_utils import encodeutils -from proboscis.asserts import assert_equal -from proboscis.asserts import assert_is_none -from proboscis.asserts import assert_true - -from trove.common.context import TroveContext -from trove.common import exception -from trove.guestagent.common import operating_system -from trove.guestagent.datastore import manager -from trove.guestagent import guest_log -from trove.guestagent.module import module_manager -from trove import rpc -from trove.tests.unittests import trove_testtools - - -class MockManager(manager.Manager): - - def __init__(self): - super(MockManager, self).__init__('mysql') - self._app = MagicMock() - self._status = MagicMock() - self._configuration_manager = MagicMock() - - @property - def app(self): - return self._app - - @property - def status(self): - return self._status - - @property - def configuration_manager(self): - return self._configuration_manager - - def prepare(self, *args): - args[0].notification = MagicMock() - with patch.object(rpc, 'get_client'): - return super(MockManager, self).prepare(*args) - - -class ManagerTest(trove_testtools.TestCase): - - def setUp(self): - super(ManagerTest, self).setUp() - - self.chmod_patch = patch.object(operating_system, 'chmod') - self.chmod_mock = self.chmod_patch.start() - self.addCleanup(self.chmod_patch.stop) - - self.manager = MockManager() - self.context = TroveContext() - - self.log_name_sys = 'guest' - self.log_name_user = 'general' - self.prefix = 'log_prefix' - self.container = 'log_container' - self.size = 1024 - self.published = 128 - self.guest_log_user = guest_log.GuestLog( - self.context, self.log_name_user, guest_log.LogType.USER, None, - '/tmp/gen.log', True) - self.guest_log_sys = guest_log.GuestLog( - self.context, self.log_name_sys, guest_log.LogType.SYS, None, - '/tmp/guest.log', True) - for gl in [self.guest_log_user, self.guest_log_sys]: - gl._container_name = self.container - gl._refresh_details = MagicMock() - gl._log_rotated = MagicMock(return_value=False) - gl._publish_to_container = MagicMock() - gl._delete_log_components = MagicMock() - gl._object_prefix = MagicMock(return_value=self.prefix) - gl._size = self.size - gl._published_size = self.published - self.manager._guest_log_cache = { - self.log_name_user: self.guest_log_user, - self.log_name_sys: self.guest_log_sys} - self.expected_details_user = { - 'status': 'Disabled', - 'prefix': self.prefix, - 'container': self.container, - 'name': self.log_name_user, - 'published': self.published, - 'metafile': self.prefix + '_metafile', - 'type': 'USER', - 'pending': self.size - self.published} - self.expected_details_sys = dict(self.expected_details_user) - self.expected_details_sys['type'] = 'SYS' - self.expected_details_sys['status'] = 'Enabled' - self.expected_details_sys['name'] = self.log_name_sys - 
self.expected_module_details = { - 'name': 'mymod', - 'type': 'ping', - 'contents': 'e262cfe36134' - } - self.manager.module_manager = Mock() - - def tearDown(self): - super(ManagerTest, self).tearDown() - - def test_update_status(self): - self.manager._status.is_installed = True - self.manager._status._is_restarting = False - self.manager.update_status(self.context) - self.assertTrue(self.manager.status.set_status.called) - - def test_guest_log_list(self): - log_list = self.manager.guest_log_list(self.context) - expected = [self.expected_details_sys, self.expected_details_user] - assert_equal(self._flatten_list_of_dicts(expected), - self._flatten_list_of_dicts(log_list), - "Wrong list: %s (Expected: %s)" % ( - self._flatten_list_of_dicts(log_list), - self._flatten_list_of_dicts(expected))) - - def _flatten_list_of_dicts(self, lod): - value = sorted("".join("%s%s" % (k, d[k]) for k in sorted(d.keys())) - for d in lod) - return "".join(sorted(value)) - - def test_guest_log_action_enable_disable(self): - self.assertRaisesRegex(exception.BadRequest, - "Cannot enable and disable", - self.manager.guest_log_action, - self.context, - self.log_name_sys, - True, True, False, False) - - def test_guest_log_action_enable_sys(self): - self.assertRaisesRegex(exception.BadRequest, - "Cannot enable a SYSTEM log", - self.manager.guest_log_action, - self.context, - self.log_name_sys, - True, False, False, False) - - def test_guest_log_action_disable_sys(self): - self.assertRaisesRegex(exception.BadRequest, - "Cannot disable a SYSTEM log", - self.manager.guest_log_action, - self.context, - self.log_name_sys, - False, True, False, False) - - def test_guest_log_action_publish_sys(self): - with patch.object(os.path, 'isfile', return_value=True): - log_details = self.manager.guest_log_action(self.context, - self.log_name_sys, - False, False, - True, False) - assert_equal(log_details, self.expected_details_sys, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_sys)) - assert_equal( - 1, self.guest_log_sys._publish_to_container.call_count) - - def test_guest_log_action_discard_sys(self): - log_details = self.manager.guest_log_action(self.context, - self.log_name_sys, - False, False, - False, True) - assert_equal(log_details, self.expected_details_sys, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_sys)) - assert_equal( - 1, self.guest_log_sys._delete_log_components.call_count) - - def test_guest_log_action_enable_user(self): - with patch.object(manager.Manager, 'guest_log_enable', - return_value=False) as mock_enable: - log_details = self.manager.guest_log_action(self.context, - self.log_name_user, - True, False, - False, False) - assert_equal(log_details, self.expected_details_user, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_user)) - assert_equal(1, mock_enable.call_count) - - def test_guest_log_action_disable_user(self): - with patch.object(manager.Manager, 'guest_log_enable', - return_value=False) as mock_enable: - self.guest_log_user._enabled = True - log_details = self.manager.guest_log_action(self.context, - self.log_name_user, - False, True, - False, False) - assert_equal(log_details, self.expected_details_user, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_user)) - assert_equal(1, mock_enable.call_count) - - def test_guest_log_action_publish_user(self): - with patch.object(manager.Manager, 'guest_log_enable', - return_value=False) as mock_enable: - with patch.object(os.path, 'isfile', 
return_value=True): - log_details = self.manager.guest_log_action(self.context, - self.log_name_user, - False, False, - True, False) - assert_equal(log_details, self.expected_details_user, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_user)) - assert_equal(1, mock_enable.call_count) - - def test_guest_log_action_discard_user(self): - log_details = self.manager.guest_log_action(self.context, - self.log_name_user, - False, False, - False, True) - assert_equal(log_details, self.expected_details_user, - "Wrong details: %s (expected %s)" % - (log_details, self.expected_details_user)) - assert_equal(1, self.guest_log_user._delete_log_components.call_count) - - def test_set_guest_log_status_disabled(self): - data = [ - {'orig': guest_log.LogStatus.Enabled, - 'new': guest_log.LogStatus.Disabled, - 'expect': guest_log.LogStatus.Disabled}, - {'orig': guest_log.LogStatus.Restart_Required, - 'new': guest_log.LogStatus.Enabled, - 'expect': guest_log.LogStatus.Restart_Required}, - {'orig': guest_log.LogStatus.Restart_Required, - 'new': guest_log.LogStatus.Restart_Completed, - 'expect': guest_log.LogStatus.Restart_Completed}, - {'orig': guest_log.LogStatus.Published, - 'new': guest_log.LogStatus.Partial, - 'expect': guest_log.LogStatus.Partial}, - ] - for datum in data: - self.assert_guest_log_status(datum['orig'], - datum['new'], - datum['expect']) - - def assert_guest_log_status(self, original_status, new_status, - expected_final_status): - gl_cache = self.manager.get_guest_log_cache() - gl_cache[self.log_name_sys]._status = original_status - self.manager.set_guest_log_status(new_status, self.log_name_sys) - assert_equal(gl_cache[self.log_name_sys].status, expected_final_status, - "Unexpected status for '%s': %s' (Expected %s)" % - (self.log_name_sys, gl_cache[self.log_name_sys].status, - expected_final_status)) - - def test_build_log_file_name(self): - current_owner = getpass.getuser() - with patch.multiple(operating_system, - exists=MagicMock(return_value=False), - write_file=DEFAULT, - create_directory=DEFAULT, - chown=DEFAULT, - chmod=DEFAULT) as os_mocks: - log_file = self.manager.build_log_file_name(self.log_name_sys, - current_owner) - expected_filename = '%s/%s/%s-%s.log' % ( - self.manager.GUEST_LOG_BASE_DIR, - self.manager.GUEST_LOG_DATASTORE_DIRNAME, - self.manager.manager, self.log_name_sys) - expected_call_counts = {'exists': 1, - 'write_file': 1, - 'create_directory': 2, - 'chown': 1, - 'chmod': 1} - self.assert_build_log_file_name(expected_filename, log_file, - os_mocks, expected_call_counts) - - def assert_build_log_file_name(self, expected_filename, filename, - mocks, call_counts): - assert_equal(expected_filename, filename, - "Unexpected filename: %s (expected %s)" % - (filename, expected_filename)) - for key in mocks.keys(): - assert_true( - mocks[key].call_count == call_counts[key], - "%s called %d time(s)" % (key, mocks[key].call_count)) - - def test_build_log_file_name_with_dir(self): - current_owner = getpass.getuser() - log_dir = '/tmp' - with patch.multiple(operating_system, - exists=MagicMock(return_value=False), - write_file=DEFAULT, - create_directory=DEFAULT, - chown=DEFAULT, - chmod=DEFAULT) as os_mocks: - log_file = self.manager.build_log_file_name(self.log_name_sys, - current_owner, - datastore_dir=log_dir) - expected_filename = '%s/%s-%s.log' % ( - log_dir, - self.manager.manager, self.log_name_sys) - expected_call_counts = {'exists': 1, - 'write_file': 1, - 'create_directory': 1, - 'chown': 1, - 'chmod': 1} - 
self.assert_build_log_file_name(expected_filename, log_file, - os_mocks, expected_call_counts) - - def test_validate_log_file(self): - file_name = '/tmp/non-existent-file' - current_owner = getpass.getuser() - with patch.multiple(operating_system, - exists=MagicMock(return_value=False), - write_file=DEFAULT, - chown=DEFAULT, - chmod=DEFAULT) as os_mocks: - log_file = self.manager.validate_log_file(file_name, current_owner) - assert_equal(file_name, log_file, "Unexpected filename") - for key in os_mocks.keys(): - assert_true(os_mocks[key].call_count == 1, - "%s not called" % key) - - def test_prepare_single(self): - self.run_prepare_test(cluster_config=None) - - def test_prepare_single_no_users(self): - self.run_prepare_test(cluster_config=None, users=None) - - def test_prepare_single_no_databases(self): - self.run_prepare_test(cluster_config=None, databases=None) - - def test_prepare_single_no_root_password(self): - self.run_prepare_test(cluster_config=None, root_password=None) - - def test_prepare_cluster(self): - self.run_prepare_test() - - def run_prepare_test(self, packages=Mock(), databases=Mock(), - memory_mb=Mock(), users=Mock(), - device_path=Mock(), mount_point=Mock(), - backup_info=Mock(), config_contents=Mock(), - root_password=Mock(), overrides=Mock(), - cluster_config=Mock(), snapshot=Mock()): - self._assert_prepare(self.context, packages, databases, memory_mb, - users, device_path, mount_point, backup_info, - config_contents, root_password, overrides, - cluster_config, snapshot) - - def _assert_prepare(self, context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, config_contents, - root_password, overrides, cluster_config, snapshot): - - is_error_expected = False - is_post_process_expected = cluster_config is not None - - with patch.multiple(self.manager, - do_prepare=DEFAULT, post_prepare=DEFAULT, - apply_overrides_on_prepare=DEFAULT, - enable_root_on_prepare=DEFAULT, - create_database=DEFAULT, create_user=DEFAULT): - self.manager.prepare( - context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, config_contents, - root_password, overrides, cluster_config, snapshot) - - self.manager.status.begin_install.assert_called_once_with() - self.manager.do_prepare.assert_called_once_with( - context, - packages, - databases, - memory_mb, - users, - device_path, - mount_point, - backup_info, - config_contents, - root_password, - overrides, - cluster_config, - snapshot) - self.manager.apply_overrides_on_prepare.assert_called_once_with( - context, - overrides) - self.manager.status.end_install( - error_occurred=is_error_expected, - post_processing=is_post_process_expected) - self.manager.post_prepare.assert_called_once_with( - context, - packages, - databases, - memory_mb, - users, - device_path, - mount_point, - backup_info, - config_contents, - root_password, - overrides, - cluster_config, - snapshot) - - if not is_post_process_expected: - if databases: - self.manager.create_database.assert_called_once_with( - context, - databases) - else: - self.assertEqual( - 0, self.manager.create_database.call_count) - if users: - self.manager.create_user.assert_called_once_with( - context, - users) - else: - self.assertEqual(0, self.manager.create_user.call_count) - if not backup_info and root_password: - (self.manager.enable_root_on_prepare. 
- assert_called_once_with(context, root_password)) - else: - self.assertEqual( - 0, self.manager.enable_root_on_prepare.call_count) - else: - self.assertEqual(0, self.manager.create_database.call_count) - self.assertEqual(0, self.manager.create_user.call_count) - self.assertEqual( - 0, self.manager.enable_root_on_prepare.call_count) - - def test_apply_overrides_on_prepare(self): - overrides = Mock() - with patch.multiple(self.manager, - update_overrides=DEFAULT, restart=DEFAULT): - self.manager.apply_overrides_on_prepare(self.context, overrides) - - self.manager.update_overrides.assert_called_once_with( - self.context, overrides) - self.manager.restart.assert_called_once_with(self.context) - - @patch('trove.guestagent.datastore.manager.LOG') - def test_apply_overrides_on_prepare_failure(self, mock_logging): - packages = Mock() - databases = Mock() - memory_mb = Mock() - users = Mock() - device_path = Mock() - mount_point = Mock() - backup_info = Mock() - config_contents = Mock() - root_password = Mock() - overrides = Mock() - cluster_config = Mock() - snapshot = Mock() - - expected_failure = Exception("Error in 'apply_overrides_on_prepare'.") - - with patch.multiple( - self.manager, do_prepare=DEFAULT, - apply_overrides_on_prepare=MagicMock( - side_effect=expected_failure - )): - expected_msg = encodeutils.exception_to_unicode(expected_failure) - self.assertRaisesRegex( - Exception, expected_msg, - self.manager.prepare, - self.context, packages, databases, memory_mb, users, - device_path, mount_point, backup_info, config_contents, - root_password, overrides, cluster_config, snapshot) - - self.manager.status.begin_install.assert_called_once_with() - self.manager.status.end_install( - error_occurred=True, - post_processing=ANY) - - @patch.object(operating_system, 'copy') - @patch.object(operating_system, 'chown') - def test_restore_directory_with_owner(self, chown_mock, copy_mock): - restore_dir = '/restore_directory' - restore_files = '/restore_directory/.' - target_dir = '/target_directory' - owner = 'owner' - self.manager._restore_directory(restore_dir, target_dir, owner) - copy_mock.assert_called_once_with(restore_files, target_dir, - preserve=True, as_root=True) - chown_mock.assert_called_once_with(path=target_dir, user=owner, - group=owner, recursive=True, - as_root=True) - - @patch.object(operating_system, 'copy') - @patch.object(operating_system, 'chown') - def test_restore_directory_without_owner(self, chown_mock, copy_mock): - restore_dir = '/restore_directory' - restore_files = '/restore_directory/.' 
- target_dir = '/target_directory' - self.manager._restore_directory(restore_dir, target_dir) - copy_mock.assert_called_once_with(restore_files, target_dir, - preserve=True, as_root=True) - chown_mock.assert_not_called() - - @patch.object(manager.Manager, '_restore_directory') - @patch.object(operating_system, 'get_current_user', return_value='trove') - def test_restore_home_directory(self, os_mock, restore_mock): - saved_home_dir = '/old_home' - with patch.object(os.path, 'expanduser', return_value='/home/trove'): - self.manager._restore_home_directory(saved_home_dir) - os_mock.assert_any_call() - restore_mock.assert_called_once_with(restore_dir=saved_home_dir, - target_dir='/home/trove', - owner='trove') - - def test_module_list(self): - with patch.object(module_manager.ModuleManager, 'read_module_results', - return_value=[ - self.expected_module_details]) as mock_rmr: - module_list = self.manager.module_list(self.context) - expected = [self.expected_module_details] - assert_equal(self._flatten_list_of_dicts(expected), - self._flatten_list_of_dicts(module_list), - "Wrong list: %s (Expected: %s)" % ( - self._flatten_list_of_dicts(module_list), - self._flatten_list_of_dicts(expected))) - assert_equal(1, mock_rmr.call_count) - - def test_module_apply(self): - with patch.object( - module_manager.ModuleManager, 'apply_module', - return_value=[self.expected_module_details]) as mock_am: - module_details = self.manager.module_apply( - self.context, - [{'module': self.expected_module_details}]) - assert_equal([[self.expected_module_details]], module_details) - assert_equal(1, mock_am.call_count) - - def test_module_remove(self): - with patch.object( - module_manager.ModuleManager, 'remove_module', - return_value=[self.expected_module_details]) as mock_rm: - module_details = self.manager.module_remove( - self.context, - {'module': self.expected_module_details}) - assert_is_none(module_details) - assert_equal(1, mock_rm.call_count) diff --git a/trove/tests/unittests/guestagent/test_mariadb_manager.py b/trove/tests/unittests/guestagent/test_mariadb_manager.py deleted file mode 100644 index 2c76ab6a71..0000000000 --- a/trove/tests/unittests/guestagent/test_mariadb_manager.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright [2015] Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
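ManagerTest above leans on patch.multiple(..., name=DEFAULT) to patch several operating_system helpers at once; used as a context manager, patch.multiple yields a dict of the DEFAULT-created mocks keyed by attribute name, while explicitly supplied replacements are not included in that dict. A compact sketch with an invented FileOps helper (not Trove code):

    import unittest
    from unittest.mock import patch, DEFAULT, MagicMock


    class FileOps(object):
        """Invented helper standing in for an operating_system module."""

        @staticmethod
        def exists(path):
            return True

        @staticmethod
        def write_file(path, contents):
            raise IOError('should be patched out in tests')


    def ensure_file(path):
        # Create the file only if it does not already exist.
        if not FileOps.exists(path):
            FileOps.write_file(path, '')
        return path


    class PatchMultipleTest(unittest.TestCase):

        def test_missing_file_is_created(self):
            with patch.multiple(FileOps,
                                exists=MagicMock(return_value=False),
                                write_file=DEFAULT) as mocks:
                ensure_file('/tmp/example')
            # Only DEFAULT-patched attributes appear in the mocks dict.
            mocks['write_file'].assert_called_once_with('/tmp/example', '')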
- -from mock import MagicMock -from mock import patch - -from trove.guestagent.datastore.experimental.mariadb import ( - manager as mariadb_manager) -from trove.guestagent.datastore.experimental.mariadb import ( - service as mariadb_service) -from trove.guestagent.datastore.mysql_common import service as mysql_service -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest - - -class GuestAgentManagerTest(DatastoreManagerTest): - - def setUp(self): - super(GuestAgentManagerTest, self).setUp('mariadb') - self.manager = mariadb_manager.Manager() - patcher_rs = patch( - 'trove.guestagent.strategies.replication.get_instance') - patcher_rs.start() - self.addCleanup(patcher_rs.stop) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(mariadb_service.MariaDBApp, 'install_cluster', - new_callable=MagicMock) - def test_install_cluster(self, install_cluster, app_status_get): - install_cluster.return_value = MagicMock() - app_status_get.return_value = None - - replication_user = "repuser" - configuration = "configuration" - bootstrap = True - self.manager.install_cluster(self.context, replication_user, - configuration, bootstrap) - app_status_get.assert_any_call() - install_cluster.assert_called_with( - replication_user, configuration, bootstrap) - - @patch.object(mysql_service.BaseMySqlAppStatus, 'get', - new_callable=MagicMock) - @patch.object(mariadb_service.MariaDBApp, 'reset_admin_password', - new_callable=MagicMock) - def test_reset_admin_password(self, reset_admin_password, app_status_get): - reset_admin_password.return_value = None - app_status_get.return_value = MagicMock() - - admin_password = "password" - self.manager.reset_admin_password(self.context, admin_password) - app_status_get.assert_any_call() - reset_admin_password.assert_called_with(admin_password) diff --git a/trove/tests/unittests/guestagent/test_models.py b/trove/tests/unittests/guestagent/test_models.py deleted file mode 100644 index 6e45834be6..0000000000 --- a/trove/tests/unittests/guestagent/test_models.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
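AgentHeartBeatTest below saves original module attributes in setUp and restores them by hand in tearDown. The patcher-plus-addCleanup idiom used by other tests in this change achieves the same isolation with less bookkeeping; a small sketch against the standard library's time module:

    import time
    import unittest
    from unittest.mock import patch


    class CleanupPatchTest(unittest.TestCase):

        def setUp(self):
            super(CleanupPatchTest, self).setUp()
            # start() activates the patch; addCleanup guarantees stop()
            # runs even if later setUp steps or the test body raise.
            patcher = patch.object(time, 'time', return_value=1234.0)
            self.time_mock = patcher.start()
            self.addCleanup(patcher.stop)

        def test_time_is_patched(self):
            self.assertEqual(1234.0, time.time())

        def test_patch_is_per_test(self):
            self.time_mock.return_value = 5678.0
            self.assertEqual(5678.0, time.time())

addCleanup callbacks run in LIFO order and fire even when the test fails partway, which is why this idiom is generally safer than hand-written tearDown restoration.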
- -from datetime import datetime - -from mock import Mock, MagicMock, patch - -from trove.common import timeutils -from trove.common import utils -from trove.db import models as dbmodels -from trove.db.sqlalchemy import api as dbapi -from trove.guestagent import models -from trove.tests.unittests import trove_testtools - - -class AgentHeartBeatTest(trove_testtools.TestCase): - def setUp(self): - super(AgentHeartBeatTest, self).setUp() - self.origin_get_db_api = dbmodels.get_db_api - self.origin_utcnow = timeutils.utcnow - self.origin_db_api_save = dbapi.save - self.origin_is_valid = dbmodels.DatabaseModelBase.is_valid - self.origin_generate_uuid = utils.generate_uuid - - def tearDown(self): - super(AgentHeartBeatTest, self).tearDown() - dbmodels.get_db_api = self.origin_get_db_api - timeutils.utcnow = self.origin_utcnow - dbapi.save = self.origin_db_api_save - dbmodels.DatabaseModelBase.is_valid = self.origin_is_valid - utils.generate_uuid = self.origin_generate_uuid - - def test_create(self): - utils.generate_uuid = Mock() - dbapi.save = MagicMock( - return_value=dbmodels.DatabaseModelBase) - dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True) - models.AgentHeartBeat.create() - self.assertEqual(1, utils.generate_uuid.call_count) - self.assertEqual(3, - dbmodels.DatabaseModelBase.is_valid.call_count) - - @patch('trove.db.models.DatabaseModelBase') - def test_save(self, dmb_mock): - timeutils.utcnow = Mock() - dbmodels.get_db_api = MagicMock( - return_value=dbmodels.DatabaseModelBase) - dbapi.save = Mock() - dbmodels.DatabaseModelBase.is_valid = Mock(return_value=True) - self.heartBeat = models.AgentHeartBeat() - self.heartBeat.save() - self.assertEqual(1, timeutils.utcnow.call_count) - - def test_is_active(self): - models.AGENT_HEARTBEAT = 10000000000 - mock = models.AgentHeartBeat() - models.AgentHeartBeat.__setitem__(mock, 'updated_at', datetime.now()) - self.assertTrue(models.AgentHeartBeat.is_active(mock)) diff --git a/trove/tests/unittests/guestagent/test_mysql_manager.py b/trove/tests/unittests/guestagent/test_mysql_manager.py deleted file mode 100644 index 71556d6710..0000000000 --- a/trove/tests/unittests/guestagent/test_mysql_manager.py +++ /dev/null @@ -1,718 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from mock import DEFAULT -from mock import MagicMock -from mock import patch -from proboscis.asserts import assert_equal -from testtools.matchers import Is, Equals, Not - -from trove.common.db.mysql import models -from trove.common.exception import InsufficientSpaceForReplica -from trove.common.exception import ProcessExecutionError -from trove.common import instance as rd_instance -from trove.guestagent import backup -from trove.guestagent.common import operating_system -# TODO(atomic77) The test cases should be made configurable -# to make it easier to test the various derived datastores. 
-from trove.guestagent.datastore.mysql.manager import Manager -import trove.guestagent.datastore.mysql.service as dbaas -from trove.guestagent import dbaas as base_dbaas -from trove.guestagent import pkg -from trove.guestagent import volume -from trove.guestagent.volume import VolumeDevice -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest -from trove.tests.unittests import trove_testtools - - -class GuestAgentManagerTest(DatastoreManagerTest): - - def setUp(self): - super(GuestAgentManagerTest, self).setUp('mysql') - self.context = trove_testtools.TroveTestContext(self) - self.replication_strategy = 'MysqlGTIDReplication' - self.patch_rs = patch( - 'trove.guestagent.strategies.replication.get_strategy', - return_value=self.replication_strategy) - self.mock_rs = self.patch_rs.start() - self.addCleanup(self.patch_rs.stop) - self.manager = Manager() - self.origin_MySqlAppStatus = dbaas.MySqlAppStatus.get - self.origin_os_path_exists = os.path.exists - self.origin_format = volume.VolumeDevice.format - self.origin_migrate_data = volume.VolumeDevice.migrate_data - self.origin_mount = volume.VolumeDevice.mount - self.origin_unmount = volume.VolumeDevice.unmount - self.origin_mount_points = volume.VolumeDevice.mount_points - self.origin_stop_mysql = dbaas.MySqlApp.stop_db - self.origin_start_mysql = dbaas.MySqlApp.start_mysql - self.origin_update_overrides = dbaas.MySqlApp.update_overrides - self.origin_install_if_needed = dbaas.MySqlApp.install_if_needed - self.origin_secure = dbaas.MySqlApp.secure - self.origin_secure_root = dbaas.MySqlApp.secure_root - self.origin_pkg_is_installed = pkg.Package.pkg_is_installed - self.origin_os_path_exists = os.path.exists - self.origin_chown = operating_system.chown - # set up common mock objects, etc. 
for replication testing - self.patcher_gfvs = patch( - 'trove.guestagent.dbaas.get_filesystem_volume_stats') - self.patcher_rs = patch( - 'trove.guestagent.strategies.replication.get_instance') - self.mock_gfvs_class = self.patcher_gfvs.start() - self.mock_rs_class = self.patcher_rs.start() - - def tearDown(self): - super(GuestAgentManagerTest, self).tearDown() - dbaas.MySqlAppStatus.get = self.origin_MySqlAppStatus - os.path.exists = self.origin_os_path_exists - volume.VolumeDevice.format = self.origin_format - volume.VolumeDevice.migrate_data = self.origin_migrate_data - volume.VolumeDevice.mount = self.origin_mount - volume.VolumeDevice.unmount = self.origin_unmount - volume.VolumeDevice.mount_points = self.origin_mount_points - dbaas.MySqlApp.stop_db = self.origin_stop_mysql - dbaas.MySqlApp.start_mysql = self.origin_start_mysql - dbaas.MySqlApp.update_overrides = self.origin_update_overrides - dbaas.MySqlApp.install_if_needed = self.origin_install_if_needed - dbaas.MySqlApp.secure = self.origin_secure - dbaas.MySqlApp.secure_root = self.origin_secure_root - operating_system.chown = self.origin_chown - pkg.Package.pkg_is_installed = self.origin_pkg_is_installed - os.path.exists = self.origin_os_path_exists - # teardown the replication mock objects - self.patcher_gfvs.stop() - self.patcher_rs.stop() - - def test_update_status(self): - mock_status = MagicMock() - mock_status.is_installed = True - mock_status._is_restarting = False - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - self.manager.update_status(self.context) - self.assertTrue(mock_status.set_status.called) - - def _empty_user(self): - return models.MySQLUser(deserializing=True) - - def test_valid_host_name(self): - test_host = "192.58.197.0/255.255.255.0" - user = self._empty_user() - user.host = test_host - self.assertEqual(test_host, user.host) - - @patch.object(dbaas.MySqlAdmin, 'create_database') - def test_create_database(self, create_db_mock): - self.manager.create_database(self.context, ['db1']) - create_db_mock.assert_any_call(['db1']) - - @patch.object(dbaas.MySqlAdmin, 'create_user') - def test_create_user(self, create_user_mock): - self.manager.create_user(self.context, ['user1']) - create_user_mock.assert_any_call(['user1']) - - @patch.object(dbaas.MySqlAdmin, 'delete_database') - def test_delete_database(self, delete_database_mock): - databases = ['db1'] - self.manager.delete_database(self.context, databases) - delete_database_mock.assert_any_call(databases) - - @patch.object(dbaas.MySqlAdmin, 'delete_user') - def test_delete_user(self, delete_user_mock): - user = ['user1'] - self.manager.delete_user(self.context, user) - delete_user_mock.assert_any_call(user) - - @patch.object(dbaas.MySqlAdmin, 'grant_access') - def test_grant_access(self, grant_access_mock): - username = "test_user" - hostname = "test_host" - databases = ["test_database"] - self.manager.grant_access(self.context, - username, - hostname, - databases) - - grant_access_mock.assert_any_call(username, - hostname, - databases) - - @patch.object(dbaas.MySqlAdmin, 'list_databases', - return_value=['database1']) - def test_list_databases(self, list_databases_mock): - databases = self.manager.list_databases(self.context) - self.assertThat(databases, Not(Is(None))) - self.assertThat(databases, Equals(list_databases_mock.return_value)) - list_databases_mock.assert_any_call(None, None, False) - - @patch.object(dbaas.MySqlAdmin, 'list_users', return_value=['user1']) - def test_list_users(self, list_users_mock): - users = 
self.manager.list_users(self.context) - self.assertThat(users, Equals(list_users_mock.return_value)) - dbaas.MySqlAdmin.list_users.assert_any_call(None, None, False) - - @patch.object(dbaas.MySqlAdmin, 'get_user', return_value=['user1']) - def test_get_users(self, get_user_mock): - username = ['user1'] - hostname = ['host'] - users = self.manager.get_user(self.context, username, hostname) - self.assertThat(users, Equals(get_user_mock.return_value)) - get_user_mock.assert_any_call(username, hostname) - - @patch.object(dbaas.MySqlAdmin, 'enable_root', - return_value='user_id_stuff') - def test_enable_root(self, enable_root_mock): - user_id = self.manager.enable_root(self.context) - self.assertThat(user_id, Is(enable_root_mock.return_value)) - enable_root_mock.assert_any_call() - - @patch.object(dbaas.MySqlAdmin, 'disable_root') - def test_disable_root(self, disable_root_mock): - self.manager.disable_root(self.context) - disable_root_mock.assert_any_call() - - @patch.object(dbaas.MySqlAdmin, 'is_root_enabled', return_value=True) - def test_is_root_enabled(self, is_root_enabled_mock): - is_enabled = self.manager.is_root_enabled(self.context) - self.assertThat(is_enabled, Is(is_root_enabled_mock.return_value)) - is_root_enabled_mock.assert_any_call() - - @patch.object(backup, 'backup') - def test_create_backup(self, backup_mock): - # entry point - Manager().create_backup(self.context, 'backup_id_123') - # assertions - backup_mock.assert_any_call(self.context, 'backup_id_123') - - def test_prepare_device_path_true(self): - self._prepare_dynamic() - - def test_prepare_device_path_false(self): - self._prepare_dynamic(device_path=None) - - def test_prepare_device_path_mounted(self): - self._prepare_dynamic(is_mounted=True) - - def test_prepare_mysql_not_installed(self): - self._prepare_dynamic(is_mysql_installed=False) - - def test_prepare_mysql_from_backup(self): - self._prepare_dynamic(backup_id='backup_id_123abc') - - def test_prepare_mysql_from_backup_with_root(self): - self._prepare_dynamic(backup_id='backup_id_123abc', - is_root_enabled=True) - - def test_prepare_mysql_with_root_password(self): - self._prepare_dynamic(root_password='some_password') - - def test_prepare_mysql_with_users_and_databases(self): - self._prepare_dynamic(databases=['db1'], users=['user1']) - - def test_prepare_mysql_with_snapshot(self): - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': 1.0}, - 'config': None} - total_size = snapshot['dataset']['dataset_size'] + 1 - self.mock_gfvs_class.return_value = {'total': total_size} - self._prepare_dynamic(snapshot=snapshot) - - @patch.multiple(dbaas.MySqlAdmin, - create_user=DEFAULT, - create_database=DEFAULT, - enable_root=DEFAULT) - @patch.object(backup, 'restore') - def _prepare_dynamic(self, restore_mock, create_user, create_database, - enable_root, - device_path='/dev/vdb', - is_mysql_installed=True, - backup_id=None, is_root_enabled=False, - root_password=None, overrides=None, is_mounted=False, - databases=None, users=None, snapshot=None): - # covering all outcomes is starting to cause trouble here - COUNT = 1 if device_path else 0 - backup_info = None - if backup_id is not None: - backup_info = {'id': backup_id, - 'location': 'fake-location', - 'type': 'InnoBackupEx', - 'checksum': 'fake-checksum', - } - - # TODO(juice): this should stub an instance of the MySqlAppStatus - mock_status = MagicMock() - - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - mock_status.begin_install = MagicMock(return_value=None) - 
VolumeDevice.format = MagicMock(return_value=None) - VolumeDevice.migrate_data = MagicMock(return_value=None) - VolumeDevice.mount = MagicMock(return_value=None) - mount_points = [] - if is_mounted: - mount_points = ['/mnt'] - VolumeDevice.mount_points = MagicMock(return_value=mount_points) - VolumeDevice.unmount = MagicMock(return_value=None) - set_data_dir_patcher = patch.object(dbaas.MySqlApp, 'set_data_dir', - return_value='/var/lib/mysql') - self.addCleanup(set_data_dir_patcher.stop) - set_data_dir_patcher.start() - dbaas.MySqlApp.stop_db = MagicMock(return_value=None) - dbaas.MySqlApp.start_mysql = MagicMock(return_value=None) - dbaas.MySqlApp.update_overrides = MagicMock(return_value=None) - dbaas.MySqlApp.install_if_needed = MagicMock(return_value=None) - dbaas.MySqlApp.secure = MagicMock(return_value=None) - dbaas.MySqlApp.secure_root = MagicMock(return_value=None) - pkg.Package.pkg_is_installed = MagicMock( - return_value=is_mysql_installed) - operating_system.chown = MagicMock(return_value=None) - os.path.exists = MagicMock(return_value=True) - mock_replication = MagicMock() - mock_replication.enable_as_slave = MagicMock() - self.mock_rs_class.return_value = mock_replication - - with patch.object(dbaas.MySqlAdmin, 'is_root_enabled', - return_value=is_root_enabled): - self.manager.prepare(context=self.context, - packages=None, - memory_mb='2048', - databases=databases, - users=users, - device_path=device_path, - mount_point='/var/lib/mysql', - backup_info=backup_info, - root_password=root_password, - overrides=overrides, - cluster_config=None, - snapshot=snapshot) - - # verification/assertion - mock_status.begin_install.assert_any_call() - - self.assertEqual(COUNT, VolumeDevice.format.call_count) - self.assertEqual(COUNT, VolumeDevice.migrate_data.call_count) - self.assertEqual(COUNT, VolumeDevice.mount_points.call_count) - self.assertEqual(COUNT, dbaas.MySqlApp.stop_db.call_count) - if is_mounted: - self.assertEqual(1, VolumeDevice.unmount.call_count) - else: - self.assertEqual(0, VolumeDevice.unmount.call_count) - if backup_info: - restore_mock.assert_any_call(self.context, - backup_info, - '/var/lib/mysql/data') - dbaas.MySqlApp.install_if_needed.assert_any_call(None) - # We don't need to make sure the exact contents are there - dbaas.MySqlApp.secure.assert_any_call(None) - dbaas.MySqlApp.secure_root.assert_any_call( - secure_remote_root=not is_root_enabled) - - if root_password: - dbaas.MySqlAdmin.enable_root.assert_any_call(root_password) - if databases: - dbaas.MySqlAdmin.create_database.assert_any_call(databases) - else: - self.assertFalse(dbaas.MySqlAdmin.create_database.called) - - if users: - dbaas.MySqlAdmin.create_user.assert_any_call(users) - else: - self.assertFalse(dbaas.MySqlAdmin.create_user.called) - - if snapshot: - self.assertEqual(1, mock_replication.enable_as_slave.call_count) - else: - self.assertEqual(0, mock_replication.enable_as_slave.call_count) - - def test_get_replication_snapshot(self): - mock_status = MagicMock() - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - - snapshot_id = 'my_snapshot_id' - log_position = 123456789 - master_ref = 'my_master' - used_size = 1.0 - total_size = 2.0 - - mock_replication = MagicMock() - mock_replication.enable_as_master = MagicMock() - mock_replication.snapshot_for_replication = MagicMock( - return_value=(snapshot_id, log_position)) - mock_replication.get_master_ref = MagicMock( - return_value=master_ref) - self.mock_rs_class.return_value = mock_replication - self.mock_gfvs_class.return_value = ( - 
{'used': used_size, 'total': total_size}) - - expected_replication_snapshot = { - 'dataset': { - 'datastore_manager': self.manager.manager, - 'dataset_size': used_size, - 'volume_size': total_size, - 'snapshot_id': snapshot_id - }, - 'replication_strategy': self.replication_strategy, - 'master': master_ref, - 'log_position': log_position - } - - snapshot_info = None - replica_source_config = None - # entry point - replication_snapshot = ( - self.manager.get_replication_snapshot(self.context, snapshot_info, - replica_source_config)) - # assertions - self.assertEqual(expected_replication_snapshot, replication_snapshot) - self.assertEqual(1, mock_replication.enable_as_master.call_count) - self.assertEqual( - 1, mock_replication.snapshot_for_replication.call_count) - self.assertEqual(1, mock_replication.get_master_ref.call_count) - - def test_attach_replication_slave_valid(self): - mock_status = MagicMock() - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - - total_size = 2.0 - dataset_size = 1.0 - - mock_replication = MagicMock() - mock_replication.enable_as_slave = MagicMock() - self.mock_rs_class.return_value = mock_replication - self.mock_gfvs_class.return_value = {'total': total_size} - - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': dataset_size}} - - # entry point - self.manager.attach_replica(self.context, snapshot, None) - # assertions - self.assertEqual(1, mock_replication.enable_as_slave.call_count) - - @patch('trove.guestagent.datastore.mysql_common.manager.LOG') - def test_attach_replication_slave_invalid(self, *args): - mock_status = MagicMock() - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - - total_size = 2.0 - dataset_size = 3.0 - - mock_replication = MagicMock() - mock_replication.enable_as_slave = MagicMock() - self.mock_rs_class.return_value = mock_replication - self.mock_gfvs_class.return_value = {'total': total_size} - - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': dataset_size}} - - # entry point - self.assertRaises(InsufficientSpaceForReplica, - self.manager.attach_replica, - self.context, snapshot, None) - # assertions - self.assertEqual(0, mock_replication.enable_as_slave.call_count) - - def test_detach_replica(self): - mock_status = MagicMock() - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - - mock_replication = MagicMock() - mock_replication.detach_slave = MagicMock() - self.mock_rs_class.return_value = mock_replication - - # entry point - self.manager.detach_replica(self.context) - # assertions - self.assertEqual(1, mock_replication.detach_slave.call_count) - - def test_demote_replication_master(self): - mock_status = MagicMock() - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - - mock_replication = MagicMock() - mock_replication.demote_master = MagicMock() - self.mock_rs_class.return_value = mock_replication - - # entry point - self.manager.demote_replication_master(self.context) - # assertions - self.assertEqual(1, mock_replication.demote_master.call_count) - - def test_get_master_UUID(self): - app = dbaas.MySqlApp(None) - - def test_case(slave_status, expected_value): - with patch.object(dbaas.MySqlApp, '_get_slave_status', - return_value=slave_status): - assert_equal(app._get_master_UUID(), expected_value) - - test_case({'Master_UUID': '2a5b-2064-32fb'}, '2a5b-2064-32fb') - test_case({'Master_UUID': ''}, None) - test_case({}, None) - - def test_get_last_txn(self): - - def test_case(gtid_list, 
expected_value): - with patch.object(dbaas.MySqlApp, '_get_gtid_executed', - return_value=gtid_list): - txn = self.manager.get_last_txn(self.context) - assert_equal(txn, expected_value) - - with patch.object(dbaas.MySqlApp, '_get_slave_status', - return_value={'Master_UUID': '2a5b-2064-32fb'}): - test_case('2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) - test_case('2a5b-2064-32fb:1-5', ('2a5b-2064-32fb', 5)) - test_case('2a5b-2064-32fb:1,4b4-23:5', ('2a5b-2064-32fb', 1)) - test_case('4b4-23:5,2a5b-2064-32fb:1', ('2a5b-2064-32fb', 1)) - test_case('4b-23:5,2a5b-2064-32fb:1,25:3-4', ('2a5b-2064-32fb', 1)) - test_case('4b4-23:1-5,2a5b-2064-32fb:1-10', ('2a5b-2064-32fb', 10)) - - with patch.object(dbaas.MySqlApp, '_get_slave_status', - return_value={'Master_UUID': ''}): - test_case('2a5b-2064-32fb:1', (None, 0)) - - with patch.object(dbaas.MySqlApp, '_get_slave_status', - return_value={}): - test_case('2a5b-2064-32fb:1', (None, 0)) - - def test_rpc_ping(self): - self.assertTrue(self.manager.rpc_ping(self.context)) - - @patch.object(dbaas.MySqlAdmin, 'change_passwords') - def test_change_passwords(self, change_passwords_mock): - self.manager.change_passwords( - self.context, [{'name': 'test_user', 'password': 'testpwd'}]) - change_passwords_mock.assert_any_call( - [{'name': 'test_user', 'password': 'testpwd'}]) - - @patch.object(dbaas.MySqlAdmin, 'update_attributes') - def test_update_attributes(self, update_attr_mock): - self.manager.update_attributes(self.context, 'test_user', '%', - {'password': 'testpwd'}) - update_attr_mock.assert_any_call('test_user', '%', - {'password': - 'testpwd'}) - - @patch.object(dbaas.MySqlApp, 'reset_configuration') - def test_reset_configuration(self, reset_config_mock): - dbaas.MySqlAppStatus.get = MagicMock(return_value=MagicMock()) - configuration = {'config_contents': 'some junk'} - self.manager.reset_configuration(self.context, configuration) - dbaas.MySqlAppStatus.get.assert_any_call() - reset_config_mock.assert_any_call({'config_contents': 'some junk'}) - - @patch.object(dbaas.MySqlAdmin, 'revoke_access') - def test_revoke_access(self, revoke_access_mock): - self.manager.revoke_access(self.context, 'test_user', '%', 'test_db') - revoke_access_mock.assert_any_call('test_user', '%', 'test_db') - - @patch.object(dbaas.MySqlAdmin, 'list_access', return_value=['database1']) - def test_list_access(self, list_access_mock): - access = self.manager.list_access(self.context, 'test_user', '%') - self.assertEqual(list_access_mock.return_value, access) - list_access_mock.assert_any_call('test_user', '%') - - @patch.object(dbaas.MySqlApp, 'restart') - def test_restart(self, restart_mock): - mock_status = MagicMock() - self.manager.appStatus = mock_status - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - self.manager.restart(self.context) - dbaas.MySqlAppStatus.get.assert_any_call() - restart_mock.assert_any_call() - - @patch.object(dbaas.MySqlApp, 'start_db_with_conf_changes') - def test_start_db_with_conf_changes(self, start_db_mock): - mock_status = MagicMock() - configuration = {'config_contents': 'some junk'} - self.manager.appStatus = mock_status - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - self.manager.start_db_with_conf_changes(self.context, configuration) - dbaas.MySqlAppStatus.get.assert_any_call() - start_db_mock.assert_any_call({'config_contents': 'some junk'}) - - def test_stop_db(self): - mock_status = MagicMock() - self.manager.appStatus = mock_status - dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status) - 
-        dbaas.MySqlApp.stop_db = MagicMock(return_value=None)
-        self.manager.stop_db(self.context)
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        dbaas.MySqlApp.stop_db.assert_any_call(do_not_start_on_reboot=False)
-
-    def test_get_filesystem_stats(self):
-        with patch.object(base_dbaas, 'get_filesystem_volume_stats'):
-            self.manager.get_filesystem_stats(self.context, '/var/lib/mysql')
-            base_dbaas.get_filesystem_volume_stats.assert_any_call(
-                '/var/lib/mysql')
-
-    def test_mount_volume(self):
-        with patch.object(volume.VolumeDevice, 'mount', return_value=None):
-            self.manager.mount_volume(self.context,
-                                      device_path='/dev/vdb',
-                                      mount_point='/var/lib/mysql')
-            test_mount = volume.VolumeDevice.mount.call_args_list[0]
-            test_mount.assert_called_with('/var/lib/mysql', False)
-
-    def test_unmount_volume(self):
-        with patch.object(volume.VolumeDevice, 'unmount', return_value=None):
-            self.manager.unmount_volume(self.context, device_path='/dev/vdb')
-            test_unmount = volume.VolumeDevice.unmount.call_args_list[0]
-            test_unmount.assert_called_with('/var/lib/mysql')
-
-    def test_resize_fs(self):
-        with patch.object(volume.VolumeDevice, 'resize_fs', return_value=None):
-            self.manager.resize_fs(self.context, device_path='/dev/vdb')
-            test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0]
-            test_resize_fs.assert_called_with('/var/lib/mysql')
-
-    @patch.object(dbaas.MySqlApp, 'remove_overrides')
-    def test_update_overrides(self, remove_config_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        dbaas.MySqlApp.update_overrides = MagicMock(return_value=None)
-        self.manager.update_overrides(self.context, 'something_overrides')
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        remove_config_mock.assert_not_called()
-        dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides')
-
-    @patch.object(dbaas.MySqlApp, 'remove_overrides')
-    def test_update_overrides_with_remove(self, remove_overrides_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        dbaas.MySqlApp.update_overrides = MagicMock(return_value=None)
-        self.manager.update_overrides(self.context, 'something_overrides',
-                                      True)
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        remove_overrides_mock.assert_any_call()
-        dbaas.MySqlApp.update_overrides.assert_any_call('something_overrides')
-
-    @patch.object(dbaas.MySqlApp, 'apply_overrides')
-    def test_apply_overrides(self, apply_overrides_mock):
-        mock_status = MagicMock()
-        override = {'some_key': 'some value'}
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        self.manager.apply_overrides(self.context, override)
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        apply_overrides_mock.assert_any_call({'some_key': 'some value'})
-
-    @patch.object(dbaas.MySqlApp, 'get_txn_count', return_value=(9879))
-    def test_get_txn_count(self, get_txn_count_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        txn_count = self.manager.get_txn_count(self.context)
-        self.assertEqual(get_txn_count_mock.return_value, txn_count)
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        get_txn_count_mock.assert_any_call()
-
-    @patch.object(dbaas.MySqlApp, 'get_latest_txn_id',
-                  return_value=('2a5b-2064-32fb:1'))
-    def test_get_latest_txn_id(self, get_latest_txn_id_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        latest_txn_id = self.manager.get_latest_txn_id(self.context)
-        self.assertEqual(get_latest_txn_id_mock.return_value, latest_txn_id)
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        get_latest_txn_id_mock.assert_any_call()
-
-    @patch.object(dbaas.MySqlApp, 'wait_for_txn')
-    def test_wait_for_txn(self, wait_for_txn_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        self.manager.wait_for_txn(self.context, '4b4-23:5,2a5b-2064-32fb:1')
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        wait_for_txn_mock.assert_any_call('4b4-23:5,2a5b-2064-32fb:1')
-
-    @patch.object(dbaas.MySqlApp, 'make_read_only')
-    def test_make_read_only(self, make_read_only_mock):
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        self.manager.make_read_only(self.context, 'ON')
-        dbaas.MySqlAppStatus.get.assert_any_call()
-        make_read_only_mock.assert_any_call('ON')
-
-    def test_cleanup_source_on_replica_detach(self):
-        mock_replication = MagicMock()
-        mock_replication.cleanup_source_on_replica_detach = MagicMock()
-        self.mock_rs_class.return_value = mock_replication
-        snapshot = {'replication_strategy': self.replication_strategy,
-                    'dataset': {'dataset_size': '1.0'}}
-
-        # entry point
-        self.manager.cleanup_source_on_replica_detach(self.context, snapshot)
-        # assertions
-        self.assertEqual(
-            1, mock_replication.cleanup_source_on_replica_detach.call_count)
-
-    def test_get_replica_context(self):
-        replication_user = {
-            'name': 'repl_user',
-            'password': 'repl_pwd'
-        }
-        master_ref = {
-            'host': '1.2.3.4',
-            'port': 3306
-        }
-        rep_info = {
-            'master': master_ref,
-            'log_position': {
-                'replication_user': replication_user
-            }
-        }
-        mock_replication = MagicMock()
-        mock_replication.get_replica_context = MagicMock(return_value=rep_info)
-        self.mock_rs_class.return_value = mock_replication
-
-        # entry point
-        replica_info = self.manager.get_replica_context(self.context)
-        # assertions
-        self.assertEqual(1, mock_replication.get_replica_context.call_count)
-        self.assertEqual(rep_info, replica_info)
-
-    def test_enable_as_master(self):
-        mock_replication = MagicMock()
-        mock_replication.enable_as_master = MagicMock()
-        self.mock_rs_class.return_value = mock_replication
-
-        # entry point
-        self.manager.enable_as_master(self.context, None)
-        # assertions
-        self.assertEqual(mock_replication.enable_as_master.call_count, 1)
-
-    @patch('trove.guestagent.datastore.mysql_common.manager.LOG')
-    def test__perform_restore(self, *args):
-        backup_info = {'id': 'backup_id_123abc',
-                       'location': 'fake-location',
-                       'type': 'InnoBackupEx',
-                       'checksum': 'fake-checksum',
-                       }
-        mock_status = MagicMock()
-        self.manager.appStatus = mock_status
-        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
-        app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
-        with patch.object(backup, 'restore',
-                          side_effect=ProcessExecutionError):
-            self.assertRaises(ProcessExecutionError,
-                              self.manager._perform_restore, backup_info,
-                              self.context, '/var/lib/mysql', app)
-            app.status.set_status.assert_called_with(
-                rd_instance.ServiceStatuses.FAILED)
diff --git a/trove/tests/unittests/guestagent/test_operating_system.py b/trove/tests/unittests/guestagent/test_operating_system.py
deleted file mode 100644
index 178b987bd4..0000000000
--- a/trove/tests/unittests/guestagent/test_operating_system.py
+++ /dev/null
@@ -1,1300 +0,0 @@
-# Copyright 2015 Tesora Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import re
-import stat
-import tempfile
-
-from mock import call, patch, mock_open
-from oslo_concurrency.processutils import UnknownArgumentError
-import six
-from testtools import ExpectedException
-
-from trove.common import exception
-from trove.common.stream_codecs import (
-    Base64Codec, IdentityCodec, IniCodec, JsonCodec,
-    KeyValueCodec, PropertiesCodec, XmlCodec, YamlCodec)
-from trove.common import utils
-from trove.guestagent.common import guestagent_utils
-from trove.guestagent.common import operating_system
-from trove.guestagent.common.operating_system import FileMode
-from trove.tests.unittests import trove_testtools
-
-
-class TestOperatingSystem(trove_testtools.TestCase):
-
-    def test_base64_codec(self):
-        data = "Line 1\nLine 2\n"
-        # Base64Codec.deserialize returns bytes instead of string.
-        self._test_file_codec(data, Base64Codec(),
-                              expected_data=data.encode('utf-8'))
-
-        # when encoding is reversed for Base64Codec, reading from files
-        # will call Base64Codec.serialize which returns string.
-        data = "TGluZSAxCkxpbmUgMgo="
-        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)
-
-        data = "5Am9+y0wTwqUx39sMMBg3611FWg="
-        self._test_file_codec(data, Base64Codec(), reverse_encoding=True)
-
-    def test_identity_file_codec(self):
-        data = ("Lorem Ipsum, Lorem Ipsum\n"
-                "Lorem Ipsum, Lorem Ipsum\n"
-                "Lorem Ipsum, Lorem Ipsum\n")
-
-        self._test_file_codec(data, IdentityCodec())
-
-    def test_ini_file_codec(self):
-        data_no_none = {"Section1": {"s1k1": 's1v1',
-                                     "s1k2": 3.1415926535},
-                        "Section2": {"s2k1": 1,
-                                     "s2k2": True}}
-
-        self._test_file_codec(data_no_none, IniCodec())
-
-        data_with_none = {"Section1": {"s1k1": 's1v1',
-                                       "s1k2": 3.1415926535},
-                          "Section2": {"s2k1": 1,
-                                       "s2k2": True,
-                                       "s2k3": None}}
-
-        # Keys with None values will be written without value.
-        self._test_file_codec(data_with_none, IniCodec())
-
-        # None will be replaced with 'default_value'.
-        default_value = 1
-        expected_data = guestagent_utils.update_dict(
-            {"Section2": {"s2k3": default_value}}, dict(data_with_none))
-        self._test_file_codec(data_with_none,
-                              IniCodec(default_value=default_value),
-                              expected_data=expected_data)
-
-    def test_yaml_file_codec(self):
-        data = {"Section1": 's1v1',
-                "Section2": {"s2k1": '1',
-                             "s2k2": 'True'},
-                "Section3": {"Section4": {"s4k1": '3.1415926535',
-                                          "s4k2": None}},
-                "Section5": {"s5k1": 1,
-                             "s5k2": True},
-                "Section6": {"Section7": {"s7k1": 3.1415926535,
-                                          "s7k2": None}}
-                }
-
-        self._test_file_codec(data, YamlCodec())
-        self._test_file_codec(data, YamlCodec(default_flow_style=True))
-
-    def test_properties_file_codec(self):
-        data = {'key1': [1, "str1", '127.0.0.1', 3.1415926535, True, None],
-                'key2': [2.0, 3, 0, "str1 str2"],
-                'key3': ['str1', 'str2'],
-                'key4': [],
-                'key5': 5000,
-                'key6': 'str1',
-                'key7': 0,
-                'key8': None,
-                'key9': [['str1', 'str2'], ['str3', 'str4']],
-                'key10': [['str1', 'str2', 'str3'], ['str3', 'str4'], 'str5'],
-                'key11': True
-                }
-
-        self._test_file_codec(data, PropertiesCodec())
-        self._test_file_codec(data, PropertiesCodec(
-            string_mappings={'yes': True, 'no': False, "''": None}))
-
-    def test_key_value_file_codec(self):
-        data = {'key1': 'value1',
-                'key2': 'value2',
-                'key3': 'value3'}
-
-        self._test_file_codec(data, KeyValueCodec())
-
-    def test_json_file_codec(self):
-        data = {"Section1": 's1v1',
-                "Section2": {"s2k1": '1',
-                             "s2k2": 'True'},
-                "Section3": {"Section4": {"s4k1": '3.1415926535',
-                                          "s4k2": None}},
-                "Section5": {"s5k1": 1,
-                             "s5k2": True},
-                "Section6": {"Section7": {"s7k1": 3.1415926535,
-                                          "s7k2": None}}
-                }
-
-        self._test_file_codec(data, JsonCodec())
-
-    def test_xml_file_codec(self):
-        data = {'document': {'@name': 'mydocument', '@ttl': '10',
-                             'author': {'@name': 'Jycll ;-)'},
-                             'page': [{'@number': '1', 'paragraph':
-                                       ['lorem ipsum', 'more lorem ipsum']},
-                                      {'@number': '1', 'paragraph':
-                                       ['lorem ipsum', 'more lorem ipsum']}]
-                             }
-                }
-
-        self._test_file_codec(data, XmlCodec())
-
-    def _test_file_codec(self, data, read_codec, write_codec=None,
-                         expected_data=None,
-                         expected_exception=None,
-                         reverse_encoding=False):
-        write_codec = write_codec or read_codec
-
-        with tempfile.NamedTemporaryFile() as test_file:
-            encode = True
-            decode = True
-            if reverse_encoding:
-                encode = False
-                decode = False
-            if expected_exception:
-                with expected_exception:
-                    operating_system.write_file(test_file.name, data,
-                                                codec=write_codec,
-                                                encode=encode)
-                    operating_system.read_file(test_file.name,
-                                               codec=read_codec,
-                                               decode=decode)
-            else:
-                operating_system.write_file(test_file.name, data,
-                                            codec=write_codec,
-                                            encode=encode)
-                read = operating_system.read_file(test_file.name,
-                                                  codec=read_codec,
-                                                  decode=decode)
-                if expected_data is not None:
-                    self.assertEqual(expected_data, read)
-                else:
-                    self.assertEqual(data, read)
-
-    def test_read_write_file_input_validation(self):
-        with ExpectedException(exception.UnprocessableEntity,
-                               "File does not exist: None"):
-            operating_system.read_file(None)
-
-        with ExpectedException(exception.UnprocessableEntity,
-                               "File does not exist: /__DOES_NOT_EXIST__"):
-            operating_system.read_file('/__DOES_NOT_EXIST__')
-
-        with ExpectedException(exception.UnprocessableEntity,
-                               "Invalid path: None"):
-            operating_system.write_file(None, {})
-
-    @patch.object(operating_system, 'copy')
-    def test_write_file_as_root(self, copy_mock):
-        target_file = tempfile.NamedTemporaryFile()
-        temp_file = tempfile.NamedTemporaryFile('w')
-
-        with patch('tempfile.NamedTemporaryFile', return_value=temp_file):
-            operating_system.write_file(
-                target_file.name, "Lorem Ipsum", as_root=True)
-            copy_mock.assert_called_once_with(
-                temp_file.name, target_file.name, force=True, as_root=True)
-            self.assertFalse(os.path.exists(temp_file.name))
-
-    @patch.object(operating_system, 'copy',
-                  side_effect=Exception("Error while executing 'copy'."))
-    def test_write_file_as_root_with_error(self, copy_mock):
-        target_file = tempfile.NamedTemporaryFile()
-        temp_file = tempfile.NamedTemporaryFile('w')
-        with patch('tempfile.NamedTemporaryFile', return_value=temp_file):
-            with ExpectedException(Exception, "Error while executing 'copy'."):
-                operating_system.write_file(target_file.name,
-                                            "Lorem Ipsum", as_root=True)
-        self.assertFalse(os.path.exists(temp_file.name))
-
-    @patch.object(operating_system, 'exists', return_value=True)
-    @patch.object(operating_system, 'copy')
-    @patch.object(operating_system, 'chmod')
-    @patch.object(IdentityCodec, 'deserialize')
-    @patch.object(IdentityCodec, 'serialize')
-    @patch.object(operating_system, 'open',
-                  mock_open(read_data='MockingRead'))
-    def test_read_file_with_flags_and_conv_func(self, mock_serialize,
-                                                mock_deserialize,
-                                                mock_chmod, mock_copy,
-                                                *args):
-        test_path = '/path/of/file'
-        test_data = 'MockingRead'
-        # use getattr to avoid pylint 'no-member' warning
-        mock_file = getattr(operating_system, 'open')
-
-        # simple read
-        operating_system.read_file(test_path)
-        mock_file.assert_called_once_with(test_path, 'r')
-        mock_file().read.assert_called_once()
-        mock_deserialize.called_once_with(test_data)
-        mock_file.reset_mock()
-        mock_deserialize.reset_mock()
-
-        # read with decode=False
-        operating_system.read_file(test_path, decode=False)
-        mock_file.assert_called_once_with(test_path, 'rb')
-        mock_file().read.assert_called_once()
-        mock_serialize.called_once_with(test_data)
-        mock_file.reset_mock()
-        mock_serialize.reset_mock()
-
-        # checking _read_file_as_root arguments
-        with patch.object(operating_system,
-                          '_read_file_as_root') as mock_read_file_as_root:
-            # simple read as root,
-            operating_system.read_file(test_path, as_root=True)
-            mock_read_file_as_root.assert_called_once_with(
-                test_path, 'r', mock_deserialize)
-            mock_deserialize.assert_not_called()
-            mock_read_file_as_root.reset_mock()
-
-            # read as root with decode=False,
-            operating_system.read_file(test_path, as_root=True, decode=False)
-            mock_read_file_as_root.assert_called_once_with(
-                test_path, 'rb', mock_serialize)
-            mock_serialize.assert_not_called()
-
-        # simple read as root
-        temp_file = tempfile.NamedTemporaryFile('r')
-        with patch.object(tempfile, 'NamedTemporaryFile',
-                          return_value=temp_file) as mock_temp_file:
-            operating_system.read_file(test_path, as_root=True)
-            mock_temp_file.assert_called_once_with('r')
-            mock_copy.called_once_with(test_path, temp_file.name,
-                                       force=True, dereference=True,
-                                       as_root=True)
-            mock_chmod.called_once_with(temp_file.name,
-                                        FileMode.ADD_READ_ALL(),
-                                        as_root=True)
-            mock_deserialize.assert_called_once_with('')
-            self.assertFalse(os.path.exists(temp_file.name))
-        mock_copy.reset_mock()
-        mock_chmod.reset_mock()
-        mock_deserialize.reset_mock()
-
-        # read as root with decode=False
-        temp_file = tempfile.NamedTemporaryFile('rb')
-        with patch.object(tempfile, 'NamedTemporaryFile',
-                          return_value=temp_file) as mock_temp_file:
-            operating_system.read_file(test_path, as_root=True,
-                                       decode=False)
-            mock_temp_file.assert_called_once_with('rb')
-            mock_copy.called_once_with(test_path, temp_file.name,
-                                       force=True, dereference=True,
-                                       as_root=True)
-            mock_chmod.called_once_with(temp_file.name,
-                                        FileMode.ADD_READ_ALL(),
-                                        as_root=True)
-            mock_serialize.assert_called_once_with(b'')
-            self.assertFalse(os.path.exists(temp_file.name))
-
-    @patch.object(operating_system, 'copy')
-    @patch.object(operating_system, 'chmod')
-    @patch.object(IdentityCodec, 'deserialize',
-                  return_value=b'DeseiralizedData')
-    @patch.object(IdentityCodec, 'serialize',
-                  return_value='SerializedData')
-    @patch.object(operating_system, 'open', mock_open())
-    def test_write_file_with_flags_and_conv_func(self, mock_serialize,
-                                                 mock_deserialize,
-                                                 mock_chmod, mock_copy):
-        test_path = '/path/of/file'
-        test_data = 'MockingWrite'
-        test_serialize = 'SerializedData'
-        test_deserialize = b'DeseiralizedData'
-        mock_file = getattr(operating_system, 'open')
-
-        # simple write
-        operating_system.write_file(test_path, test_data)
-        mock_file.assert_called_once_with(test_path, 'w')
-        mock_serialize.called_once_with(test_data)
-        mock_file().write.assert_called_once_with(test_serialize)
-        mock_file().flush.assert_called_once()
-        mock_file.reset_mock()
-        mock_serialize.reset_mock()
-
-        # write with encode=False
-        operating_system.write_file(test_path, test_data, encode=False)
-        mock_file.assert_called_once_with(test_path, 'wb')
-        mock_deserialize.called_once_with(test_data)
-        mock_file().write.assert_called_once_with(test_deserialize)
-        mock_file().flush.assert_called_once()
-        mock_file.reset_mock()
-        mock_deserialize.reset_mock()
-
-        # checking _write_file_as_root arguments
-        with patch.object(operating_system,
-                          '_write_file_as_root') as mock_write_file_as_root:
-            # simple write as root,
-            operating_system.write_file(test_path, test_data, as_root=True)
-            mock_write_file_as_root.assert_called_once_with(
-                test_path, test_data, 'w', mock_serialize)
-            mock_serialize.assert_not_called()
-            mock_write_file_as_root.reset_mock()
-
-            # read as root with encode=False,
-            operating_system.write_file(test_path, test_data,
-                                        as_root=True, encode=False)
-            mock_write_file_as_root.assert_called_once_with(
-                test_path, test_data, 'wb', mock_deserialize)
-            mock_deserialize.assert_not_called()
-
-        # simple write as root
-        temp_file = tempfile.NamedTemporaryFile('w')
-        with patch.object(tempfile, 'NamedTemporaryFile',
-                          return_value=temp_file) as mock_temp_file:
-            operating_system.write_file(test_path, test_data, as_root=True)
-            mock_temp_file.assert_called_once_with('w', delete=False)
-            mock_serialize.assert_called_once_with(test_data)
-            mock_copy.called_once_with(temp_file.name, test_path,
-                                       force=True, as_root=True)
-            self.assertFalse(os.path.exists(temp_file.name))
-        mock_copy.reset_mock()
-        mock_chmod.reset_mock()
-        mock_serialize.reset_mock()
-
-        # write as root with decode=False
-        temp_file = tempfile.NamedTemporaryFile('wb')
-        with patch.object(tempfile, 'NamedTemporaryFile',
-                          return_value=temp_file) as mock_temp_file:
-            operating_system.write_file(test_path, test_data,
-                                        as_root=True, encode=False)
-            mock_temp_file.assert_called_once_with('wb', delete=False)
-            mock_deserialize.assert_called_once_with(test_data)
-            mock_copy.called_once_with(temp_file.name, test_path,
-                                       force=True, as_root=True)
-            self.assertFalse(os.path.exists(temp_file.name))
-
-    def test_start_service(self):
-        self._assert_service_call(operating_system.start_service,
-                                  'cmd_start')
-
-    def test_stop_service(self):
-        self._assert_service_call(operating_system.stop_service,
-                                  'cmd_stop')
-
-    def test_enable_service_on_boot(self):
-        self._assert_service_call(operating_system.enable_service_on_boot,
-                                  'cmd_enable')
-
-    def test_disable_service_on_boot(self):
-        self._assert_service_call(operating_system.disable_service_on_boot,
-                                  'cmd_disable')
-
-    @patch.object(operating_system, '_execute_service_command')
-    def _assert_service_call(self, fun, expected_cmd_key,
-                             exec_service_cmd_mock):
-        test_candidate_names = ['test_service_1', 'test_service_2']
-        fun(test_candidate_names)
-        exec_service_cmd_mock.assert_called_once_with(test_candidate_names,
-                                                      expected_cmd_key)
-
-    @patch.object(operating_system, 'service_discovery',
-                  return_value={'cmd_start': 'start',
-                                'cmd_stop': 'stop',
-                                'cmd_enable': 'enable',
-                                'cmd_disable': 'disable'})
-    def test_execute_service_command(self, discovery_mock):
-        test_service_candidates = ['service_name']
-        self._assert_execute_call([['start']], [{'shell': True}],
-                                  operating_system._execute_service_command,
-                                  None, test_service_candidates, 'cmd_start')
-        discovery_mock.assert_called_once_with(test_service_candidates)
-
-        with ExpectedException(exception.UnprocessableEntity,
-                               "Candidate service names not specified."):
-            operating_system._execute_service_command([], 'cmd_disable')
-
-        with ExpectedException(exception.UnprocessableEntity,
-                               "Candidate service names not specified."):
-            operating_system._execute_service_command(None, 'cmd_start')
-
-        with ExpectedException(RuntimeError, "Service control command not "
-                                             "available: unknown"):
-            operating_system._execute_service_command(test_service_candidates,
-                                                      'unknown')
-
-    def test_modes(self):
-        self._assert_modes(None, None, None, operating_system.FileMode())
-        self._assert_modes(None, None, None,
-                           operating_system.FileMode([], [], []))
-        self._assert_modes(0o770, 0o4, 0o3, operating_system.FileMode(
-            [stat.S_IRWXU, stat.S_IRWXG],
-            [stat.S_IROTH],
-            [stat.S_IWOTH | stat.S_IXOTH])
-        )
-        self._assert_modes(0o777, None, None, operating_system.FileMode(
-            [stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO])
-        )
-        self._assert_modes(0o777, None, None, operating_system.FileMode(
-            reset=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO])
-        )
-        self._assert_modes(None, 0o777, None, operating_system.FileMode(
-            add=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO])
-        )
-        self._assert_modes(None, None, 0o777, operating_system.FileMode(
-            remove=[stat.S_IRWXU, stat.S_IRWXG, stat.S_IRWXO])
-        )
-
-        self.assertEqual(
-            operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]),
-            operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR]))
-
-        self.assertEqual(
-            hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])),
-            hash(operating_system.FileMode(add=[stat.S_IWUSR, stat.S_IRUSR])))
-
-        self.assertNotEqual(
-            operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR]),
-            operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR]))
-
-        self.assertNotEqual(
-            hash(operating_system.FileMode(add=[stat.S_IRUSR, stat.S_IWUSR])),
-            hash(operating_system.FileMode(reset=[stat.S_IRUSR, stat.S_IWUSR]))
-        )
-
-    def _assert_modes(self, ex_reset, ex_add, ex_remove, actual):
-        self.assertEqual(bool(ex_reset or ex_add or ex_remove),
-                         actual.has_any())
-        self.assertEqual(ex_reset, actual.get_reset_mode())
-        self.assertEqual(ex_add, actual.get_add_mode())
-        self.assertEqual(ex_remove, actual.get_remove_mode())
-
-    def test_chmod(self):
-        self._assert_execute_call(
-            [['chmod', '-R', '=064', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chmod, None,
-            'path', FileMode.SET_GRP_RW_OTH_R,
-            as_root=True)
-        self._assert_execute_call(
-            [['chmod', '-R', '+444', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chmod, None,
-            'path', FileMode.ADD_READ_ALL,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['chmod', '-R', '+060', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chmod, None,
-            'path', FileMode.ADD_GRP_RW,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['chmod', '-R', '=777', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chmod, None,
-            'path', FileMode.SET_FULL,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['chmod', '-f', '=777', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chmod, None,
-            'path', FileMode.SET_FULL,
-            as_root=True, recursive=False, force=True)
-
-        self._assert_execute_call(
-            [['chmod', '-R', '=777', 'path']],
-            [{'timeout': 100}],
-            operating_system.chmod, None,
-            'path', FileMode.SET_FULL,
-            timeout=100)
-
-        self._assert_execute_call(
-            [['chmod', '-R', '=777', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}],
-            operating_system.chmod, None,
-            'path', FileMode.SET_FULL,
-            as_root=True, timeout=None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chmod,
-            ExpectedException(exception.UnprocessableEntity,
-                              "No file mode specified."),
-            'path', FileMode())
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chmod,
-            ExpectedException(exception.UnprocessableEntity,
-                              "No file mode specified."),
-            'path', None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chmod,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change mode of a blank file."),
-            '', FileMode.SET_FULL)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chmod,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change mode of a blank file."),
-            None, FileMode.SET_FULL)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chmod,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'path', FileMode.SET_FULL, _unknown_kw=0)
-
-    def test_remove(self):
-        self._assert_execute_call(
-            [['rm', '-R', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.remove, None, 'path', as_root=True)
-
-        self._assert_execute_call(
-            [['rm', '-f', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.remove, None, 'path', recursive=False, force=True,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['rm', '-R', 'path']],
-            [{'timeout': 100}],
-            operating_system.remove, None,
-            'path', timeout=100)
-
-        self._assert_execute_call(
-            [['rm', '-R', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}],
-            operating_system.remove, None, 'path', timeout=None, as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.remove,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot remove a blank file."), '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.remove,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot remove a blank file."), None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.remove,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'path', _unknown_kw=0)
-
-    def test_move(self):
-        self._assert_execute_call(
-            [['mv', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.move, None, 'source', 'destination', as_root=True)
-
-        self._assert_execute_call(
-            [['mv', '-f', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.move, None, 'source', 'destination', force=True,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['mv', 'source', 'destination']],
-            [{'timeout': 100}],
-            operating_system.move, None, 'source', 'destination',
-            timeout=100)
-
-        self._assert_execute_call(
-            [['mv', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None}],
-            operating_system.move, None, 'source', 'destination', timeout=None,
-            as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), '', 'destination')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), None, 'destination')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing destination path."), 'source', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing destination path."), 'source', None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), '', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), None, None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.move,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'source', 'destination', _unknown_kw=0)
-
-    def test_copy(self):
-        self._assert_execute_call(
-            [['cp', '-R', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.copy, None, 'source', 'destination', as_root=True)
-
-        self._assert_execute_call(
-            [['cp', '-f', '-p', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.copy, None, 'source', 'destination', force=True,
-            preserve=True, recursive=False, as_root=True)
-
-        self._assert_execute_call(
-            [['cp', '-R', 'source', 'destination']],
-            [{'timeout': 100}],
-            operating_system.copy, None, 'source', 'destination',
-            timeout=100)
-
-        self._assert_execute_call(
-            [['cp', '-R', 'source', 'destination']],
-            [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}],
-            operating_system.copy, None, 'source', 'destination', timeout=None,
-            as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), '', 'destination')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), None, 'destination')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing destination path."), 'source', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing destination path."), 'source', None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), '', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing source path."), None, None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.copy,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'source', 'destination', _unknown_kw=0)
-
-    def test_chown(self):
-        self._assert_execute_call(
-            [['chown', '-R', 'usr:grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None, 'path', 'usr', 'grp', as_root=True)
-
-        self._assert_execute_call(
-            [['chown', 'usr:grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None,
-            'path', 'usr', 'grp', recursive=False, as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-f', '-R', 'usr:grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None,
-            'path', 'usr', 'grp', force=True, as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-R', ':grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None, 'path', '', 'grp', as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None, 'path', 'usr', '', as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-R', ':grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None, 'path', None, 'grp', as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.chown, None, 'path', 'usr', None, as_root=True)
-
-        self._assert_execute_call(
-            [['chown', '-R', 'usr:', 'path']],
-            [{'timeout': 100}],
-            operating_system.chown, None,
-            'path', 'usr', None, timeout=100)
-
-        self._assert_execute_call(
-            [['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo',
-              'timeout': None}],
-            operating_system.chown, None,
-            'path', 'usr', None, timeout=None, as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change ownership of a blank file."),
-            '', 'usr', 'grp')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change ownership of a blank file."),
-            None, 'usr', 'grp')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Please specify owner or group, or both."),
-            'path', '', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Please specify owner or group, or both."),
-            'path', None, None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change ownership of a blank file."),
-            None, None, None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot change ownership of a blank file."),
-            '', '', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.chown,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'path', 'usr', None, _unknown_kw=0)
-
-    def test_change_user_group(self):
-        self._assert_execute_call(
-            [['usermod', '-a', '-G', 'user', 'group']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.change_user_group, None, 'group', 'user',
-            as_root=True)
-
-        self._assert_execute_call(
-            [['usermod', '-a', '-G', 'user', 'group']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.change_user_group, None, 'group', 'user',
-            append=True, add_group=True, as_root=True)
-
-        self._assert_execute_call(
-            [['usermod', '-a', '-G', 'user', 'group']],
-            [{'timeout': 100}],
-            operating_system.change_user_group, None, 'group', 'user',
-            timeout=100)
-
-        self._assert_execute_call(
-            [['usermod', '-a', '-G', 'user', 'group']],
-            [{'run_as_root': True, 'root_helper': "sudo", 'timeout': None}],
-            operating_system.change_user_group, None, 'group', 'user',
-            timeout=None, as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing user."), '', 'group')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing user."), None, 'group')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing group."), 'user', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing group."), 'user', None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing user."), '', '')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Missing user."), None, None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.change_user_group,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'user', 'add_group', _unknown_kw=0)
-
-    def test_create_directory(self):
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None, 'path', as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None, 'path', force=False,
-            as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'},
-             {'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None,
-            'path', user='usr', group='grp', as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'},
-             {'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None, 'path', group='grp',
-            as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'},
-             {'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None, 'path', user='usr',
-            as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']],
-            [{'timeout': 100}, {'timeout': 100}],
-            operating_system.create_directory, None,
-            'path', user='usr', timeout=100)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo', 'timeout': None},
-             {'run_as_root': True,
-              'root_helper': 'sudo', 'timeout': None}],
-            operating_system.create_directory, None,
-            'path', user='usr', timeout=None, as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', 'usr:', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'},
-             {'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None,
-            'path', user='usr', group='', as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path'], ['chown', '-R', ':grp', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'},
-             {'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None,
-            'path', user='', group='grp', as_root=True)
-
-        self._assert_execute_call(
-            [['mkdir', '-p', 'path']],
-            [{'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.create_directory, None, 'path', user='', group='',
-            as_root=True)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.create_directory,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot create a blank directory."),
-            '', user='usr', group='grp')
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.create_directory,
-            ExpectedException(exception.UnprocessableEntity,
-                              "Cannot create a blank directory."), None)
-
-        self._assert_execute_call(
-            None, None,
-            operating_system.create_directory,
-            ExpectedException(UnknownArgumentError,
-                              "Got unknown keyword args: {'_unknown_kw': 0}"),
-            'path', _unknown_kw=0)
-
-    def test_find_executable_without_path(self):
-        command = "command"
-        self._delegate_assert_find_executable(command=command,
-                                              path=None,
-                                              isfile=True,
-                                              access=True,
-                                              expected_return_value=(
-                                                  "/usr/bin/command"))
-        self._delegate_assert_find_executable(command=command,
-                                              path=None,
-                                              isfile=True,
-                                              access=False,
-                                              expected_return_value=None)
-        self._delegate_assert_find_executable(command=command,
-                                              path=None,
-                                              isfile=False,
-                                              access=True,
-                                              expected_return_value=None)
-        self._delegate_assert_find_executable(command=command,
-                                              path=None,
-                                              isfile=False,
-                                              access=False,
-                                              expected_return_value=None)
-
-    def test_find_executable_with_path(self):
-        command = "command"
-        path = "/home"
-        self._delegate_assert_find_executable(command=command,
-                                              path=path,
-                                              isfile=True,
-                                              access=True,
-                                              expected_return_value=(
-                                                  "/home/command"))
-        self._delegate_assert_find_executable(command=command,
-                                              path=path,
-                                              isfile=True,
-                                              access=False,
-                                              expected_return_value=None)
-        self._delegate_assert_find_executable(command=command,
-                                              path=path,
-                                              isfile=False,
-                                              access=True,
-                                              expected_return_value=None)
-        self._delegate_assert_find_executable(command=command,
-                                              path=path,
-                                              isfile=False,
-                                              access=False,
-                                              expected_return_value=None)
-
-    def _delegate_assert_find_executable(self, command, path, isfile,
-                                         access, expected_return_value):
-        self._assert_find_executable(command, path, isfile, access,
-                                     expected_return_value)
-
-    @patch.object(os, 'access')
-    @patch.object(os.path, 'isfile')
-    @patch.object(os.environ, 'get', return_value="/usr/bin")
-    def _assert_find_executable(self, command, path, isfile, access,
-                                expected_return_value, mock_environ,
-                                mock_isfile, mock_access):
-        mock_access.return_value = access
-        mock_isfile.return_value = isfile
-        actual_result = operating_system.find_executable(command, path)
-        self.assertEqual(expected_return_value, actual_result)
-        if path is None:
-            mock_environ.assert_called_once()
-        else:
-            mock_environ.assert_not_called()
-
-    def test_exists(self):
-        self.assertFalse(
-            operating_system.exists(tempfile.gettempdir(),
-                                    is_directory=False))
-        self.assertTrue(
-            operating_system.exists(tempfile.gettempdir(), is_directory=True))
-
-        with tempfile.NamedTemporaryFile() as test_file:
-            self.assertTrue(
-                operating_system.exists(test_file.name, is_directory=False))
-            self.assertFalse(
-                operating_system.exists(test_file.name, is_directory=True))
-
-        self._assert_execute_call(
-            [['test -f path && echo 1 || echo 0']],
-            [{'shell': True, 'check_exit_code': False,
-              'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.exists, None, 'path', is_directory=False,
-            as_root=True)
-        self._assert_execute_call(
-            [['test -d path && echo 1 || echo 0']],
-            [{'shell': True, 'check_exit_code': False,
-              'run_as_root': True, 'root_helper': 'sudo'}],
-            operating_system.exists, None, 'path', is_directory=True,
-            as_root=True)
-
-    def _assert_execute_call(self, exec_args, exec_kwargs,
-                             func, return_value, *args, **kwargs):
-        """
-        Execute a function with given arguments.
-        Assert a return value and appropriate sequence of calls to the
-        'utils.execute_with_timeout' interface as the result.
-
-        :param exec_args:    Expected arguments to the execute calls.
-                             This is a list-of-list where each sub-list
-                             represent a single call to
-                             'utils.execute_with_timeout'.
-        :type exec_args:     list-of-lists
-
-        :param exec_kwargs:  Expected keywords to the execute call.
-                             This is a list-of-dicts where each dict
-                             represent a single call to
-                             'utils.execute_with_timeout'.
-        :type exec_kwargs:   list-of-dicts
-
-        :param func:         Tested function call.
-        :type func:          callable
-
-        :param return_value: Expected return value or exception
-                             from the tested call if any.
-        :type return_value:  object
-
-        :param args:         Arguments passed to the tested call.
-        :type args:          list
-
-        :param kwargs:       Keywords passed to the tested call.
-        :type kwargs:        dict
-        """
-
-        with patch.object(utils, 'execute_with_timeout',
-                          return_value=('0', '')) as exec_call:
-            if isinstance(return_value, ExpectedException):
-                with return_value:
-                    func(*args, **kwargs)
-            else:
-                actual_value = func(*args, **kwargs)
-                if return_value is not None:
-                    self.assertEqual(return_value, actual_value,
-                                     "Return value mismatch.")
-            expected_calls = []
-            for arg, kw in six.moves.zip(exec_args, exec_kwargs):
-                expected_calls.append(call(*arg, **kw))
-
-            self.assertEqual(expected_calls, exec_call.mock_calls,
-                             "Mismatch in calls to "
-                             "'execute_with_timeout'.")
-
-    def test_get_os_redhat(self):
-        with patch.object(os.path, 'isfile', side_effect=[True]):
-            find_os = operating_system.get_os()
-            self.assertEqual('redhat', find_os)
-
-    def test_get_os_suse(self):
-        with patch.object(os.path, 'isfile', side_effect=[False, True]):
-            find_os = operating_system.get_os()
-            self.assertEqual('suse', find_os)
-
-    def test_get_os_debian(self):
-        with patch.object(os.path, 'isfile', side_effect=[False, False]):
-            find_os = operating_system.get_os()
-            self.assertEqual('debian', find_os)
-
-    def test_upstart_type_service_discovery(self):
-        with patch.object(os.path, 'isfile', side_effect=[True]):
-            mysql_service = operating_system.service_discovery(["mysql"])
-            self.assertIsNotNone(mysql_service['cmd_start'])
-            self.assertIsNotNone(mysql_service['cmd_enable'])
-
-    def test_sysvinit_type_service_discovery(self):
-        with patch.object(os.path, 'isfile', side_effect=[False, True, True]):
-            mysql_service = operating_system.service_discovery(["mysql"])
-            self.assertIsNotNone(mysql_service['cmd_start'])
-            self.assertIsNotNone(mysql_service['cmd_enable'])
-
-    def test_sysvinit_chkconfig_type_service_discovery(self):
-        with patch.object(os.path, 'isfile',
-                          side_effect=[False, True, False, True]):
-            mysql_service = operating_system.service_discovery(["mysql"])
-            self.assertIsNotNone(mysql_service['cmd_start'])
-            self.assertIsNotNone(mysql_service['cmd_enable'])
-
-    @patch.object(os.path, 'islink', return_value=True)
-    @patch.object(os.path, 'realpath')
-    @patch.object(os.path, 'basename')
-    def test_systemd_symlinked_type_service_discovery(self, mock_base,
-                                                      mock_path, mock_islink):
-        with patch.object(os.path, 'isfile', side_effect=[False, False, True]):
-            mysql_service = operating_system.service_discovery(["mysql"])
-            self.assertIsNotNone(mysql_service['cmd_start'])
-            self.assertIsNotNone(mysql_service['cmd_enable'])
-
-    def test_systemd_not_symlinked_type_service_discovery(self):
-        with patch.object(os.path, 'isfile', side_effect=[False, False, True]):
-            with patch.object(os.path, 'islink', return_value=False):
-                mysql_service = operating_system.service_discovery(["mysql"])
-                self.assertIsNotNone(mysql_service['cmd_start'])
-                self.assertIsNotNone(mysql_service['cmd_enable'])
-
-    def test_file_discovery(self):
-        with patch.object(os.path, 'isfile', side_effect=[False, True]):
-            config_file = operating_system.file_discovery(
-                ["/etc/mongodb.conf", "/etc/mongod.conf"])
-            self.assertEqual('/etc/mongod.conf', config_file)
-        with patch.object(os.path, 'isfile', side_effect=[False]):
-            config_file = operating_system.file_discovery(
-                ["/etc/mongodb.conf"])
-            self.assertEqual('', config_file)
-
-    def test_list_files_in_directory(self):
-        root_path = tempfile.mkdtemp()
-        try:
-            all_paths = set()
-            self._create_temp_fs_structure(
-                root_path, 3, 3, ['txt', 'py', ''], 1, all_paths)
-
-            # All files in the top directory.
-            self._assert_list_files(
-                root_path, False, None, False, all_paths, 9)
-
-            # All files & directories in the top directory.
-            self._assert_list_files(
-                root_path, False, None, True, all_paths, 10)
-
-            # All files recursive.
-            self._assert_list_files(
-                root_path, True, None, False, all_paths, 27)
-
-            # All files & directories recursive.
-            self._assert_list_files(
-                root_path, True, None, True, all_paths, 29)
-
-            # Only '*.txt' in the top directory.
-            self._assert_list_files(
-                root_path, False, r'.*\.txt$', False, all_paths, 3)
-
-            # Only '*.txt' (including directories) in the top directory.
-            self._assert_list_files(
-                root_path, False, r'.*\.txt$', True, all_paths, 3)
-
-            # Only '*.txt' recursive.
-            self._assert_list_files(
-                root_path, True, r'.*\.txt$', True, all_paths, 9)
-
-            # Only '*.txt' (including directories) recursive.
-            self._assert_list_files(
-                root_path, True, r'.*\.txt$', False, all_paths, 9)
-
-            # Only extension-less files in the top directory.
-            self._assert_list_files(
-                root_path, False, r'[^\.]*$', False, all_paths, 3)
-
-            # Only extension-less files recursive.
-            self._assert_list_files(
-                root_path, True, r'[^\.]*$', False, all_paths, 9)
-
-            # Non-existing extension in the top directory.
-            self._assert_list_files(
-                root_path, False, r'.*\.bak$', False, all_paths, 0)
-
-            # Non-existing extension recursive.
-            self._assert_list_files(
-                root_path, True, r'.*\.bak$', False, all_paths, 0)
-        finally:
-            try:
-                os.remove(root_path)
-            except Exception:
-                pass  # Do not fail in the cleanup.
-
-    def _assert_list_files(self, root, recursive, pattern, include_dirs,
-                           all_paths, count):
-        found = operating_system.list_files_in_directory(
-            root, recursive=recursive, pattern=pattern,
-            include_dirs=include_dirs)
-        expected = {
-            path for path in filter(
-                lambda item: include_dirs or not os.path.isdir(item),
-                all_paths) if (
-                    (recursive or os.path.dirname(path) == root) and (
-                        not pattern or re.match(
-                            pattern, os.path.basename(path))))}
-        self.assertEqual(expected, found)
-        self.assertEqual(count, len(found),
-                         "Incorrect number of listed files.")
-
-    def _create_temp_fs_structure(self, root_path,
-                                  num_levels, num_files_per_extension,
-                                  file_extensions, level, created_paths):
-        """Create a structure of temporary directories 'num_levels' deep with
-        temporary files on each level.
-        """
-        file_paths = self._create_temp_files(
-            root_path, num_files_per_extension, file_extensions)
-        created_paths.update(file_paths)
-
-        if level < num_levels:
-            path = tempfile.mkdtemp(dir=root_path)
-            created_paths.add(path)
-            self._create_temp_fs_structure(
-                path, num_levels, num_files_per_extension,
-                file_extensions, level + 1, created_paths)
-
-    def _create_temp_files(self, root_path, num_files_per_extension,
-                           file_extensions):
-        """Create 'num_files_per_extension' temporary files
-        per each of the given extensions.
-        """
-        files = set()
-        for ext in file_extensions:
-            for fileno in range(1, num_files_per_extension + 1):
-                prefix = str(fileno)
-                suffix = os.extsep + ext if ext else ''
-                _, path = tempfile.mkstemp(prefix=prefix, suffix=suffix,
-                                           dir=root_path)
-                files.add(path)
-
-        return files
diff --git a/trove/tests/unittests/guestagent/test_pkg.py b/trove/tests/unittests/guestagent/test_pkg.py
deleted file mode 100644
index 16b61023dd..0000000000
--- a/trove/tests/unittests/guestagent/test_pkg.py
+++ /dev/null
@@ -1,560 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import re
-import subprocess
-
-from mock import Mock, MagicMock, patch
-import pexpect
-
-from trove.common import exception
-from trove.common import utils
-from trove.guestagent import pkg
-from trove.tests.unittests import trove_testtools
-
-
-"""
-Unit tests for the classes and functions in pkg.py.
-"""
-
-
-class PkgDEBInstallTestCase(trove_testtools.TestCase):
-
-    def setUp(self):
-        super(PkgDEBInstallTestCase, self).setUp()
-        self.pkg = pkg.DebianPackagerMixin()
-        self.pkg_fix = self.pkg._fix
-        self.pkg_fix_package_selections = self.pkg._fix_package_selections
-
-        p0 = patch('pexpect.spawn')
-        p0.start()
-        self.addCleanup(p0.stop)
-
-        p1 = patch('trove.common.utils.execute')
-        p1.start()
-        self.addCleanup(p1.stop)
-
-        self.pkg._fix = Mock(return_value=None)
-        self.pkg._fix_package_selections = Mock(return_value=None)
-        self.pkgName = 'packageName'
-
-    def tearDown(self):
-        super(PkgDEBInstallTestCase, self).tearDown()
-        self.pkg._fix = self.pkg_fix
-        self.pkg._fix_package_selections = self.pkg_fix_package_selections
-
-    def test_pkg_is_installed_no_packages(self):
-        packages = []
-        self.assertTrue(self.pkg.pkg_is_installed(packages))
-
-    def test_pkg_is_installed_yes(self):
-        packages = ["package1=1.0", "package2"]
-        self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0"])
-        self.assertTrue(self.pkg.pkg_is_installed(packages))
-
-    def test_pkg_is_installed_no(self):
-        packages = ["package1=1.0", "package2", "package3=3.1"]
-        self.pkg.pkg_version = MagicMock(side_effect=["1.0", "2.0", "3.0"])
-        self.assertFalse(self.pkg.pkg_is_installed(packages))
-
-    def test_success_install(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 7
-        pexpect.spawn.return_value.match = False
-        self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None)
-
-    def test_success_install_with_config_opts(self):
-        # test
-        config_opts = {'option': 'some_opt'}
-        pexpect.spawn.return_value.expect.return_value = 7
-        pexpect.spawn.return_value.match = False
-        self.assertTrue(
-            self.pkg.pkg_install(self.pkgName, config_opts, 5000) is None)
-
-    def test_permission_error(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 0
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-    def test_package_not_found_1(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 1
-        pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
-        # test and verify
-        self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-    def test_package_not_found_2(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 2
-        pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName)
-        # test and verify
-        self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-    def test_run_DPKG_bad_State(self):
-        # test _fix method is called and PackageStateError is thrown
-        pexpect.spawn.return_value.expect.return_value = 4
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-        self.assertTrue(self.pkg._fix.called)
-
-    def test_admin_lock_error(self):
-        # test 'Unable to lock the administration directory' error
-        pexpect.spawn.return_value.expect.return_value = 5
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-    def test_package_broken_error(self):
-        pexpect.spawn.return_value.expect.return_value = 6
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgBrokenError, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-    def test_timeout_error(self):
-        # test timeout error
-        pexpect.spawn.return_value.expect.side_effect = (
-            pexpect.TIMEOUT('timeout error'))
-        # test and verify
-        self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install,
-                          self.pkgName, {}, 5000)
-
-
-class PkgDEBRemoveTestCase(trove_testtools.TestCase):
-
-    def setUp(self):
-        super(PkgDEBRemoveTestCase, self).setUp()
-        self.pkg = pkg.DebianPackagerMixin()
-        self.pkg_version = self.pkg.pkg_version
-        self.pkg_install = self.pkg._install
-        self.pkg_fix = self.pkg._fix
-
-        p0 = patch('pexpect.spawn')
-        p0.start()
-        self.addCleanup(p0.stop)
-
-        p1 = patch('trove.common.utils.execute')
-        p1.start()
-        self.addCleanup(p1.stop)
-
-        self.pkg.pkg_version = Mock(return_value="OK")
-        self.pkg._install = Mock(return_value=None)
-        self.pkg._fix = Mock(return_value=None)
-
-        self.pkgName = 'packageName'
-
-    def tearDown(self):
-        super(PkgDEBRemoveTestCase, self).tearDown()
-        self.pkg.pkg_version = self.pkg_version
-        self.pkg._install = self.pkg_install
-        self.pkg._fix = self.pkg_fix
-
-    def test_remove_no_pkg_version(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 6
-        pexpect.spawn.return_value.match = False
-        with patch.object(self.pkg, 'pkg_version', return_value=None):
-            self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
-
-    def test_success_remove(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 6
-        pexpect.spawn.return_value.match = False
-        self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None)
-
-    def test_permission_error(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 0
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove,
-                          self.pkgName, 5000)
-
-    def test_package_not_found(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 1
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove,
-                          self.pkgName, 5000)
-
-    def test_package_reinstall_first_1(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 2
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
-                          self.pkgName, 5000)
-        self.assertTrue(self.pkg._install.called)
-        self.assertFalse(self.pkg._fix.called)
-
-    def test_package_reinstall_first_2(self):
-        # test
-        pexpect.spawn.return_value.expect.return_value = 3
-        pexpect.spawn.return_value.match = False
-        # test and verify
-        self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove,
-                          self.pkgName, 5000)
-        self.assertTrue(self.pkg._install.called)
-        self.assertFalse(self.pkg._fix.called)
test_package_DPKG_first(self): - # test - pexpect.spawn.return_value.expect.return_value = 4 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgPackageStateError, self.pkg.pkg_remove, - self.pkgName, 5000) - self.assertFalse(self.pkg._install.called) - self.assertTrue(self.pkg._fix.called) - - def test_admin_lock_error(self): - # test 'Unable to lock the administration directory' error - pexpect.spawn.return_value.expect.return_value = 5 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgAdminLockError, self.pkg.pkg_remove, - self.pkgName, 5000) - - def test_timeout_error(self): - # test timeout error - pexpect.spawn.return_value.expect.side_effect = ( - pexpect.TIMEOUT('timeout error')) - # test and verify - self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, - self.pkgName, 5000) - - @patch.object(subprocess, 'call') - def test_timeout_error_with_exception(self, mock_call): - # test timeout error - pexpect.spawn.return_value.expect.side_effect = ( - pexpect.TIMEOUT('timeout error')) - pexpect.spawn.return_value.close.side_effect = ( - pexpect.ExceptionPexpect('error')) - # test and verify - self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, - self.pkgName, 5000) - self.assertEqual(1, mock_call.call_count) - - -class PkgDEBVersionTestCase(trove_testtools.TestCase): - - def setUp(self): - super(PkgDEBVersionTestCase, self).setUp() - self.pkgName = 'mysql-server-5.7' - self.pkgVersion = '5.7.20-0' - self.getoutput = pkg.getoutput - - def tearDown(self): - super(PkgDEBVersionTestCase, self).tearDown() - pkg.getoutput = self.getoutput - - def test_version_success(self): - cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, self.pkgVersion) - pkg.getoutput = Mock(return_value=cmd_out) - version = pkg.DebianPackagerMixin().pkg_version(self.pkgName) - self.assertTrue(version) - self.assertEqual(self.pkgVersion, version) - - def test_version_unknown_package(self): - cmd_out = "N: Unable to locate package %s" % self.pkgName - pkg.getoutput = Mock(return_value=cmd_out) - self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) - - def test_version_no_version(self): - cmd_out = "%s:\n Installed: %s\n" % (self.pkgName, "(none)") - pkg.getoutput = Mock(return_value=cmd_out) - self.assertFalse(pkg.DebianPackagerMixin().pkg_version(self.pkgName)) - - -class PkgRPMVersionTestCase(trove_testtools.TestCase): - - def setUp(self): - super(PkgRPMVersionTestCase, self).setUp() - self.pkgName = 'python-requests' - self.pkgVersion = '0.14.2-1.el6' - self.getoutput = pkg.getoutput - - def tearDown(self): - super(PkgRPMVersionTestCase, self).tearDown() - pkg.getoutput = self.getoutput - - @patch('trove.guestagent.pkg.LOG') - def test_version_no_output(self, mock_logging): - cmd_out = '' - pkg.getoutput = Mock(return_value=cmd_out) - self.assertIsNone(pkg.RedhatPackagerMixin().pkg_version(self.pkgName)) - - def test_version_success(self): - cmd_out = self.pkgVersion - pkg.getoutput = Mock(return_value=cmd_out) - version = pkg.RedhatPackagerMixin().pkg_version(self.pkgName) - self.assertTrue(version) - self.assertEqual(self.pkgVersion, version) - - -class PkgRPMInstallTestCase(trove_testtools.TestCase): - - def setUp(self): - super(PkgRPMInstallTestCase, self).setUp() - self.pkg = pkg.RedhatPackagerMixin() - self.getoutput = pkg.getoutput - self.pkgName = 'packageName' - - p0 = patch('pexpect.spawn') - p0.start() - self.addCleanup(p0.stop) - - p1 = patch('trove.common.utils.execute') - p1.start() - 
self.addCleanup(p1.stop) - - def tearDown(self): - super(PkgRPMInstallTestCase, self).tearDown() - pkg.getoutput = self.getoutput - - def test_pkg_is_installed_no_packages(self): - packages = [] - self.assertTrue(self.pkg.pkg_is_installed(packages)) - - def test_pkg_is_installed_yes(self): - packages = ["package1=1.0", "package2"] - with patch.object(pkg, 'getoutput', MagicMock( - return_value="package1=1.0\n" "package2=2.0")): - self.assertTrue(self.pkg.pkg_is_installed(packages)) - - def test_pkg_is_installed_no(self): - packages = ["package1=1.0", "package2", "package3=3.0"] - with patch.object(pkg, 'getoutput', MagicMock( - return_value="package1=1.0\n" "package2=2.0")): - self.assertFalse(self.pkg.pkg_is_installed(packages)) - - def test_permission_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 0 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_not_found(self): - # test - pexpect.spawn.return_value.expect.return_value = 1 - pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) - # test and verify - self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_conflict_remove(self): - # test - pexpect.spawn.return_value.expect.return_value = 2 - pexpect.spawn.return_value.match = re.match('(.*)', self.pkgName) - self.pkg._rpm_remove_nodeps = Mock() - # test and verify - self.pkg._install(self.pkgName, 5000) - self.assertTrue(self.pkg._rpm_remove_nodeps.called) - - def test_package_conflict_remove_install(self): - with patch.object(self.pkg, '_install', side_effect=[3, 3, 0]): - self.assertTrue( - self.pkg.pkg_install(self.pkgName, {}, 5000) is None) - self.assertEqual(3, self.pkg._install.call_count) - - @patch.object(utils, 'execute') - def test__rpm_remove_nodeps(self, mock_execute): - self.pkg._rpm_remove_nodeps(self.pkgName) - mock_execute.assert_called_with('rpm', '-e', '--nodeps', self.pkgName, - run_as_root=True, root_helper='sudo') - - def test_package_scriptlet_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 5 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgScriptletError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_http_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 6 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_nomirrors_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 7 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgDownloadError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_sign_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 8 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgSignError, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - def test_package_already_installed(self): - # test - pexpect.spawn.return_value.expect.return_value = 9 - pexpect.spawn.return_value.match = False - # test and verify - self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) - - def test_package_success_updated(self): - # test - pexpect.spawn.return_value.expect.return_value = 10 - pexpect.spawn.return_value.match = False - # test and verify - 
self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) - - def test_package_success_installed(self): - # test - pexpect.spawn.return_value.expect.return_value = 11 - pexpect.spawn.return_value.match = False - # test and verify - self.assertTrue(self.pkg.pkg_install(self.pkgName, {}, 5000) is None) - - def test_timeout_error(self): - # test timeout error - pexpect.spawn.return_value.expect.side_effect = ( - pexpect.TIMEOUT('timeout error')) - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_install, - self.pkgName, {}, 5000) - - -class PkgRPMRemoveTestCase(trove_testtools.TestCase): - - def setUp(self): - super(PkgRPMRemoveTestCase, self).setUp() - self.pkg = pkg.RedhatPackagerMixin() - self.pkg_version = self.pkg.pkg_version - self.pkg_install = self.pkg._install - - p0 = patch('pexpect.spawn') - p0.start() - self.addCleanup(p0.stop) - - p1 = patch('trove.common.utils.execute') - p1.start() - self.addCleanup(p1.stop) - - self.pkg.pkg_version = Mock(return_value="OK") - self.pkg._install = Mock(return_value=None) - self.pkgName = 'packageName' - - def tearDown(self): - super(PkgRPMRemoveTestCase, self).tearDown() - self.pkg.pkg_version = self.pkg_version - self.pkg._install = self.pkg_install - - def test_permission_error(self): - # test - pexpect.spawn.return_value.expect.return_value = 0 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgPermissionError, self.pkg.pkg_remove, - self.pkgName, 5000) - - def test_package_not_found(self): - # test - pexpect.spawn.return_value.expect.return_value = 1 - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgNotFoundError, self.pkg.pkg_remove, - self.pkgName, 5000) - - def test_remove_no_pkg_version(self): - # test - pexpect.spawn.return_value.expect.return_value = 2 - pexpect.spawn.return_value.match = False - with patch.object(self.pkg, 'pkg_version', return_value=None): - self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) - - def test_success_remove(self): - # test - pexpect.spawn.return_value.expect.return_value = 2 - pexpect.spawn.return_value.match = False - self.assertTrue(self.pkg.pkg_remove(self.pkgName, 5000) is None) - - def test_timeout_error(self): - # test timeout error - pexpect.spawn.return_value.expect.side_effect = ( - pexpect.TIMEOUT('timeout error')) - pexpect.spawn.return_value.match = False - # test and verify - self.assertRaises(pkg.PkgTimeout, self.pkg.pkg_remove, - self.pkgName, 5000) - - -class PkgDEBFixPackageSelections(trove_testtools.TestCase): - - def setUp(self): - super(PkgDEBFixPackageSelections, self).setUp() - self.pkg = pkg.DebianPackagerMixin() - self.getoutput = pkg.getoutput - - def tearDown(self): - super(PkgDEBFixPackageSelections, self).tearDown() - pkg.getoutput = self.getoutput - - @patch.object(os, 'remove') - @patch.object(pkg, 'NamedTemporaryFile') - @patch.object(utils, 'execute') - def test__fix_package_selections(self, mock_execute, mock_temp_file, - mock_remove): - packages = ["package1"] - config_opts = {'option': 'some_opt'} - pkg.getoutput = Mock( - return_value="* package1/option: some_opt") - self.pkg._fix_package_selections(packages, config_opts) - self.assertEqual(2, mock_execute.call_count) - self.assertEqual(1, mock_remove.call_count) - - @patch.object(os, 'remove') - @patch.object(pkg, 'NamedTemporaryFile') - @patch.object(utils, 'execute', - side_effect=exception.ProcessExecutionError) - def 
test_fail__fix_package_selections(self, mock_execute, mock_temp_file, - mock_remove): - packages = ["package1"] - config_opts = {'option': 'some_opt'} - pkg.getoutput = Mock( - return_value="* package1/option: some_opt") - self.assertRaises(pkg.PkgConfigureError, - self.pkg._fix_package_selections, - packages, config_opts) - self.assertEqual(1, mock_remove.call_count) - - @patch.object(utils, 'execute') - def test__fix(self, mock_execute): - self.pkg._fix(30) - mock_execute.assert_called_with('dpkg', '--configure', '-a', - run_as_root=True, root_helper='sudo') diff --git a/trove/tests/unittests/guestagent/test_query.py b/trove/tests/unittests/guestagent/test_query.py deleted file mode 100644 index 162d4470da..0000000000 --- a/trove/tests/unittests/guestagent/test_query.py +++ /dev/null @@ -1,420 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from trove.guestagent.common import sql_query -from trove.tests.unittests import trove_testtools - - -class QueryTestBase(trove_testtools.TestCase): - def setUp(self): - super(QueryTestBase, self).setUp() - - def tearDown(self): - super(QueryTestBase, self).tearDown() - - -class QueryTest(QueryTestBase): - def setUp(self): - super(QueryTest, self).setUp() - - def tearDown(self): - super(QueryTest, self).tearDown() - - def test_columns(self): - myQuery = sql_query.Query(columns=None) - self.assertEqual("SELECT *", myQuery._columns) - - def test_columns_2(self): - columns = ["col_A", "col_B"] - myQuery = sql_query.Query(columns=columns) - self.assertEqual("SELECT col_A, col_B", myQuery._columns) - - def test_tables(self): - tables = ['table_A', 'table_B'] - myQuery = sql_query.Query(tables=tables) - self.assertEqual("FROM table_A, table_B", myQuery._tables) - - def test_where(self): - myQuery = sql_query.Query(where=None) - self.assertEqual("", myQuery._where) - - def test_where_2(self): - conditions = ['cond_A', 'cond_B'] - myQuery = sql_query.Query(where=conditions) - self.assertEqual("WHERE cond_A AND cond_B", myQuery._where) - - def test_order(self): - myQuery = sql_query.Query(order=None) - self.assertEqual('', myQuery._order) - - def test_order_2(self): - orders = ['deleted_at', 'updated_at'] - myQuery = sql_query.Query(order=orders) - self.assertEqual('ORDER BY deleted_at, updated_at', myQuery._order) - - def test_group_by(self): - myQuery = sql_query.Query(group=None) - self.assertEqual('', myQuery._group_by) - - def test_group_by_2(self): - groups = ['deleted=1'] - myQuery = sql_query.Query(group=groups) - self.assertEqual('GROUP BY deleted=1', myQuery._group_by) - - def test_limit(self): - myQuery = sql_query.Query(limit=None) - self.assertEqual('', myQuery._limit) - - def test_limit_2(self): - limit_count = 20 - myQuery = sql_query.Query(limit=limit_count) - self.assertEqual('LIMIT 20', myQuery._limit) - - -class GrantTest(QueryTestBase): - def setUp(self): - super(GrantTest, self).setUp() - - def tearDown(self): - super(GrantTest, self).tearDown() - - def test_grant_no_arg_constr(self): - grant = 
sql_query.Grant() - self.assertIsNotNone(grant) - self.assertEqual("GRANT USAGE ON *.* " - "TO ``@`%`;", - str(grant)) - - def test_grant_all_with_grant_option(self): - permissions = ['ALL'] - user_name = 'root' - user_password = 'password123' - host = 'localhost' - - # grant_option defaults to True - grant = sql_query.Grant(permissions=permissions, - user=user_name, - host=host, - clear=user_password, - grant_option=True) - - self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " - "`root`@`localhost` " - "IDENTIFIED BY 'password123' " - "WITH GRANT OPTION;", - str(grant)) - - def test_grant_all_with_explicit_grant_option(self): - permissions = ['ALL', 'GRANT OPTION'] - user_name = 'root' - user_password = 'password123' - host = 'localhost' - grant = sql_query.Grant(permissions=permissions, - user=user_name, - host=host, - clear=user_password, - grant_option=True) - - self.assertEqual("GRANT ALL PRIVILEGES ON *.* TO " - "`root`@`localhost` " - "IDENTIFIED BY 'password123' " - "WITH GRANT OPTION;", - str(grant)) - - def test_grant_specify_permissions(self): - permissions = ['ALTER ROUTINE', - 'CREATE', - 'ALTER', - 'CREATE ROUTINE', - 'CREATE TEMPORARY TABLES', - 'CREATE VIEW', - 'CREATE USER', - 'DELETE', - 'DROP', - 'EVENT', - 'EXECUTE', - 'INDEX', - 'INSERT', - 'LOCK TABLES', - 'PROCESS', - 'REFERENCES', - 'SELECT', - 'SHOW DATABASES', - 'SHOW VIEW', - 'TRIGGER', - 'UPDATE', - 'USAGE'] - - user_name = 'root' - user_password = 'password123' - host = 'localhost' - grant = sql_query.Grant(permissions=permissions, - user=user_name, - host=host, - clear=user_password) - - self.assertEqual("GRANT ALTER, " - "ALTER ROUTINE, " - "CREATE, " - "CREATE ROUTINE, " - "CREATE TEMPORARY TABLES, " - "CREATE USER, " - "CREATE VIEW, " - "DELETE, " - "DROP, " - "EVENT, " - "EXECUTE, " - "INDEX, " - "INSERT, " - "LOCK TABLES, " - "PROCESS, " - "REFERENCES, " - "SELECT, " - "SHOW DATABASES, " - "SHOW VIEW, " - "TRIGGER, " - "UPDATE, " - "USAGE ON *.* TO " - "`root`@`localhost` " - "IDENTIFIED BY " - "'password123';", - str(grant)) - - def test_grant_specify_duplicate_permissions(self): - permissions = ['ALTER ROUTINE', - 'CREATE', - 'CREATE', - 'DROP', - 'DELETE', - 'DELETE', - 'ALTER', - 'CREATE ROUTINE', - 'CREATE TEMPORARY TABLES', - 'CREATE VIEW', - 'CREATE USER', - 'DELETE', - 'DROP', - 'EVENT', - 'EXECUTE', - 'INDEX', - 'INSERT', - 'LOCK TABLES', - 'PROCESS', - 'REFERENCES', - 'SELECT', - 'SHOW DATABASES', - 'SHOW VIEW', - 'TRIGGER', - 'UPDATE', - 'USAGE'] - - user_name = 'root' - user_password = 'password123' - host = 'localhost' - grant = sql_query.Grant(permissions=permissions, - user=user_name, - host=host, - clear=user_password) - - self.assertEqual("GRANT ALTER, " - "ALTER ROUTINE, " - "CREATE, " - "CREATE ROUTINE, " - "CREATE TEMPORARY TABLES, " - "CREATE USER, " - "CREATE VIEW, " - "DELETE, " - "DROP, " - "EVENT, " - "EXECUTE, " - "INDEX, " - "INSERT, " - "LOCK TABLES, " - "PROCESS, " - "REFERENCES, " - "SELECT, " - "SHOW DATABASES, " - "SHOW VIEW, " - "TRIGGER, " - "UPDATE, " - "USAGE ON *.* TO " - "`root`@`localhost` " - "IDENTIFIED BY " - "'password123';", - str(grant)) - - -class RevokeTest(QueryTestBase): - def setUp(self): - super(RevokeTest, self).setUp() - - def tearDown(self): - super(RevokeTest, self).tearDown() - - def test_defaults(self): - r = sql_query.Revoke() - # Technically, this isn't valid for MySQL. 
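The comment above flags that the no-argument default renders a statement MySQL would not actually accept, so the assertion that follows pins down only the string-building behaviour, not executable SQL. A minimal sketch of that fall-back-to-defaults rendering, using a hypothetical RevokeSketch class rather than Trove's real sql_query.Revoke:

    # Hypothetical stand-in for sql_query.Revoke: every unset field falls
    # back to a wildcard/empty default, which is why str() yields ``@`%`.
    class RevokeSketch(object):
        def __init__(self, permissions=None, user=None, host=None):
            self.permissions = permissions or ['ALL']  # assumed default
            self.user = user or ''                     # empty -> `` in output
            self.host = host or '%'

        def __str__(self):
            return "REVOKE %s ON *.* FROM `%s`@`%s`;" % (
                ', '.join(self.permissions), self.user, self.host)

    assert str(RevokeSketch()) == "REVOKE ALL ON *.* FROM ``@`%`;"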
- self.assertEqual("REVOKE ALL ON *.* FROM ``@`%`;", str(r)) - - def test_permissions(self): - r = sql_query.Revoke() - r.user = 'x' - r.permissions = ['CREATE', 'DELETE', 'DROP'] - self.assertEqual("REVOKE CREATE, DELETE, DROP ON *.* FROM `x`@`%`;", - str(r)) - - def test_database(self): - r = sql_query.Revoke() - r.user = 'x' - r.database = 'foo' - self.assertEqual("REVOKE ALL ON `foo`.* FROM `x`@`%`;", str(r)) - - def test_table(self): - r = sql_query.Revoke() - r.user = 'x' - r.database = 'foo' - r.table = 'bar' - self.assertEqual("REVOKE ALL ON `foo`.'bar' FROM `x`@`%`;", str(r)) - - def test_user(self): - r = sql_query.Revoke() - r.user = 'x' - self.assertEqual("REVOKE ALL ON *.* FROM `x`@`%`;", str(r)) - - def test_user_host(self): - r = sql_query.Revoke() - r.user = 'x' - r.host = 'y' - self.assertEqual("REVOKE ALL ON *.* FROM `x`@`y`;", str(r)) - - -class CreateDatabaseTest(QueryTestBase): - def setUp(self): - super(CreateDatabaseTest, self).setUp() - - def tearDown(self): - super(CreateDatabaseTest, self).tearDown() - - def test_defaults(self): - cd = sql_query.CreateDatabase('foo') - self.assertEqual("CREATE DATABASE IF NOT EXISTS `foo`;", str(cd)) - - def test_charset(self): - cd = sql_query.CreateDatabase('foo') - cd.charset = "foo" - self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " - "CHARACTER SET = 'foo';"), str(cd)) - - def test_collate(self): - cd = sql_query.CreateDatabase('foo') - cd.collate = "bar" - self.assertEqual(("CREATE DATABASE IF NOT EXISTS `foo` " - "COLLATE = 'bar';"), str(cd)) - - -class DropDatabaseTest(QueryTestBase): - def setUp(self): - super(DropDatabaseTest, self).setUp() - - def tearDown(self): - super(DropDatabaseTest, self).tearDown() - - def test_defaults(self): - dd = sql_query.DropDatabase('foo') - self.assertEqual("DROP DATABASE `foo`;", str(dd)) - - -class CreateUserTest(QueryTestBase): - def setUp(self): - super(CreateUserTest, self).setUp() - - def tearDown(self): - super(CreateUserTest, self).tearDown() - - def test_defaults(self): - username = 'root' - hostname = 'localhost' - password = 'password123' - cu = sql_query.CreateUser(user=username, host=hostname, clear=password) - self.assertEqual("CREATE USER :user@:host " - "IDENTIFIED BY 'password123';", str(cu)) - - -class RenameUserTest(QueryTestBase): - - def setUp(self): - super(RenameUserTest, self).setUp() - - def tearDown(self): - super(RenameUserTest, self).tearDown() - - def test_rename_user(self): - username = 'root' - hostname = 'localhost' - new_user = 'root123' - uu = sql_query.RenameUser(user=username, host=hostname, - new_user=new_user) - self.assertEqual("RENAME USER 'root'@'localhost' " - "TO 'root123'@'localhost';", str(uu)) - - def test_change_host(self): - username = 'root' - hostname = 'localhost' - new_host = '%' - uu = sql_query.RenameUser(user=username, host=hostname, - new_host=new_host) - self.assertEqual("RENAME USER 'root'@'localhost' " - "TO 'root'@'%';", str(uu)) - - def test_change_username_and_hostname(self): - username = 'root' - hostname = 'localhost' - new_user = 'root123' - new_host = '%' - uu = sql_query.RenameUser(user=username, host=hostname, - new_user=new_user, new_host=new_host) - self.assertEqual("RENAME USER 'root'@'localhost' " - "TO 'root123'@'%';", str(uu)) - - -class SetPasswordTest(QueryTestBase): - - def setUp(self): - super(SetPasswordTest, self).setUp() - - def tearDown(self): - super(SetPasswordTest, self).tearDown() - - def test_alter_user(self): - username = 'root' - hostname = 'localhost' - new_password = 'new_password' - uu = 
sql_query.SetPassword(user=username, host=hostname, - new_password=new_password) - self.assertEqual("SET PASSWORD FOR 'root'@'localhost' = " - "PASSWORD('new_password');", str(uu)) - - -class DropUserTest(QueryTestBase): - def setUp(self): - super(DropUserTest, self).setUp() - - def tearDown(self): - super(DropUserTest, self).tearDown() - - def test_defaults(self): - username = 'root' - hostname = 'localhost' - du = sql_query.DropUser(user=username, host=hostname) - self.assertEqual("DROP USER `root`@`localhost`;", str(du)) diff --git a/trove/tests/unittests/guestagent/test_redis_manager.py b/trove/tests/unittests/guestagent/test_redis_manager.py deleted file mode 100644 index 5f3817d69e..0000000000 --- a/trove/tests/unittests/guestagent/test_redis_manager.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mock import DEFAULT, MagicMock, Mock, patch - -from trove.common import utils -from trove.guestagent import backup -from trove.guestagent.common import configuration -from trove.guestagent.common.configuration import ImportOverrideStrategy -from trove.guestagent.common import operating_system -from trove.guestagent.datastore.experimental.redis import ( - service as redis_service) -from trove.guestagent.datastore.experimental.redis.manager import ( - Manager as RedisManager) -from trove.guestagent.volume import VolumeDevice -from trove.tests.unittests.guestagent.test_datastore_manager import \ - DatastoreManagerTest - - -class RedisGuestAgentManagerTest(DatastoreManagerTest): - - @patch.object(redis_service.RedisApp, '_build_admin_client') - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - def setUp(self, *args, **kwargs): - super(RedisGuestAgentManagerTest, self).setUp('redis') - self.patch_ope = patch('os.path.expanduser', - return_value='/tmp/redis') - self.mock_ope = self.patch_ope.start() - self.addCleanup(self.patch_ope.stop) - self.replication_strategy = 'RedisSyncReplication' - self.patch_rs = patch( - 'trove.guestagent.strategies.replication.get_strategy', - return_value=self.replication_strategy) - self.mock_rs = self.patch_rs.start() - self.addCleanup(self.patch_rs.stop) - self.manager = RedisManager() - self.packages = 'redis-server' - self.origin_RedisAppStatus = redis_service.RedisAppStatus - self.origin_start_redis = redis_service.RedisApp.start_db - self.origin_stop_redis = redis_service.RedisApp.stop_db - self.origin_install_redis = redis_service.RedisApp._install_redis - self.origin_install_if_needed = \ - redis_service.RedisApp.install_if_needed - self.origin_format = VolumeDevice.format - self.origin_mount = VolumeDevice.mount - self.origin_mount_points = VolumeDevice.mount_points - self.origin_restore = backup.restore - self.patch_repl = patch( - 'trove.guestagent.strategies.replication.get_instance') - self.mock_repl = self.patch_repl.start() - self.addCleanup(self.patch_repl.stop) - self.patch_gfvs = patch( - 'trove.guestagent.dbaas.get_filesystem_volume_stats') - 
self.mock_gfvs_class = self.patch_gfvs.start() - self.addCleanup(self.patch_gfvs.stop) - - def tearDown(self): - super(RedisGuestAgentManagerTest, self).tearDown() - redis_service.RedisAppStatus = self.origin_RedisAppStatus - redis_service.RedisApp.stop_db = self.origin_stop_redis - redis_service.RedisApp.start_db = self.origin_start_redis - redis_service.RedisApp._install_redis = self.origin_install_redis - redis_service.RedisApp.install_if_needed = \ - self.origin_install_if_needed - VolumeDevice.format = self.origin_format - VolumeDevice.mount = self.origin_mount - VolumeDevice.mount_points = self.origin_mount_points - backup.restore = self.origin_restore - - def test_update_status(self): - mock_status = MagicMock() - mock_status.is_installed = True - mock_status._is_restarting = False - self.manager._app.status = mock_status - self.manager.update_status(self.context) - self.assertTrue(mock_status.set_status.called) - - def test_prepare_redis_not_installed(self): - self._prepare_dynamic(is_redis_installed=False) - - def test_prepare_redis_with_snapshot(self): - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': 1.0}, - 'config': None} - self._prepare_dynamic(snapshot=snapshot) - - @patch.object(redis_service.RedisApp, 'get_working_dir', - MagicMock(return_value='/var/lib/redis')) - def test_prepare_redis_from_backup(self): - self._prepare_dynamic(backup_id='backup_id_123abc') - - @patch.multiple(redis_service.RedisApp, - apply_initial_guestagent_configuration=DEFAULT, - restart=DEFAULT, - install_if_needed=DEFAULT) - @patch.object(operating_system, 'chown') - @patch.object(configuration.ConfigurationManager, 'save_configuration') - def _prepare_dynamic(self, save_configuration_mock, chown_mock, - apply_initial_guestagent_configuration, restart, - install_if_needed, - device_path='/dev/vdb', is_redis_installed=True, - backup_info=None, is_root_enabled=False, - mount_point='var/lib/redis', backup_id=None, - snapshot=None): - - backup_info = None - if backup_id is not None: - backup_info = {'id': backup_id, - 'location': 'fake-location', - 'type': 'RedisBackup', - 'checksum': 'fake-checksum', - } - - # covering all outcomes is starting to cause trouble here - mock_status = MagicMock() - self.manager._app.status = mock_status - self.manager._build_admin_client = MagicMock(return_value=MagicMock()) - redis_service.RedisApp.stop_db = MagicMock(return_value=None) - redis_service.RedisApp.start_db = MagicMock(return_value=None) - mock_status.begin_install = MagicMock(return_value=None) - VolumeDevice.format = MagicMock(return_value=None) - VolumeDevice.mount = MagicMock(return_value=None) - VolumeDevice.mount_points = MagicMock(return_value=[]) - backup.restore = MagicMock(return_value=None) - mock_replication = MagicMock() - mock_replication.enable_as_slave = MagicMock() - self.mock_repl.return_value = mock_replication - - self.manager.prepare(self.context, self.packages, - None, '2048', - None, device_path=device_path, - mount_point=mount_point, - backup_info=backup_info, - overrides=None, - cluster_config=None, - snapshot=snapshot) - - mock_status.begin_install.assert_any_call() - VolumeDevice.format.assert_any_call() - install_if_needed.assert_any_call(self.packages) - save_configuration_mock.assert_any_call(None) - apply_initial_guestagent_configuration.assert_called_once_with() - chown_mock.assert_any_call(mount_point, 'redis', 'redis', as_root=True) - if backup_info: - backup.restore.assert_called_once_with(self.context, - backup_info, - 
'/var/lib/redis') - else: - redis_service.RedisApp.restart.assert_any_call() - - if snapshot: - self.assertEqual(1, mock_replication.enable_as_slave.call_count) - else: - self.assertEqual(0, mock_replication.enable_as_slave.call_count) - - @patch.object(redis_service.RedisApp, 'restart') - def test_restart(self, redis_mock): - self.manager.restart(self.context) - redis_mock.assert_any_call() - - @patch.object(redis_service.RedisApp, 'stop_db') - def test_stop_db(self, redis_mock): - self.manager.stop_db(self.context) - redis_mock.assert_any_call(do_not_start_on_reboot=False) - - @patch.object(ImportOverrideStrategy, '_initialize_import_directory') - @patch.object(backup, 'backup') - @patch.object(configuration.ConfigurationManager, 'parse_configuration', - MagicMock(return_value={'dir': '/var/lib/redis', - 'dbfilename': 'dump.rdb'})) - @patch.object(operating_system, 'chown') - @patch.object(operating_system, 'create_directory') - @patch.object(redis_service.RedisApp, - 'get_config_command_name', - Mock(return_value='fakeconfig')) - def test_create_backup(self, *mocks): - backup.backup = MagicMock(return_value=None) - RedisManager().create_backup(self.context, 'backup_id_123') - backup.backup.assert_any_call(self.context, 'backup_id_123') - - def test_backup_required_for_replication(self): - mock_replication = MagicMock() - mock_replication.backup_required_for_replication = MagicMock() - self.mock_repl.return_value = mock_replication - - self.manager.backup_required_for_replication(self.context) - self.assertEqual( - 1, mock_replication.backup_required_for_replication.call_count) - - @patch.object(redis_service.RedisApp, 'update_overrides') - @patch.object(redis_service.RedisApp, 'remove_overrides') - def test_update_overrides(self, remove_config_mock, update_config_mock): - self.manager.update_overrides(self.context, 'overrides') - remove_config_mock.assert_not_called() - update_config_mock.assert_called_once_with(self.context, 'overrides', - False) - - @patch.object(redis_service.RedisApp, 'update_overrides') - @patch.object(redis_service.RedisApp, 'remove_overrides') - def test_update_overrides_with_remove(self, remove_config_mock, - update_config_mock): - self.manager.update_overrides(self.context, 'overrides', True) - remove_config_mock.assert_called_once_with() - update_config_mock.assert_not_called() - - @patch.object(redis_service.RedisApp, 'apply_overrides') - def test_apply_overrides(self, apply_config_mock): - self.manager.apply_overrides(self.context, 'overrides') - apply_config_mock.assert_called_once_with(self.manager._app.admin, - 'overrides') - - def test_attach_replica(self): - mock_replication = MagicMock() - mock_replication.enable_as_slave = MagicMock() - self.mock_repl.return_value = mock_replication - - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': 1.0}} - - self.manager.attach_replica(self.context, snapshot, None) - self.assertEqual(1, mock_replication.enable_as_slave.call_count) - - def test_detach_replica(self): - mock_replication = MagicMock() - mock_replication.detach_slave = MagicMock() - self.mock_repl.return_value = mock_replication - - self.manager.detach_replica(self.context) - self.assertEqual(1, mock_replication.detach_slave.call_count) - - def test_enable_as_master(self): - mock_replication = MagicMock() - mock_replication.enable_as_master = MagicMock() - self.mock_repl.return_value = mock_replication - - self.manager.enable_as_master(self.context, None) - 
self.assertEqual(mock_replication.enable_as_master.call_count, 1) - - def test_demote_replication_master(self): - mock_replication = MagicMock() - mock_replication.demote_master = MagicMock() - self.mock_repl.return_value = mock_replication - - self.manager.demote_replication_master(self.context) - self.assertEqual(1, mock_replication.demote_master.call_count) - - @patch.object(redis_service.RedisApp, 'make_read_only') - def test_make_read_only(self, redis_mock): - self.manager.make_read_only(self.context, 'ON') - redis_mock.assert_any_call('ON') - - def test_cleanup_source_on_replica_detach(self): - mock_replication = MagicMock() - mock_replication.cleanup_source_on_replica_detach = MagicMock() - self.mock_repl.return_value = mock_replication - snapshot = {'replication_strategy': self.replication_strategy, - 'dataset': {'dataset_size': '1.0'}} - - self.manager.cleanup_source_on_replica_detach(self.context, snapshot) - self.assertEqual( - 1, mock_replication.cleanup_source_on_replica_detach.call_count) - - def test_get_replication_snapshot(self): - snapshot_id = None - log_position = None - master_ref = 'my_master' - used_size = 1.0 - total_size = 2.0 - - mock_replication = MagicMock() - mock_replication.enable_as_master = MagicMock() - mock_replication.snapshot_for_replication = MagicMock( - return_value=(snapshot_id, log_position)) - mock_replication.get_master_ref = MagicMock( - return_value=master_ref) - self.mock_repl.return_value = mock_replication - self.mock_gfvs_class.return_value = ( - {'used': used_size, 'total': total_size}) - - expected_replication_snapshot = { - 'dataset': { - 'datastore_manager': self.manager.manager, - 'dataset_size': used_size, - 'volume_size': total_size, - 'snapshot_id': snapshot_id - }, - 'replication_strategy': self.replication_strategy, - 'master': master_ref, - 'log_position': log_position - } - - snapshot_info = None - replica_source_config = None - replication_snapshot = ( - self.manager.get_replication_snapshot(self.context, snapshot_info, - replica_source_config)) - self.assertEqual(expected_replication_snapshot, replication_snapshot) - self.assertEqual(1, mock_replication.enable_as_master.call_count) - self.assertEqual( - 1, mock_replication.snapshot_for_replication.call_count) - self.assertEqual(1, mock_replication.get_master_ref.call_count) - - def test_get_replica_context(self): - master_ref = { - 'host': '1.2.3.4', - 'port': 3306 - } - expected_info = { - 'master': master_ref, - } - mock_replication = MagicMock() - mock_replication.get_replica_context = MagicMock( - return_value=expected_info) - self.mock_repl.return_value = mock_replication - - replica_info = self.manager.get_replica_context(self.context) - self.assertEqual(1, mock_replication.get_replica_context.call_count) - self.assertEqual(expected_info, replica_info) - - def test_get_last_txn(self): - expected_host = '10.0.0.2' - self.manager._get_master_host = MagicMock(return_value=expected_host) - expected_txn_id = 199 - repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} - self.manager._get_repl_info = MagicMock(return_value=repl_info) - - (host, txn_id) = self.manager.get_last_txn(self.context) - self.manager._get_master_host.assert_any_call() - self.manager._get_repl_info.assert_any_call() - self.assertEqual(expected_host, host) - self.assertEqual(expected_txn_id, txn_id) - - def test_get_latest_txn_id(self): - expected_txn_id = 199 - repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} - self.manager._get_repl_info = 
MagicMock(return_value=repl_info) - latest_txn_id = self.manager.get_latest_txn_id(self.context) - self.assertEqual(expected_txn_id, latest_txn_id) - self.manager._get_repl_info.assert_any_call() - - def test_wait_for_txn(self): - expected_txn_id = 199 - repl_info = {'role': 'master', 'master_repl_offset': expected_txn_id} - self.manager._get_repl_info = MagicMock(return_value=repl_info) - self.manager.wait_for_txn(self.context, expected_txn_id) - self.manager._get_repl_info.assert_any_call() - - @patch.object(configuration.ConfigurationManager, 'apply_system_override') - @patch.object(redis_service.RedisApp, 'apply_overrides') - @patch.object(utils, 'generate_random_password', - return_value='password') - def test_enable_root(self, *mock): - root_user = {'_name': '-', - '_password': 'password'} - - result = self.manager.enable_root(self.context) - self.assertEqual(root_user, result) - - @patch.object(redis_service.RedisApp, 'disable_root') - def test_disable_root(self, disable_root_mock): - self.manager.disable_root(self.context) - disable_root_mock.assert_any_call() - - @patch.object(redis_service.RedisApp, 'get_auth_password', - return_value="password") - def test_get_root_password(self, get_auth_password_mock): - result = self.manager.get_root_password(self.context) - self.assertTrue(get_auth_password_mock.called) - self.assertEqual('password', result) diff --git a/trove/tests/unittests/guestagent/test_service.py b/trove/tests/unittests/guestagent/test_service.py deleted file mode 100644 index d983e00f9e..0000000000 --- a/trove/tests/unittests/guestagent/test_service.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from mock import Mock -from mock import patch -from trove.guestagent import service -from trove.tests.unittests import trove_testtools - - -class ServiceTest(trove_testtools.TestCase): - def setUp(self): - super(ServiceTest, self).setUp() - - def tearDown(self): - super(ServiceTest, self).tearDown() - - @patch.object(service.API, '_instance_router') - def test_app_factory(self, instance_router_mock): - service.app_factory(Mock) - self.assertEqual(1, instance_router_mock.call_count) diff --git a/trove/tests/unittests/guestagent/test_volume.py b/trove/tests/unittests/guestagent/test_volume.py deleted file mode 100644 index 5a7e8cba00..0000000000 --- a/trove/tests/unittests/guestagent/test_volume.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from mock import ANY, call, DEFAULT, patch, mock_open - -from trove.common import cfg -from trove.common import exception -from trove.common import utils -from trove.guestagent.common import operating_system -from trove.guestagent import volume -from trove.tests.unittests import trove_testtools - - -CONF = cfg.CONF - - -class VolumeDeviceTest(trove_testtools.TestCase): - - def setUp(self): - super(VolumeDeviceTest, self).setUp() - self.patch_conf_property('volume_fstype', 'ext3') - self.patch_conf_property('format_options', '-m 5') - self.volumeDevice = volume.VolumeDevice('/dev/vdb') - - self.exec_patcher = patch.object( - utils, 'execute', return_value=('has_journal', '')) - self.mock_exec = self.exec_patcher.start() - self.addCleanup(self.exec_patcher.stop) - self.ismount_patcher = patch.object(operating_system, 'is_mount') - self.mock_ismount = self.ismount_patcher.start() - self.addCleanup(self.ismount_patcher.stop) - - def tearDown(self): - super(VolumeDeviceTest, self).tearDown() - - def test_migrate_data(self): - with patch.multiple(self.volumeDevice, - mount=DEFAULT, unmount=DEFAULT) as mocks: - self.volumeDevice.migrate_data('/') - self.assertEqual(1, mocks['mount'].call_count) - self.assertEqual(1, mocks['unmount'].call_count) - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('rsync', '--safe-links', '--perms', '--recursive', - '--owner', '--group', '--xattrs', - '--sparse', '/', '/mnt/volume', - root_helper='sudo', run_as_root=True), - ] - self.mock_exec.assert_has_calls(calls) - - def test__check_device_exists(self): - self.volumeDevice._check_device_exists() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - @patch('trove.guestagent.volume.LOG') - def test_fail__check_device_exists(self, mock_logging): - with patch.object(utils, 'execute', - side_effect=exception.ProcessExecutionError): - self.assertRaises(exception.GuestError, - self.volumeDevice._check_device_exists) - - def test__check_format(self): - self.volumeDevice._check_format() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - @patch('trove.guestagent.volume.LOG') - def test__check_format_2(self, mock_logging): - self.assertEqual(0, self.mock_exec.call_count) - proc_err = exception.ProcessExecutionError() - proc_err.stderr = 'Wrong magic number' - self.mock_exec.side_effect = proc_err - self.assertRaises(exception.GuestError, - self.volumeDevice._check_format) - - def test__format(self): - self.volumeDevice._format() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb', - root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - def test_format(self): - self.volumeDevice.format() - self.assertEqual(3, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True), - call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb', - root_helper='sudo', run_as_root=True), - call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - def test_mount(self): - with patch.multiple(volume.VolumeMountPoint, - 
mount=DEFAULT, write_to_fstab=DEFAULT) as mocks: - self.volumeDevice.mount('/dev/vba') - self.assertEqual(1, mocks['mount'].call_count, - "Wrong number of calls to mount()") - self.assertEqual(1, mocks['write_to_fstab'].call_count, - "Wrong number of calls to write_to_fstab()") - self.mock_exec.assert_not_called() - - def test_resize_fs(self): - with patch.object(operating_system, 'is_mount', return_value=True): - mount_point = '/mnt/volume' - self.volumeDevice.resize_fs(mount_point) - self.assertEqual(4, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True), - call("umount", mount_point, run_as_root=True, - root_helper='sudo'), - call('e2fsck', '-f', '-p', '/dev/vdb', root_helper='sudo', - run_as_root=True), - call('resize2fs', '/dev/vdb', root_helper='sudo', - run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - @patch.object(utils, 'execute', - side_effect=exception.ProcessExecutionError) - @patch('trove.guestagent.volume.LOG') - def test_fail_resize_fs(self, mock_logging, mock_execute): - with patch.object(self.volumeDevice, '_check_device_exists'): - self.assertRaises(exception.GuestError, - self.volumeDevice.resize_fs, '/mnt/volume') - self.assertEqual(1, - self.volumeDevice._check_device_exists.call_count) - self.assertEqual(2, self.mock_ismount.call_count) - - def test_unmount_positive(self): - self._test_unmount() - - def test_unmount_negative(self): - self._test_unmount(has_mount=False) - - def _test_unmount(self, has_mount=True): - with patch.object(operating_system, 'is_mount', - return_value=has_mount): - self.volumeDevice.unmount('/mnt/volume') - if has_mount: - self.assertEqual(1, self.mock_exec.call_count) - else: - self.mock_exec.assert_not_called() - - def test_mount_points(self): - self.mock_exec.return_value = ( - ("/dev/vdb /var/lib/mysql xfs rw 0 0", "")) - mount_point = self.volumeDevice.mount_points('/dev/vdb') - self.assertEqual(['/var/lib/mysql'], mount_point) - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call("grep '^/dev/vdb ' /etc/mtab", check_exit_code=[0, 1], - shell=True) - ] - self.mock_exec.assert_has_calls(calls) - - def test_set_readahead_size(self): - readahead_size = 2048 - self.volumeDevice.set_readahead_size(readahead_size) - self.assertEqual(2, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True), - call('blockdev', '--setra', readahead_size, '/dev/vdb', - root_helper='sudo', run_as_root=True), - ] - self.mock_exec.assert_has_calls(calls) - - @patch('trove.guestagent.volume.LOG') - def test_fail_set_readahead_size(self, mock_logging): - self.mock_exec.side_effect = exception.ProcessExecutionError - readahead_size = 2048 - self.assertRaises(exception.GuestError, - self.volumeDevice.set_readahead_size, - readahead_size) - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True), - ] - self.mock_exec.assert_has_calls(calls) - - -class VolumeDeviceTestXFS(trove_testtools.TestCase): - - def setUp(self): - super(VolumeDeviceTestXFS, self).setUp() - self.patch_conf_property('volume_fstype', 'xfs') - self.patch_conf_property('format_options', '') - self.volumeDevice = volume.VolumeDevice('/dev/vdb') - - self.exec_patcher = patch.object( - utils, 'execute', return_value=('', '')) - self.mock_exec = self.exec_patcher.start() - self.addCleanup(self.exec_patcher.stop) 
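These setUp bodies consistently use the patcher start()/addCleanup(stop) idiom rather than the decorator form, which keeps the mock active for every test method and guarantees it is reverted even if a later setUp step fails. A self-contained sketch of the idiom, using stdlib unittest.mock and illustrative names:

    import os.path
    import unittest
    from unittest import mock

    class PatcherIdiomExample(unittest.TestCase):
        def setUp(self):
            patcher = mock.patch('os.path.exists', return_value=True)
            self.mock_exists = patcher.start()
            # stop() is registered immediately, so the patch is undone
            # even when a subsequent setUp step or the test itself raises.
            self.addCleanup(patcher.stop)

        def test_patched(self):
            self.assertTrue(os.path.exists('/no/such/path'))

    if __name__ == '__main__':
        unittest.main()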
- self.ismount_patcher = patch.object(operating_system, 'is_mount') - self.mock_ismount = self.ismount_patcher.start() - self.addCleanup(self.ismount_patcher.stop) - - def tearDown(self): - super(VolumeDeviceTestXFS, self).tearDown() - self.volumeDevice = None - - def test__check_format(self): - self.volumeDevice._check_format() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('xfs_admin', '-l', '/dev/vdb', - root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - @patch('trove.guestagent.volume.LOG') - @patch.object(utils, 'execute', - return_value=('not a valid XFS filesystem', '')) - def test__check_format_2(self, mock_logging, mock_exec): - self.assertRaises(exception.GuestError, - self.volumeDevice._check_format) - - def test__format(self): - self.volumeDevice._format() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('mkfs.xfs', '/dev/vdb', - root_helper='sudo', run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - def test_resize_fs(self): - with patch.object(operating_system, 'is_mount', return_value=True): - mount_point = '/mnt/volume' - self.volumeDevice.resize_fs(mount_point) - self.assertEqual(6, self.mock_exec.call_count) - calls = [ - call('blockdev', '--getsize64', '/dev/vdb', attempts=3, - root_helper='sudo', run_as_root=True), - call("umount", mount_point, run_as_root=True, - root_helper='sudo'), - call('xfs_repair', '/dev/vdb', root_helper='sudo', - run_as_root=True), - call('mount', '/dev/vdb', root_helper='sudo', - run_as_root=True), - call('xfs_growfs', '/dev/vdb', root_helper='sudo', - run_as_root=True), - call('umount', '/dev/vdb', root_helper='sudo', - run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - -class VolumeMountPointTest(trove_testtools.TestCase): - - def setUp(self): - super(VolumeMountPointTest, self).setUp() - self.patch_conf_property('volume_fstype', 'ext3') - self.patch_conf_property('format_options', '-m 5') - self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device', - '/dev/vdb') - self.exec_patcher = patch.object(utils, 'execute', - return_value=('', '')) - self.mock_exec = self.exec_patcher.start() - self.addCleanup(self.exec_patcher.stop) - - def tearDown(self): - super(VolumeMountPointTest, self).tearDown() - - def test_mount(self): - with patch.object(operating_system, 'exists', return_value=False): - self.volumeMountPoint.mount() - self.assertEqual(2, self.mock_exec.call_count) - calls = [ - call('mkdir', '-p', '/dev/vdb', root_helper='sudo', - run_as_root=True), - call('mount', '-t', 'ext3', '-o', 'defaults,noatime', - '/mnt/device', '/dev/vdb', root_helper='sudo', - run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) - - def test_write_to_fstab(self): - mock_file = mock_open() - with patch('%s.open' % volume.__name__, mock_file, create=True): - self.volumeMountPoint.write_to_fstab() - self.assertEqual(1, self.mock_exec.call_count) - calls = [ - call('install', '-o', 'root', '-g', 'root', '-m', '644', - ANY, '/etc/fstab', root_helper='sudo', - run_as_root=True) - ] - self.mock_exec.assert_has_calls(calls) diff --git a/trove/tests/unittests/taskmanager/test_api.py b/trove/tests/unittests/taskmanager/test_api.py index 9bc6b33551..bf265f40c2 100644 --- a/trove/tests/unittests/taskmanager/test_api.py +++ b/trove/tests/unittests/taskmanager/test_api.py @@ -66,7 +66,7 @@ class ApiTest(trove_testtools.TestCase): instance_id='inst-id', locality='affinity', modules=['mod-id'], name='inst-name', nics=['nic-id'], overrides={}, packages=None, 
root_password='pwd', slave_of_id='slv-id', users={'name': 'usr1'}, - volume_size=1, volume_type='type', access=None) + volume_size=1, volume_type='type', access=None, ds_version=None) def test_detach_replica(self): self.api.detach_replica('some-instance-id') diff --git a/trove/tests/unittests/taskmanager/test_manager.py b/trove/tests/unittests/taskmanager/test_manager.py index 7ac006e906..610f79f35d 100644 --- a/trove/tests/unittests/taskmanager/test_manager.py +++ b/trove/tests/unittests/taskmanager/test_manager.py @@ -207,8 +207,8 @@ class TestManager(trove_testtools.TestCase): 'some-master-id', None, None, None, None) mock_tasks.get_replication_master_snapshot.assert_called_with( - self.context, 'some-master-id', mock_flavor, 'temp-backup-id', - replica_number=1) + self.context, 'some-master-id', mock_flavor, + parent_backup_id='temp-backup-id') mock_backup_delete.assert_called_with(self.context, 'test-id') @patch.object(models.FreshInstanceTasks, 'load') @@ -248,17 +248,18 @@ class TestManager(trove_testtools.TestCase): 'temp-backup-id', None, 'password', None, mock_override, None, None, None, None, 'affinity') - mock_tasks.create_instance.assert_called_with(mock_flavor, - 'mysql-image-id', None, - None, 'mysql', - 'mysql-server', 2, - 'temp-backup-id', None, - 'password', None, - mock_override, - None, None, None, None, - {'group': 'sg-id'}, - access=None) - mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor) + mock_tasks.create_instance.assert_called_with( + mock_flavor, + 'mysql-image-id', None, + None, 'mysql', + 'mysql-server', 2, + 'temp-backup-id', None, + 'password', None, + mock_override, + None, None, None, None, + {'group': 'sg-id'}, + access=None, ds_version=None) + mock_tasks.wait_for_instance.assert_called_with(3600, mock_flavor) def test_create_cluster(self): mock_tasks = Mock() diff --git a/trove/tests/unittests/taskmanager/test_models.py b/trove/tests/unittests/taskmanager/test_models.py index a3cda332f5..f3315f71b7 100644 --- a/trove/tests/unittests/taskmanager/test_models.py +++ b/trove/tests/unittests/taskmanager/test_models.py @@ -378,7 +378,7 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest): ) mock_guest_prepare.assert_called_with( 768, mock_build_volume_info(), 'mysql-server', None, None, None, - config_content, None, overrides, None, None, None + config_content, None, overrides, None, None, None, ds_version=None ) mock_create_server.assert_called_with( 8, 'mysql-image-id', 'mysql', @@ -440,7 +440,7 @@ class FreshInstanceTasksTest(BaseFreshInstanceTasksTest): ) mock_guest_prepare.assert_called_with( 768, mock_build_volume_info(), 'mysql-server', None, None, None, - config_content, None, mock.ANY, None, None, None) + config_content, None, mock.ANY, None, None, None, ds_version=None) mock_create_server.assert_called_with( 8, 'mysql-image-id', 'mysql', mock_build_volume_info()['block_device'], None, @@ -822,8 +822,7 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase): self.new_flavor) # verify self.assertIsNot(self.instance_task.server, orig_server) - self.instance_task._guest.stop_db.assert_any_call( - do_not_start_on_reboot=True) + self.assertEqual(1, self.instance_task._guest.stop_db.call_count) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) self.assertEqual(1, self.stub_server_mgr.get.call_count) @@ -842,8 +841,7 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase): self.assertTrue(self.stub_server_mgr.get.called) 
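The resize-flavor hunks around this point relax exact-argument assertions (stop_db.assert_any_call(do_not_start_on_reboot=True)) into bare call-count checks, presumably because the containerized code path no longer passes that keyword. A sketch of the difference, using stdlib unittest.mock; 'guest' is an illustrative stand-in for instance_task._guest:

    from unittest import mock

    guest = mock.Mock()
    guest.stop_db()                          # new code path: no kwargs

    guest.stop_db.assert_any_call()          # passes
    assert guest.stop_db.call_count == 1     # passes regardless of kwargs
    try:
        # the old, stricter assertion no longer holds
        guest.stop_db.assert_any_call(do_not_start_on_reboot=True)
    except AssertionError:
        pass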
self.assertIs(self.instance_task.server, self.stub_verifying_server) - self.instance_task._guest.stop_db.assert_any_call( - do_not_start_on_reboot=True) + self.assertEqual(1, self.instance_task._guest.stop_db.call_count) orig_server.resize.assert_any_call(self.new_flavor['id']) self.assertThat(self.db_instance.task_status, Is(InstanceTasks.NONE)) @@ -852,11 +850,12 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase): @patch.object(utils, 'poll_until') def test_reboot(self, mock_poll): self.instance_task.server.reboot = Mock() - self.instance_task.set_datastore_status_to_paused = Mock() + self.instance_task.reboot() + self.instance_task._guest.stop_db.assert_any_call() self.instance_task.server.reboot.assert_any_call() - self.instance_task.set_datastore_status_to_paused.assert_any_call() + self.instance_task._guest.restart.assert_any_call() @patch.object(BaseInstance, 'update_db') def test_detach_replica(self, mock_update_db): @@ -926,34 +925,6 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase): self.assertRaises(GuestError, self.instance_task.attach_replica, Mock()) - def test_get_floating_ips(self): - floating_ips = self.instance_task._get_floating_ips() - self.assertEqual({'192.168.10.1': 'fake-floatingip-id'}, - floating_ips) - - @patch.object(BaseInstance, 'get_visible_ip_addresses', - return_value=[{'address': '192.168.10.1', 'type': 'public'}]) - def test_detach_public_ips(self, mock_address): - removed_ips = self.instance_task.detach_public_ips() - self.assertEqual(['fake-floatingip-id'], removed_ips) - mock_update_floatingip = (self.instance_task.neutron_client - .update_floatingip) - mock_update_floatingip.assert_called_once_with( - removed_ips[0], {'floatingip': {'port_id': None}}) - - def test_attach_public_ips(self): - self.instance_task.attach_public_ips(['fake-floatingip-id']) - mock_list_ports = (self.instance_task.neutron_client - .list_ports) - mock_list_ports.assert_called_once_with(device_id='computeinst-id-1') - - mock_update_floatingip = (self.instance_task.neutron_client - .update_floatingip) - mock_update_floatingip.assert_called_once_with( - 'fake-floatingip-id', - {'floatingip': {'port_id': 'fake-port-id', - 'fixed_ip_address': '10.0.0.1'}}) - @patch.object(BaseInstance, 'update_db') def test_enable_as_master(self, mock_update_db): test_func = self.instance_task._guest.enable_as_master diff --git a/trove/tests/util/server_connection.py b/trove/tests/util/server_connection.py index 659d6d0c5b..0b06af77dd 100644 --- a/trove/tests/util/server_connection.py +++ b/trove/tests/util/server_connection.py @@ -62,7 +62,7 @@ class ServerSSHConnection(object): retry=tenacity.retry_if_exception_type(subprocess.CalledProcessError) ) def execute(self, cmd): - exe_cmd = "%s %s %s" % (tests.SSH_CMD, self.ip_address, cmd) + exe_cmd = "%s %s '%s'" % (tests.SSH_CMD, self.ip_address, cmd) print("RUNNING COMMAND: %s" % exe_cmd) output = util.process(exe_cmd)
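The final hunk wraps the remote command in single quotes so that all of cmd reaches ssh as one argument: unquoted, the local shell word-splits cmd, and shell operators such as | or ; are interpreted locally instead of on the guest. A sketch under assumed values (the SSH_CMD stand-in and the command are illustrative; note that simple '%s' quoting still breaks if cmd itself contains a single quote):

    ssh_cmd = "ssh -o StrictHostKeyChecking=no"  # stand-in for tests.SSH_CMD
    ip_address = "10.0.0.5"
    cmd = "sudo docker ps | grep mariadb"

    unquoted = "%s %s %s" % (ssh_cmd, ip_address, cmd)   # pipe runs locally
    quoted = "%s %s '%s'" % (ssh_cmd, ip_address, cmd)   # pipe runs on the guest
    print(quoted)
    # ssh -o StrictHostKeyChecking=no 10.0.0.5 'sudo docker ps | grep mariadb'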