diff --git a/doc/saio/swift/container-server/1.conf b/doc/saio/swift/container-server/1.conf
index c2530dcc07..0e89ff4d64 100644
--- a/doc/saio/swift/container-server/1.conf
+++ b/doc/saio/swift/container-server/1.conf
@@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
 shard_container_size = 100
 # The probe tests make explicit assumptions about the batch sizes
 shard_scanner_batch_size = 10
-shard_batch_size = 2
+cleave_batch_size = 2
diff --git a/doc/saio/swift/container-server/2.conf b/doc/saio/swift/container-server/2.conf
index 861ee4d162..ad63666457 100644
--- a/doc/saio/swift/container-server/2.conf
+++ b/doc/saio/swift/container-server/2.conf
@@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
 shard_container_size = 100
 # The probe tests make explicit assumptions about the batch sizes
 shard_scanner_batch_size = 10
-shard_batch_size = 2
+cleave_batch_size = 2
diff --git a/doc/saio/swift/container-server/3.conf b/doc/saio/swift/container-server/3.conf
index bafe302ac8..8ed6afaf72 100644
--- a/doc/saio/swift/container-server/3.conf
+++ b/doc/saio/swift/container-server/3.conf
@@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
 shard_container_size = 100
 # The probe tests make explicit assumptions about the batch sizes
 shard_scanner_batch_size = 10
-shard_batch_size = 2
+cleave_batch_size = 2
diff --git a/doc/saio/swift/container-server/4.conf b/doc/saio/swift/container-server/4.conf
index c5af7f8f28..ad1e516e2e 100644
--- a/doc/saio/swift/container-server/4.conf
+++ b/doc/saio/swift/container-server/4.conf
@@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
 shard_container_size = 100
 # The probe tests make explicit assumptions about the batch sizes
 shard_scanner_batch_size = 10
-shard_batch_size = 2
+cleave_batch_size = 2
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index d49aa2afc5..f2f13a3c54 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -800,9 +800,6 @@ class Replicator(Daemon):
             if elapsed < self.interval:
                 sleep(self.interval - elapsed)
 
-    def _is_locked(self, broker):
-        return False
-
 
 class ReplicatorRpc(object):
     """Handle Replication RPC calls.  TODO(redbo): document please :)"""
diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py
index 480e4863ec..319c0f3757 100644
--- a/swift/common/exceptions.py
+++ b/swift/common/exceptions.py
@@ -280,7 +280,3 @@ class ClientException(Exception):
 
 class InvalidPidFileException(Exception):
     pass
-
-
-class RangeAnalyserException(SwiftException):
-    pass
diff --git a/swift/container/backend.py b/swift/container/backend.py
index 29b2143224..a4d8c19ff0 100644
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -49,12 +49,6 @@ SHARDED = 3
 COLLAPSED = 4
 DB_STATES = ['not_found', 'unsharded', 'sharding', 'sharded', 'collapsed']
 
-SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
-                      ShardRange.SHRINKING]
-SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
-SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
-                       ShardRange.ACTIVE, ShardRange.SHARDING]
-
 
 def db_state_text(state):
     try:
@@ -63,6 +57,13 @@ def db_state_text(state):
         return 'unknown (%d)' % state
 
 
+SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
+                      ShardRange.SHRINKING]
+SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
+SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
+                       ShardRange.ACTIVE, ShardRange.SHARDING]
+
+
 # attribute names in order used when transforming shard ranges from dicts to
 # tuples and vice-versa
 SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
@@ -420,45 +421,6 @@ class ContainerBroker(DatabaseBroker):
             hash_, epoch, ext = parse_db_filename(self.db_file)
         return epoch
 
-    # TODO: needs unit test
-    def update_sharding_info(self, info):
-        """
-        Updates the broker's metadata with the given ``info``. Each key in
-        ``info`` is prefixed with a sharding specific namespace.
-
-        :param info: a dict of info to be persisted
-        """
-        prefix = 'X-Container-Sysmeta-Shard-'
-        timestamp = Timestamp.now()
-        metadata = dict(
-            ('%s%s' % (prefix, key),
-             (value, timestamp.internal))
-            for key, value in info.items()
-        )
-        self.update_metadata(metadata)
-
-    # TODO: needs unit test
-    def get_sharding_info(self, key=None, default=None):
-        """
-        Returns sharding specific info from the broker's metadata.
-
-        :param key: if given the value stored under ``key`` in the sharding
-            info will be returned. If ``key`` is not found in the info then the
-            value of ``default`` will be returned or None if ``default`` is not
-            given.
-        :param default: a default value to return if ``key`` is given but not
-            found in the sharding info.
-        :return: either a dict of sharding info or the value stored under
-            ``key`` in that dict.
-        """
-        prefix = 'X-Container-Sysmeta-Shard-'
-        metadata = self.metadata
-        info = dict((k[len(prefix):], v[0]) for
-                    k, v in metadata.items() if k.startswith(prefix))
-        if key:
-            return info.get(key, default)
-        return info
-
     @property
     def storage_policy_index(self):
         if not hasattr(self, '_storage_policy_index'):
@@ -466,6 +428,13 @@ class ContainerBroker(DatabaseBroker):
                 self.get_info()['storage_policy_index']
         return self._storage_policy_index
 
+    @property
+    def path(self):
+        if self.container is None:
+            # Ensure account/container get populated
+            self.get_info()
+        return '%s/%s' % (self.account, self.container)
+
     def _initialize(self, conn, put_timestamp, storage_policy_index):
         """
         Create a brand new container database (tables, indices, triggers, etc.)
@@ -837,7 +806,7 @@ class ContainerBroker(DatabaseBroker):
                 except sqlite3.OperationalError as err:
                     err_msg = str(err)
                     if err_msg in errors:
-                        # only attempt each migration once
                         raise
                     errors.add(err_msg)
                     if 'no such column: storage_policy_index' in err_msg:
@@ -1954,6 +1923,45 @@ class ContainerBroker(DatabaseBroker):
             brokers.append(self)
         return brokers
 
+    # TODO: needs unit test
+    def update_sharding_info(self, info):
+        """
+        Updates the broker's metadata with the given ``info``. Each key in
+        ``info`` is prefixed with a sharding specific namespace.
+
+        :param info: a dict of info to be persisted
+        """
+        prefix = 'X-Container-Sysmeta-Shard-'
+        timestamp = Timestamp.now()
+        metadata = dict(
+            ('%s%s' % (prefix, key),
+             (value, timestamp.internal))
+            for key, value in info.items()
+        )
+        self.update_metadata(metadata)
+
+    # TODO: needs unit test
+    def get_sharding_info(self, key=None, default=None):
+        """
+        Returns sharding specific info from the broker's metadata.
+
+        :param key: if given the value stored under ``key`` in the sharding
+            info will be returned. If ``key`` is not found in the info then the
+            value of ``default`` will be returned or None if ``default`` is not
+            given.
+        :param default: a default value to return if ``key`` is given but not
+            found in the sharding info.
+        :return: either a dict of sharding info or the value stored under
+            ``key`` in that dict.
+        """
+        prefix = 'X-Container-Sysmeta-Shard-'
+        metadata = self.metadata
+        info = dict((k[len(prefix):], v[0]) for
+                    k, v in metadata.items() if k.startswith(prefix))
+        if key:
+            return info.get(key, default)
+        return info
+
     def get_items_since(self, start, count, include_sharding=False):
         """
         Get a list of objects in the database between start and end.
@@ -2003,13 +2011,6 @@ class ContainerBroker(DatabaseBroker):
                 "of the form 'account/container', got %r" % path)
         self._root_account, self._root_container = tuple(path.split('/'))
 
-    @property
-    def path(self):
-        if self.container is None:
-            # Ensure account/container get populated
-            self.get_info()
-        return '%s/%s' % (self.account, self.container)
-
     @property
     def root_account(self):
         if not self._root_account:
diff --git a/swift/container/server.py b/swift/container/server.py
index 94295a4640..3e61e66b76 100644
--- a/swift/container/server.py
+++ b/swift/container/server.py
@@ -35,8 +35,7 @@ from swift.common.request_helpers import get_param, \
 from swift.common.utils import get_logger, hash_path, public, \
     Timestamp, storage_directory, validate_sync_to, \
     config_true_value, timing_stats, replication, \
-    override_bytes_from_content_type, get_log_line, whataremyips, ShardRange, \
-    list_from_csv
+    override_bytes_from_content_type, get_log_line, ShardRange, list_from_csv
 from swift.common.constraints import valid_timestamp, check_utf8, check_drive
 from swift.common import constraints
@@ -98,13 +97,12 @@ class ContainerController(BaseStorageServer):
         self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.node_timeout = float(conf.get('node_timeout', 3))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
-        swift_dir = conf.get('swift_dir', '/etc/swift')
         #: ContainerSyncCluster instance for validating sync-to values.
         self.realms_conf = ContainerSyncRealms(
-            os.path.join(swift_dir, 'container-sync-realms.conf'),
+            os.path.join(
+                conf.get('swift_dir', '/etc/swift'),
+                'container-sync-realms.conf'),
             self.logger)
-        self.ips = whataremyips()
-        self.port = int(conf.get('bind_port', 6201))
         #: The list of hosts we're allowed to send syncs to. This can be
         #: overridden by data in self.realms_conf
         self.allowed_sync_hosts = [
@@ -527,8 +525,10 @@ class ContainerController(BaseStorageServer):
         """
         Perform any mutations to container listing records that are common to
         all serialization formats, and returns it as a dict.
+
         Converts created time to iso timestamp.
         Replaces size with 'swift_bytes' content type parameter.
+
         :params record: object entry record
         :returns: modified record
         """
@@ -618,7 +618,6 @@ class ContainerController(BaseStorageServer):
             limit, marker, end_marker, prefix, delimiter, path,
             storage_policy_index=info['storage_policy_index'],
             reverse=reverse)
-
         return self.create_listing(req, out_content_type, info, resp_headers,
                                    broker.metadata, container_list, container)
 
diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py
index 233f507919..0ab0107a44 100644
--- a/swift/proxy/controllers/base.py
+++ b/swift/proxy/controllers/base.py
@@ -64,6 +64,7 @@ from swift.common.request_helpers import strip_sys_meta_prefix, \
     strip_object_transient_sysmeta_prefix
 from swift.common.storage_policy import POLICIES
 
+
 DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60  # seconds
 DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60  # seconds
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 743ad6b18d..8e67abb009 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -35,7 +35,6 @@ from tempfile import mkdtemp, NamedTemporaryFile
 import weakref
 import operator
 import functools
-
 from swift.obj import diskfile
 import re
 import random