Cleanup for review

reverting unnecessary changes from master

relocating some methods within modules

Change-Id: I33a46f4daa99e57d946793323d9396a2ad62cd1a
This commit is contained in:
Alistair Coles 2018-05-02 09:29:48 +01:00
parent b72d040884
commit 28514903e0
10 changed files with 65 additions and 72 deletions

View File

@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
shard_container_size = 100 shard_container_size = 100
# The probe tests make explicit assumptions about the batch sizes # The probe tests make explicit assumptions about the batch sizes
shard_scanner_batch_size = 10 shard_scanner_batch_size = 10
shard_batch_size = 2 cleave_batch_size = 2

View File

@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
shard_container_size = 100 shard_container_size = 100
# The probe tests make explicit assumptions about the batch sizes # The probe tests make explicit assumptions about the batch sizes
shard_scanner_batch_size = 10 shard_scanner_batch_size = 10
shard_batch_size = 2 cleave_batch_size = 2

View File

@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
shard_container_size = 100 shard_container_size = 100
# The probe tests make explicit assumptions about the batch sizes # The probe tests make explicit assumptions about the batch sizes
shard_scanner_batch_size = 10 shard_scanner_batch_size = 10
shard_batch_size = 2 cleave_batch_size = 2

View File

@ -36,4 +36,4 @@ rsync_module = {replication_ip}::container{replication_port}
shard_container_size = 100 shard_container_size = 100
# The probe tests make explicit assumptions about the batch sizes # The probe tests make explicit assumptions about the batch sizes
shard_scanner_batch_size = 10 shard_scanner_batch_size = 10
shard_batch_size = 2 cleave_batch_size = 2

View File

@ -800,9 +800,6 @@ class Replicator(Daemon):
if elapsed < self.interval: if elapsed < self.interval:
sleep(self.interval - elapsed) sleep(self.interval - elapsed)
def _is_locked(self, broker):
return False
class ReplicatorRpc(object): class ReplicatorRpc(object):
"""Handle Replication RPC calls. TODO(redbo): document please :)""" """Handle Replication RPC calls. TODO(redbo): document please :)"""

View File

@ -280,7 +280,3 @@ class ClientException(Exception):
class InvalidPidFileException(Exception): class InvalidPidFileException(Exception):
pass pass
class RangeAnalyserException(SwiftException):
pass

View File

@ -49,12 +49,6 @@ SHARDED = 3
COLLAPSED = 4 COLLAPSED = 4
DB_STATES = ['not_found', 'unsharded', 'sharding', 'sharded', 'collapsed'] DB_STATES = ['not_found', 'unsharded', 'sharding', 'sharded', 'collapsed']
SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING]
SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING]
def db_state_text(state): def db_state_text(state):
try: try:
@ -63,6 +57,13 @@ def db_state_text(state):
return 'unknown (%d)' % state return 'unknown (%d)' % state
SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING]
SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING]
# attribute names in order used when transforming shard ranges from dicts to # attribute names in order used when transforming shard ranges from dicts to
# tuples and vice-versa # tuples and vice-versa
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count', SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
@ -420,45 +421,6 @@ class ContainerBroker(DatabaseBroker):
hash_, epoch, ext = parse_db_filename(self.db_file) hash_, epoch, ext = parse_db_filename(self.db_file)
return epoch return epoch
# TODO: needs unit test
def update_sharding_info(self, info):
"""
Updates the broker's metadata with the given ``info``. Each key in
``info`` is prefixed with a sharding specific namespace.
:param info: a dict of info to be persisted
"""
prefix = 'X-Container-Sysmeta-Shard-'
timestamp = Timestamp.now()
metadata = dict(
('%s%s' % (prefix, key),
(value, timestamp.internal))
for key, value in info.items()
)
self.update_metadata(metadata)
# TODO: needs unit test
def get_sharding_info(self, key=None, default=None):
"""
Returns sharding specific info from the broker's metadata.
:param key: if given the value stored under ``key`` in the sharding
info will be returned. If ``key`` is not found in the info then the
value of ``default`` will be returned or None if ``default`` is not
given.
:param default: a default value to return if ``key`` is given but not
found in the sharding info.
:return: either a dict of sharding info or the value stored under
``key`` in that dict.
"""
prefix = 'X-Container-Sysmeta-Shard-'
metadata = self.metadata
info = dict((k[len(prefix):], v[0]) for
k, v in metadata.items() if k.startswith(prefix))
if key:
return info.get(key, default)
return info
@property @property
def storage_policy_index(self): def storage_policy_index(self):
if not hasattr(self, '_storage_policy_index'): if not hasattr(self, '_storage_policy_index'):
@ -466,6 +428,13 @@ class ContainerBroker(DatabaseBroker):
self.get_info()['storage_policy_index'] self.get_info()['storage_policy_index']
return self._storage_policy_index return self._storage_policy_index
@property
def path(self):
if self.container is None:
# Ensure account/container get populated
self.get_info()
return '%s/%s' % (self.account, self.container)
def _initialize(self, conn, put_timestamp, storage_policy_index): def _initialize(self, conn, put_timestamp, storage_policy_index):
""" """
Create a brand new container database (tables, indices, triggers, etc.) Create a brand new container database (tables, indices, triggers, etc.)
@ -837,7 +806,7 @@ class ContainerBroker(DatabaseBroker):
except sqlite3.OperationalError as err: except sqlite3.OperationalError as err:
err_msg = str(err) err_msg = str(err)
if err_msg in errors: if err_msg in errors:
# only attempt each migration once # only attempt migration once
raise raise
errors.add(err_msg) errors.add(err_msg)
if 'no such column: storage_policy_index' in err_msg: if 'no such column: storage_policy_index' in err_msg:
@ -1954,6 +1923,45 @@ class ContainerBroker(DatabaseBroker):
brokers.append(self) brokers.append(self)
return brokers return brokers
# TODO: needs unit test
def update_sharding_info(self, info):
"""
Updates the broker's metadata with the given ``info``. Each key in
``info`` is prefixed with a sharding specific namespace.
:param info: a dict of info to be persisted
"""
prefix = 'X-Container-Sysmeta-Shard-'
timestamp = Timestamp.now()
metadata = dict(
('%s%s' % (prefix, key),
(value, timestamp.internal))
for key, value in info.items()
)
self.update_metadata(metadata)
# TODO: needs unit test
def get_sharding_info(self, key=None, default=None):
"""
Returns sharding specific info from the broker's metadata.
:param key: if given the value stored under ``key`` in the sharding
info will be returned. If ``key`` is not found in the info then the
value of ``default`` will be returned or None if ``default`` is not
given.
:param default: a default value to return if ``key`` is given but not
found in the sharding info.
:return: either a dict of sharding info or the value stored under
``key`` in that dict.
"""
prefix = 'X-Container-Sysmeta-Shard-'
metadata = self.metadata
info = dict((k[len(prefix):], v[0]) for
k, v in metadata.items() if k.startswith(prefix))
if key:
return info.get(key, default)
return info
def get_items_since(self, start, count, include_sharding=False): def get_items_since(self, start, count, include_sharding=False):
""" """
Get a list of objects in the database between start and end. Get a list of objects in the database between start and end.
@ -2003,13 +2011,6 @@ class ContainerBroker(DatabaseBroker):
"of the form 'account/container', got %r" % path) "of the form 'account/container', got %r" % path)
self._root_account, self._root_container = tuple(path.split('/')) self._root_account, self._root_container = tuple(path.split('/'))
@property
def path(self):
if self.container is None:
# Ensure account/container get populated
self.get_info()
return '%s/%s' % (self.account, self.container)
@property @property
def root_account(self): def root_account(self):
if not self._root_account: if not self._root_account:

View File

@ -35,8 +35,7 @@ from swift.common.request_helpers import get_param, \
from swift.common.utils import get_logger, hash_path, public, \ from swift.common.utils import get_logger, hash_path, public, \
Timestamp, storage_directory, validate_sync_to, \ Timestamp, storage_directory, validate_sync_to, \
config_true_value, timing_stats, replication, \ config_true_value, timing_stats, replication, \
override_bytes_from_content_type, get_log_line, whataremyips, ShardRange, \ override_bytes_from_content_type, get_log_line, ShardRange, list_from_csv
list_from_csv
from swift.common.constraints import valid_timestamp, check_utf8, check_drive from swift.common.constraints import valid_timestamp, check_utf8, check_drive
from swift.common import constraints from swift.common import constraints
@ -98,13 +97,12 @@ class ContainerController(BaseStorageServer):
self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = float(conf.get('node_timeout', 3)) self.node_timeout = float(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.conn_timeout = float(conf.get('conn_timeout', 0.5))
swift_dir = conf.get('swift_dir', '/etc/swift')
#: ContainerSyncCluster instance for validating sync-to values. #: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms( self.realms_conf = ContainerSyncRealms(
os.path.join(swift_dir, 'container-sync-realms.conf'), os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger) self.logger)
self.ips = whataremyips()
self.port = int(conf.get('bind_port', 6201))
#: The list of hosts we're allowed to send syncs to. This can be #: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf #: overridden by data in self.realms_conf
self.allowed_sync_hosts = [ self.allowed_sync_hosts = [
@ -527,8 +525,10 @@ class ContainerController(BaseStorageServer):
""" """
Perform any mutations to container listing records that are common to Perform any mutations to container listing records that are common to
all serialization formats, and returns it as a dict. all serialization formats, and returns it as a dict.
Converts created time to iso timestamp. Converts created time to iso timestamp.
Replaces size with 'swift_bytes' content type parameter. Replaces size with 'swift_bytes' content type parameter.
:params record: object entry record :params record: object entry record
:returns: modified record :returns: modified record
""" """
@ -618,7 +618,6 @@ class ContainerController(BaseStorageServer):
limit, marker, end_marker, prefix, delimiter, path, limit, marker, end_marker, prefix, delimiter, path,
storage_policy_index=info['storage_policy_index'], storage_policy_index=info['storage_policy_index'],
reverse=reverse) reverse=reverse)
return self.create_listing(req, out_content_type, info, resp_headers, return self.create_listing(req, out_content_type, info, resp_headers,
broker.metadata, container_list, container) broker.metadata, container_list, container)

View File

@ -64,6 +64,7 @@ from swift.common.request_helpers import strip_sys_meta_prefix, \
strip_object_transient_sysmeta_prefix strip_object_transient_sysmeta_prefix
from swift.common.storage_policy import POLICIES from swift.common.storage_policy import POLICIES
DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds
DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds

View File

@ -35,7 +35,6 @@ from tempfile import mkdtemp, NamedTemporaryFile
import weakref import weakref
import operator import operator
import functools import functools
from swift.obj import diskfile from swift.obj import diskfile
import re import re
import random import random