Merge "Use botocore in API instead of boto"

This commit is contained in:
Jenkins 2017-08-11 11:31:02 +00:00 committed by Gerrit Code Review
commit c4f3a1e8ed
8 changed files with 133 additions and 113 deletions

View File

@ -196,12 +196,13 @@ function configure_ec2api {
iniset $EC2API_CONF_FILE DEFAULT ec2api_listen_port "$EC2API_SERVICE_PORT"
iniset $EC2API_CONF_FILE DEFAULT ec2_port "$EC2API_SERVICE_PORT"
local s3_port="$EC2API_S3_SERVICE_PORT"
local s3_protocol="http"
if is_service_enabled swift3; then
iniset $EC2API_CONF_FILE DEFAULT s3_port "$S3_SERVICE_PORT"
else
iniset $EC2API_CONF_FILE DEFAULT s3_port "$EC2API_S3_SERVICE_PORT"
s3_port="$S3_SERVICE_PORT"
s3_protocol="$SWIFT_SERVICE_PROTOCOL"
fi
iniset $EC2API_CONF_FILE DEFAULT s3_host "$SERVICE_HOST"
iniset $EC2API_CONF_FILE DEFAULT s3_url "$s3_protocol://$SERVICE_HOST:$s3_port"
configure_ec2api_rpc_backend

View File

@ -18,6 +18,7 @@ Starting point for routing EC2 requests.
import hashlib
import sys
import botocore
from keystoneclient import access as keystone_access
from keystoneclient.auth.identity import access as keystone_identity_access
from keystoneclient import session as keystone_session
@ -381,6 +382,18 @@ class Executor(wsgi.Application):
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except botocore.exceptions.ClientError as ex:
error = ex.response.get('Error', {})
code = ex.response.get('Code', error.get('Code'))
message = ex.response.get('Message', error.get('Message'))
# the early versions of botocore didn't provide HTTPStatusCode
# for 400 errors
status = ex.response.get('ResponseMetadata', {}).get(
'HTTPStatusCode', 400)
if status < 400 or status > 499:
LOG.exception("Exception from remote server")
return faults.ec2_error_response(
context.request_id, code, message, status=status)
except Exception as ex:
return ec2_error_ex(
ex, req, unexpected=not isinstance(ex, exception.EC2Exception))

View File

@ -20,7 +20,9 @@ import tarfile
import tempfile
import time
import boto.s3.connection
import botocore.client
import botocore.config
import botocore.session
from cinderclient import exceptions as cinder_exception
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding
@ -32,6 +34,7 @@ from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from ec2api.api import common
from ec2api.api import ec2utils
@ -48,20 +51,13 @@ s3_opts = [
cfg.StrOpt('image_decryption_dir',
default='/tmp',
help='Parent directory for tempdir used for image decryption'),
cfg.StrOpt('s3_host',
default='$my_ip',
help='Hostname or IP for OpenStack to use when accessing '
'the S3 api'),
cfg.IntOpt('s3_port',
default=3334,
help='Port used when accessing the S3 api'),
cfg.BoolOpt('s3_use_ssl',
default=False,
help='Whether to use SSL when talking to S3'),
cfg.BoolOpt('s3_affix_tenant',
default=False,
help='Whether to affix the tenant id to the access key '
'when downloading from S3'),
cfg.StrOpt('s3_url',
default='http://$my_ip:3334',
help='URL to S3 server'),
# TODO(andrey-mp): this should be reworked with all region's logic
cfg.StrOpt('s3_region',
default='RegionOne',
help='Region of S3 server'),
cfg.StrOpt('x509_root_private_key',
help='Path to ca private key file'),
]
@ -820,9 +816,14 @@ def _s3_create(context, metadata):
image_location = metadata['image_location'].lstrip('/')
bucket_name = image_location.split('/')[0]
manifest_path = image_location[len(bucket_name) + 1:]
bucket = _s3_conn(context).get_bucket(bucket_name)
key = bucket.get_key(manifest_path)
manifest = key.get_contents_as_string()
s3_client = _s3_conn(context)
key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path)
body = key['Body']
if isinstance(body, six.string_types):
manifest = body
else:
# TODO(andrey-mp): check big objects
manifest = body.read()
(image_metadata, image_parts,
encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest)
@ -852,7 +853,8 @@ def _s3_create(context, metadata):
try:
parts = []
for part_name in image_parts:
part = _s3_download_file(bucket, part_name, image_path)
part = _s3_download_file(s3_client, bucket_name,
part_name, image_path)
parts.append(part)
# NOTE(vish): this may be suboptimal, should we use cat?
@ -964,10 +966,17 @@ def _s3_parse_manifest(context, manifest):
return metadata, image_parts, encrypted_key, encrypted_iv
def _s3_download_file(bucket, filename, local_dir):
key = bucket.get_key(filename)
def _s3_download_file(s3_client, bucket_name, filename, local_dir):
s3_object = s3_client.get_object(Bucket=bucket_name, Key=filename)
local_filename = os.path.join(local_dir, os.path.basename(filename))
key.get_contents_to_filename(local_filename)
body = s3_object['Body']
with open(local_filename, 'w') as f:
if isinstance(body, six.string_types):
f.write(body)
else:
# TODO(andrey-mp): check big objects
f.write(body.read())
f.close()
return local_filename
@ -1021,20 +1030,24 @@ def _s3_test_for_malicious_tarball(path, filename):
def _s3_conn(context):
# NOTE(vish): access and secret keys for s3 server are not
# checked in nova-objectstore
region = CONF.s3_region
ec2_creds = clients.keystone(context).ec2.list(context.user_id)
access = ec2_creds[0].access
if CONF.s3_affix_tenant:
access = '%s:%s' % (access, context.project_id)
secret = ec2_creds[0].secret
calling = boto.s3.connection.OrdinaryCallingFormat()
return boto.s3.connection.S3Connection(aws_access_key_id=access,
aws_secret_access_key=secret,
is_secure=CONF.s3_use_ssl,
calling_format=calling,
port=CONF.s3_port,
host=CONF.s3_host)
# Here we a) disable user's default config to let ec2api work independently
# of user's local settings;
# b) specify region to be used by botocore;
# c) do not change standard botocore keys to get these settings
# from environment
connection_data = {
'config_file': (None, 'AWS_CONFIG_FILE', None, None),
'region': ('region', 'AWS_DEFAULT_REGION', region, None),
}
session = botocore.session.get_session(connection_data)
return session.create_client(
's3', region_name=region, endpoint_url=CONF.s3_url,
aws_access_key_id=ec2_creds[0].access,
aws_secret_access_key=ec2_creds[0].secret,
config=botocore.config.Config(signature_version='s3v4'))
def _decrypt_text(text):

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from boto import exception as boto_exception
from botocore import exceptions as botocore_exceptions
from cinderclient import exceptions as cinder_exception
from glanceclient.common import exceptions as glance_exception
from keystoneclient import exceptions as keystone_exception
@ -100,5 +100,9 @@ class ApiInitTestCase(base.BaseTestCase):
400, 'BadRequest', 'fake_msg')
do_check(keystone_exception.BadRequest(message='fake_msg'),
400, 'BadRequest', 'fake_msg (HTTP 400)')
do_check(boto_exception.S3ResponseError(400, '', 'fake_msg'),
400, 'S3ResponseError', 'fake_msg')
do_check(botocore_exceptions.ClientError({'Error':
{'Code': '', 'Message': ''},
'Code': 'FakeCode',
'Message': 'fake_msg'},
'register_image'),
400, 'FakeCode', 'fake_msg')

View File

@ -828,9 +828,7 @@ class S3TestCase(base.BaseTestCase):
def do_test(s3_conn, s3_download_file, s3_decrypt_image,
s3_untarzip_image):
(s3_conn.return_value.
get_bucket.return_value.
get_key.return_value.
get_contents_as_string.return_value) = FILE_MANIFEST_XML
get_object.return_value) = {'Body': FILE_MANIFEST_XML}
s3_download_file.return_value = tempf
s3_untarzip_image.return_value = tempf
os_image_id = fakes.random_os_id()
@ -850,15 +848,10 @@ class S3TestCase(base.BaseTestCase):
os_image_id, image_state='available')
self.glance.images.upload.assert_any_call(
os_image_id, mock.ANY)
s3_conn.return_value.get_bucket.assert_called_with(bucket)
(s3_conn.return_value.get_bucket.return_value.
get_key.assert_called_with(manifest))
(s3_conn.return_value.get_bucket.return_value.
get_key.return_value.
get_contents_as_string.assert_called_with())
s3_conn.return_value.get_object.assert_called_with(
Bucket=bucket, Key=manifest)
s3_download_file.assert_called_with(
s3_conn.return_value.get_bucket.return_value,
'foo', mock.ANY)
mock.ANY, bucket, 'foo', mock.ANY)
s3_decrypt_image.assert_called_with(
fake_context, mock.ANY, 'foo', 'foo', mock.ANY)
s3_untarzip_image.assert_called_with(mock.ANY, mock.ANY)
@ -882,9 +875,7 @@ class S3TestCase(base.BaseTestCase):
with mock.patch('ec2api.api.image._s3_conn') as s3_conn:
(s3_conn.return_value.
get_bucket.return_value.
get_key.return_value.
get_contents_as_string.return_value) = FILE_MANIFEST_XML
get_object.return_value) = {'Body': FILE_MANIFEST_XML}
image_api._s3_create(fake_context, metadata)

View File

@ -16,9 +16,8 @@
"""
Unittests for S3 objectstore clone.
"""
import boto
from boto import exception as boto_exception
from boto.s3 import connection as s3
from botocore import exceptions as botocore_exception
import botocore.session
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@ -48,16 +47,16 @@ class S3APITestCase(test_base.BaseTestCase):
self.server.start()
self.addCleanup(self.server.stop)
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id='fake',
aws_secret_access_key='fake',
host=CONF.s3_listen,
port=self.server.port,
is_secure=False,
calling_format=s3.OrdinaryCallingFormat())
s3_url = 'http://' + CONF.s3_listen + ':' + str(self.server.port)
region = 'FakeRegion'
connection_data = {
'config_file': (None, 'AWS_CONFIG_FILE', None, None),
'region': ('region', 'BOTO_DEFAULT_REGION', region, None),
}
session = botocore.session.get_session(connection_data)
conn = session.create_client(
's3', region_name=region, endpoint_url=s3_url,
aws_access_key_id='fake', aws_secret_access_key='fake')
self.conn = conn
def get_http_connection(*args):
@ -67,69 +66,55 @@ class S3APITestCase(test_base.BaseTestCase):
self.conn.get_http_connection = get_http_connection
def _ensure_no_buckets(self, buckets):
self.assertEqual(len(buckets), 0, "Bucket list was not empty")
self.assertEqual(len(buckets['Buckets']), 0,
"Bucket list was not empty")
return True
def _ensure_one_bucket(self, buckets, name):
self.assertEqual(len(buckets), 1,
self.assertEqual(len(buckets['Buckets']), 1,
"Bucket list didn't have exactly one element in it")
self.assertEqual(buckets[0].name, name, "Wrong name")
self.assertEqual(buckets['Buckets'][0]['Name'], name, "Wrong name")
return True
def test_list_buckets(self):
# Make sure we are starting with no buckets.
self._ensure_no_buckets(self.conn.get_all_buckets())
# Make sure we started with no buckets.
self._ensure_no_buckets(self.conn.list_buckets())
def test_create_and_delete_bucket(self):
# Test bucket creation and deletion.
bucket_name = 'testbucket'
self.conn.create_bucket(bucket_name)
self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
self.conn.delete_bucket(bucket_name)
self._ensure_no_buckets(self.conn.get_all_buckets())
self.conn.create_bucket(Bucket=bucket_name)
self._ensure_one_bucket(self.conn.list_buckets(), bucket_name)
self.conn.delete_bucket(Bucket=bucket_name)
self._ensure_no_buckets(self.conn.list_buckets())
def test_create_bucket_and_key_and_delete_key_again(self):
def test_create_bucket_and_key_and_delete_key(self):
# Test key operations on buckets.
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = b'somekey'
b = self.conn.create_bucket(bucket_name)
k = b.new_key(key_name)
k.set_contents_from_string(key_contents)
bucket = self.conn.get_bucket(bucket_name)
self.conn.create_bucket(Bucket=bucket_name)
self.conn.put_object(Bucket=bucket_name, Key=key_name,
Body=key_contents)
# make sure the contents are correct
key = bucket.get_key(key_name)
self.assertEqual(key.get_contents_as_string(), key_contents,
key = self.conn.get_object(Bucket=bucket_name, Key=key_name)
self.assertEqual(key['Body'].read(), key_contents,
"Bad contents")
# delete the key
key.delete()
self.conn.delete_object(Bucket=bucket_name, Key=key_name)
self._ensure_no_buckets(bucket.get_all_keys())
self.assertRaises(botocore_exception.ClientError, self.conn.get_object,
Bucket=bucket_name, Key=key_name)
def test_unknown_bucket(self):
# NOTE(unicell): Since Boto v2.25.0, the underlying implementation
# of get_bucket method changed from GET to HEAD.
#
# Prior to v2.25.0, default validate=True fetched a list of keys in the
# bucket and raises S3ResponseError. As a side effect of switching to
# HEAD request, get_bucket call now generates less error message.
#
# To keep the original semantics, an additional get_all_keys call is
# suggested per the Boto documentation. This case tests both validate=False and
# validate=True case for completeness.
#
# http://docs.pythonboto.org/en/latest/releasenotes/v2.25.0.html
# http://docs.pythonboto.org/en/latest/s3_tut.html#accessing-a-bucket
bucket_name = 'falalala'
self.assertRaises(boto_exception.S3ResponseError,
self.conn.get_bucket,
bucket_name)
bucket = self.conn.get_bucket(bucket_name, validate=False)
self.assertRaises(boto_exception.S3ResponseError,
bucket.get_all_keys,
maxkeys=0)
self.assertRaises(botocore_exception.ClientError,
self.conn.head_bucket,
Bucket=bucket_name)
self.assertRaises(botocore_exception.ClientError,
self.conn.list_objects,
Bucket=bucket_name, MaxKeys=0)

View File

@ -298,10 +298,24 @@ if [[ -f "$NOVA_CONF" ]]; then
# NOTE(ft): use swift instead internal s3 server if enabled
if [[ -n $(openstack catalog show object-store 2>/dev/null) ]] &&
[[ -n $(openstack catalog show s3 2>/dev/null) ]]; then
copynovaopt s3_host DEFAULT
copynovaopt s3_port DEFAULT
copynovaopt s3_affix_tenant DEFAULT
copynovaopt s3_use_ssl DEFAULT
s3_host="127.0.0.1"
if ini_has_option "$NOVA_CONF" DEFAULT "s3_host"; then
s3_host=$(iniget $NOVA_CONF DEFAULT $option_name)
fi
s3_port="3334"
if ini_has_option "$NOVA_CONF" DEFAULT "s3_port"; then
s3_port=$(iniget $NOVA_CONF DEFAULT $option_name)
fi
s3_proto="http"
if ini_has_option "$NOVA_CONF" DEFAULT "s3_use_ssl"; then
s3_use_ssl=$(iniget $NOVA_CONF DEFAULT $option_name)
s3_use_ssl=`echo $s3_use_ssl | awk '{print toupper($0)}'`
if [[ $s3_use_ssl == "TRUE" ]]; then
s3_proto="https"
fi
fi
iniset $CONF_FILE DEFAULT s3_url "$s3_proto://$s3_host:$s3_port"
fi
nova_state_path=$(iniget $NOVA_CONF DEFAULT state_path)

View File

@ -2,7 +2,6 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
Babel!=2.4.0,>=2.3.4 # BSD
boto>=2.32.1 # MIT
botocore>=1.0.0 # Apache-2.0
cryptography!=2.0,>=1.6 # BSD/Apache-2.0
eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT