# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Signature Version 4 test suite.
AWS provides a test suite for signature version 4:
http://docs.aws.amazon.com/general/latest/gr/signature-v4-test-suite.html
This module contains logic to run these tests. The test files were
placed in ./aws4_testsuite, and we're using nose's test generators to
dynamically generate testcases based on these files.
"""
import os
import logging
import io
import datetime
from botocore.compat import six
import mock
import botocore.auth
from botocore.awsrequest import AWSRequest
from botocore.credentials import Credentials
try:
from urllib.parse import urlsplit
from urllib.parse import parse_qsl
except ImportError:
from urlparse import urlsplit
from urlparse import parse_qsl
CREDENTIAL_SCOPE = "KEYNAME/20110909/us-west-1/s3/aws4_request"
SECRET_KEY = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
ACCESS_KEY = 'AKIDEXAMPLE'
DATE_STRING = 'Mon, 09 Sep 2011 23:36:00 GMT'
TESTSUITE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'aws4_testsuite')
# The following tests are not run. Each test has a comment as
# to why the test is being ignored.
TESTS_TO_IGNORE = [
# Bad POST syntax, python's HTTP parser chokes on this.
'post-vanilla-query-space',
# Bad POST syntax, python's HTTP parser chokes on this.
'post-vanilla-query-nonunreserved',
# Multiple query params of the same key not supported by
# the SDKs.
'get-vanilla-query-order-key-case',
# Multiple query params of the same key not supported by
# the SDKs.
'get-vanilla-query-order-value',
]
if not six.PY3:
TESTS_TO_IGNORE += [
        # Python 2's HTTP parser does not handle duplicate headers or
        # preserve header value order, so these cases are skipped.
'get-header-key-duplicate',
'get-header-value-order',
]
log = logging.getLogger(__name__)
class RawHTTPRequest(six.moves.BaseHTTPServer.BaseHTTPRequestHandler):
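    """Parse a raw HTTP request without a socket.

    BaseHTTPRequestHandler normally reads from a connection; feeding it
    a BytesIO of the raw request text lets us reuse the stdlib parser
    on the on-disk ``.req`` fixtures.
    """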
def __init__(self, raw_request):
if isinstance(raw_request, six.text_type):
raw_request = raw_request.encode('utf-8')
self.rfile = six.BytesIO(raw_request)
self.raw_requestline = self.rfile.readline()
self.error_code = None
self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
def test_generator():
datetime_patcher = mock.patch.object(
botocore.auth.datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.utcnow.return_value = datetime.datetime(2011, 9, 9, 23, 36)
formatdate_patcher = mock.patch('botocore.auth.formatdate')
formatdate = formatdate_patcher.start()
# We have to change this because Sep 9, 2011 was actually
# a Friday, but the tests have this set to a Monday.
formatdate.return_value = 'Mon, 09 Sep 2011 23:36:00 GMT'
for test_case in set(os.path.splitext(i)[0]
for i in os.listdir(TESTSUITE_DIR)):
if test_case in TESTS_TO_IGNORE:
log.debug("Skipping test: %s", test_case)
continue
yield (_test_signature_version_4, test_case)
datetime_patcher.stop()
formatdate_patcher.stop()
def create_request_from_raw_request(raw_request):
raw_request = raw_request.replace('http/1.1', 'HTTP/1.1')
request = AWSRequest()
raw = RawHTTPRequest(raw_request)
if raw.error_code is not None:
raise Exception(raw.error_message)
request.method = raw.command
datetime_now = datetime.datetime(2011, 9, 9, 23, 36)
request.context['timestamp'] = datetime_now.strftime('%Y%m%dT%H%M%SZ')
for key, val in raw.headers.items():
request.headers[key] = val
request.data = raw.rfile.read()
host = raw.headers.get('host', '')
    # BaseHTTPRequestHandler decodes the first line of the request as
    # 'iso-8859-1', so we need to re-encode it and decode it back into
    # utf-8.
if isinstance(raw.path, six.text_type):
raw.path = raw.path.encode('iso-8859-1').decode('utf-8')
url = 'https://%s%s' % (host, raw.path)
if '?' in url:
split_url = urlsplit(url)
params = dict(parse_qsl(split_url.query))
request.url = split_url.path
request.params = params
else:
request.url = url
return request
def _test_signature_version_4(test_case):
test_case = _SignatureTestCase(test_case)
request = create_request_from_raw_request(test_case.raw_request)
auth = botocore.auth.SigV4Auth(test_case.credentials, 'host', 'us-east-1')
actual_canonical_request = auth.canonical_request(request)
assert_equal(actual_canonical_request, test_case.canonical_request,
test_case.raw_request, 'canonical_request')
actual_string_to_sign = auth.string_to_sign(request,
actual_canonical_request)
assert_equal(actual_string_to_sign, test_case.string_to_sign,
test_case.raw_request, 'string_to_sign')
auth.add_auth(request)
actual_auth_header = request.headers['Authorization']
assert_equal(actual_auth_header, test_case.authorization_header,
test_case.raw_request, 'authheader')
def assert_equal(actual, expected, raw_request, part):
if actual != expected:
message = "The %s did not match" % part
message += "\nACTUAL:%r !=\nEXPECT:%r" % (actual, expected)
message += '\nThe raw request was:\n%s' % raw_request
raise AssertionError(message)
class _SignatureTestCase(object):
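    """Load the on-disk fixtures for a single signature test case.

    Each case in the AWS suite is a set of files sharing a basename:
    ``.req`` (raw request), ``.creq`` (canonical request), ``.sts``
    (string to sign), ``.authz`` (Authorization header) and ``.sreq``
    (signed request).
    """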
def __init__(self, test_case):
p = os.path.join
# We're using io.open() because we need to open these files with
# a specific encoding, and in 2.x io.open is the best way to do this.
self.raw_request = io.open(p(TESTSUITE_DIR, test_case + '.req'),
encoding='utf-8').read()
self.canonical_request = io.open(
p(TESTSUITE_DIR, test_case + '.creq'),
encoding='utf-8').read().replace('\r', '')
self.string_to_sign = io.open(
p(TESTSUITE_DIR, test_case + '.sts'),
encoding='utf-8').read().replace('\r', '')
self.authorization_header = io.open(
p(TESTSUITE_DIR, test_case + '.authz'),
encoding='utf-8').read().replace('\r', '')
self.signed_request = io.open(p(TESTSUITE_DIR, test_case + '.sreq'),
encoding='utf-8').read()
self.credentials = Credentials(ACCESS_KEY, SECRET_KEY)
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from tests.unit.docs import BaseDocsTest
from botocore.docs.utils import py_type_name
from botocore.docs.utils import py_default
from botocore.docs.utils import get_official_service_name
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
from botocore.docs.utils import AppendParamDocumentation
from botocore.docs.utils import escape_controls
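# py_type_name and py_default map API model type names to the Python
# type names and placeholder values shown in generated docs; the tests
# below enumerate the expected mappings (e.g. 'structure' -> 'dict',
# 'blob' -> b'bytes').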
class TestPythonTypeName(unittest.TestCase):
def test_structure(self):
self.assertEqual('dict', py_type_name('structure'))
def test_list(self):
self.assertEqual('list', py_type_name('list'))
def test_map(self):
self.assertEqual('dict', py_type_name('map'))
def test_string(self):
self.assertEqual('string', py_type_name('string'))
def test_character(self):
self.assertEqual('string', py_type_name('character'))
def test_blob(self):
self.assertEqual('bytes', py_type_name('blob'))
def test_timestamp(self):
self.assertEqual('datetime', py_type_name('timestamp'))
def test_integer(self):
self.assertEqual('integer', py_type_name('integer'))
def test_long(self):
self.assertEqual('integer', py_type_name('long'))
def test_float(self):
self.assertEqual('float', py_type_name('float'))
def test_double(self):
self.assertEqual('float', py_type_name('double'))
class TestPythonDefault(unittest.TestCase):
def test_structure(self):
self.assertEqual('{...}', py_default('structure'))
def test_list(self):
self.assertEqual('[...]', py_default('list'))
def test_map(self):
self.assertEqual('{...}', py_default('map'))
def test_string(self):
self.assertEqual('\'string\'', py_default('string'))
def test_blob(self):
self.assertEqual('b\'bytes\'', py_default('blob'))
def test_timestamp(self):
self.assertEqual('datetime(2015, 1, 1)', py_default('timestamp'))
def test_integer(self):
self.assertEqual('123', py_default('integer'))
def test_long(self):
self.assertEqual('123', py_default('long'))
def test_double(self):
self.assertEqual('123.0', py_default('double'))
class TestGetOfficialServiceName(BaseDocsTest):
def setUp(self):
super(TestGetOfficialServiceName, self).setUp()
self.service_model.metadata = {
'serviceFullName': 'Official Name'
}
def test_no_short_name(self):
self.assertEqual('Official Name',
get_official_service_name(self.service_model))
def test_aws_short_name(self):
self.service_model.metadata['serviceAbbreviation'] = 'AWS Foo'
self.assertEqual('Official Name (Foo)',
get_official_service_name(self.service_model))
def test_amazon_short_name(self):
self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
self.assertEqual('Official Name (Foo)',
get_official_service_name(self.service_model))
def test_short_name_in_official_name(self):
self.service_model.metadata['serviceFullName'] = 'The Foo Service'
self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
self.assertEqual('The Foo Service',
get_official_service_name(self.service_model))
class TestAutopopulatedParam(BaseDocsTest):
def setUp(self):
super(TestAutopopulatedParam, self).setUp()
self.name = 'MyMember'
self.param = AutoPopulatedParam(self.name)
def test_request_param_not_required(self):
section = self.doc_structure.add_new_section(self.name)
section.add_new_section('param-documentation')
self.param.document_auto_populated_param(
'docs.request-params', self.doc_structure)
self.assert_contains_line(
'this parameter is automatically populated')
def test_request_param_required(self):
section = self.doc_structure.add_new_section(self.name)
is_required_section = section.add_new_section('is-required')
section.add_new_section('param-documentation')
is_required_section.write('**[REQUIRED]**')
self.param.document_auto_populated_param(
'docs.request-params', self.doc_structure)
self.assert_not_contains_line('**[REQUIRED]**')
self.assert_contains_line(
'this parameter is automatically populated')
def test_non_default_param_description(self):
description = 'This is a custom description'
self.param = AutoPopulatedParam(self.name, description)
section = self.doc_structure.add_new_section(self.name)
section.add_new_section('param-documentation')
self.param.document_auto_populated_param(
'docs.request-params', self.doc_structure)
self.assert_contains_line(description)
def test_request_example(self):
top_section = self.doc_structure.add_new_section('structure-value')
section = top_section.add_new_section(self.name)
example = 'MyMember: \'string\''
section.write(example)
self.assert_contains_line(example)
self.param.document_auto_populated_param(
'docs.request-example', self.doc_structure)
self.assert_not_contains_line(example)
def test_param_not_in_section_request_param(self):
self.doc_structure.add_new_section('Foo')
self.param.document_auto_populated_param(
'docs.request-params', self.doc_structure)
self.assertEqual(
'', self.doc_structure.flush_structure().decode('utf-8'))
def test_param_not_in_section_request_example(self):
top_section = self.doc_structure.add_new_section('structure-value')
section = top_section.add_new_section('Foo')
example = 'Foo: \'string\''
section.write(example)
self.assert_contains_line(example)
self.param.document_auto_populated_param(
'docs.request-example', self.doc_structure)
self.assert_contains_line(example)
class TestHideParamFromOperations(BaseDocsTest):
def setUp(self):
super(TestHideParamFromOperations, self).setUp()
self.name = 'MyMember'
self.param = HideParamFromOperations(
's3', self.name, ['SampleOperation'])
def test_hides_params_from_doc_string(self):
section = self.doc_structure.add_new_section(self.name)
param_signature = ':param %s: ' % self.name
section.write(param_signature)
self.assert_contains_line(param_signature)
self.param.hide_param(
'docs.request-params.s3.SampleOperation.complete-section',
self.doc_structure)
self.assert_not_contains_line(param_signature)
def test_hides_param_from_example(self):
structure = self.doc_structure.add_new_section('structure-value')
section = structure.add_new_section(self.name)
example = '%s: \'string\'' % self.name
section.write(example)
self.assert_contains_line(example)
self.param.hide_param(
'docs.request-example.s3.SampleOperation.complete-section',
self.doc_structure)
self.assert_not_contains_line(example)
class TestAppendParamDocumentation(BaseDocsTest):
def setUp(self):
super(TestAppendParamDocumentation, self).setUp()
self.name = 'MyMember'
self.param = AppendParamDocumentation(self.name, 'hello!')
def test_appends_documentation(self):
section = self.doc_structure.add_new_section(self.name)
param_section = section.add_new_section('param-documentation')
param_section.writeln('foo')
self.param.append_documentation(
'docs.request-params', self.doc_structure)
self.assert_contains_line('foo\n')
self.assert_contains_line('hello!')
class TestEscapeControls(unittest.TestCase):
def test_escapes_controls(self):
escaped = escape_controls('\na\rb\tc\fd\be')
        self.assertEqual(escaped, '\\na\\rb\\tc\\fd\\be')
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.unit.docs import BaseDocsTest
from botocore.docs.paginator import PaginatorDocumenter
from botocore.paginate import PaginatorModel
class TestPaginatorDocumenter(BaseDocsTest):
def setUp(self):
super(TestPaginatorDocumenter, self).setUp()
self.add_shape_to_params('Biz', 'String')
self.extra_setup()
def extra_setup(self):
self.setup_client()
paginator_model = PaginatorModel(self.paginator_json_model)
self.paginator_documenter = PaginatorDocumenter(
client=self.client, service_paginator_model=paginator_model)
def test_document_paginators(self):
self.paginator_documenter.document_paginators(
self.doc_structure)
self.assert_contains_lines_in_order([
'==========',
'Paginators',
'==========',
'The available paginators are:',
'* :py:class:`MyService.Paginator.SampleOperation`',
'.. py:class:: MyService.Paginator.SampleOperation',
' ::',
' paginator = client.get_paginator(\'sample_operation\')',
' .. py:method:: paginate(**kwargs)',
(' Creates an iterator that will paginate through responses'
' from :py:meth:`MyService.Client.sample_operation`.'),
' **Request Syntax**',
' ::',
' response_iterator = paginator.paginate(',
' Biz=\'string\',',
' PaginationConfig={',
' \'MaxItems\': 123,',
' \'PageSize\': 123,',
' \'StartingToken\': \'string\'',
' }',
' )',
' :type Biz: string',
' :param Biz:',
' :type PaginationConfig: dict',
' :param PaginationConfig:',
(' A dictionary that provides parameters to '
'control pagination.'),
' - **MaxItems** *(integer) --*',
' - **PageSize** *(integer) --*',
' - **StartingToken** *(string) --*',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Biz\': \'string\',',
' \'NextToken\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Biz** *(string) --*',
' - **NextToken** *(string) --*'
])
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from tests.unit.docs import BaseDocsTest
from botocore.hooks import HierarchicalEmitter
from botocore.docs.method import document_model_driven_signature
from botocore.docs.method import document_custom_signature
from botocore.docs.method import document_custom_method
from botocore.docs.method import document_model_driven_method
from botocore.docs.method import get_instance_public_methods
from botocore.docs.utils import DocumentedShape
class TestGetInstanceMethods(unittest.TestCase):
class MySampleClass(object):
def _internal_method(self):
pass
def public_method(self):
pass
def test_get_instance_methods(self):
instance = self.MySampleClass()
instance_methods = get_instance_public_methods(instance)
self.assertEqual(len(instance_methods), 1)
self.assertIn('public_method', instance_methods)
self.assertEqual(
instance.public_method, instance_methods['public_method'])
class TestDocumentModelDrivenSignature(BaseDocsTest):
def setUp(self):
super(TestDocumentModelDrivenSignature, self).setUp()
self.add_shape_to_params('Foo', 'String')
self.add_shape_to_params('Bar', 'String', is_required=True)
self.add_shape_to_params('Baz', 'String')
def test_document_signature(self):
document_model_driven_signature(
self.doc_structure, 'my_method', self.operation_model)
self.assert_contains_line(
'.. py:method:: my_method(**kwargs)')
def test_document_signature_exclude_all_kwargs(self):
exclude_params = ['Foo', 'Bar', 'Baz']
document_model_driven_signature(
self.doc_structure, 'my_method', self.operation_model,
exclude=exclude_params)
self.assert_contains_line(
'.. py:method:: my_method()')
def test_document_signature_exclude_and_include(self):
exclude_params = ['Foo', 'Bar', 'Baz']
include_params = [
DocumentedShape(
name='Biz', type_name='integer', documentation='biz docs')
]
document_model_driven_signature(
self.doc_structure, 'my_method', self.operation_model,
include=include_params, exclude=exclude_params)
self.assert_contains_line(
'.. py:method:: my_method(**kwargs)')
class TestDocumentCustomSignature(BaseDocsTest):
def sample_method(self, foo, bar='bar', baz=None):
pass
def test_document_signature(self):
document_custom_signature(
self.doc_structure, 'my_method', self.sample_method)
self.assert_contains_line(
'.. py:method:: my_method(foo, bar=\'bar\', baz=None)')
class TestDocumentCustomMethod(BaseDocsTest):
def custom_method(self, foo):
"""This is a custom method
:type foo: string
:param foo: The foo parameter
"""
pass
def test_document_custom_signature(self):
document_custom_method(
self.doc_structure, 'my_method', self.custom_method)
self.assert_contains_lines_in_order([
'.. py:method:: my_method(foo)',
' This is a custom method',
' :type foo: string',
' :param foo: The foo parameter'
])
class TestDocumentModelDrivenMethod(BaseDocsTest):
def setUp(self):
super(TestDocumentModelDrivenMethod, self).setUp()
self.event_emitter = HierarchicalEmitter()
self.add_shape_to_params('Bar', 'String')
def test_default(self):
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo'
)
cross_ref_link = (
'See also: `AWS API Documentation '
'<https://docs.aws.amazon.com/goto/WebAPI'
'/myservice-2014-01-01/SampleOperation>'
)
self.assert_contains_lines_in_order([
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
cross_ref_link,
' **Request Syntax**',
' ::',
' response = client.foo(',
' Bar=\'string\'',
' )',
' :type Bar: string',
' :param Bar:',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Bar\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Bar** *(string) --*'
])
def test_no_input_output_shape(self):
del self.json_model['operations']['SampleOperation']['input']
del self.json_model['operations']['SampleOperation']['output']
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo'
)
self.assert_contains_lines_in_order([
'.. py:method:: foo()',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = client.foo()',
' :returns: None',
])
def test_include_input(self):
include_params = [
DocumentedShape(
name='Biz', type_name='string', documentation='biz docs')
]
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo',
include_input=include_params
)
self.assert_contains_lines_in_order([
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = client.foo(',
' Bar=\'string\',',
' Biz=\'string\'',
' )',
' :type Bar: string',
' :param Bar:',
' :type Biz: string',
' :param Biz: biz docs',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Bar\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Bar** *(string) --*'
])
def test_include_output(self):
include_params = [
DocumentedShape(
name='Biz', type_name='string', documentation='biz docs')
]
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo',
include_output=include_params
)
self.assert_contains_lines_in_order([
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = client.foo(',
' Bar=\'string\'',
' )',
' :type Bar: string',
' :param Bar:',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Bar\': \'string\'',
' \'Biz\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Bar** *(string) --*',
' - **Biz** *(string) --*'
])
def test_exclude_input(self):
self.add_shape_to_params('Biz', 'String')
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo',
exclude_input=['Bar']
)
self.assert_contains_lines_in_order([
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = client.foo(',
' Biz=\'string\'',
' )',
' :type Biz: string',
' :param Biz:',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Bar\': \'string\'',
' \'Biz\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Bar** *(string) --*',
' - **Biz** *(string) --*'
])
self.assert_not_contains_lines([
':param Bar: string',
'Bar=\'string\''
])
def test_exclude_output(self):
self.add_shape_to_params('Biz', 'String')
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo',
exclude_output=['Bar']
)
self.assert_contains_lines_in_order([
'.. py:method:: foo(**kwargs)',
' This describes the foo method.',
' **Request Syntax**',
' ::',
' response = client.foo(',
' Bar=\'string\'',
' Biz=\'string\'',
' )',
' :type Biz: string',
' :param Biz:',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Biz\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Biz** *(string) --*'
])
self.assert_not_contains_lines([
'\'Bar\': \'string\'',
'- **Bar** *(string) --*',
])
def test_streaming_body_in_output(self):
self.add_shape_to_params('Body', 'Blob')
self.json_model['shapes']['Blob'] = {'type': 'blob'}
self.json_model['shapes']['SampleOperationInputOutput']['payload'] = \
'Body'
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo'
)
self.assert_contains_line('**Body** (:class:`.StreamingBody`)')
def test_streaming_body_in_input(self):
del self.json_model['operations']['SampleOperation']['output']
self.add_shape_to_params('Body', 'Blob')
self.json_model['shapes']['Blob'] = {'type': 'blob'}
self.json_model['shapes']['SampleOperationInputOutput']['payload'] = \
'Body'
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo'
)
# The line in the example
self.assert_contains_line('Body=b\'bytes\'|file')
# The line in the parameter description
self.assert_contains_line(
':type Body: bytes or seekable file-like object')
def test_deprecated(self):
self.json_model['operations']['SampleOperation']['deprecated'] = True
document_model_driven_method(
self.doc_structure, 'foo', self.operation_model,
event_emitter=self.event_emitter,
method_description='This describes the foo method.',
example_prefix='response = client.foo'
)
# The line in the example
self.assert_contains_lines_in_order([
' .. danger::',
' This operation is deprecated and may not function as '
'expected. This operation should not be used going forward and is '
'only kept for the purpose of backwards compatiblity.'
])
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import mock
from tests.unit.docs import BaseDocsTest
from botocore.session import get_session
from botocore.docs.service import ServiceDocumenter
class TestServiceDocumenter(BaseDocsTest):
def setUp(self):
super(TestServiceDocumenter, self).setUp()
self.add_shape_to_params('Biz', 'String')
self.setup_client()
with mock.patch('botocore.session.create_loader',
return_value=self.loader):
session = get_session()
self.service_documenter = ServiceDocumenter(
'myservice', session)
def test_document_service(self):
# Note that not everything will be included as it is just
        # a smoke test to make sure all of the main parts are included.
contents = self.service_documenter.document_service().decode('utf-8')
lines = [
'*********',
'MyService',
'*********',
'.. contents:: Table of Contents',
' :depth: 2',
'======',
'Client',
'======',
'.. py:class:: MyService.Client',
' A low-level client representing AWS MyService::',
' client = session.create_client(\'myservice\')',
' These are the available methods:',
' * :py:meth:`~MyService.Client.sample_operation`',
' .. py:method:: sample_operation(**kwargs)',
' **Examples** ',
' Sample Description.',
' ::',
' response = client.sample_operation(',
'==========',
'Paginators',
'==========',
'.. py:class:: MyService.Paginator.SampleOperation',
' .. py:method:: paginate(**kwargs)',
'=======',
'Waiters',
'=======',
'.. py:class:: MyService.Waiter.SampleOperationComplete',
' .. py:method:: wait(**kwargs)'
]
for line in lines:
self.assertIn(line, contents)
def test_document_service_no_paginator(self):
os.remove(self.paginator_model_file)
contents = self.service_documenter.document_service().decode('utf-8')
self.assertNotIn('Paginators', contents)
def test_document_service_no_waiter(self):
os.remove(self.waiter_model_file)
contents = self.service_documenter.document_service().decode('utf-8')
self.assertNotIn('Waiters', contents)
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, mock
from botocore.docs.docstring import LazyLoadedDocstring
from botocore.docs.docstring import ClientMethodDocstring
from botocore.docs.docstring import WaiterDocstring
from botocore.docs.docstring import PaginatorDocstring
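# LazyLoadedDocstring defers rendering until the docstring is actually
# evaluated (str(), repr(), expandtabs()) and caches the result, so the
# writer method runs at most once; test_is_lazy_loaded below checks
# exactly that.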
class MockedLazyLoadedDocstring(LazyLoadedDocstring):
def __init__(self, *args, **kwargs):
super(MockedLazyLoadedDocstring, self).__init__(*args, **kwargs)
self.mocked_writer_method = mock.Mock()
def _write_docstring(self, *args, **kwargs):
self.mocked_writer_method(*args, **kwargs)
class TestLazyLoadedDocstring(unittest.TestCase):
def test_raises_not_implemented(self):
with self.assertRaises(NotImplementedError):
str(LazyLoadedDocstring())
def test_expandtabs(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo\t'))
self.assertEqual('foo ', docstring.expandtabs(1))
def test_str(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo'))
self.assertEqual('foo', str(docstring))
def test_repr(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo'))
self.assertEqual('foo', repr(docstring))
def test_is_lazy_loaded(self):
docstring = MockedLazyLoadedDocstring()
str(docstring)
str(docstring)
# The mock.ANY represents the DocumentStructure that is filled out.
docstring.mocked_writer_method.assert_called_once_with(mock.ANY)
def test_args_kwargs_passed(self):
args = ['foo', 'bar']
kwargs = {'biz': 'baz'}
docstring = MockedLazyLoadedDocstring(*args, **kwargs)
str(docstring)
# The mock.ANY represents the DocumentStructure that is filled out.
docstring.mocked_writer_method.assert_called_with(
mock.ANY, *args, **kwargs)
class TestClientMethodDocstring(unittest.TestCase):
def test_use_correct_docstring_writer(self):
with mock.patch(
'botocore.docs.docstring'
'.document_model_driven_method') as mock_writer:
docstring = ClientMethodDocstring()
str(docstring)
self.assertTrue(mock_writer.called)
class TestWaiterDocstring(unittest.TestCase):
def test_use_correct_docstring_writer(self):
with mock.patch(
'botocore.docs.docstring'
'.document_wait_method') as mock_writer:
docstring = WaiterDocstring()
str(docstring)
self.assertTrue(mock_writer.called)
class TestPaginatorDocstring(unittest.TestCase):
def test_use_correct_docstring_writer(self):
with mock.patch(
'botocore.docs.docstring'
'.document_paginate_method') as mock_writer:
docstring = PaginatorDocstring()
str(docstring)
self.assertTrue(mock_writer.called)
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.compat import OrderedDict
from tests.unit.docs import BaseDocsTest
from botocore.docs.sharedexample import SharedExampleDocumenter, \
document_shared_examples
class TestDocumentSharedExamples(BaseDocsTest):
def setUp(self):
super(TestDocumentSharedExamples, self).setUp()
self.add_shape({
"foo": {
"type": "string"
}
})
self.add_shape({
"nested": {"type": "string"}
})
self.add_shape({
"other": {
"type": "structure",
"members": {"nested": {"shape": "nested"}}
}
})
self.add_shape({
"aloha": {
"type": "list",
"member": {"shape": "other"}
}
})
self.add_shape_to_params('foo', 'foo')
self.add_shape_to_params('aloha', 'aloha')
self._examples = [{
"id": "sample-id",
"title": "sample-title",
"description": "Sample Description.",
"input": OrderedDict([
("aloha", [
"other",
{
"nested": "fun!"
}
]),
("foo", "bar"),
]),
"output": OrderedDict([
("foo", "baz"),
]),
"comments": {
"input": {
"aloha": "mahalo"
},
"output": {
"foo": "Sample Comment"
}
}
}
]
def test_default(self):
document_shared_examples(
self.doc_structure, self.operation_model,
'response = client.foo', self._examples)
self.assert_contains_lines_in_order([
"**Examples**",
"Sample Description.",
"::",
" response = client.foo(",
" # mahalo",
" aloha=[",
" 'other',",
" {",
" 'nested': 'fun!',",
" },",
" ],",
" foo='bar',",
" )",
" print(response)",
"Expected Output:",
"::",
" {",
" # Sample Comment",
" 'foo': 'baz',",
" 'ResponseMetadata': {",
" '...': '...',",
" },",
" }",
])
class TestSharedExampleDocumenter(BaseDocsTest):
def setUp(self):
super(TestSharedExampleDocumenter, self).setUp()
self.documenter = SharedExampleDocumenter()
def test_is_input(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={
'input': {
'foo': 'bar'
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo='bar'",
")"
])
def test_dict_example(self):
self.add_shape({
'bar': {
"type": "structure",
"members": {
"bar": {"shape": "String"}
}
}
})
self.add_shape_to_params('foo', 'bar')
self.documenter.document_shared_example(
example={
'input': {
'foo': {'bar': 'baz'}
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo={",
" 'bar': 'baz',",
" },",
")"
])
def test_list_example(self):
self.add_shape({
"foo": {
"type": "list",
"member": {"shape": "String"}
}
})
self.add_shape_to_params('foo', 'foo')
self.documenter.document_shared_example(
example={
'input': {
'foo': ['bar']
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo=[",
" 'bar',",
" ],",
")"
])
def test_can_handle_no_input_key(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
")"
])
def test_unicode_string_example(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={
'input': {
'foo': u'bar'
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo='bar'",
")"
])
def test_timestamp_example(self):
self.add_shape({
'foo': {'type': 'timestamp'}
})
self.add_shape_to_params('foo', 'foo')
self.documenter.document_shared_example(
example={
'input': {
'foo': 'Fri, 20 Nov 2015 21:13:12 GMT'
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo=datetime(2015, 11, 20, 21, 13, 12, 4, 324, 0)",
")"
])
def test_map_example(self):
self.add_shape({
"baz": {"type": "string"}
})
self.add_shape({
'bar': {
"type": "map",
"key": {"shape": "baz"},
"value": {"shape": "baz"}
}
})
self.add_shape_to_params('foo', 'bar')
self.documenter.document_shared_example(
example={
'input': {
'foo': {'bar': 'baz'}
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" foo={",
" 'bar': 'baz',",
" },",
")"
])
def test_add_comment(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={
'input': {
'foo': 'bar'
},
'comments': {
'input': {
'foo': 'baz'
}
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"foo.bar(",
" # baz",
" foo='bar',",
")"
])
    def test_unicode_example(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={
'input': {
'foo': u'\u2713'
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
u"foo.bar(",
u" foo='\u2713'",
u")"
])
def test_escape_character_example(self):
self.add_shape_to_params('foo', 'String')
self.documenter.document_shared_example(
example={
'output': {
'foo': 'good\n\rintentions!\n\r'
}
},
prefix='foo.bar',
section=self.doc_structure,
operation_model=self.operation_model
)
self.assert_contains_lines_in_order([
"Expected Output:",
" {",
" 'foo': 'good\\n\\rintentions!\\n\\r',",
" }",
])
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import tempfile
import mock
from tests.unit.docs import BaseDocsTest
from botocore.session import get_session
from botocore.docs import generate_docs
class TestGenerateDocs(BaseDocsTest):
def setUp(self):
super(TestGenerateDocs, self).setUp()
self.docs_root = tempfile.mkdtemp()
self.loader_patch = mock.patch(
'botocore.session.create_loader', return_value=self.loader)
self.available_service_patch = mock.patch(
'botocore.session.Session.get_available_services',
return_value=['myservice'])
self.loader_patch.start()
self.available_service_patch.start()
def tearDown(self):
super(TestGenerateDocs, self).tearDown()
shutil.rmtree(self.docs_root)
self.loader_patch.stop()
self.available_service_patch.stop()
def test_generate_docs(self):
session = get_session()
# Have the rst files get written to the temporary directory
generate_docs(self.docs_root, session)
reference_services_path = os.path.join(
self.docs_root, 'reference', 'services')
reference_service_path = os.path.join(
reference_services_path, 'myservice.rst')
self.assertTrue(os.path.exists(reference_service_path))
        # Make sure the rst file has some of the expected contents.
with open(reference_service_path, 'r') as f:
contents = f.read()
self.assertIn('AWS MyService', contents)
self.assertIn('Client', contents)
self.assertIn('Paginators', contents)
self.assertIn('Waiters', contents)
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import json
import tempfile
import shutil
from botocore.docs.bcdoc.restdoc import DocumentStructure
import mock
from tests import unittest
from botocore.compat import OrderedDict
from botocore.hooks import HierarchicalEmitter
from botocore.model import ServiceModel, OperationModel
from botocore.client import ClientCreator
from botocore.loaders import Loader
class BaseDocsTest(unittest.TestCase):
def setUp(self):
self.root_dir = tempfile.mkdtemp()
self.version_dirs = os.path.join(
self.root_dir, 'myservice', '2014-01-01')
os.makedirs(self.version_dirs)
self.model_file = os.path.join(self.version_dirs, 'service-2.json')
self.waiter_model_file = os.path.join(
self.version_dirs, 'waiters-2.json')
self.paginator_model_file = os.path.join(
self.version_dirs, 'paginators-1.json')
self.example_model_file = os.path.join(
self.version_dirs, 'examples-1.json')
self.json_model = {}
self.nested_json_model = {}
self._setup_models()
self.build_models()
self.events = HierarchicalEmitter()
self.setup_client()
self.doc_name = 'MyDoc'
self.doc_structure = DocumentStructure(self.doc_name, target='html')
def tearDown(self):
shutil.rmtree(self.root_dir)
def setup_client(self):
with open(self.example_model_file, 'w') as f:
json.dump(self.example_json_model, f)
with open(self.waiter_model_file, 'w') as f:
json.dump(self.waiter_json_model, f)
with open(self.paginator_model_file, 'w') as f:
json.dump(self.paginator_json_model, f)
with open(self.model_file, 'w') as f:
json.dump(self.json_model, f)
self.loader = Loader(extra_search_paths=[self.root_dir])
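        # Stub out endpoint resolution so creating the client does not
        # require real endpoint data.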
endpoint_resolver = mock.Mock()
endpoint_resolver.construct_endpoint.return_value = {
'hostname': 'foo.us-east-1',
'partition': 'aws',
'endpointName': 'us-east-1',
'signatureVersions': ['v4']
}
self.creator = ClientCreator(
loader=self.loader, endpoint_resolver=endpoint_resolver,
user_agent='user-agent', event_emitter=self.events,
retry_handler_factory=mock.Mock(),
retry_config_translator=mock.Mock(),
exceptions_factory=mock.Mock())
self.client = self.creator.create_client('myservice', 'us-east-1')
def _setup_models(self):
self.json_model = {
'metadata': {
'apiVersion': '2014-01-01',
'endpointPrefix': 'myservice',
'signatureVersion': 'v4',
'serviceFullName': 'AWS MyService',
'uid': 'myservice-2014-01-01',
'protocol': 'query'
},
'operations': {
'SampleOperation': {
'name': 'SampleOperation',
'input': {'shape': 'SampleOperationInputOutput'},
'output': {'shape': 'SampleOperationInputOutput'}
}
},
'shapes': {
'SampleOperationInputOutput': {
'type': 'structure',
'members': OrderedDict()
},
'String': {
'type': 'string'
}
}
}
self.waiter_json_model = {
"version": 2,
"waiters": {
"SampleOperationComplete": {
"delay": 15,
"operation": "SampleOperation",
"maxAttempts": 40,
"acceptors": [
{"expected": "complete",
"matcher": "pathAll",
"state": "success",
"argument": "Biz"},
{"expected": "failed",
"matcher": "pathAny",
"state": "failure",
"argument": "Biz"}
]
}
}
}
self.paginator_json_model = {
"pagination": {
"SampleOperation": {
"input_token": "NextResult",
"output_token": "NextResult",
"limit_key": "MaxResults",
"result_key": "Biz"
}
}
}
self.example_json_model = {
"version": 1,
"examples": {
"SampleOperation": [{
"id": "sample-id",
"title": "sample-title",
"description": "Sample Description.",
"input": OrderedDict([
("Biz", "foo"),
]),
"comments": {
"input": {
"Biz": "bar"
},
}
}]
}
}
def build_models(self):
self.service_model = ServiceModel(self.json_model)
self.operation_model = OperationModel(
self.json_model['operations']['SampleOperation'],
self.service_model
)
def add_shape(self, shape):
shape_name = list(shape.keys())[0]
self.json_model['shapes'][shape_name] = shape[shape_name]
def add_shape_to_params(self, param_name, shape_name, documentation=None,
is_required=False):
params_shape = self.json_model['shapes']['SampleOperationInputOutput']
member = {'shape': shape_name}
if documentation is not None:
member['documentation'] = documentation
params_shape['members'][param_name] = member
if is_required:
required_list = params_shape.get('required', [])
required_list.append(param_name)
params_shape['required'] = required_list
def assert_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
self.assertIn(line, contents)
def assert_contains_lines_in_order(self, lines):
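        # Ordered containment: after each match, only the remainder of
        # the rendered document is searched for the next line.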
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
self.assertIn(line, contents)
beginning = contents.find(line)
contents = contents[(beginning + len(line)):]
def assert_not_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
self.assertNotIn(line, contents)
def assert_not_contains_lines(self, lines):
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
self.assertNotIn(line, contents)
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from tests.unit.docs import BaseDocsTest
from botocore.hooks import HierarchicalEmitter
from botocore.docs.params import RequestParamsDocumenter
from botocore.docs.params import ResponseParamsDocumenter
from botocore.docs.utils import DocumentedShape
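# RequestParamsDocumenter renders the ``:type``/``:param`` reST fields
# for request parameters, while ResponseParamsDocumenter renders the
# nested bullet structure for responses; the cases below exercise both
# against the same shapes.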
class BaseParamsDocumenterTest(BaseDocsTest):
def setUp(self):
super(BaseParamsDocumenterTest, self).setUp()
self.event_emitter = HierarchicalEmitter()
self.request_params = RequestParamsDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
self.response_params = ResponseParamsDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
class TestDocumentDefaultValue(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentDefaultValue, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: string',
':param Foo: This describes foo.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(string) --* This describes foo.'
])
class TestTraverseAndDocumentShape(BaseParamsDocumenterTest):
def setUp(self):
super(TestTraverseAndDocumentShape, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.event_emitter = mock.Mock()
self.request_params = RequestParamsDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
self.response_params = ResponseParamsDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
def test_events_emitted_response_params(self):
self.response_params.traverse_and_document_shape(
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
self.assertEqual(
self.event_emitter.emit.call_args_list,
[mock.call('docs.response-params.myservice.SampleOperation.Foo',
section=self.doc_structure.get_section('Foo')),
mock.call(('docs.response-params.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
)
def test_events_emitted_request_params(self):
self.request_params.traverse_and_document_shape(
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
self.assertEqual(
self.event_emitter.emit.call_args_list,
[mock.call('docs.request-params.myservice.SampleOperation.Foo',
section=self.doc_structure.get_section('Foo')),
mock.call(('docs.request-params.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
)
class TestDocumentMultipleDefaultValues(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentMultipleDefaultValues, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.add_shape_to_params('Bar', 'String', 'This describes bar.',
is_required=True)
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: string',
':param Foo: This describes foo.',
':type Bar: string',
':param Bar: **[REQUIRED]** This describes bar.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(string) --* This describes foo.',
' - **Bar** *(string) --* This describes bar.'
])
class TestDocumentInclude(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentInclude, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.include_params = [
DocumentedShape(
name='Baz', type_name='integer',
documentation='This describes baz.'
)
]
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape,
include=self.include_params
)
self.assert_contains_lines_in_order([
':type Foo: string',
':param Foo: This describes foo.',
':type Baz: int',
':param Baz: This describes baz.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape,
include=self.include_params
)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(string) --* This describes foo.',
' - **Baz** *(integer) --* This describes baz.'
])
class TestDocumentExclude(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentExclude, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.add_shape_to_params('Bar', 'String', 'This describes bar.',
is_required=True)
self.exclude_params = ['Foo']
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape,
exclude=self.exclude_params)
self.assert_contains_lines_in_order([
':type Bar: string',
':param Bar: **[REQUIRED]** This describes bar.'
])
self.assert_not_contains_lines([
':type Foo: string',
':param Foo: This describes foo.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape,
exclude=self.exclude_params)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Bar** *(string) --* This describes bar.'
])
self.assert_not_contains_line(
' - **Foo** *(string) --* This describes foo.')
class TestDocumentList(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentList, self).setUp()
self.add_shape(
{'List': {
'type': 'list',
'member': {'shape': 'String',
'documentation': 'A string element'}}})
self.add_shape_to_params(
'Foo', 'List',
'This describes the list. Each element of this list is a string.')
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: list',
':param Foo: This describes the list.',
' - *(string) --* A string element'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
(' - **Foo** *(list) --* This describes the list. '
'Each element of this list is a string.'),
' - *(string) --* A string element'
])
class TestDocumentMap(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentMap, self).setUp()
self.add_shape(
{'Map': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': 'String'}}})
self.add_shape_to_params('Foo', 'Map', 'This describes the map.')
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: dict',
':param Foo: This describes the map.',
' - *(string) --*',
' - *(string) --*'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(dict) --* This describes the map.',
' - *(string) --*',
' - *(string) --*'
])
class TestDocumentStructure(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentStructure, self).setUp()
self.add_shape(
{'Structure': {
'type': 'structure',
'members': {
'Member': {'shape': 'String',
'documentation': 'This is its member.'}}}})
self.add_shape_to_params(
'Foo', 'Structure', 'This describes the structure.')
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: dict',
':param Foo: This describes the structure.',
' - **Member** *(string) --* This is its member.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(dict) --* This describes the structure.',
' - **Member** *(string) --* This is its member.'
])
class TestDocumentRecursiveShape(BaseParamsDocumenterTest):
def setUp(self):
super(TestDocumentRecursiveShape, self).setUp()
self.add_shape(
{'Structure': {
'type': 'structure',
'members': {
'Foo': {
'shape': 'Structure',
'documentation': 'This is a recursive structure.'}}}})
self.add_shape_to_params(
'Foo', 'Structure', 'This describes the structure.')
def test_request_params(self):
self.request_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
':type Foo: dict',
':param Foo: This describes the structure.',
' - **Foo** *(dict) --* This is a recursive structure.'
])
def test_response_params(self):
self.response_params.document_params(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'- *(dict) --*',
' - **Foo** *(dict) --* This is a recursive structure.',
])
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from tests.unit.docs import BaseDocsTest
from botocore.hooks import HierarchicalEmitter
from botocore.docs.example import ResponseExampleDocumenter
from botocore.docs.example import RequestExampleDocumenter
from botocore.docs.utils import DocumentedShape
class BaseExampleDocumenterTest(BaseDocsTest):
def setUp(self):
super(BaseExampleDocumenterTest, self).setUp()
self.event_emitter = HierarchicalEmitter()
self.request_example = RequestExampleDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
self.response_example = ResponseExampleDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
class TestDocumentDefaultValue(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentDefaultValue, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call'
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo=\'string\'',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': \'string\'',
' }'
])
class TestDocumentNoMembers(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentNoMembers, self).setUp()
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call'
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call()'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
)
self.assert_contains_lines_in_order([
'::',
' {}'
])
class TestTraverseAndDocumentShape(BaseExampleDocumenterTest):
def setUp(self):
super(TestTraverseAndDocumentShape, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.event_emitter = mock.Mock()
self.request_example = RequestExampleDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
self.response_example = ResponseExampleDocumenter(
service_name='myservice', operation_name='SampleOperation',
event_emitter=self.event_emitter)
def test_events_emitted_response_example(self):
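        # Traversal emits one event per documented member (so handlers
        # can inject extra content) plus a final complete-section event.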
self.response_example.traverse_and_document_shape(
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
structure_section = self.doc_structure.get_section('structure-value')
self.assertEqual(
self.event_emitter.emit.call_args_list,
[mock.call('docs.response-example.myservice.SampleOperation.Foo',
section=structure_section.get_section(
'Foo').get_section('member-value')),
mock.call(('docs.response-example.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
)
def test_events_emitted_request_example(self):
self.request_example.traverse_and_document_shape(
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
structure_section = self.doc_structure.get_section('structure-value')
self.assertEqual(
self.event_emitter.emit.call_args_list,
[mock.call('docs.request-example.myservice.SampleOperation.Foo',
section=structure_section.get_section(
'Foo').get_section('member-value')),
mock.call(('docs.request-example.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
)
class TestDocumentEnumValue(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentEnumValue, self).setUp()
self.add_shape(
{'EnumString': {
'type': 'string',
'enum': [
'foo',
'bar'
]
}}
)
self.add_shape_to_params('Foo', 'EnumString', 'This describes foo.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call'
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo=\'foo\'|\'bar\'',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': \'foo\'|\'bar\'',
' }'
])
class TestDocumentMultipleDefaultValues(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentMultipleDefaultValues, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.add_shape_to_params('Bar', 'String', 'This describes bar.',
is_required=True)
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call'
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo=\'string\',',
' Bar=\'string\'',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': \'string\',',
' \'Bar\': \'string\'',
' }'
])
class TestDocumentInclude(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentInclude, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.include_params = [
DocumentedShape(
name='Baz', type_name='integer',
documentation='This describes baz.'
)
]
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call',
include=self.include_params
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo=\'string\',',
' Baz=123',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
include=self.include_params
)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': \'string\',',
' \'Baz\': 123',
' }'
])
class TestDocumentExclude(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentExclude, self).setUp()
self.add_shape_to_params('Foo', 'String', 'This describes foo.')
self.add_shape_to_params('Bar', 'String', 'This describes bar.',
is_required=True)
self.exclude_params = ['Foo']
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call',
exclude=self.exclude_params
)
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Bar=\'string\'',
' )'
])
self.assert_not_contains_line(' Foo=\'string\'')
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape,
exclude=self.exclude_params
)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Bar\': \'string\'',
' }'
])
self.assert_not_contains_line('\'Foo\': \'string\',')
class TestDocumentList(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentList, self).setUp()
self.add_shape(
{'List': {
'type': 'list',
'member': {'shape': 'String',
'documentation': 'A string element'}}})
self.add_shape_to_params('Foo', 'List', 'This describes the list.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call')
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo=[',
' \'string\',',
' ]',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': [',
' \'string\',',
' ]',
' }'
])
class TestDocumentMap(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentMap, self).setUp()
self.add_shape(
{'Map': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': 'String'}}})
self.add_shape_to_params('Foo', 'Map', 'This describes the map.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call')
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo={',
' \'string\': \'string\'',
' }',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': {',
' \'string\': \'string\'',
' }',
' }'
])
class TestDocumentStructure(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentStructure, self).setUp()
self.add_shape(
{'Structure': {
'type': 'structure',
'members': {
'Member': {'shape': 'String',
'documentation': 'This is its member.'}}}})
self.add_shape_to_params(
'Foo', 'Structure', 'This describes the structure.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call')
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo={',
' \'Member\': \'string\'',
' }',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': {',
' \'Member\': \'string\'',
' }',
' }'
])
class TestDocumentRecursiveShape(BaseExampleDocumenterTest):
def setUp(self):
super(TestDocumentRecursiveShape, self).setUp()
self.add_shape(
{'Structure': {
'type': 'structure',
'members': {
'Foo': {
'shape': 'Structure',
'documentation': 'This is a recursive structure.'}}}})
self.add_shape_to_params(
'Foo', 'Structure', 'This describes the structure.')
def test_request_example(self):
self.request_example.document_example(
self.doc_structure, self.operation_model.input_shape,
prefix='response = myclient.call')
self.assert_contains_lines_in_order([
'::',
' response = myclient.call(',
' Foo={',
' \'Foo\': {\'... recursive ...\'}',
' }',
' )'
])
def test_response_example(self):
self.response_example.document_example(
self.doc_structure, self.operation_model.input_shape)
self.assert_contains_lines_in_order([
'::',
' {',
' \'Foo\': {',
' \'Foo\': {\'... recursive ...\'}',
' }',
' }'
])
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.unit.docs import BaseDocsTest
from botocore.docs.client import ClientDocumenter
class TestClientDocumenter(BaseDocsTest):
def setUp(self):
super(TestClientDocumenter, self).setUp()
self.add_shape_to_params('Biz', 'String')
self.setup_client()
self.client_documenter = ClientDocumenter(self.client)
def test_document_client(self):
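        # Rendering the client docs yields the class directive, the
        # method list, and per-method request/response syntax sections,
        # in that order.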
self.client_documenter.document_client(self.doc_structure)
self.assert_contains_lines_in_order([
'======',
'Client',
'======',
'.. py:class:: MyService.Client',
' A low-level client representing AWS MyService::',
' client = session.create_client(\'myservice\')',
' These are the available methods:',
' * :py:meth:`~MyService.Client.can_paginate`',
' * :py:meth:`~MyService.Client.get_paginator`',
' * :py:meth:`~MyService.Client.get_waiter`',
' * :py:meth:`~MyService.Client.sample_operation`',
' .. py:method:: can_paginate(operation_name)',
' .. py:method:: get_paginator(operation_name)',
' .. py:method:: get_waiter(waiter_name)',
' .. py:method:: sample_operation(**kwargs)',
' **Request Syntax**',
' ::',
' response = client.sample_operation(',
' Biz=\'string\'',
' )',
' :type Biz: string',
' :param Biz:',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Biz\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Biz** *(string) --*'
])
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.unit.docs import BaseDocsTest
from botocore.docs.waiter import WaiterDocumenter
from botocore.waiter import WaiterModel
class TestWaiterDocumenter(BaseDocsTest):
def setUp(self):
super(TestWaiterDocumenter, self).setUp()
self.add_shape_to_params('Biz', 'String')
self.setup_client()
waiter_model = WaiterModel(self.waiter_json_model)
self.waiter_documenter = WaiterDocumenter(
client=self.client, service_waiter_model=waiter_model)
def test_document_waiters(self):
self.waiter_documenter.document_waiters(
self.doc_structure)
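        # The 15-second delay and 40 max attempts in the rendered text
        # come from the waiter's JSON model.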
self.assert_contains_lines_in_order([
'=======',
'Waiters',
'=======',
'The available waiters are:',
'* :py:class:`MyService.Waiter.SampleOperationComplete`',
'.. py:class:: MyService.Waiter.SampleOperationComplete',
' ::',
' waiter = client.get_waiter(\'sample_operation_complete\')',
' .. py:method:: wait(**kwargs)',
(' Polls :py:meth:`MyService.Client.sample_operation` '
'every 15 seconds until a successful state is reached. An error '
'is returned after 40 failed checks.'),
' **Request Syntax**',
' ::',
' waiter.wait(',
' Biz=\'string\'',
' )',
' :type Biz: string',
' :param Biz:',
' :type WaiterConfig: dict',
' :param WaiterConfig:',
('A dictionary that provides parameters to control waiting '
'behavior.'),
' - **Delay** *(integer) --*',
(' The amount of time in seconds to wait between attempts. '
'Default: 15'),
' - **MaxAttempts** *(integer) --*',
' The maximum number of attempts to be made. Default: 40',
' :returns: None'
])
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests import unittest
from botocore.compat import six
from botocore.docs.bcdoc.style import ReSTStyle
from botocore.docs.bcdoc.restdoc import ReSTDocument
class TestStyle(unittest.TestCase):
def test_spaces(self):
style = ReSTStyle(None, 4)
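        # The second argument is the indent width: spaces() returns that
        # many spaces per indentation level, and dedenting below zero
        # levels is a no-op.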
self.assertEqual(style.spaces(), '')
style.indent()
self.assertEqual(style.spaces(), ' ')
style.indent()
self.assertEqual(style.spaces(), ' ')
style.dedent()
self.assertEqual(style.spaces(), ' ')
style.dedent()
self.assertEqual(style.spaces(), '')
style.dedent()
self.assertEqual(style.spaces(), '')
def test_bold(self):
style = ReSTStyle(ReSTDocument())
style.bold('foobar')
self.assertEqual(style.doc.getvalue(), six.b('**foobar** '))
def test_italics(self):
style = ReSTStyle(ReSTDocument())
style.italics('foobar')
self.assertEqual(style.doc.getvalue(), six.b('*foobar* '))
def test_p(self):
style = ReSTStyle(ReSTDocument())
style.start_p()
style.doc.write('foo')
style.end_p()
self.assertEqual(style.doc.getvalue(), six.b('\n\nfoo\n\n'))
def test_code(self):
style = ReSTStyle(ReSTDocument())
style.code('foobar')
self.assertEqual(style.doc.getvalue(), six.b('``foobar`` '))
def test_h1(self):
style = ReSTStyle(ReSTDocument())
style.h1('foobar fiebaz')
self.assertEqual(
style.doc.getvalue(),
six.b('\n\n*************\nfoobar fiebaz\n*************\n\n'))
def test_h2(self):
style = ReSTStyle(ReSTDocument())
style.h2('foobar fiebaz')
self.assertEqual(
style.doc.getvalue(),
six.b('\n\n=============\nfoobar fiebaz\n=============\n\n'))
def test_h3(self):
style = ReSTStyle(ReSTDocument())
style.h3('foobar fiebaz')
self.assertEqual(
style.doc.getvalue(),
six.b('\n\n-------------\nfoobar fiebaz\n-------------\n\n'))
def test_ref(self):
style = ReSTStyle(ReSTDocument())
style.ref('foobar', 'http://foo.bar.com')
self.assertEqual(style.doc.getvalue(),
six.b(':doc:`foobar <http://foo.bar.com>`'))
def test_examples(self):
style = ReSTStyle(ReSTDocument())
self.assertTrue(style.doc.keep_data)
style.start_examples()
self.assertFalse(style.doc.keep_data)
style.end_examples()
self.assertTrue(style.doc.keep_data)
def test_codeblock(self):
style = ReSTStyle(ReSTDocument())
style.codeblock('foobar')
self.assertEqual(style.doc.getvalue(),
six.b('::\n\n foobar\n\n\n'))
def test_important(self):
style = ReSTStyle(ReSTDocument())
style.start_important()
style.end_important()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. warning::\n\n \n\n'))
def test_note(self):
style = ReSTStyle(ReSTDocument())
style.start_note()
style.end_note()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. note::\n\n \n\n'))
def test_danger(self):
style = ReSTStyle(ReSTDocument())
style.start_danger()
style.end_danger()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. danger::\n\n \n\n'))
def test_toctree_html(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.toctree()
style.tocitem('foo')
style.tocitem('bar')
self.assertEqual(
style.doc.getvalue(),
six.b('\n.. toctree::\n :maxdepth: 1'
'\n :titlesonly:\n\n foo\n bar\n'))
def test_toctree_man(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.toctree()
style.tocitem('foo')
style.tocitem('bar')
self.assertEqual(style.doc.getvalue(),
six.b('\n\n\n* foo\n\n\n* bar\n\n'))
def test_hidden_toctree_html(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.hidden_toctree()
style.hidden_tocitem('foo')
style.hidden_tocitem('bar')
self.assertEqual(
style.doc.getvalue(),
six.b('\n.. toctree::\n :maxdepth: 1'
'\n :hidden:\n\n foo\n bar\n'))
def test_hidden_toctree_non_html(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.hidden_toctree()
style.hidden_tocitem('foo')
style.hidden_tocitem('bar')
self.assertEqual(
style.doc.getvalue(),
six.b(''))
def test_href_link(self):
style = ReSTStyle(ReSTDocument())
style.start_a(attrs=[('href', 'http://example.org')])
style.doc.write('example')
style.end_a()
self.assertEqual(
style.doc.getvalue(),
six.b('`example <http://example.org>`__ ')
)
def test_escape_href_link(self):
style = ReSTStyle(ReSTDocument())
style.start_a(attrs=[('href', 'http://example.org')])
style.doc.write('foo: the next bar')
style.end_a()
self.assertEqual(
style.doc.getvalue(),
six.b('`foo\\: the next bar <http://example.org>`__ '))
def test_handle_no_text_hrefs(self):
style = ReSTStyle(ReSTDocument())
style.start_a(attrs=[('href', 'http://example.org')])
style.end_a()
self.assertEqual(style.doc.getvalue(),
six.b('`<http://example.org>`__ '))
def test_sphinx_reference_label_html(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.sphinx_reference_label('foo', 'bar')
self.assertEqual(style.doc.getvalue(), six.b(':ref:`bar <foo>`'))
def test_sphinx_reference_label_html_no_text(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.sphinx_reference_label('foo')
self.assertEqual(style.doc.getvalue(), six.b(':ref:`foo <foo>`'))
def test_sphinx_reference_label_non_html(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.sphinx_reference_label('foo', 'bar')
self.assertEqual(style.doc.getvalue(), six.b('bar'))
def test_sphinx_reference_label_non_html_no_text(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.sphinx_reference_label('foo')
self.assertEqual(style.doc.getvalue(), six.b('foo'))
def test_table_of_contents(self):
style = ReSTStyle(ReSTDocument())
style.table_of_contents()
self.assertEqual(style.doc.getvalue(), six.b('.. contents:: '))
def test_table_of_contents_with_title(self):
style = ReSTStyle(ReSTDocument())
style.table_of_contents(title='Foo')
self.assertEqual(style.doc.getvalue(), six.b('.. contents:: Foo\n'))
def test_table_of_contents_with_title_and_depth(self):
style = ReSTStyle(ReSTDocument())
style.table_of_contents(title='Foo', depth=2)
self.assertEqual(style.doc.getvalue(),
six.b('.. contents:: Foo\n :depth: 2\n'))
def test_sphinx_py_class(self):
style = ReSTStyle(ReSTDocument())
style.start_sphinx_py_class('FooClass')
style.end_sphinx_py_class()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. py:class:: FooClass\n\n \n\n'))
def test_sphinx_py_method(self):
style = ReSTStyle(ReSTDocument())
style.start_sphinx_py_method('method')
style.end_sphinx_py_method()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. py:method:: method\n\n \n\n'))
def test_sphinx_py_method_with_params(self):
style = ReSTStyle(ReSTDocument())
style.start_sphinx_py_method('method', 'foo=None')
style.end_sphinx_py_method()
self.assertEqual(
style.doc.getvalue(),
six.b('\n\n.. py:method:: method(foo=None)\n\n \n\n'))
def test_sphinx_py_attr(self):
style = ReSTStyle(ReSTDocument())
style.start_sphinx_py_attr('Foo')
style.end_sphinx_py_attr()
self.assertEqual(style.doc.getvalue(),
six.b('\n\n.. py:attribute:: Foo\n\n \n\n'))
def test_write_py_doc_string(self):
style = ReSTStyle(ReSTDocument())
docstring = (
'This describes a function\n'
':param foo: Describes foo\n'
'returns: None'
)
style.write_py_doc_string(docstring)
self.assertEqual(style.doc.getvalue(), six.b(docstring + '\n'))
def test_new_line(self):
style = ReSTStyle(ReSTDocument())
style.new_line()
self.assertEqual(style.doc.getvalue(), six.b('\n'))
style.do_p = False
style.new_line()
self.assertEqual(style.doc.getvalue(), six.b('\n\n'))
def test_list(self):
style = ReSTStyle(ReSTDocument())
style.li('foo')
self.assertEqual(style.doc.getvalue(), six.b('\n* foo\n\n'))
def test_non_top_level_lists_are_indented(self):
style = ReSTStyle(ReSTDocument())
# Start the top level list
style.start_ul()
# Write one list element
style.start_li()
style.doc.handle_data('foo')
style.end_li()
self.assertEqual(style.doc.getvalue(), six.b("\n\n\n* foo\n"))
# Start the nested list
style.start_ul()
# Write an element to the nested list
style.start_li()
style.doc.handle_data('bar')
style.end_li()
self.assertEqual(style.doc.getvalue(),
six.b("\n\n\n* foo\n\n\n \n * bar\n "))
def test_external_link(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.external_link('MyLink', 'http://example.com/foo')
self.assertEqual(style.doc.getvalue(),
six.b('`MyLink <http://example.com/foo>`_'))
def test_external_link_in_man_page(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.external_link('MyLink', 'http://example.com/foo')
self.assertEqual(style.doc.getvalue(), six.b('MyLink'))
def test_internal_link(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'html'
style.internal_link('MyLink', '/index')
self.assertEqual(
style.doc.getvalue(),
six.b(':doc:`MyLink </index>`')
)
def test_internal_link_in_man_page(self):
style = ReSTStyle(ReSTDocument())
style.doc.target = 'man'
style.internal_link('MyLink', '/index')
self.assertEqual(style.doc.getvalue(), six.b('MyLink'))
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests import unittest
from botocore.compat import six
from botocore.docs.bcdoc.restdoc import ReSTDocument, DocumentStructure
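# A minimal usage sketch, assuming only the behavior exercised by the
# tests below (section names and contents are illustrative):
#
#   doc = DocumentStructure('mydoc')
#   section = doc.add_new_section('intro')
#   section.writeln('hello')
#   doc.flush_structure()  # -> b'hello\n'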
class TestReSTDocument(unittest.TestCase):
def test_write(self):
doc = ReSTDocument()
doc.write('foo')
self.assertEqual(doc.getvalue(), six.b('foo'))
def test_writeln(self):
doc = ReSTDocument()
doc.writeln('foo')
self.assertEqual(doc.getvalue(), six.b('foo\n'))
def test_include_doc_string(self):
doc = ReSTDocument()
doc.include_doc_string('<p>this is a <code>test</code></p>')
self.assertEqual(doc.getvalue(), six.b('\n\nthis is a ``test`` \n\n'))
def test_remove_doc_string(self):
doc = ReSTDocument()
doc.writeln('foo')
doc.include_doc_string('<p>this is a <code>test</code></p>')
doc.remove_last_doc_string()
self.assertEqual(doc.getvalue(), six.b('foo\n'))
def test_add_links(self):
doc = ReSTDocument()
doc.hrefs['foo'] = 'https://example.com/'
self.assertEqual(
doc.getvalue(), six.b('\n\n.. _foo: https://example.com/\n'))
class TestDocumentStructure(unittest.TestCase):
def setUp(self):
self.name = 'mydoc'
self.doc_structure = DocumentStructure(self.name)
def test_name(self):
self.assertEqual(self.doc_structure.name, self.name)
def test_path(self):
self.assertEqual(self.doc_structure.path, [self.name])
self.doc_structure.path = ['foo']
self.assertEqual(self.doc_structure.path, ['foo'])
def test_add_new_section(self):
section = self.doc_structure.add_new_section('mysection')
# Ensure the name of the section is correct
self.assertEqual(section.name, 'mysection')
# Ensure we can get the section.
self.assertEqual(
self.doc_structure.get_section('mysection'), section)
# Ensure the path is correct
self.assertEqual(section.path, ['mydoc', 'mysection'])
        # Ensure some of the necessary attributes are passed to
        # the section.
self.assertEqual(section.style.indentation,
self.doc_structure.style.indentation)
self.assertEqual(section.translation_map,
self.doc_structure.translation_map)
self.assertEqual(section.hrefs,
self.doc_structure.hrefs)
def test_delete_section(self):
section = self.doc_structure.add_new_section('mysection')
self.assertEqual(
self.doc_structure.get_section('mysection'), section)
self.doc_structure.delete_section('mysection')
with self.assertRaises(KeyError):
section.get_section('mysection')
def test_create_sections_at_instantiation(self):
sections = ['intro', 'middle', 'end']
self.doc_structure = DocumentStructure(
self.name, section_names=sections)
# Ensure the sections are attached to the new document structure.
for section_name in sections:
section = self.doc_structure.get_section(section_name)
self.assertEqual(section.name, section_name)
def test_flush_structure(self):
section = self.doc_structure.add_new_section('mysection')
subsection = section.add_new_section('mysubsection')
self.doc_structure.writeln('1')
section.writeln('2')
subsection.writeln('3')
second_section = self.doc_structure.add_new_section('mysection2')
second_section.writeln('4')
contents = self.doc_structure.flush_structure()
# Ensure the contents were flushed out correctly
self.assertEqual(contents, six.b('1\n2\n3\n4\n'))
def test_flush_structure_hrefs(self):
section = self.doc_structure.add_new_section('mysection')
section.writeln('section contents')
self.doc_structure.hrefs['foo'] = 'www.foo.com'
section.hrefs['bar'] = 'www.bar.com'
contents = self.doc_structure.flush_structure()
self.assertIn(six.b('.. _foo: www.foo.com'), contents)
self.assertIn(six.b('.. _bar: www.bar.com'), contents)
def test_available_sections(self):
self.doc_structure.add_new_section('mysection')
self.doc_structure.add_new_section('mysection2')
self.assertEqual(
self.doc_structure.available_sections,
['mysection', 'mysection2']
)
def test_context(self):
context = {'Foo': 'Bar'}
section = self.doc_structure.add_new_section(
'mysection', context=context)
self.assertEqual(section.context, context)
# Make sure if context is not specified it is empty.
section = self.doc_structure.add_new_section('mysection2')
self.assertEqual(section.context, {})
def test_remove_all_sections(self):
self.doc_structure.add_new_section('mysection2')
self.doc_structure.remove_all_sections()
self.assertEqual(self.doc_structure.available_sections, [])
def test_clear_text(self):
self.doc_structure.write('Foo')
self.doc_structure.clear_text()
self.assertEqual(self.doc_structure.flush_structure(), six.b(''))
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mock
from tests import unittest
import botocore.docs.bcdoc.docstringparser as parser
from botocore.docs.bcdoc.restdoc import ReSTDocument
class TestDocStringParser(unittest.TestCase):
def parse(self, html):
docstring_parser = parser.DocStringParser(ReSTDocument())
docstring_parser.feed(html)
docstring_parser.close()
return docstring_parser.doc.getvalue()
def assert_contains_exact_lines_in_order(self, actual, expected):
# Get each line and filter out empty lines
contents = actual.split(b'\n')
contents = [line for line in contents if line and not line.isspace()]
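        # Each expected line must appear at or after the previous match;
        # slicing the remaining contents enforces the ordering.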
for line in expected:
self.assertIn(line, contents)
beginning = contents.index(line)
contents = contents[beginning:]
def test_nested_lists(self):
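        # A nested <ul> renders as a bullet indented under its parent.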
html = "<ul><li>Wello</li><ul><li>Horld</li></ul></ul>"
result = self.parse(html)
self.assert_contains_exact_lines_in_order(result, [
b'* Wello',
b' * Horld'
])
def test_nested_lists_with_extra_white_space(self):
html = "<ul> <li> Wello</li><ul> <li> Horld</li></ul></ul>"
result = self.parse(html)
self.assert_contains_exact_lines_in_order(result, [
b'* Wello',
b' * Horld'
])
class TestHTMLTree(unittest.TestCase):
def setUp(self):
self.style = mock.Mock()
self.doc = mock.Mock()
self.doc.style = self.style
self.tree = parser.HTMLTree(self.doc)
def test_add_tag(self):
self.tree.add_tag('foo')
self.assertIsInstance(self.tree.current_node, parser.TagNode)
self.assertEqual(self.tree.current_node.tag, 'foo')
def test_add_unsupported_tag(self):
del self.style.start_foo
del self.style.end_foo
self.tree.add_tag('foo')
self.assertIn('foo', self.tree.unhandled_tags)
def test_add_data(self):
self.tree.add_data('foo')
self.assertNotIsInstance(self.tree.current_node, parser.DataNode)
node = self.tree.head.children[0]
self.assertIsInstance(node, parser.DataNode)
self.assertEqual(node.data, 'foo')
class TestStemNode(unittest.TestCase):
def setUp(self):
self.style = mock.Mock()
self.doc = mock.Mock()
self.doc.style = self.style
self.node = parser.StemNode()
def test_add_child(self):
child = parser.StemNode()
self.node.add_child(child)
self.assertIn(child, self.node.children)
self.assertEqual(child.parent, self.node)
def test_write(self):
self.node.add_child(mock.Mock())
self.node.add_child(mock.Mock())
self.node.write(mock.Mock())
for child in self.node.children:
self.assertTrue(child.write.called)
class TestTagNode(unittest.TestCase):
def setUp(self):
self.style = mock.Mock()
self.doc = mock.Mock()
self.doc.style = self.style
self.tag = 'foo'
self.node = parser.TagNode(self.tag)
def test_write_calls_style(self):
self.node.write(self.doc)
self.assertTrue(self.style.start_foo.called)
self.assertTrue(self.style.end_foo.called)
def test_write_unsupported_tag(self):
del self.style.start_foo
del self.style.end_foo
try:
self.node.write(self.doc)
except AttributeError as e:
self.fail(str(e))
class TestDataNode(unittest.TestCase):
def setUp(self):
self.style = mock.Mock()
self.doc = mock.Mock()
self.doc.style = self.style
def test_string_data(self):
node = parser.DataNode('foo')
self.assertEqual(node.data, 'foo')
def test_non_string_data_raises_error(self):
with self.assertRaises(ValueError):
parser.DataNode(5)
def test_lstrip(self):
node = parser.DataNode(' foo')
node.lstrip()
self.assertEqual(node.data, 'foo')
def test_write(self):
node = parser.DataNode('foo bar baz')
self.doc.translate_words.return_value = ['foo', 'bar', 'baz']
node.write(self.doc)
self.doc.handle_data.assert_called_once_with('foo bar baz')
def test_write_space(self):
node = parser.DataNode(' ')
node.write(self.doc)
self.doc.handle_data.assert_called_once_with(' ')
self.doc.handle_data.reset_mock()
node = parser.DataNode(' ')
node.write(self.doc)
self.doc.handle_data.assert_called_once_with(' ')
def test_write_empty_string(self):
node = parser.DataNode('')
node.write(self.doc)
self.assertFalse(self.doc.handle_data.called)
class TestLineItemNode(unittest.TestCase):
def setUp(self):
self.style = mock.Mock()
self.doc = mock.Mock()
self.doc.style = self.style
self.doc.translate_words.return_value = ['foo']
self.node = parser.LineItemNode()
def test_write_strips_white_space(self):
self.node.add_child(parser.DataNode(' foo'))
self.node.write(self.doc)
self.doc.handle_data.assert_called_once_with('foo')
def test_write_strips_nested_white_space(self):
self.node.add_child(parser.DataNode(' '))
tag_child = parser.TagNode('foo')
tag_child.add_child(parser.DataNode(' '))
tag_child_2 = parser.TagNode('foo')
tag_child_2.add_child(parser.DataNode(' foo'))
tag_child.add_child(tag_child_2)
self.node.add_child(tag_child)
self.node.write(self.doc)
self.doc.handle_data.assert_called_once_with('foo')
def test_write_only_strips_until_text_is_found(self):
self.node.add_child(parser.DataNode(' '))
tag_child = parser.TagNode('foo')
tag_child.add_child(parser.DataNode(' '))
tag_child_2 = parser.TagNode('foo')
tag_child_2.add_child(parser.DataNode(' foo'))
tag_child_2.add_child(parser.DataNode(' '))
tag_child.add_child(tag_child_2)
self.node.add_child(tag_child)
self.node.write(self.doc)
calls = [mock.call('foo'), mock.call(' ')]
self.doc.handle_data.assert_has_calls(calls)
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import glob
import json
import pprint
import logging
import difflib
from tests import create_session
import botocore.session
from botocore import xform_name
from botocore import parsers
log = logging.getLogger(__name__)
SPECIAL_CASES = [
'iam-get-user-policy.xml', # Needs the JSON decode from handlers.py
'iam-list-roles.xml', # Needs the JSON decode from handlers.py for the policy
's3-get-bucket-location.xml', # Confirmed, this will need a special handler
    #'s3-list-multipart-uploads.xml', # Bug in model, missing delimiter
'cloudformation-get-template.xml', # Need to JSON decode the template body.
]
def _test_parsed_response(xmlfile, response_body, operation_model, expected):
response = {
'body': response_body,
'status_code': 200,
'headers': {}
}
for case in SPECIAL_CASES:
if case in xmlfile:
print("SKIP: %s" % xmlfile)
return
if 'errors' in xmlfile:
response['status_code'] = 400
    # Handle the special-cased __headers__ key if it exists.
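    # A fixture body might look like (hypothetical example):
    #   {"__headers__": {"x-amzn-requestid": "abc"}, "Foo": "bar"}
    # The headers are split out and the rest is re-encoded as the body.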
if b'__headers__' in response_body:
loaded = json.loads(response_body.decode('utf-8'))
response['headers'] = loaded.pop('__headers__')
response['body'] = json.dumps(loaded).encode('utf-8')
protocol = operation_model.service_model.protocol
parser_cls = parsers.PROTOCOL_PARSERS[protocol]
parser = parser_cls(timestamp_parser=lambda x: x)
parsed = parser.parse(response, operation_model.output_shape)
parsed = _convert_bytes_to_str(parsed)
expected['ResponseMetadata']['HTTPStatusCode'] = response['status_code']
expected['ResponseMetadata']['HTTPHeaders'] = response['headers']
    if parsed != expected:
        log.debug('-' * 40)
        log.debug("XML FILE:\n" + xmlfile)
        log.debug('-' * 40)
        log.debug("ACTUAL:\n" + pprint.pformat(parsed))
        log.debug('-' * 40)
        log.debug("EXPECTED:\n" + pprint.pformat(expected))
        # Diffing borrowed from assertDictEqual, though this doesn't
        # handle the case when unicode literals are used in one
        # dict but not in the other (and we want to consider them
        # as being equal).
        pretty_expected = pprint.pformat(expected, width=1).splitlines()
        pretty_parsed = pprint.pformat(parsed, width=1).splitlines()
        diff = ('\n' + '\n'.join(
            difflib.ndiff(pretty_expected, pretty_parsed)))
        raise AssertionError("Dicts are not equal:\n%s" % diff)
def _convert_bytes_to_str(parsed):
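    # Recursively decode any bytes values so the parsed response can be
    # compared against the JSON-loaded expected result, which contains
    # only text.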
if isinstance(parsed, dict):
new_dict = {}
for key, value in parsed.items():
new_dict[key] = _convert_bytes_to_str(value)
return new_dict
elif isinstance(parsed, bytes):
return parsed.decode('utf-8')
elif isinstance(parsed, list):
new_list = []
for item in parsed:
new_list.append(_convert_bytes_to_str(item))
return new_list
else:
return parsed
def test_xml_parsing():
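    # nose test generator: each (xml fixture, expected json) pair under
    # responses/ and errors/ yields its own test case.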
for dp in ['responses', 'errors']:
data_path = os.path.join(os.path.dirname(__file__), 'xml')
data_path = os.path.join(data_path, dp)
session = create_session()
xml_files = glob.glob('%s/*.xml' % data_path)
service_names = set()
for fn in xml_files:
service_names.add(os.path.split(fn)[1].split('-')[0])
for service_name in service_names:
service_model = session.get_service_model(service_name)
service_xml_files = glob.glob('%s/%s-*.xml' % (data_path,
service_name))
for xmlfile in service_xml_files:
expected = _get_expected_parsed_result(xmlfile)
operation_model = _get_operation_model(service_model, xmlfile)
raw_response_body = _get_raw_response_body(xmlfile)
yield _test_parsed_response, xmlfile, raw_response_body, \
operation_model, expected
def _get_raw_response_body(xmlfile):
with open(xmlfile, 'rb') as f:
return f.read()
def _get_operation_model(service_model, filename):
dirname, filename = os.path.split(filename)
basename = os.path.splitext(filename)[0]
sn, opname = basename.split('-', 1)
    # In order to have multiple tests for the same
    # operation, a '#' char is used to separate
    # operation names from some other suffix so that
    # the tests have different filenames, e.g.
    # my-operation#1.xml, my-operation#2.xml.
opname = opname.split('#')[0]
operation_names = service_model.operation_names
for operation_name in operation_names:
if xform_name(operation_name) == opname.replace('-', '_'):
return service_model.operation_model(operation_name)
    return None
def _get_expected_parsed_result(filename):
dirname, filename = os.path.split(filename)
basename = os.path.splitext(filename)[0]
jsonfile = os.path.join(dirname, basename + '.json')
with open(jsonfile) as f:
return json.load(f)
def test_json_errors_parsing():
# The outputs/ directory has sample output responses
# For each file in outputs/ there's a corresponding file
# in expected/ that has the expected parsed response.
base_dir = os.path.join(os.path.dirname(__file__), 'json')
json_responses_dir = os.path.join(base_dir, 'errors')
expected_parsed_dir = os.path.join(base_dir, 'expected')
session = botocore.session.get_session()
for json_response_file in os.listdir(json_responses_dir):
# Files look like: 'datapipeline-create-pipeline.json'
service_name, operation_name = os.path.splitext(
json_response_file)[0].split('-', 1)
expected_parsed_response = os.path.join(expected_parsed_dir,
json_response_file)
raw_response_file = os.path.join(json_responses_dir,
json_response_file)
with open(expected_parsed_response) as f:
expected = json.load(f)
service_model = session.get_service_model(service_name)
operation_names = service_model.operation_names
operation_model = None
for op_name in operation_names:
if xform_name(op_name) == operation_name.replace('-', '_'):
operation_model = service_model.operation_model(op_name)
with open(raw_response_file, 'rb') as f:
raw_response_body = f.read()
yield _test_parsed_response, raw_response_file, \
raw_response_body, operation_model, expected
def _uhg_test_json_parsing():
input_path = os.path.join(os.path.dirname(__file__), 'json')
input_path = os.path.join(input_path, 'inputs')
output_path = os.path.join(os.path.dirname(__file__), 'json')
output_path = os.path.join(output_path, 'outputs')
session = botocore.session.get_session()
jsonfiles = glob.glob('%s/*.json' % input_path)
service_names = set()
for fn in jsonfiles:
service_names.add(os.path.split(fn)[1].split('-')[0])
for service_name in service_names:
service_model = session.get_service_model(service_name)
service_json_files = glob.glob('%s/%s-*.json' % (input_path,
service_name))
for jsonfile in service_json_files:
expected = _get_expected_parsed_result(jsonfile)
operation_model = _get_operation_model(service_model, jsonfile)
with open(jsonfile, 'rb') as f:
raw_response_body = f.read()
yield _test_parsed_response, jsonfile, \
raw_response_body, operation_model, expected
# TODO: handle the __headers__ special casing.
#class TestHeaderParsing(unittest.TestCase):
#
# maxDiff = None
#
# def setUp(self):
# self.session = botocore.session.get_session()
# self.s3 = self.session.get_service('s3')
#
# def test_put_object(self):
# http_response = Mock()
# http_response.encoding = 'utf-8'
# http_response.headers = CaseInsensitiveDict(
# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT',
# 'Content-Length': '0',
# 'x-amz-request-id': '2B74ECB010FF029E',
# 'ETag': '"b081e66e7e0c314285c655cafb4d1e71"',
# 'x-amz-id-2': 'bKECRRBFttBRVbJPIVBLQwwipI0i+s9HMvNFdttR17ouR0pvQSKEJUR+1c6cW1nQ',
# 'Server': 'AmazonS3',
# 'content-type': 'text/xml'})
# http_response.content = ''
# put_object = self.s3.get_operation('PutObject')
# expected = {"ETag": '"b081e66e7e0c314285c655cafb4d1e71"'}
# response_data = get_response(self.session, put_object, http_response)[1]
# self.assertEqual(response_data, expected)
#
# def test_head_object(self):
# http_response = Mock()
# http_response.encoding = 'utf-8'
# http_response.headers = CaseInsensitiveDict(
# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT',
# 'Content-Length': '265',
# 'x-amz-request-id': '2B74ECB010FF029E',
# 'ETag': '"40d06eb6194712ac1c915783004ef730"',
# 'Server': 'AmazonS3',
# 'content-type': 'binary/octet-stream',
# 'Content-Type': 'binary/octet-stream',
# 'accept-ranges': 'bytes',
# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT',
# 'x-amz-server-side-encryption': 'AES256',
# 'x-amz-meta-mykey1': 'value1',
# 'x-amz-meta-mykey2': 'value2',
# })
# http_response.content = ''
# http_response.request.method = 'HEAD'
# put_object = self.s3.get_operation('HeadObject')
# expected = {"AcceptRanges": "bytes",
# "ContentType": "binary/octet-stream",
# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT",
# "ContentLength": "265",
# "ETag": '"40d06eb6194712ac1c915783004ef730"',
# "ServerSideEncryption": "AES256",
# "Metadata": {
# 'mykey1': 'value1',
# 'mykey2': 'value2',
# }}
# response_data = get_response(self.session, put_object,
# http_response)[1]
# self.assertEqual(response_data, expected)
#
# def test_list_objects_with_invalid_content_length(self):
# http_response = Mock()
# http_response.encoding = 'utf-8'
# http_response.headers = CaseInsensitiveDict(
# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT',
# # We say we have 265 bytes but we're returning 0,
# # this should raise an exception because this is not
# # a HEAD request.
# 'Content-Length': '265',
# 'x-amz-request-id': '2B74ECB010FF029E',
# 'ETag': '"40d06eb6194712ac1c915783004ef730"',
# 'Server': 'AmazonS3',
# 'content-type': 'binary/octet-stream',
# 'Content-Type': 'binary/octet-stream',
# 'accept-ranges': 'bytes',
# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT',
# 'x-amz-server-side-encryption': 'AES256'
# })
# http_response.content = ''
# http_response.request.method = 'GET'
# list_objects = self.s3.get_operation('ListObjects')
# expected = {"AcceptRanges": "bytes",
# "ContentType": "binary/octet-stream",
# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT",
# "ContentLength": "265",
# "ETag": '"40d06eb6194712ac1c915783004ef730"',
# "ServerSideEncryption": "AES256"
# }
# with self.assertRaises(IncompleteReadError):
# response_data = get_response(self.session, list_objects,
# http_response)[1]
#
# def test_head_object_with_json(self):
# http_response = Mock()
# http_response.encoding = 'utf-8'
# http_response.headers = CaseInsensitiveDict(
# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT',
# 'Content-Length': '0',
# 'x-amz-request-id': '2B74ECB010FF029E',
# 'ETag': '"40d06eb6194712ac1c915783004ef730"',
# 'Server': 'AmazonS3',
# 'content-type': 'application/json',
# 'Content-Type': 'application/json',
# 'accept-ranges': 'bytes',
# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT',
# 'x-amz-server-side-encryption': 'AES256'})
# http_response.content = ''
# put_object = self.s3.get_operation('HeadObject')
# expected = {"AcceptRanges": "bytes",
# "ContentType": "application/json",
# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT",
# "ContentLength": "0",
# "ETag": '"40d06eb6194712ac1c915783004ef730"',
# "ServerSideEncryption": "AES256"
# }
# response_data = get_response(self.session, put_object,
# http_response)[1]
# self.assertEqual(response_data, expected)
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.session
from botocore.utils import ArgumentGenerator
class ArgumentGeneratorError(AssertionError):
def __init__(self, service_name, operation_name,
generated, message):
full_msg = (
'Error generating skeleton for %s:%s, %s\nActual:\n%s' % (
service_name, operation_name, message, generated))
        super(ArgumentGeneratorError, self).__init__(full_msg)
def test_can_generate_all_inputs():
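    # For every operation that has a non-empty input shape, yield a
    # test that a skeleton argument dict can be generated for it.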
session = botocore.session.get_session()
generator = ArgumentGenerator()
for service_name in session.get_available_services():
service_model = session.get_service_model(service_name)
for operation_name in service_model.operation_names:
operation_model = service_model.operation_model(operation_name)
input_shape = operation_model.input_shape
if input_shape is not None and input_shape.members:
yield (_test_can_generate_skeleton, generator,
input_shape, service_name, operation_name)
def _test_can_generate_skeleton(generator, shape, service_name,
operation_name):
generated = generator.generate_skeleton(shape)
    # Do some basic sanity checks to make sure the generated shape
    # looks right. We're mostly just ensuring that generate_skeleton
    # doesn't throw an exception.
if not isinstance(generated, dict):
raise ArgumentGeneratorError(
service_name, operation_name,
generated, 'expected a dict')
# The generated skeleton also shouldn't be empty (the test
# generator has already filtered out input_shapes of None).
if len(generated) == 0:
raise ArgumentGeneratorError(
service_name, operation_name,
generated, "generated arguments were empty")
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
from tests import unittest, random_chars
from nose.plugins.attrib import attr
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
REGION = 'us-east-1'
def setUp(self):
self.client = self.session.create_client('kinesis', self.REGION)
@classmethod
def setUpClass(cls):
cls.session = botocore.session.get_session()
cls.stream_name = 'botocore-test-%s' % random_chars(10)
client = cls.session.create_client('kinesis', cls.REGION)
client.create_stream(StreamName=cls.stream_name,
ShardCount=1)
waiter = client.get_waiter('stream_exists')
waiter.wait(StreamName=cls.stream_name)
@classmethod
def tearDownClass(cls):
client = cls.session.create_client('kinesis', cls.REGION)
client.delete_stream(StreamName=cls.stream_name)
def test_list_streams(self):
parsed = self.client.list_streams()
self.assertIn('StreamNames', parsed)
@attr('slow')
def test_can_put_stream_blob(self):
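        # Write one record, then read it back with a TRIM_HORIZON shard
        # iterator, which starts reading from the oldest record in the
        # shard.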
self.client.put_record(
StreamName=self.stream_name, PartitionKey='foo', Data='foobar')
# Give it a few seconds for the record to get into the stream.
time.sleep(10)
stream = self.client.describe_stream(StreamName=self.stream_name)
shard = stream['StreamDescription']['Shards'][0]
shard_iterator = self.client.get_shard_iterator(
StreamName=self.stream_name, ShardId=shard['ShardId'],
ShardIteratorType='TRIM_HORIZON')
records = self.client.get_records(
ShardIterator=shard_iterator['ShardIterator'])
self.assertTrue(len(records['Records']) > 0)
self.assertEqual(records['Records'][0]['Data'], b'foobar')
@attr('slow')
def test_can_put_records_single_blob(self):
self.client.put_records(
StreamName=self.stream_name,
Records=[{
'Data': 'foobar',
'PartitionKey': 'foo'
}]
)
# Give it a few seconds for the record to get into the stream.
time.sleep(10)
stream = self.client.describe_stream(StreamName=self.stream_name)
shard = stream['StreamDescription']['Shards'][0]
shard_iterator = self.client.get_shard_iterator(
StreamName=self.stream_name, ShardId=shard['ShardId'],
ShardIteratorType='TRIM_HORIZON')
records = self.client.get_records(
ShardIterator=shard_iterator['ShardIterator'])
self.assertTrue(len(records['Records']) > 0)
self.assertEqual(records['Records'][0]['Data'], b'foobar')
@attr('slow')
def test_can_put_records_multiple_blob(self):
self.client.put_records(
StreamName=self.stream_name,
Records=[{
'Data': 'foobar',
'PartitionKey': 'foo'
}, {
'Data': 'barfoo',
'PartitionKey': 'foo'
}]
)
# Give it a few seconds for the record to get into the stream.
time.sleep(10)
stream = self.client.describe_stream(StreamName=self.stream_name)
shard = stream['StreamDescription']['Shards'][0]
shard_iterator = self.client.get_shard_iterator(
StreamName=self.stream_name, ShardId=shard['ShardId'],
ShardIteratorType='TRIM_HORIZON')
records = self.client.get_records(
ShardIterator=shard_iterator['ShardIterator'])
self.assertTrue(len(records['Records']) == 2)
# Verify that both made it through.
record_data = [r['Data'] for r in records['Records']]
self.assertEqual(sorted([b'foobar', b'barfoo']), sorted(record_data))
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestCanChangeParsing(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_can_change_timestamp_with_clients(self):
factory = self.session.get_component('response_parser_factory')
factory.set_parser_defaults(timestamp_parser=lambda x: str(x))
        # Now if we get a response with timestamps in the model, they
        # will be returned as strings. We verify this through the
        # client interface.
s3 = self.session.create_client('s3', 'us-west-2')
parsed = s3.list_buckets()
dates = [bucket['CreationDate'] for bucket in parsed['Buckets']]
self.assertTrue(all(isinstance(date, str) for date in dates),
"Expected all str types but instead got: %s" % dates)
def test_maps_service_name_when_overriden(self):
ses = self.session.get_service_model('ses')
self.assertEqual(ses.endpoint_prefix, 'email')
# But we should map the service_name to be the same name
# used when calling get_service_model which is different
# than the endpoint_prefix.
self.assertEqual(ses.service_name, 'ses')
    def test_maps_service_name_from_client(self):
        # Same check as test_maps_service_name_when_overriden,
        # except through the client interface.
client = self.session.create_client('ses', region_name='us-east-1')
self.assertEqual(client.meta.service_model.service_name, 'ses')
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from botocore.exceptions import ClientError
class TestSTS(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
credentials = self.session.get_credentials()
if credentials.token is not None:
self.skipTest('STS tests require long-term credentials')
def test_regionalized_endpoints(self):
sts = self.session.create_client('sts', region_name='ap-southeast-1')
response = sts.get_session_token()
        # Avoid revealing any temporary keys if the assertion fails.
self.assertIn('Credentials', response.keys())
        # Since we have to activate STS regionalization, we will test
        # that you can send an STS request to a regionalized endpoint
        # by making a call with an explicitly wrong region name.
sts = self.session.create_client(
'sts', region_name='ap-southeast-1',
endpoint_url='https://sts.us-west-2.amazonaws.com')
self.assertEqual(sts.meta.region_name, 'ap-southeast-1')
self.assertEqual(sts.meta.endpoint_url,
'https://sts.us-west-2.amazonaws.com')
        # A signing error is raised with the incorrect region name included.
        with self.assertRaisesRegexp(ClientError, 'ap-southeast-1'):
            sts.get_session_token()
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import itertools
import botocore.session
class TestRDSPagination(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('rds', 'us-west-2')
def test_can_paginate_reserved_instances(self):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator(
'describe_reserved_db_instances_offerings')
generator = paginator.paginate()
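        # islice pulls pages lazily from the paginator; differing Marker
        # values across pages show that pagination actually advanced.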
results = list(itertools.islice(generator, 0, 3))
self.assertEqual(len(results), 3)
self.assertTrue(results[0]['Marker'] != results[1]['Marker'])
def test_can_paginate_orderable_db(self):
paginator = self.client.get_paginator(
'describe_orderable_db_instance_options')
generator = paginator.paginate(Engine='mysql')
results = list(itertools.islice(generator, 0, 2))
self.assertEqual(len(results), 2)
        self.assertNotEqual(results[0].get('Marker'),
                            results[1].get('Marker'))
if __name__ == '__main__':
unittest.main()
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
import botocore.session
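# Trust policy that lets the Elastic Transcoder service assume the
# role created for the pipeline tests below.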
DEFAULT_ROLE_POLICY = """\
{"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "elastictranscoder.amazonaws.com"
},
"Effect": "Allow",
"Sid": "1"
}
]}
"""
class TestElasticTranscoder(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'elastictranscoder', 'us-east-1')
self.s3_client = self.session.create_client('s3', 'us-east-1')
self.iam_client = self.session.create_client('iam', 'us-east-1')
def create_bucket(self):
bucket_name = 'ets-bucket-1-%s' % random_chars(50)
self.s3_client.create_bucket(Bucket=bucket_name)
self.addCleanup(
self.s3_client.delete_bucket, Bucket=bucket_name)
return bucket_name
def create_iam_role(self):
role_name = 'ets-role-name-1-%s' % random_chars(10)
parsed = self.iam_client.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=DEFAULT_ROLE_POLICY)
arn = parsed['Role']['Arn']
self.addCleanup(
self.iam_client.delete_role, RoleName=role_name)
return arn
    def test_list_pipelines(self):
parsed = self.client.list_pipelines()
self.assertIn('Pipelines', parsed)
def test_list_presets(self):
parsed = self.client.list_presets(Ascending='true')
self.assertIn('Presets', parsed)
def test_create_pipeline(self):
# In order to create a pipeline, we need to create 2 s3 buckets
# and 1 iam role.
input_bucket = self.create_bucket()
output_bucket = self.create_bucket()
role = self.create_iam_role()
pipeline_name = 'botocore-test-create-%s' % random_chars(10)
parsed = self.client.create_pipeline(
InputBucket=input_bucket, OutputBucket=output_bucket,
Role=role, Name=pipeline_name,
Notifications={'Progressing': '', 'Completed': '',
'Warning': '', 'Error': ''})
pipeline_id = parsed['Pipeline']['Id']
self.addCleanup(self.client.delete_pipeline, Id=pipeline_id)
self.assertIn('Pipeline', parsed)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from botocore.exceptions import ClientError
from botocore.vendored import six
import botocore.session
class TestGlacier(unittest.TestCase):
# We have to use a single vault for all the integration tests.
# This is because if we create a vault and upload then delete
# an archive, we cannot immediately clean up and delete the vault.
# The compromise is that we'll use a single vault and use
# get_or_create semantics for the integ tests. This does mean you
# need to be careful when writing tests. Assume that other code
# is also using this vault in parallel, so don't rely on things like
# number of archives in a vault.
VAULT_NAME = 'botocore-integ-test-vault'
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('glacier', 'us-west-2')
# There's no error if the vault already exists so we don't
# need to catch any exceptions here.
self.client.create_vault(vaultName=self.VAULT_NAME)
def test_can_list_vaults_without_account_id(self):
response = self.client.list_vaults()
self.assertIn('VaultList', response)
def test_can_handle_error_responses(self):
with self.assertRaises(ClientError):
self.client.list_vaults(accountId='asdf')
def test_can_upload_archive(self):
body = six.BytesIO(b"bytes content")
response = self.client.upload_archive(vaultName=self.VAULT_NAME,
archiveDescription='test upload',
body=body)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 201)
archive_id = response['archiveId']
response = self.client.delete_archive(vaultName=self.VAULT_NAME,
archiveId=archive_id)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)
def test_can_upload_archive_from_bytes(self):
response = self.client.upload_archive(vaultName=self.VAULT_NAME,
archiveDescription='test upload',
body=b'bytes body')
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 201)
archive_id = response['archiveId']
response = self.client.delete_archive(vaultName=self.VAULT_NAME,
archiveId=archive_id)
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from nose.tools import assert_true
import botocore.session
from botocore.paginate import PageIterator
from botocore.exceptions import OperationNotPageableError
def test_emr_endpoints_work_with_py26():
# Verify that we can talk to all currently supported EMR endpoints.
# Python2.6 has an SSL cert bug where it can't read the SAN of
# certain SSL certs. We therefore need to always use the CN
# as the hostname.
session = botocore.session.get_session()
    for region in ['us-east-1', 'us-west-2', 'ap-northeast-1',
                   'ap-southeast-1', 'ap-southeast-2', 'sa-east-1',
                   'eu-west-1', 'eu-central-1']:
yield _test_can_list_clusters_in_region, session, region
def _test_can_list_clusters_in_region(session, region):
client = session.create_client('emr', region_name=region)
response = client.list_clusters()
assert_true('Clusters' in response)
# I consider these integration tests because they're
# testing more than a single unit, we're ensuring everything
# accessible from the session works as expected.
class TestEMRGetExtraResources(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('emr', 'us-west-2')
def test_can_access_pagination_configs(self):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator('list_clusters')
page_iterator = paginator.paginate()
self.assertIsInstance(page_iterator, PageIterator)
def test_operation_cant_be_paginated(self):
with self.assertRaises(OperationNotPageableError):
self.client.get_paginator('add_instance_groups')
def test_can_get_waiters(self):
waiter = self.client.get_waiter('cluster_running')
self.assertTrue(hasattr(waiter, 'wait'))
def test_waiter_does_not_exist(self):
with self.assertRaises(ValueError):
self.client.get_waiter('does_not_exist')
if __name__ == '__main__':
unittest.main()
|
"""Smoke tests to verify basic communication to all AWS services.
If you want to control what services/regions are used you can
also provide two separate env vars:
* AWS_SMOKE_TEST_REGION - The region used to create clients.
* AWS_SMOKE_TEST_SERVICES - A CSV list of service names to test.
Otherwise, the ``REGION`` variable specifies the default region
to use and all the services in SMOKE_TESTS/ERROR_TESTS will be tested.
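For example (illustrative; the module path here is an assumption)::
    AWS_SMOKE_TEST_REGION=us-west-2 \
    AWS_SMOKE_TEST_SERVICES=s3,ec2 nosetests path/to/this_module.py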
"""
import os
import mock
from pprint import pformat
import warnings
from nose.tools import assert_equal, assert_true
from botocore import xform_name
import botocore.session
from botocore.client import ClientError
from botocore.vendored.requests import adapters
from botocore.vendored.requests.exceptions import ConnectionError
# Mapping of service -> api calls to try.
# Each api call is a dict of OperationName->params.
# Empty params means that the operation will be called with no params. This is
# used as a quick verification that we can successfully make calls to services.
SMOKE_TESTS = {
'acm': {'ListCertificates': {}},
'apigateway': {'GetRestApis': {}},
'application-autoscaling': {
'DescribeScalableTargets': {
'ServiceNamespace': 'ecs'
}},
'autoscaling': {'DescribeAccountLimits': {},
'DescribeAdjustmentTypes': {}},
'cloudformation': {'DescribeStacks': {},
'ListStacks': {}},
'cloudfront': {'ListDistributions': {},
'ListStreamingDistributions': {}},
'cloudhsmv2': {'DescribeBackups': {}},
'cloudsearch': {'DescribeDomains': {},
'ListDomainNames': {}},
'cloudtrail': {'DescribeTrails': {}},
'cloudwatch': {'ListMetrics': {}},
'codecommit': {'ListRepositories': {}},
'codedeploy': {'ListApplications': {}},
'codepipeline': {'ListActionTypes': {}},
'cognito-identity': {'ListIdentityPools': {'MaxResults': 1}},
'cognito-sync': {'ListIdentityPoolUsage': {}},
'config': {'DescribeDeliveryChannels': {}},
'datapipeline': {'ListPipelines': {}},
'devicefarm': {'ListProjects': {}},
'directconnect': {'DescribeConnections': {}},
'ds': {'DescribeDirectories': {}},
'dynamodb': {'ListTables': {}},
'dynamodbstreams': {'ListStreams': {}},
'ec2': {'DescribeRegions': {},
'DescribeInstances': {}},
'ecr': {'DescribeRepositories': {}},
'ecs': {'DescribeClusters': {}},
'elasticache': {'DescribeCacheClusters': {}},
'elasticbeanstalk': {'DescribeApplications': {}},
'elastictranscoder': {'ListPipelines': {}},
'elb': {'DescribeLoadBalancers': {}},
'emr': {'ListClusters': {}},
'es': {'ListDomainNames': {}},
'events': {'ListRules': {}},
'firehose': {'ListDeliveryStreams': {}},
'gamelift': {'ListBuilds': {}},
'glacier': {'ListVaults': {}},
'iam': {'ListUsers': {}},
# Does not work with session credentials so
# importexport tests are not run.
#'importexport': {'ListJobs': {}},
'importexport': {},
'inspector': {'DescribeCrossAccountAccessRole': {}},
'iot': {'DescribeEndpoint': {}},
'kinesis': {'ListStreams': {}},
'kms': {'ListKeys': {}},
'lambda': {'ListFunctions': {}},
'logs': {'DescribeLogGroups': {}},
'machinelearning': {'DescribeMLModels': {}},
'opsworks': {'DescribeStacks': {}},
'rds': {'DescribeDBInstances': {}},
'redshift': {'DescribeClusters': {}},
'route53': {'ListHostedZones': {}},
'route53domains': {'ListDomains': {}},
's3': {'ListBuckets': {}},
'sdb': {'ListDomains': {}},
'ses': {'ListIdentities': {}},
'sns': {'ListTopics': {}},
'sqs': {'ListQueues': {}},
'ssm': {'ListDocuments': {}},
'storagegateway': {'ListGateways': {}},
# sts tests would normally go here, but
# there aren't any calls you can make when
# using session credentials so we don't run any
# sts tests.
'sts': {},
#'sts': {'GetSessionToken': {}},
# Subscription needed for support API calls.
'support': {},
'swf': {'ListDomains': {'registrationStatus': 'REGISTERED'}},
'waf': {'ListWebACLs': {'Limit': 1}},
'workspaces': {'DescribeWorkspaces': {}},
}
# Same thing as the SMOKE_TESTS hash above, except these verify
# that we get an error response back from the server because
# we've sent invalid params.
ERROR_TESTS = {
'apigateway': {'GetRestApi': {'restApiId': 'fake-id'}},
'application-autoscaling': {
'DescribeScalableTargets': {
'ServiceNamespace': 'fake-service-namespace'
}},
'autoscaling': {'CreateLaunchConfiguration': {
'LaunchConfigurationName': 'foo',
'ImageId': 'ami-12345678',
'InstanceType': 'm1.small',
}},
'cloudformation': {'CreateStack': {
'StackName': 'fake',
'TemplateURL': 'http://s3.amazonaws.com/foo/bar',
}},
'cloudfront': {'GetDistribution': {'Id': 'fake-id'}},
'cloudhsmv2': {'ListTags': {'ResourceId': 'fake-id'}},
'cloudsearch': {'DescribeIndexFields': {'DomainName': 'fakedomain'}},
'cloudtrail': {'DeleteTrail': {'Name': 'fake-trail'}},
'cloudwatch': {'SetAlarmState': {
'AlarmName': 'abc',
'StateValue': 'mno',
'StateReason': 'xyz',
}},
'logs': {'GetLogEvents': {'logGroupName': 'a', 'logStreamName': 'b'}},
'codecommit': {'ListBranches': {'repositoryName': 'fake-repo'}},
'codedeploy': {'GetDeployment': {'deploymentId': 'fake-id'}},
'codepipeline': {'GetPipeline': {'name': 'fake-pipeline'}},
'cognito-identity': {'DescribeIdentityPool': {'IdentityPoolId': 'fake'}},
'cognito-sync': {'DescribeIdentityPoolUsage': {'IdentityPoolId': 'fake'}},
'config': {
'GetResourceConfigHistory': {'resourceType': '', 'resourceId': ''},
},
'datapipeline': {'GetPipelineDefinition': {'pipelineId': 'fake'}},
'devicefarm': {'GetDevice': {'arn': 'arn:aws:devicefarm:REGION::device:f'}},
'directconnect': {'DescribeConnections': {'connectionId': 'fake'}},
'ds': {'CreateDirectory': {'Name': 'n', 'Password': 'p', 'Size': '1'}},
'dynamodb': {'DescribeTable': {'TableName': 'fake'}},
'dynamodbstreams': {'DescribeStream': {'StreamArn': 'x'*37}},
'ec2': {'DescribeInstances': {'InstanceIds': ['i-12345678']}},
'ecs': {'StopTask': {'task': 'fake'}},
'efs': {'DeleteFileSystem': {'FileSystemId': 'fake'}},
'elasticache': {'DescribeCacheClusters': {'CacheClusterId': 'fake'}},
'elasticbeanstalk': {
'DescribeEnvironmentResources': {'EnvironmentId': 'x'},
},
'elb': {'DescribeLoadBalancers': {'LoadBalancerNames': ['fake']}},
'elastictranscoder': {'ReadJob': {'Id': 'fake'}},
'emr': {'DescribeCluster': {'ClusterId': 'fake'}},
'es': {'DescribeElasticsearchDomain': {'DomainName': 'not-a-domain'}},
'gamelift': {'DescribeBuild': {'BuildId': 'fake-build-id'}},
'glacier': {'ListVaults': {'accountId': 'fake'}},
'iam': {'GetUser': {'UserName': 'fake'}},
'importexport': {'CreateJob': {
'JobType': 'Import',
'ValidateOnly': False,
'Manifest': 'fake',
}},
'kinesis': {'DescribeStream': {'StreamName': 'fake'}},
'kms': {'GetKeyPolicy': {'KeyId': 'fake', 'PolicyName': 'fake'}},
'lambda': {'Invoke': {'FunctionName': 'fake'}},
'machinelearning': {'GetBatchPrediction': {'BatchPredictionId': 'fake'}},
'opsworks': {'DescribeLayers': {'StackId': 'fake'}},
'rds': {'DescribeDBInstances': {'DBInstanceIdentifier': 'fake'}},
'redshift': {'DescribeClusters': {'ClusterIdentifier': 'fake'}},
'route53': {'GetHostedZone': {'Id': 'fake'}},
'route53domains': {'GetDomainDetail': {'DomainName': 'fake'}},
's3': {'ListObjects': {'Bucket': 'thisbucketdoesnotexistasdf'}},
'ses': {'VerifyEmailIdentity': {'EmailAddress': 'fake'}},
'sdb': {'CreateDomain': {'DomainName': ''}},
'sns': {
'ConfirmSubscription': {'TopicArn': 'a', 'Token': 'b'},
'Publish': {'Message': 'hello', 'TopicArn': 'fake'},
},
'sqs': {'GetQueueUrl': {'QueueName': 'fake'}},
'ssm': {'GetDocument': {'Name': 'fake'}},
'storagegateway': {'ListVolumes': {'GatewayARN': 'x'*50}},
'sts': {'GetFederationToken': {'Name': 'fake', 'Policy': 'fake'}},
'support': {'CreateCase': {
'subject': 'x',
'communicationBody': 'x',
'categoryCode': 'x',
'serviceCode': 'x',
'severityCode': 'low',
}},
'swf': {'DescribeDomain': {'name': 'fake'}},
'waf': {'GetWebACL': {'WebACLId': 'fake'}},
'workspaces': {'DescribeWorkspaces': {'DirectoryId': 'fake'}},
}
REGION = 'us-east-1'
REGION_OVERRIDES = {
'devicefarm': 'us-west-2',
'efs': 'us-west-2',
'inspector': 'us-west-2',
}
def _get_client(session, service):
if os.environ.get('AWS_SMOKE_TEST_REGION', ''):
region_name = os.environ['AWS_SMOKE_TEST_REGION']
else:
region_name = REGION_OVERRIDES.get(service, REGION)
return session.create_client(service, region_name=region_name)
def _list_services(dict_entries):
    # List all the services in the provided dict_entries. If
    # AWS_SMOKE_TEST_SERVICES is set, it is treated as a comma
    # separated allowlist and only the services named there are
    # returned.
if 'AWS_SMOKE_TEST_SERVICES' not in os.environ:
return dict_entries.keys()
else:
wanted_services = os.environ.get(
'AWS_SMOKE_TEST_SERVICES', '').split(',')
return [key for key in dict_entries if key in wanted_services]
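# For example (illustrative): with AWS_SMOKE_TEST_SERVICES='s3,ec2',
# _list_services(SMOKE_TESTS) returns only the 's3' and 'ec2' keys.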
def test_can_make_request_with_client():
    # Make a basic call for every service/operation pair in
    # SMOKE_TESTS using client objects.
session = botocore.session.get_session()
for service_name in _list_services(SMOKE_TESTS):
client = _get_client(session, service_name)
for operation_name in SMOKE_TESTS[service_name]:
kwargs = SMOKE_TESTS[service_name][operation_name]
method_name = xform_name(operation_name)
yield _make_client_call, client, method_name, kwargs
def _make_client_call(client, operation_name, kwargs):
method = getattr(client, operation_name)
with warnings.catch_warnings(record=True) as caught_warnings:
response = method(**kwargs)
assert_equal(len(caught_warnings), 0,
"Warnings were emitted during smoke test: %s"
% caught_warnings)
assert_true('Errors' not in response)
def test_can_make_request_and_understand_errors_with_client():
session = botocore.session.get_session()
for service_name in _list_services(ERROR_TESTS):
client = _get_client(session, service_name)
for operation_name in ERROR_TESTS[service_name]:
kwargs = ERROR_TESTS[service_name][operation_name]
method_name = xform_name(operation_name)
yield _make_error_client_call, client, method_name, kwargs
def _make_error_client_call(client, operation_name, kwargs):
method = getattr(client, operation_name)
    try:
        method(**kwargs)
    except ClientError:
        pass
else:
raise AssertionError("Expected client error was not raised "
"for %s.%s" % (client, operation_name))
def test_client_can_retry_request_properly():
session = botocore.session.get_session()
for service_name in _list_services(SMOKE_TESTS):
client = _get_client(session, service_name)
for operation_name in SMOKE_TESTS[service_name]:
kwargs = SMOKE_TESTS[service_name][operation_name]
yield (_make_client_call_with_errors, client,
operation_name, kwargs)
def _make_client_call_with_errors(client, operation_name, kwargs):
operation = getattr(client, xform_name(operation_name))
original_send = adapters.HTTPAdapter.send
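    # Patch the transport so that the first send() raises a simulated
    # ConnectionError; every subsequent call falls through to the real
    # send(), which lets us observe that the request was retried.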
def mock_http_adapter_send(self, *args, **kwargs):
if not getattr(self, '_integ_test_error_raised', False):
self._integ_test_error_raised = True
raise ConnectionError("Simulated ConnectionError raised.")
else:
return original_send(self, *args, **kwargs)
with mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send',
mock_http_adapter_send):
try:
            operation(**kwargs)
except ClientError as e:
assert False, ('Request was not retried properly, '
'received error:\n%s' % pformat(e))
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import mock
import tempfile
import shutil
import json
import time
from uuid import uuid4
from botocore.session import Session
from botocore.exceptions import ClientError
from tests import BaseEnvVar, temporary_file, random_chars
S3_READ_POLICY_ARN = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
class TestCredentialPrecedence(BaseEnvVar):
def setUp(self):
super(TestCredentialPrecedence, self).setUp()
# Set the config file to something that doesn't exist so
# that we don't accidentally load a config.
os.environ['AWS_CONFIG_FILE'] = '~/.aws/config-missing'
def create_session(self, *args, **kwargs):
"""
Create a new session with the given arguments. Additionally,
this method will set the credentials file to the test credentials
used by the following test cases.
"""
kwargs['session_vars'] = {
'credentials_file': (
None, None,
os.path.join(os.path.dirname(__file__), 'test-credentials'),
None)
}
return Session(*args, **kwargs)
def test_access_secret_vs_profile_env(self):
# If all three are given, then the access/secret keys should
# take precedence.
os.environ['AWS_ACCESS_KEY_ID'] = 'env'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'env-secret'
os.environ['AWS_DEFAULT_PROFILE'] = 'test'
s = self.create_session()
credentials = s.get_credentials()
self.assertEqual(credentials.access_key, 'env')
self.assertEqual(credentials.secret_key, 'env-secret')
@mock.patch('botocore.credentials.Credentials')
def test_access_secret_vs_profile_code(self, credentials_cls):
# If all three are given, then the access/secret keys should
# take precedence.
s = self.create_session(profile='test')
client = s.create_client('s3', aws_access_key_id='code',
aws_secret_access_key='code-secret')
credentials_cls.assert_called_with(
access_key='code', secret_key='code-secret', token=mock.ANY)
def test_profile_env_vs_code(self):
# If the profile is set both by the env var and by code,
# then the one set by code should take precedence.
os.environ['AWS_DEFAULT_PROFILE'] = 'test'
s = self.create_session(profile='default')
credentials = s.get_credentials()
self.assertEqual(credentials.access_key, 'default')
self.assertEqual(credentials.secret_key, 'default-secret')
@mock.patch('botocore.credentials.Credentials')
def test_access_secret_env_vs_code(self, credentials_cls):
# If the access/secret keys are set both as env vars and via
# code, then those set by code should take precedence.
os.environ['AWS_ACCESS_KEY_ID'] = 'env'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'secret'
s = self.create_session()
client = s.create_client('s3', aws_access_key_id='code',
aws_secret_access_key='code-secret')
credentials_cls.assert_called_with(
access_key='code', secret_key='code-secret', token=mock.ANY)
def test_access_secret_env_vs_profile_code(self):
# If access/secret keys are set in the environment, but then a
# specific profile is passed via code, then the access/secret
# keys defined in that profile should take precedence over
# the environment variables. Example:
#
# ``aws --profile dev s3 ls``
#
os.environ['AWS_ACCESS_KEY_ID'] = 'env'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'env-secret'
s = self.create_session(profile='test')
credentials = s.get_credentials()
self.assertEqual(credentials.access_key, 'test')
self.assertEqual(credentials.secret_key, 'test-secret')
def test_honors_aws_shared_credentials_file_env_var(self):
with temporary_file('w') as f:
f.write('[default]\n'
'aws_access_key_id=custom1\n'
'aws_secret_access_key=custom2\n')
f.flush()
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = f.name
s = Session()
credentials = s.get_credentials()
self.assertEqual(credentials.access_key, 'custom1')
self.assertEqual(credentials.secret_key, 'custom2')
class TestAssumeRoleCredentials(BaseEnvVar):
def setUp(self):
self.env_original = os.environ.copy()
self.environ_copy = os.environ.copy()
super(TestAssumeRoleCredentials, self).setUp()
os.environ = self.environ_copy
# The tests rely on manipulating AWS_CONFIG_FILE,
# but we also need to make sure we don't accidentally
# pick up the ~/.aws/credentials file either.
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid4())
self.parent_session = Session()
self.iam = self.parent_session.create_client('iam')
self.sts = self.parent_session.create_client('sts')
self.tempdir = tempfile.mkdtemp()
self.config_file = os.path.join(self.tempdir, 'config')
# A role trust policy that allows the current account to call assume
# role on itself.
account_id = self.sts.get_caller_identity()['Account']
self.role_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::%s:root" % account_id
},
"Action": "sts:AssumeRole"
}
]
}
def tearDown(self):
super(TestAssumeRoleCredentials, self).tearDown()
shutil.rmtree(self.tempdir)
os.environ = self.env_original.copy()
def random_name(self):
return 'botocoretest-' + random_chars(10)
def create_role(self, policy_document, policy_arn=None):
name = self.random_name()
response = self.iam.create_role(
RoleName=name,
AssumeRolePolicyDocument=json.dumps(policy_document)
)
self.addCleanup(self.iam.delete_role, RoleName=name)
if policy_arn:
self.iam.attach_role_policy(RoleName=name, PolicyArn=policy_arn)
self.addCleanup(
self.iam.detach_role_policy, RoleName=name,
PolicyArn=policy_arn
)
return response['Role']
def create_user(self, policy_arns):
name = self.random_name()
user = self.iam.create_user(UserName=name)['User']
self.addCleanup(self.iam.delete_user, UserName=name)
for arn in policy_arns:
self.iam.attach_user_policy(
UserName=name,
PolicyArn=arn
)
self.addCleanup(
self.iam.detach_user_policy,
UserName=name, PolicyArn=arn
)
return user
def create_creds(self, user_name):
creds = self.iam.create_access_key(UserName=user_name)['AccessKey']
self.addCleanup(
self.iam.delete_access_key,
UserName=user_name, AccessKeyId=creds['AccessKeyId']
)
return creds
def wait_for_assume_role(self, role_arn, access_key, secret_key,
token=None, attempts=30, delay=10,
success_delay=1,
num_success=4):
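        # IAM changes are eventually consistent, so one successful
        # assume-role call is not proof the role is usable; require
        # several consecutive successes before trusting the creds.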
for _ in range(num_success):
creds = self._wait_for_assume_role(
role_arn, access_key, secret_key, token, attempts, delay)
time.sleep(success_delay)
return creds
def _wait_for_assume_role(self, role_arn, access_key, secret_key,
token, attempts, delay):
# "Why not use the policy simulator?" you might ask. The answer is
# that the policy simulator will return success far before you can
# actually make the calls.
client = self.parent_session.create_client(
'sts', aws_access_key_id=access_key,
aws_secret_access_key=secret_key, aws_session_token=token
)
attempts_remaining = attempts
role_session_name = random_chars(10)
while attempts_remaining > 0:
attempts_remaining -= 1
try:
result = client.assume_role(
RoleArn=role_arn, RoleSessionName=role_session_name)
return result['Credentials']
except ClientError as e:
code = e.response.get('Error', {}).get('Code')
if code in ["InvalidClientTokenId", "AccessDenied"]:
time.sleep(delay)
else:
raise
raise Exception("Unable to assume role %s" % role_arn)
def create_assume_policy(self, role_arn):
policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Resource": role_arn,
"Action": "sts:AssumeRole"
}
]
}
name = self.random_name()
response = self.iam.create_policy(
PolicyName=name,
PolicyDocument=json.dumps(policy_document)
)
self.addCleanup(
self.iam.delete_policy, PolicyArn=response['Policy']['Arn']
)
return response['Policy']['Arn']
def assert_s3_read_only_session(self, session):
# Calls to S3 should succeed
s3 = session.create_client('s3')
s3.list_buckets()
# Calls to other services should not
iam = session.create_client('iam')
try:
iam.list_groups()
self.fail("Expected call to list_groups to fail, but it passed.")
except ClientError as e:
code = e.response.get('Error', {}).get('Code')
if code != 'AccessDenied':
raise
def test_recursive_assume_role(self):
# Create the final role, the one that will actually have access to s3
final_role = self.create_role(self.role_policy, S3_READ_POLICY_ARN)
# Create the role that can assume the final role
middle_policy_arn = self.create_assume_policy(final_role['Arn'])
middle_role = self.create_role(self.role_policy, middle_policy_arn)
# Create a user that can only assume the middle-man role, and then get
# static credentials for it.
user_policy_arn = self.create_assume_policy(middle_role['Arn'])
user = self.create_user([user_policy_arn])
user_creds = self.create_creds(user['UserName'])
# Setup the config file with the profiles we'll be using. For
# convenience static credentials are placed here instead of putting
# them in the credentials file.
config = (
'[default]\n'
'aws_access_key_id = %s\n'
'aws_secret_access_key = %s\n'
'[profile middle]\n'
'source_profile = default\n'
'role_arn = %s\n'
'[profile final]\n'
'source_profile = middle\n'
'role_arn = %s\n'
)
config = config % (
user_creds['AccessKeyId'], user_creds['SecretAccessKey'],
middle_role['Arn'], final_role['Arn']
)
with open(self.config_file, 'w') as f:
f.write(config)
# Wait for IAM permissions to propagate
middle_creds = self.wait_for_assume_role(
role_arn=middle_role['Arn'],
access_key=user_creds['AccessKeyId'],
secret_key=user_creds['SecretAccessKey'],
)
self.wait_for_assume_role(
role_arn=final_role['Arn'],
access_key=middle_creds['AccessKeyId'],
secret_key=middle_creds['SecretAccessKey'],
token=middle_creds['SessionToken'],
)
# Configure our credentials file to be THE credentials file
os.environ['AWS_CONFIG_FILE'] = self.config_file
self.assert_s3_read_only_session(Session(profile='final'))
def test_assume_role_with_credential_source(self):
# Create a role with read access to S3
role = self.create_role(self.role_policy, S3_READ_POLICY_ARN)
# Create a user that can assume the role and get static credentials
# for it.
user_policy_arn = self.create_assume_policy(role['Arn'])
user = self.create_user([user_policy_arn])
user_creds = self.create_creds(user['UserName'])
# Setup the config file with the profile we'll be using.
config = (
'[profile assume]\n'
'role_arn = %s\n'
'credential_source = Environment\n'
)
config = config % role['Arn']
with open(self.config_file, 'w') as f:
f.write(config)
# Wait for IAM permissions to propagate
self.wait_for_assume_role(
role_arn=role['Arn'],
access_key=user_creds['AccessKeyId'],
secret_key=user_creds['SecretAccessKey'],
)
# Setup the environment so that our new config file is THE config
# file and add the expected credentials since we're using the
# environment as our credential source.
os.environ['AWS_CONFIG_FILE'] = self.config_file
os.environ['AWS_SECRET_ACCESS_KEY'] = user_creds['SecretAccessKey']
os.environ['AWS_ACCESS_KEY_ID'] = user_creds['AccessKeyId']
self.assert_s3_read_only_session(Session(profile='assume'))
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from tests import unittest
import mock
import botocore.session
# Basic sanity checks for loader functionality.
# We're not using BaseEnvVar here because we don't actually
# want to patch out all of os.environ, we just want to ensure
# AWS_DATA_PATH doesn't affect our test results.
class TestLoaderBasicFunctionality(unittest.TestCase):
def setUp(self):
self.environ = os.environ.copy()
self.patched = mock.patch('os.environ', self.environ)
self.patched.start()
self.environ.pop('AWS_DATA_PATH', None)
self.session = botocore.session.get_session()
self.loader = self.session.get_component('data_loader')
def tearDown(self):
self.patched.stop()
def test_search_path_has_at_least_one_entry(self):
self.assertTrue(len(self.loader.search_paths) > 0)
def test_can_list_available_services(self):
# We don't want an exact check, as this list changes over time.
# We just need a basic sanity check.
available_services = self.loader.list_available_services(
type_name='service-2')
self.assertIn('ec2', available_services)
self.assertIn('s3', available_services)
def test_can_determine_latest_version(self):
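        # API version names are ISO-8601 date strings (e.g.
        # '2015-10-01'), so the lexicographic max() is also the
        # chronologically latest version.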
api_versions = self.loader.list_api_versions(
service_name='ec2', type_name='service-2')
self.assertEqual(
self.loader.determine_latest_version(
service_name='ec2', type_name='service-2'),
max(api_versions))
def test_can_load_service_model(self):
waiters = self.loader.load_service_model(
service_name='ec2', type_name='waiters-2')
self.assertIn('waiters', waiters)
def test_can_load_data(self):
api_version = self.loader.determine_latest_version(
service_name='ec2', type_name='service-2')
data = self.loader.load_data(
os.path.join('ec2', api_version, 'service-2'))
self.assertIn('metadata', data)
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from botocore.exceptions import ClientError
class TestRoute53Pagination(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('route53', 'us-west-2')
def test_paginate_with_max_items(self):
# Route53 has a string type for MaxItems. We need to ensure that this
# still works without any issues.
paginator = self.client.get_paginator('list_hosted_zones')
results = list(paginator.paginate(PaginationConfig={'MaxItems': '1'}))
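        # This is mainly a smoke check that paginating with a string
        # MaxItems completes without raising.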
self.assertTrue(len(results) >= 0)
def test_paginate_with_deprecated_paginator_and_limited_input_tokens(self):
paginator = self.client.get_paginator('list_resource_record_sets')
# We're making sure the paginator gets set without failing locally, so
# a ClientError is acceptable. In this case, the Hosted Zone specified
# does not exist.
        with self.assertRaises(ClientError):
            list(paginator.paginate(
                PaginationConfig={
                    'MaxItems': '1',
                    'StartingToken': 'my.domain.name.'
                },
                HostedZoneId="foo"
            ))
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
import botocore.session
from botocore.exceptions import ClientError
class TestCloudformation(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('cloudformation', 'us-east-1')
def test_handles_errors_with_template_body(self):
# GetTemplate has a customization in handlers.py, so we're ensuring
# it handles the case when a stack does not exist.
with self.assertRaises(ClientError):
self.client.get_template(
StackName='does-not-exist-%s' % random_chars(10))
if __name__ == '__main__':
unittest.main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
import botocore.session
class TestCognitoIdentity(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('cognito-identity', 'us-east-1')
def test_can_create_and_delete_identity_pool(self):
pool_name = 'test%s' % random_chars(10)
response = self.client.create_identity_pool(
IdentityPoolName=pool_name, AllowUnauthenticatedIdentities=True)
self.client.delete_identity_pool(IdentityPoolId=response['IdentityPoolId'])
if __name__ == '__main__':
unittest.main()
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
from tests import unittest
import botocore.session
from botocore import exceptions
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
# Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.create_rest_api_or_skip()
def create_rest_api_or_skip(self):
try:
api_id = self.client.create_rest_api(name=self.api_name)['id']
except exceptions.ClientError as e:
if e.response['Error']['Code'] == 'TooManyRequestsException':
raise unittest.SkipTest(
"Hit API gateway throttle limit, skipping test.")
raise
return api_id
def delete_api(self):
retries = 0
while retries < 10:
try:
self.client.delete_rest_api(restApiId=self.api_id)
break
except exceptions.ClientError as e:
if e.response['Error']['Code'] == 'TooManyRequestsException':
retries += 1
time.sleep(5)
else:
raise
def tearDown(self):
self.delete_api()
def test_put_integration(self):
        # The only resource on a brand new API is the root path, so
        # use that resource's ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import datetime
from tests import unittest, random_chars
import botocore.session
from botocore.client import ClientError
from botocore.compat import six
from botocore.exceptions import EndpointConnectionError
class TestBucketWithVersions(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('s3', region_name='us-west-2')
self.bucket_name = 'botocoretest%s' % random_chars(50)
def extract_version_ids(self, versions):
version_ids = []
for marker in versions['DeleteMarkers']:
version_ids.append(marker['VersionId'])
for version in versions['Versions']:
version_ids.append(version['VersionId'])
return version_ids
def test_create_versioned_bucket(self):
# Verifies we can:
# 1. Create a bucket
# 2. Enable versioning
# 3. Put an Object
self.client.create_bucket(
Bucket=self.bucket_name,
CreateBucketConfiguration={
'LocationConstraint': 'us-west-2'
}
)
self.addCleanup(self.client.delete_bucket, Bucket=self.bucket_name)
self.client.put_bucket_versioning(
Bucket=self.bucket_name,
VersioningConfiguration={"Status": "Enabled"})
response = self.client.put_object(
Bucket=self.bucket_name, Key='testkey', Body='bytes body')
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name,
Key='testkey',
VersionId=response['VersionId'])
response = self.client.get_object(
Bucket=self.bucket_name, Key='testkey')
self.assertEqual(response['Body'].read(), b'bytes body')
response = self.client.delete_object(Bucket=self.bucket_name,
Key='testkey')
# This cleanup step removes the DeleteMarker that's created
# from the delete_object call above.
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name,
Key='testkey',
VersionId=response['VersionId'])
# Object does not exist anymore.
with self.assertRaises(ClientError):
self.client.get_object(Bucket=self.bucket_name, Key='testkey')
versions = self.client.list_object_versions(Bucket=self.bucket_name)
version_ids = self.extract_version_ids(versions)
self.assertEqual(len(version_ids), 2)
# This tests both the debug logging mechanism and the response wire
# log. That functionality could theoretically be implemented in any
# number of modules, which makes it hard to pick which integration
# test module this code should live in, so it lives in the client
# module.
class TestResponseLog(unittest.TestCase):
def test_debug_log_contains_headers_and_body(self):
# This test just verifies that the response headers/body
# are in the debug log. It's an integration test so that
# we can refactor the code however we want, as long as we don't
# lose this feature.
session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-west-2')
debug_log = six.StringIO()
session.set_stream_logger('', logging.DEBUG, debug_log)
client.list_buckets()
debug_log_contents = debug_log.getvalue()
self.assertIn('Response headers', debug_log_contents)
self.assertIn('Response body', debug_log_contents)
class TestAcceptedDateTimeFormats(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('emr', 'us-west-2')
def test_accepts_datetime_object(self):
response = self.client.list_clusters(
CreatedAfter=datetime.datetime.now())
self.assertIn('Clusters', response)
def test_accepts_epoch_format(self):
response = self.client.list_clusters(CreatedAfter=0)
self.assertIn('Clusters', response)
def test_accepts_iso_8601_unaware(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00')
self.assertIn('Clusters', response)
def test_accepts_iso_8601_utc(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00Z')
self.assertIn('Clusters', response)
    def test_accepts_iso_8601_local(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00-08:00')
self.assertIn('Clusters', response)
class TestCreateClients(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_client_can_clone_with_service_events(self):
        # We should be able to create a client object.
client = self.session.create_client('s3', region_name='us-west-2')
# We really just want to ensure create_client doesn't raise
# an exception, but we'll double check that the client looks right.
self.assertTrue(hasattr(client, 'list_buckets'))
def test_client_raises_exception_invalid_region(self):
        with self.assertRaisesRegexp(ValueError, 'Invalid endpoint'):
self.session.create_client(
'cloudformation', region_name='invalid region name')
class TestClientErrors(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_region_mentioned_in_invalid_region(self):
client = self.session.create_client(
'cloudformation', region_name='us-east-999')
with self.assertRaisesRegexp(EndpointConnectionError,
'Could not connect to the endpoint URL'):
client.list_stacks()
def test_client_modeled_exception(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2')
with self.assertRaises(client.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
    def test_client_modeled_exception_with_differing_code(self):
client = self.session.create_client('iam', region_name='us-west-2')
# The NoSuchEntityException should be raised on NoSuchEntity error
# code.
with self.assertRaises(client.exceptions.NoSuchEntityException):
client.get_role(RoleName="NonexistentIAMRole")
    def test_raises_general_client_error_for_non_modeled_exception(self):
        client = self.session.create_client('ec2', region_name='us-west-2')
        try:
            client.describe_regions(DryRun=True)
        except client.exceptions.ClientError as e:
            self.assertIs(e.__class__, ClientError)
        else:
            self.fail('Expected a ClientError from the DryRun call.')
def test_can_catch_client_exceptions_across_two_different_clients(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2')
client2 = self.session.create_client(
'dynamodb', region_name='us-west-2')
with self.assertRaises(client2.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
class TestClientMeta(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_region_name_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
self.assertEqual(client.meta.region_name, 'us-west-2')
def test_endpoint_url_on_meta(self):
client = self.session.create_client('s3', 'us-west-2',
endpoint_url='https://foo')
self.assertEqual(client.meta.endpoint_url, 'https://foo')
class TestClientInjection(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_can_inject_client_methods(self):
def extra_client_method(self, name):
return name
def inject_client_method(class_attributes, **kwargs):
class_attributes['extra_client_method'] = extra_client_method
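        # The 'creating-client-class.<service>' event fires while the
        # client class is being built; handlers receive the class
        # attributes dict and can add new methods to it.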
self.session.register('creating-client-class.s3',
inject_client_method)
client = self.session.create_client('s3', 'us-west-2')
# We should now have access to the extra_client_method above.
self.assertEqual(client.extra_client_method('foo'), 'foo')
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import itertools
from nose.plugins.attrib import attr
import botocore.session
from botocore.exceptions import ClientError
class TestEC2(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', region_name='us-west-2')
def test_can_make_request(self):
# Basic smoke test to ensure we can talk to ec2.
result = self.client.describe_availability_zones()
zones = list(
sorted(a['ZoneName'] for a in result['AvailabilityZones']))
self.assertEqual(zones, ['us-west-2a', 'us-west-2b', 'us-west-2c'])
def test_get_console_output_handles_error(self):
        # Want to ensure the underlying ClientError is propagated
# on error.
with self.assertRaises(ClientError):
self.client.get_console_output(InstanceId='i-12345')
class TestEC2Pagination(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', region_name='us-west-2')
def test_can_paginate(self):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator(
'describe_reserved_instances_offerings')
pages = paginator.paginate()
results = list(itertools.islice(pages, 0, 3))
self.assertEqual(len(results), 3)
        self.assertNotEqual(results[0]['NextToken'], results[1]['NextToken'])
def test_can_paginate_with_page_size(self):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator(
'describe_reserved_instances_offerings')
pages = paginator.paginate(PaginationConfig={'PageSize': 1})
results = list(itertools.islice(pages, 0, 3))
self.assertEqual(len(results), 3)
for parsed in results:
reserved_inst_offer = parsed['ReservedInstancesOfferings']
# There should only be one reserved instance offering on each
# page.
self.assertEqual(len(reserved_inst_offer), 1)
def test_can_fall_back_to_old_starting_token(self):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator(
'describe_reserved_instances_offerings')
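        # 'None___1' is the deprecated starting-token format (the next
        # token and a page index joined by '___'); the paginator
        # should still accept it.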
pages = paginator.paginate(PaginationConfig={'NextToken': 'None___1'})
try:
results = list(itertools.islice(pages, 0, 3))
self.assertEqual(len(results), 3)
            self.assertNotEqual(results[0]['NextToken'],
                                results[1]['NextToken'])
except ValueError:
self.fail("Old style paginator failed.")
@attr('slow')
class TestCopySnapshotCustomization(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
        # All the test fixture setup/cleanup can use
        # the client interface.
self.client = self.session.create_client('ec2', 'us-west-2')
self.client_us_east_1 = self.session.create_client(
'ec2', 'us-east-1')
def create_volume(self, encrypted=False):
available_zones = self.client.describe_availability_zones()
first_zone = available_zones['AvailabilityZones'][0]['ZoneName']
response = self.client.create_volume(
Size=1, AvailabilityZone=first_zone, Encrypted=encrypted)
volume_id = response['VolumeId']
self.addCleanup(self.client.delete_volume, VolumeId=volume_id)
self.client.get_waiter('volume_available').wait(VolumeIds=[volume_id])
return volume_id
def create_snapshot(self, volume_id):
response = self.client.create_snapshot(VolumeId=volume_id)
snapshot_id = response['SnapshotId']
self.client.get_waiter('snapshot_completed').wait(
SnapshotIds=[snapshot_id])
self.addCleanup(self.client.delete_snapshot, SnapshotId=snapshot_id)
return snapshot_id
def cleanup_copied_snapshot(self, snapshot_id):
dest_client = self.session.create_client('ec2', 'us-east-1')
self.addCleanup(dest_client.delete_snapshot,
SnapshotId=snapshot_id)
dest_client.get_waiter('snapshot_completed').wait(
SnapshotIds=[snapshot_id])
def test_can_copy_snapshot(self):
volume_id = self.create_volume()
snapshot_id = self.create_snapshot(volume_id)
result = self.client_us_east_1.copy_snapshot(
SourceRegion='us-west-2',
SourceSnapshotId=snapshot_id)
self.assertIn('SnapshotId', result)
# Cleanup code. We can wait for the snapshot to be complete
# and then we can delete the snapshot.
self.cleanup_copied_snapshot(result['SnapshotId'])
def test_can_copy_encrypted_snapshot(self):
# Note that we're creating an encrypted volume here.
volume_id = self.create_volume(encrypted=True)
snapshot_id = self.create_snapshot(volume_id)
result = self.client_us_east_1.copy_snapshot(
SourceRegion='us-west-2',
SourceSnapshotId=snapshot_id)
self.assertIn('SnapshotId', result)
self.cleanup_copied_snapshot(result['SnapshotId'])
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, temporary_file, random_chars
import os
import time
from collections import defaultdict
import tempfile
import shutil
import threading
import logging
import mock
from tarfile import TarFile
from contextlib import closing
from nose.plugins.attrib import attr
from botocore.vendored.requests import adapters
from botocore.vendored.requests.exceptions import ConnectionError
from botocore.compat import six, zip_longest
import botocore.session
import botocore.auth
import botocore.credentials
import botocore.vendored.requests as requests
from botocore.config import Config
from botocore.exceptions import ClientError
def random_bucketname():
return 'botocoretest-' + random_chars(10)
LOG = logging.getLogger('botocore.tests.integration')
_SHARED_BUCKET = random_bucketname()
_DEFAULT_REGION = 'us-west-2'
def setup_module():
s3 = botocore.session.get_session().create_client('s3')
waiter = s3.get_waiter('bucket_exists')
params = {
'Bucket': _SHARED_BUCKET,
'CreateBucketConfiguration': {
'LocationConstraint': _DEFAULT_REGION,
}
}
try:
s3.create_bucket(**params)
except Exception as e:
# A create_bucket can fail for a number of reasons.
# We're going to defer to the waiter below to make the
# final call as to whether or not the bucket exists.
LOG.debug("create_bucket() raised an exception: %s", e, exc_info=True)
waiter.wait(Bucket=_SHARED_BUCKET)
def clear_out_bucket(bucket, region, delete_bucket=False):
s3 = botocore.session.get_session().create_client(
's3', region_name=region)
    paginator = s3.get_paginator('list_objects')
    # Use pages paired with batch delete_objects().
    for page in paginator.paginate(Bucket=bucket):
keys = [{'Key': obj['Key']} for obj in page.get('Contents', [])]
if keys:
s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
if delete_bucket:
try:
s3.delete_bucket(Bucket=bucket)
except Exception as e:
# We can sometimes get exceptions when trying to
# delete a bucket. We'll let the waiter make
# the final call as to whether the bucket was able
# to be deleted.
LOG.debug("delete_bucket() raised an exception: %s",
e, exc_info=True)
waiter = s3.get_waiter('bucket_not_exists')
waiter.wait(Bucket=bucket)
def teardown_module():
clear_out_bucket(_SHARED_BUCKET, _DEFAULT_REGION, delete_bucket=True)
class BaseS3ClientTest(unittest.TestCase):
def setUp(self):
self.bucket_name = _SHARED_BUCKET
self.region = _DEFAULT_REGION
clear_out_bucket(self.bucket_name, self.region)
self.session = botocore.session.get_session()
self.client = self.session.create_client('s3', region_name=self.region)
def assert_status_code(self, response, status_code):
self.assertEqual(
response['ResponseMetadata']['HTTPStatusCode'],
status_code
)
def create_bucket(self, region_name, bucket_name=None, client=None):
bucket_client = client or self.client
if bucket_name is None:
bucket_name = random_bucketname()
bucket_kwargs = {'Bucket': bucket_name}
if region_name != 'us-east-1':
bucket_kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': region_name,
}
response = bucket_client.create_bucket(**bucket_kwargs)
self.assert_status_code(response, 200)
waiter = bucket_client.get_waiter('bucket_exists')
waiter.wait(Bucket=bucket_name)
self.addCleanup(clear_out_bucket, bucket_name, region_name, True)
return bucket_name
def make_tempdir(self):
tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tempdir)
return tempdir
class TestS3BaseWithBucket(BaseS3ClientTest):
def setUp(self):
super(TestS3BaseWithBucket, self).setUp()
self.caught_exceptions = []
def create_object(self, key_name, body='foo'):
self.client.put_object(
Bucket=self.bucket_name, Key=key_name,
Body=body)
def create_multipart_upload(self, key_name):
parsed = self.client.create_multipart_upload(
Bucket=self.bucket_name, Key=key_name)
upload_id = parsed['UploadId']
self.addCleanup(
self.client.abort_multipart_upload,
UploadId=upload_id,
Bucket=self.bucket_name, Key=key_name)
def abort_multipart_upload(self, bucket_name, key, upload_id):
self.client.abort_multipart_upload(
UploadId=upload_id, Bucket=self.bucket_name, Key=key)
def delete_object(self, key, bucket_name):
response = self.client.delete_object(Bucket=bucket_name, Key=key)
self.assert_status_code(response, 204)
def delete_bucket(self, bucket_name):
response = self.client.delete_bucket(Bucket=bucket_name)
self.assert_status_code(response, 204)
def create_object_catch_exceptions(self, key_name):
try:
self.create_object(key_name=key_name)
except Exception as e:
self.caught_exceptions.append(e)
def assert_num_uploads_found(self, operation, num_uploads,
max_items=None, num_attempts=5):
amount_seen = None
paginator = self.client.get_paginator(operation)
for _ in range(num_attempts):
pages = paginator.paginate(Bucket=self.bucket_name,
PaginationConfig={
'MaxItems': max_items})
iterators = pages.result_key_iters()
self.assertEqual(len(iterators), 2)
self.assertEqual(iterators[0].result_key.expression, 'Uploads')
# It sometimes takes a while for all the uploads to show up,
# especially if the upload was just created. If we don't
# see the expected amount, we retry up to num_attempts times
# before failing.
amount_seen = len(list(iterators[0]))
if amount_seen == num_uploads:
# Test passed.
return
else:
# Sleep and try again.
time.sleep(2)
self.fail("Expected to see %s uploads, instead saw: %s" % (
num_uploads, amount_seen))
def create_client(self):
# Even though the default signature_version is s3,
# we're being explicit in case this ever changes.
client_config = Config(signature_version='s3')
return self.session.create_client('s3', self.region,
config=client_config)
def assert_can_put_object(self, body):
client = self.create_client()
response = client.put_object(
Bucket=self.bucket_name, Key='foo',
Body=body)
self.assert_status_code(response, 200)
self.addCleanup(
client.delete_object, Bucket=self.bucket_name, Key='foo')
class TestS3Buckets(TestS3BaseWithBucket):
def setUp(self):
super(TestS3Buckets, self).setUp()
def test_can_make_request(self):
# Basic smoke test to ensure we can talk to s3.
result = self.client.list_buckets()
# Can't really assume anything about whether or not they have buckets,
# but we can assume something about the structure of the response.
self.assertEqual(sorted(list(result.keys())),
['Buckets', 'Owner', 'ResponseMetadata'])
def test_can_get_bucket_location(self):
result = self.client.get_bucket_location(Bucket=self.bucket_name)
self.assertIn('LocationConstraint', result)
self.assertEqual(result['LocationConstraint'], self.region)
class TestS3Objects(TestS3BaseWithBucket):
def increment_auth(self, request, **kwargs):
self.auth_paths.append(request.auth_path)
def test_can_delete_urlencoded_object(self):
key_name = 'a+b/foo'
self.create_object(key_name=key_name)
bucket_contents = self.client.list_objects(
Bucket=self.bucket_name)['Contents']
self.assertEqual(len(bucket_contents), 1)
self.assertEqual(bucket_contents[0]['Key'], 'a+b/foo')
subdir_contents = self.client.list_objects(
Bucket=self.bucket_name, Prefix='a+b')['Contents']
self.assertEqual(len(subdir_contents), 1)
self.assertEqual(subdir_contents[0]['Key'], 'a+b/foo')
response = self.client.delete_object(
Bucket=self.bucket_name, Key=key_name)
self.assert_status_code(response, 204)
@attr('slow')
def test_can_paginate(self):
for i in range(5):
key_name = 'key%s' % i
self.create_object(key_name)
# Eventual consistency.
time.sleep(3)
paginator = self.client.get_paginator('list_objects')
generator = paginator.paginate(MaxKeys=1,
Bucket=self.bucket_name)
responses = list(generator)
self.assertEqual(len(responses), 5, responses)
key_names = [el['Contents'][0]['Key']
for el in responses]
self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
@attr('slow')
def test_can_paginate_with_page_size(self):
for i in range(5):
key_name = 'key%s' % i
self.create_object(key_name)
# Eventual consistency.
time.sleep(3)
paginator = self.client.get_paginator('list_objects')
generator = paginator.paginate(PaginationConfig={'PageSize': 1},
Bucket=self.bucket_name)
responses = list(generator)
self.assertEqual(len(responses), 5, responses)
data = [r for r in responses]
key_names = [el['Contents'][0]['Key']
for el in data]
self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
@attr('slow')
def test_result_key_iters(self):
for i in range(5):
key_name = 'key/%s/%s' % (i, i)
self.create_object(key_name)
key_name2 = 'key/%s' % i
self.create_object(key_name2)
time.sleep(3)
paginator = self.client.get_paginator('list_objects')
generator = paginator.paginate(MaxKeys=2,
Prefix='key/',
Delimiter='/',
Bucket=self.bucket_name)
iterators = generator.result_key_iters()
response = defaultdict(list)
key_names = [i.result_key for i in iterators]
for vals in zip_longest(*iterators):
for k, val in zip(key_names, vals):
response.setdefault(k.expression, [])
response[k.expression].append(val)
self.assertIn('Contents', response)
self.assertIn('CommonPrefixes', response)
@attr('slow')
def test_can_get_and_put_object(self):
self.create_object('foobarbaz', body='body contents')
time.sleep(3)
data = self.client.get_object(
Bucket=self.bucket_name, Key='foobarbaz')
self.assertEqual(data['Body'].read().decode('utf-8'), 'body contents')
def test_can_put_large_string_body_on_new_bucket(self):
body = '*' * (5 * (1024 ** 2))
self.assert_can_put_object(body)
def test_get_object_stream_wrapper(self):
self.create_object('foobarbaz', body='body contents')
response = self.client.get_object(
Bucket=self.bucket_name, Key='foobarbaz')
body = response['Body']
# We should be able to set a socket timeout.
body.set_socket_timeout(10)
self.assertEqual(body.read(amt=1).decode('utf-8'), 'b')
self.assertEqual(body.read().decode('utf-8'), 'ody contents')
def test_paginate_max_items(self):
self.create_multipart_upload('foo/key1')
self.create_multipart_upload('foo/key1')
self.create_multipart_upload('foo/key1')
self.create_multipart_upload('foo/key2')
self.create_multipart_upload('foobar/key1')
self.create_multipart_upload('foobar/key2')
self.create_multipart_upload('bar/key1')
self.create_multipart_upload('bar/key2')
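# Multipart uploads are tracked by upload id, so the three uploads for
# 'foo/key1' still count as three separate uploads.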
# Verify when we have MaxItems=None, we get back all 8 uploads.
self.assert_num_uploads_found('list_multipart_uploads',
max_items=None, num_uploads=8)
# Verify when we have MaxItems=1, we get back 1 upload.
self.assert_num_uploads_found('list_multipart_uploads',
max_items=1, num_uploads=1)
paginator = self.client.get_paginator('list_multipart_uploads')
# Works similarly with build_full_result().
pages = paginator.paginate(PaginationConfig={'MaxItems': 1},
Bucket=self.bucket_name)
full_result = pages.build_full_result()
self.assertEqual(len(full_result['Uploads']), 1)
def test_paginate_within_page_boundaries(self):
self.create_object('a')
self.create_object('b')
self.create_object('c')
self.create_object('d')
paginator = self.client.get_paginator('list_objects')
# MaxKeys is not set, so all four objects fit on a single page of
# results; MaxItems=1 then truncates client-side within that page.
pages = paginator.paginate(PaginationConfig={'MaxItems': 1},
Bucket=self.bucket_name)
first = pages.build_full_result()
t1 = first['NextToken']
pages = paginator.paginate(
PaginationConfig={'MaxItems': 1, 'StartingToken': t1},
Bucket=self.bucket_name)
second = pages.build_full_result()
t2 = second['NextToken']
pages = paginator.paginate(
PaginationConfig={'MaxItems': 1, 'StartingToken': t2},
Bucket=self.bucket_name)
third = pages.build_full_result()
t3 = third['NextToken']
pages = paginator.paginate(
PaginationConfig={'MaxItems': 1, 'StartingToken': t3},
Bucket=self.bucket_name)
fourth = pages.build_full_result()
self.assertEqual(first['Contents'][-1]['Key'], 'a')
self.assertEqual(second['Contents'][-1]['Key'], 'b')
self.assertEqual(third['Contents'][-1]['Key'], 'c')
self.assertEqual(fourth['Contents'][-1]['Key'], 'd')
def test_unicode_key_put_list(self):
# Verify we can upload a key with a unicode char and list it as well.
key_name = u'\u2713'
self.create_object(key_name)
parsed = self.client.list_objects(Bucket=self.bucket_name)
self.assertEqual(len(parsed['Contents']), 1)
self.assertEqual(parsed['Contents'][0]['Key'], key_name)
parsed = self.client.get_object(
Bucket=self.bucket_name, Key=key_name)
self.assertEqual(parsed['Body'].read().decode('utf-8'), 'foo')
def test_unicode_system_character(self):
# Verify we can use a key name containing a control character, which
# would normally break the xml parser.
key_name = 'foo\x08'
self.create_object(key_name)
self.addCleanup(self.delete_object, key_name, self.bucket_name)
parsed = self.client.list_objects(Bucket=self.bucket_name)
self.assertEqual(len(parsed['Contents']), 1)
self.assertEqual(parsed['Contents'][0]['Key'], key_name)
parsed = self.client.list_objects(Bucket=self.bucket_name,
EncodingType='url')
self.assertEqual(len(parsed['Contents']), 1)
self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08')
def test_thread_safe_auth(self):
self.auth_paths = []
self.session.register('before-sign', self.increment_auth)
# This test depends on auth_path, which is only added in virtual host
# style requests.
config = Config(s3={'addressing_style': 'virtual'})
self.client = self.session.create_client('s3', self.region,
config=config)
self.create_object(key_name='foo1')
threads = []
for i in range(10):
t = threading.Thread(target=self.create_object_catch_exceptions,
args=('foo%s' % i,))
t.daemon = True
threads.append(t)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(
self.caught_exceptions, [],
"Unexpectedly caught exceptions: %s" % self.caught_exceptions)
self.assertEqual(
len(set(self.auth_paths)), 10,
"Expected 10 unique auth paths, instead received: %s" %
(self.auth_paths))
def test_non_normalized_key_paths(self):
# create_object raises if the underlying PutObject call fails, so we
# just verify the non-normalized key path is preserved as-is.
self.create_object('key./././name')
bucket_contents = self.client.list_objects(
Bucket=self.bucket_name)['Contents']
self.assertEqual(len(bucket_contents), 1)
self.assertEqual(bucket_contents[0]['Key'], 'key./././name')
class TestS3Regions(BaseS3ClientTest):
def setUp(self):
super(TestS3Regions, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
's3', region_name=self.region)
def test_reset_stream_on_redirects(self):
# Create a bucket in a non-classic region.
bucket_name = self.create_bucket(self.region)
# Then try to put a file like object to this location.
tempdir = self.make_tempdir()
filename = os.path.join(tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'foo' * 1024)
with open(filename, 'rb') as f:
self.client.put_object(
Bucket=bucket_name, Key='foo', Body=f)
data = self.client.get_object(
Bucket=bucket_name, Key='foo')
self.assertEqual(data['Body'].read(), b'foo' * 1024)
class TestS3Copy(TestS3BaseWithBucket):
def test_copy_with_quoted_char(self):
key_name = 'a+b/foo'
self.create_object(key_name=key_name)
key_name2 = key_name + 'bar'
self.client.copy_object(
Bucket=self.bucket_name, Key=key_name2,
CopySource='%s/%s' % (self.bucket_name, key_name))
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
def test_copy_with_query_string(self):
key_name = 'a+b/foo?notVersionid=bar'
self.create_object(key_name=key_name)
key_name2 = key_name + 'bar'
self.client.copy_object(
Bucket=self.bucket_name, Key=key_name2,
CopySource='%s/%s' % (self.bucket_name, key_name))
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
def test_can_copy_with_dict_form(self):
key_name = 'a+b/foo?versionId=abcd'
self.create_object(key_name=key_name)
key_name2 = key_name + 'bar'
self.client.copy_object(
Bucket=self.bucket_name, Key=key_name2,
CopySource={'Bucket': self.bucket_name,
'Key': key_name})
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
def test_copy_with_s3_metadata(self):
key_name = 'foo.txt'
self.create_object(key_name=key_name)
copied_key = 'copied.txt'
parsed = self.client.copy_object(
Bucket=self.bucket_name, Key=copied_key,
CopySource='%s/%s' % (self.bucket_name, key_name),
MetadataDirective='REPLACE',
Metadata={"mykey": "myvalue", "mykey2": "myvalue2"})
self.assert_status_code(parsed, 200)
class BaseS3PresignTest(BaseS3ClientTest):
def setup_bucket(self):
self.key = 'myobject'
self.create_object(key_name=self.key)
def create_object(self, key_name, body='foo'):
self.client.put_object(
Bucket=self.bucket_name, Key=key_name,
Body=body)
class TestS3PresignUsStandard(BaseS3PresignTest):
def setUp(self):
super(TestS3PresignUsStandard, self).setUp()
self.region = 'us-east-1'
self.client_config = Config(
region_name=self.region, signature_version='s3')
self.client = self.session.create_client(
's3', config=self.client_config)
self.bucket_name = self.create_bucket(self.region)
self.setup_bucket()
def test_presign_sigv2(self):
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
self.assertTrue(
presigned_url.startswith(
'https://%s.s3.amazonaws.com/%s' % (
self.bucket_name, self.key)),
"Host was suppose to use DNS style, instead "
"got: %s" % presigned_url)
# Try to retrieve the object using the presigned url.
self.assertEqual(requests.get(presigned_url).content, b'foo')
def test_presign_with_existing_query_string_values(self):
content_disposition = 'attachment; filename=foo.txt;'
presigned_url = self.client.generate_presigned_url(
'get_object', Params={
'Bucket': self.bucket_name, 'Key': self.key,
'ResponseContentDisposition': content_disposition})
response = requests.get(presigned_url)
self.assertEqual(response.headers['Content-Disposition'],
content_disposition)
self.assertEqual(response.content, b'foo')
def test_presign_sigv4(self):
self.client_config.signature_version = 's3v4'
self.client = self.session.create_client(
's3', config=self.client_config)
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
self.assertTrue(
presigned_url.startswith(
'https://s3.amazonaws.com/%s/%s' % (
self.bucket_name, self.key)),
"Host was suppose to be the us-east-1 endpoint, instead "
"got: %s" % presigned_url)
# Try to retrieve the object using the presigned url.
self.assertEqual(requests.get(presigned_url).content, b'foo')
def test_presign_post_sigv2(self):
# Create some of the various supported conditions.
conditions = [
{"acl": "public-read"},
]
# Create the fields that follow the policy.
fields = {
'acl': 'public-read',
}
# Retrieve the args for the presigned post.
post_args = self.client.generate_presigned_post(
self.bucket_name, self.key, Fields=fields,
Conditions=conditions)
# Make sure that the form can be posted successfully.
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
self.assertTrue(
post_args['url'].startswith(
'https://%s.s3.amazonaws.com' % self.bucket_name),
"Host was suppose to use DNS style, instead "
"got: %s" % post_args['url'])
# Try to upload the object using the presigned post.
r = requests.post(
post_args['url'], data=post_args['fields'], files=files)
self.assertEqual(r.status_code, 204)
def test_presign_post_sigv4(self):
self.client_config.signature_version = 's3v4'
self.client = self.session.create_client(
's3', config=self.client_config)
# Create some of the various supported conditions.
conditions = [
{"acl": 'public-read'},
]
# Create the fields that follow the policy.
fields = {
'acl': 'public-read',
}
# Retrieve the args for the presigned post.
post_args = self.client.generate_presigned_post(
self.bucket_name, self.key, Fields=fields,
Conditions=conditions)
# Make sure that the form can be posted successfully.
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
self.assertTrue(
post_args['url'].startswith(
'https://s3.amazonaws.com/%s' % self.bucket_name),
"Host was suppose to use us-east-1 endpoint, instead "
"got: %s" % post_args['url'])
r = requests.post(
post_args['url'], data=post_args['fields'], files=files)
self.assertEqual(r.status_code, 204)
class TestS3PresignNonUsStandard(BaseS3PresignTest):
def setUp(self):
super(TestS3PresignNonUsStandard, self).setUp()
self.client_config = Config(
region_name=self.region, signature_version='s3')
self.client = self.session.create_client(
's3', config=self.client_config)
self.setup_bucket()
def test_presign_sigv2(self):
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
self.assertTrue(
presigned_url.startswith(
'https://%s.s3.amazonaws.com/%s' % (
self.bucket_name, self.key)),
"Host was suppose to use DNS style, instead "
"got: %s" % presigned_url)
# Try to retrieve the object using the presigned url.
self.assertEqual(requests.get(presigned_url).content, b'foo')
def test_presign_sigv4(self):
self.client_config.signature_version = 's3v4'
self.client = self.session.create_client(
's3', config=self.client_config)
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
self.assertTrue(
presigned_url.startswith(
'https://s3.us-west-2.amazonaws.com/%s/%s' % (
self.bucket_name, self.key)),
"Host was suppose to be the us-west-2 endpoint, instead "
"got: %s" % presigned_url)
# Try to retrieve the object using the presigned url.
self.assertEqual(requests.get(presigned_url).content, b'foo')
def test_presign_post_sigv2(self):
# Create some of the various supported conditions.
conditions = [
{"acl": "public-read"},
]
# Create the fields that follow the policy.
fields = {
'acl': 'public-read',
}
# Retrieve the args for the presigned post.
post_args = self.client.generate_presigned_post(
self.bucket_name, self.key, Fields=fields, Conditions=conditions)
# Make sure that the form can be posted successfully.
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
self.assertTrue(
post_args['url'].startswith(
'https://%s.s3.amazonaws.com' % self.bucket_name),
"Host was suppose to use DNS style, instead "
"got: %s" % post_args['url'])
r = requests.post(
post_args['url'], data=post_args['fields'], files=files)
self.assertEqual(r.status_code, 204)
def test_presign_post_sigv4(self):
self.client_config.signature_version = 's3v4'
self.client = self.session.create_client(
's3', config=self.client_config)
# Create some of the various supported conditions.
conditions = [
{"acl": "public-read"},
]
# Create the fields that follow the policy.
fields = {
'acl': 'public-read',
}
# Retrieve the args for the presigned post.
post_args = self.client.generate_presigned_post(
self.bucket_name, self.key, Fields=fields, Conditions=conditions)
# Make sure that the form can be posted successfully.
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
self.assertTrue(
post_args['url'].startswith(
'https://s3.us-west-2.amazonaws.com/%s' % self.bucket_name),
"Host was suppose to use DNS style, instead "
"got: %s" % post_args['url'])
r = requests.post(
post_args['url'], data=post_args['fields'], files=files)
self.assertEqual(r.status_code, 204)
class TestCreateBucketInOtherRegion(TestS3BaseWithBucket):
def test_bucket_in_other_region(self):
# This verifies expect 100-continue behavior. We previously
# had a bug where we did not support this behavior and trying to
# create a bucket and immediately PutObject with a file-like object
# would actually cause errors.
client = self.session.create_client('s3', 'us-east-1')
with temporary_file('w') as f:
f.write('foobarbaz' * 1024 * 1024)
f.flush()
with open(f.name, 'rb') as body_file:
response = client.put_object(
Bucket=self.bucket_name,
Key='foo.txt', Body=body_file)
self.assert_status_code(response, 200)
def test_bucket_in_other_region_using_http(self):
client = self.session.create_client(
's3', 'us-east-1', endpoint_url='http://s3.amazonaws.com/')
with temporary_file('w') as f:
f.write('foobarbaz' * 1024 * 1024)
f.flush()
with open(f.name, 'rb') as body_file:
response = client.put_object(
Bucket=self.bucket_name,
Key='foo.txt', Body=body_file)
self.assert_status_code(response, 200)
class TestS3SigV4Client(BaseS3ClientTest):
def setUp(self):
super(TestS3SigV4Client, self).setUp()
self.client = self.session.create_client(
's3', self.region, config=Config(signature_version='s3v4'))
def test_can_get_bucket_location(self):
# Even though the bucket is in us-west-2, we should still be able to
# use the us-east-1 endpoint to get the bucket location.
client = self.session.create_client('s3', 'us-east-1')
# Also keep in mind that while this test is useful, it doesn't test
# what happens once DNS propagates, which is arguably more interesting,
# as DNS will point us to the us-west-2 endpoint.
response = client.get_bucket_location(Bucket=self.bucket_name)
self.assertEqual(response['LocationConstraint'], 'us-west-2')
def test_request_retried_for_sigv4(self):
body = six.BytesIO(b"Hello world!")
original_send = adapters.HTTPAdapter.send
state = mock.Mock()
state.error_raised = False
def mock_http_adapter_send(self, *args, **kwargs):
if not state.error_raised:
state.error_raised = True
raise ConnectionError("Simulated ConnectionError raised.")
else:
return original_send(self, *args, **kwargs)
with mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send',
mock_http_adapter_send):
response = self.client.put_object(Bucket=self.bucket_name,
Key='foo.txt', Body=body)
self.assert_status_code(response, 200)
@attr('slow')
def test_paginate_list_objects_unicode(self):
key_names = [
u'non-ascii-key-\xe4\xf6\xfc-01.txt',
u'non-ascii-key-\xe4\xf6\xfc-02.txt',
u'non-ascii-key-\xe4\xf6\xfc-03.txt',
u'non-ascii-key-\xe4\xf6\xfc-04.txt',
]
for key in key_names:
response = self.client.put_object(Bucket=self.bucket_name,
Key=key, Body='')
self.assert_status_code(response, 200)
list_objs_paginator = self.client.get_paginator('list_objects')
key_refs = []
for response in list_objs_paginator.paginate(Bucket=self.bucket_name,
PaginationConfig={
'PageSize': 2}):
for content in response['Contents']:
key_refs.append(content['Key'])
self.assertEqual(key_names, key_refs)
@attr('slow')
def test_paginate_list_objects_safe_chars(self):
key_names = [
u'-._~safe-chars-key-01.txt',
u'-._~safe-chars-key-02.txt',
u'-._~safe-chars-key-03.txt',
u'-._~safe-chars-key-04.txt',
]
for key in key_names:
response = self.client.put_object(Bucket=self.bucket_name,
Key=key, Body='')
self.assert_status_code(response, 200)
list_objs_paginator = self.client.get_paginator('list_objects')
key_refs = []
for response in list_objs_paginator.paginate(Bucket=self.bucket_name,
PaginationConfig={
'PageSize': 2}):
for content in response['Contents']:
key_refs.append(content['Key'])
self.assertEqual(key_names, key_refs)
def test_create_multipart_upload(self):
key = 'mymultipartupload'
response = self.client.create_multipart_upload(
Bucket=self.bucket_name, Key=key
)
self.assert_status_code(response, 200)
upload_id = response['UploadId']
self.addCleanup(
self.client.abort_multipart_upload,
Bucket=self.bucket_name, Key=key, UploadId=upload_id
)
response = self.client.list_multipart_uploads(
Bucket=self.bucket_name, Prefix=key
)
# Make sure there is only one multipart upload.
self.assertEqual(len(response['Uploads']), 1)
# Make sure the upload id is as expected.
self.assertEqual(response['Uploads'][0]['UploadId'], upload_id)
def test_can_add_double_space_metadata(self):
# Ensure we get no sigv4 errors when we send
# metadata with consecutive spaces.
response = self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt',
Body=b'foobar', Metadata={'foo': ' multi spaces '})
self.assert_status_code(response, 200)
class TestSSEKeyParamValidation(BaseS3ClientTest):
def test_make_request_with_sse(self):
key_bytes = os.urandom(32)
# Obviously a bad key here, but we just want to ensure we can use
# a str/unicode type as a key.
key_str = 'abcd' * 8
# Put two objects with an sse key, one with random bytes,
# one with str/unicode. Then verify we can GetObject() both
# objects.
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt',
Body=six.BytesIO(b'mycontents'), SSECustomerAlgorithm='AES256',
SSECustomerKey=key_bytes)
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name, Key='foo.txt')
self.client.put_object(
Bucket=self.bucket_name, Key='foo2.txt',
Body=six.BytesIO(b'mycontents2'), SSECustomerAlgorithm='AES256',
SSECustomerKey=key_str)
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name, Key='foo2.txt')
self.assertEqual(
self.client.get_object(Bucket=self.bucket_name,
Key='foo.txt',
SSECustomerAlgorithm='AES256',
SSECustomerKey=key_bytes)['Body'].read(),
b'mycontents')
self.assertEqual(
self.client.get_object(Bucket=self.bucket_name,
Key='foo2.txt',
SSECustomerAlgorithm='AES256',
SSECustomerKey=key_str)['Body'].read(),
b'mycontents2')
def test_make_request_with_sse_copy_source(self):
encrypt_key = 'a' * 32
other_encrypt_key = 'b' * 32
# Upload the object using one encrypt key
self.client.put_object(
Bucket=self.bucket_name, Key='foo.txt',
Body=six.BytesIO(b'mycontents'), SSECustomerAlgorithm='AES256',
SSECustomerKey=encrypt_key)
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name, Key='foo.txt')
# Copy the object using the original encryption key as the copy source
# and encrypt with a new encryption key.
self.client.copy_object(
Bucket=self.bucket_name,
CopySource=self.bucket_name+'/foo.txt',
Key='bar.txt', CopySourceSSECustomerAlgorithm='AES256',
CopySourceSSECustomerKey=encrypt_key,
SSECustomerAlgorithm='AES256',
SSECustomerKey=other_encrypt_key
)
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name, Key='bar.txt')
# Download the object using the new encryption key.
# The content should not have changed.
self.assertEqual(
self.client.get_object(
Bucket=self.bucket_name, Key='bar.txt',
SSECustomerAlgorithm='AES256',
SSECustomerKey=other_encrypt_key)['Body'].read(),
b'mycontents')
class TestS3UTF8Headers(BaseS3ClientTest):
def test_can_set_utf_8_headers(self):
bucket_name = _SHARED_BUCKET
body = six.BytesIO(b"Hello world!")
response = self.client.put_object(
Bucket=bucket_name, Key="foo.txt", Body=body,
ContentDisposition="attachment; filename=5小時接力起跑.jpg;")
self.assert_status_code(response, 200)
self.addCleanup(self.client.delete_object,
Bucket=bucket_name, Key="foo.txt")
class TestSupportedPutObjectBodyTypes(TestS3BaseWithBucket):
def test_can_put_unicode_content(self):
self.assert_can_put_object(body=u'\u2713')
def test_can_put_non_ascii_bytes(self):
self.assert_can_put_object(body=u'\u2713'.encode('utf-8'))
def test_can_put_arbitrary_binary_data(self):
body = os.urandom(5 * (1024 ** 2))
self.assert_can_put_object(body)
def test_can_put_binary_file(self):
tempdir = self.make_tempdir()
filename = os.path.join(tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(u'\u2713'.encode('utf-8'))
with open(filename, 'rb') as binary_file:
self.assert_can_put_object(body=binary_file)
def test_can_put_extracted_file_from_tar(self):
tempdir = self.make_tempdir()
tarname = os.path.join(tempdir, 'mytar.tar')
filename = os.path.join(tempdir, 'foo')
# Set up a file to add to the tarfile.
with open(filename, 'w') as f:
f.write('bar')
# Set up the tar file by adding the file to it.
# Note there is no context manager for TarFile in python 2.6
try:
tar = TarFile(tarname, 'w')
tar.add(filename, 'foo')
finally:
tar.close()
# See if an extracted file can be uploaded to s3.
try:
tar = TarFile(tarname, 'r')
with closing(tar.extractfile('foo')) as f:
self.assert_can_put_object(body=f)
finally:
tar.close()
class TestSupportedPutObjectBodyTypesSigv4(TestSupportedPutObjectBodyTypes):
def create_client(self):
client_config = Config(signature_version='s3v4')
return self.session.create_client('s3', self.region,
config=client_config)
class TestAutoS3Addressing(BaseS3ClientTest):
def setUp(self):
super(TestAutoS3Addressing, self).setUp()
self.addressing_style = 'auto'
self.client = self.create_client()
def create_client(self, signature_version='s3'):
# signature_version is a top-level Config option, not a key in the
# s3-specific config dict.
return self.session.create_client(
's3', region_name=self.region,
config=Config(
signature_version=signature_version,
s3={'addressing_style': self.addressing_style}))
def test_can_list_buckets(self):
response = self.client.list_buckets()
self.assertIn('Buckets', response)
def test_can_make_bucket_and_put_object(self):
response = self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body='contents')
self.assertEqual(
response['ResponseMetadata']['HTTPStatusCode'], 200)
def test_can_make_bucket_and_put_object_with_sigv4(self):
self.region = 'eu-central-1'
self.client = self.create_client(signature_version='s3v4')
bucket_name = self.create_bucket(self.region)
response = self.client.put_object(
Bucket=bucket_name, Key='foo', Body='contents')
self.assertEqual(
response['ResponseMetadata']['HTTPStatusCode'], 200)
class TestS3VirtualAddressing(TestAutoS3Addressing):
def setUp(self):
super(TestS3VirtualAddressing, self).setUp()
self.addressing_style = 'virtual'
self.client = self.create_client()
class TestS3PathAddressing(TestAutoS3Addressing):
def setUp(self):
super(TestS3PathAddressing, self).setUp()
self.addressing_style = 'path'
self.client = self.create_client()
class TestRegionRedirect(BaseS3ClientTest):
def setUp(self):
super(TestRegionRedirect, self).setUp()
self.bucket_region = self.region
self.client_region = 'eu-central-1'
self.client = self.session.create_client(
's3', region_name=self.client_region,
config=Config(signature_version='s3v4'))
self.bucket_client = self.session.create_client(
's3', region_name=self.bucket_region,
config=Config(signature_version='s3v4')
)
def test_region_redirects(self):
try:
response = self.client.list_objects(Bucket=self.bucket_name)
self.assertEqual(
response['ResponseMetadata']['HTTPStatusCode'], 200)
except ClientError as e:
error = e.response['Error'].get('Code', None)
if error == 'PermanentRedirect':
self.fail("S3 client failed to redirect to the proper region.")
def test_region_redirect_sigv2_to_sigv4_raises_error(self):
self.bucket_region = 'eu-central-1'
sigv2_client = self.session.create_client(
's3', region_name=self.client_region,
config=Config(signature_version='s3'))
eu_bucket = self.create_bucket(self.bucket_region)
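# eu-central-1 only supports SigV4, so the SigV2-signed request should
# fail with an auth error instead of being transparently redirected.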
msg = 'The authorization mechanism you have provided is not supported.'
with self.assertRaisesRegexp(ClientError, msg):
sigv2_client.list_objects(Bucket=eu_bucket)
def test_region_redirects_multiple_requests(self):
try:
response = self.client.list_objects(Bucket=self.bucket_name)
self.assertEqual(
response['ResponseMetadata']['HTTPStatusCode'], 200)
second_response = self.client.list_objects(Bucket=self.bucket_name)
self.assertEqual(
second_response['ResponseMetadata']['HTTPStatusCode'], 200)
except ClientError as e:
error = e.response['Error'].get('Code', None)
if error == 'PermanentRedirect':
self.fail("S3 client failed to redirect to the proper region.")
def test_redirects_head_bucket(self):
response = self.client.head_bucket(Bucket=self.bucket_name)
headers = response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region')
self.assertEqual(region, self.bucket_region)
def test_redirects_head_object(self):
key = 'foo'
body = 'bar'
self.bucket_client.put_object(
Bucket=self.bucket_name, Key=key, Body=body)
try:
response = self.client.head_object(
Bucket=self.bucket_name, Key=key)
self.assertEqual(response.get('ContentLength'), len(body))
except ClientError as e:
self.fail("S3 Client failed to redirect Head Object: %s" % e)
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
from nose.plugins.attrib import attr
import botocore.session
from botocore.exceptions import WaiterError
@attr('slow')
class TestWaiterForDynamoDB(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('dynamodb', 'us-west-2')
def test_create_table_and_wait(self):
table_name = 'botocoretest-%s' % random_chars(10)
self.client.create_table(
TableName=table_name,
ProvisionedThroughput={"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5},
KeySchema=[{"AttributeName": "foo", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "foo",
"AttributeType": "S"}])
self.addCleanup(self.client.delete_table, TableName=table_name)
waiter = self.client.get_waiter('table_exists')
waiter.wait(TableName=table_name)
parsed = self.client.describe_table(TableName=table_name)
self.assertEqual(parsed['Table']['TableStatus'], 'ACTIVE')
class TestCanGetWaitersThroughClientInterface(unittest.TestCase):
def test_get_ses_waiter(self):
# We're checking this because ses is not the endpoint prefix
# for the service, it's email. We want to make sure this does
# not affect the lookup process.
session = botocore.session.get_session()
client = session.create_client('ses', 'us-east-1')
# If we have at least one waiter in the list, we know that we have
# actually loaded the waiters and this test has passed.
self.assertTrue(len(client.waiter_names) > 0)
class TestMatchersWithErrors(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', region_name='us-west-2')
def test_dont_search_on_error_responses(self):
"""Test that InstanceExists can handle a nonexistent instance."""
waiter = self.client.get_waiter('instance_exists')
waiter.config.max_attempts = 1
with self.assertRaises(WaiterError):
waiter.wait(InstanceIds=['i-12345'])
|
import os
import botocore.session
SESSION = botocore.session.get_session()
KNOWN_SERVICES = SESSION.get_available_services()
# For the services where the tag name doesn't match
# the name we use to create_client(), we need to maintain
# a map until we can get these changes pushed upstream.
TAG_TO_ENDPOINT_PREFIX = {
'cognitoidentity': 'cognito-identity',
'cognitosync': 'cognito-sync',
'elasticloadbalancing': 'elb',
'elasticfilesystem': 'efs',
}
REGION = 'us-east-1'
REGION_OVERRIDES = {
'devicefarm': 'us-west-2',
'efs': 'us-west-2',
}
SKIP_SERVICES = set([
# efs/support require subscriptions and may not work on every machine.
'efs',
'support',
# sts and importexport are skipped because they do not
# work when using temporary credentials.
'sts',
'importexport',
])
def before_feature(context, feature):
for tag in feature.tags:
if tag in TAG_TO_ENDPOINT_PREFIX:
service_name = TAG_TO_ENDPOINT_PREFIX[tag]
break
elif tag in KNOWN_SERVICES:
service_name = tag
break
else:
raise RuntimeError("Unable to create a client for "
"feature: %s" % feature)
if service_name in SKIP_SERVICES:
feature.mark_skipped()
return
region_name = _get_region_for_service(service_name)
context.client = SESSION.create_client(service_name, region_name)
def _get_region_for_service(service_name):
if os.environ.get('AWS_SMOKE_TEST_REGION', ''):
region_name = os.environ['AWS_SMOKE_TEST_REGION']
else:
region_name = REGION_OVERRIDES.get(service_name, REGION)
return region_name
|
import json
from botocore import xform_name
from botocore.exceptions import ClientError
from behave import when, then
from nose.tools import assert_equal
def _params_from_table(table):
# Unfortunately the way we're using table is not quite how
# behave expects tables to be used:
# They expect:
#
# | name | department |
# | Barry | foo |
# | Pudey | bar |
# | Two-Lumps | bar |
#
# Where the first row contains headings that indicate the
# key name you can use to retrieve row values,
# e.g row['name'] -> Barry.
#
#
# We just use:
# | LaunchConfigurationName | hello, world |
# | ImageId | ami-12345678 |
# | InstanceType | m1.small |
#
# So we have to grab the headings before iterating over
# the table rows.
params = {table.headings[0]: table.headings[1]}
for row in table:
params[row[0]] = row[1]
return params
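# For reference, a hypothetical feature exercised by the steps below
# might look like this (the service, operation, and values are
# illustrative, not taken from a real smoke test):
#
#   @dynamodb
#   Feature: Amazon DynamoDB
#     Scenario: Calling an API with parameters
#       When I call the "ListTables" API with
#         | Limit | 1 |
#       Then the response should contain a "TableNames"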
@when(u'I call the "{}" API')
def api_call_no_args(context, operation):
context.response = getattr(context.client, xform_name(operation))()
@when(u'I call the "{}" API with')
def api_call_with_args(context, operation):
params = _params_from_table(context.table)
context.response = getattr(context.client, xform_name(operation))(**params)
@when(u'I call the "{}" API with JSON')
def api_call_with_json(context, operation):
params = json.loads(context.text)
context.response = getattr(context.client, xform_name(operation))(**params)
@when(u'I attempt to call the "{}" API with')
def api_call_with_error(context, operation):
params = _params_from_table(context.table)
try:
getattr(context.client, xform_name(operation))(**params)
except ClientError as e:
context.error_response = e
@when(u'I attempt to call the "{}" API with JSON')
def api_call_with_json_and_error(context, operation):
params = json.loads(context.text)
try:
getattr(context.client, xform_name(operation))(**params)
except ClientError as e:
context.error_response = e
@then(u'I expect the response error code to be "{}"')
def then_expected_error(context, code):
assert_equal(context.error_response.response['Error']['Code'], code)
@then(u'the value at "{}" should be a list')
def then_expected_type_is_list(context, expression):
# In botocore, if an element has no values it will not appear in
# the response dict, so it's actually ok if the element does not
# exist (and is not a list).
# If an exception happened the test will have already failed,
# which makes this step a noop. We'll just verify
# the response is a dict to ensure it made it through
# our response parser properly.
if not isinstance(context.response, dict):
raise AssertionError("Response is not a dict: %s" % context.response)
@then(u'the response should contain a "{}"')
def then_should_contain_key(context, key):
# See then_expected_type_is_list for more background info.
# We really just care that the request succeeded for these
# smoke tests.
if not isinstance(context.response, dict):
raise AssertionError("Response is not a dict: %s" % context.response)
@then(u'I expect the response error to contain a message')
def then_error_has_message(context):
if 'Message' not in context.error_response.response['Error']:
raise AssertionError("Message key missing from error response: %s" %
context.error_response.response)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import jmespath
from jsonschema import Draft4Validator
import botocore.session
from botocore.exceptions import UnknownServiceError
from botocore.utils import ArgumentGenerator
WAITER_SCHEMA = {
"type": "object",
"properties": {
"version": {"type": "number"},
"waiters": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["api"]
},
"operation": {"type": "string"},
"description": {"type": "string"},
"delay": {
"type": "number",
"minimum": 0,
},
"maxAttempts": {
"type": "integer",
"minimum": 1
},
"acceptors": {
"type": "array",
"items": {
"type": "object",
"properties": {
"state": {
"type": "string",
"enum": ["success", "retry", "failure"]
},
"matcher": {
"type": "string",
"enum": [
"path", "pathAll", "pathAny",
"status", "error"
]
},
"argument": {"type": "string"},
"expected": {
"oneOf": [
{"type": "string"},
{"type": "number"},
{"type": "boolean"}
]
}
},
"required": [
"state", "matcher", "expected"
],
"additionalProperties": False
}
}
},
"required": ["operation", "delay", "maxAttempts", "acceptors"],
"additionalProperties": False
}
}
},
"additionalProperties": False
}
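# A minimal waiter document that validates against this schema might look
# like the following (names and values are illustrative):
#
# {
#     "version": 2,
#     "waiters": {
#         "TableExists": {
#             "operation": "DescribeTable",
#             "delay": 20,
#             "maxAttempts": 25,
#             "acceptors": [
#                 {"state": "success", "matcher": "path",
#                  "argument": "Table.TableStatus", "expected": "ACTIVE"}
#             ]
#         }
#     }
# }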
def test_lint_waiter_configs():
session = botocore.session.get_session()
validator = Draft4Validator(WAITER_SCHEMA)
for service_name in session.get_available_services():
client = session.create_client(service_name, 'us-east-1')
service_model = client.meta.service_model
try:
# We use the loader directly here because we need the entire
# json document, not just the portions exposed (either
# internally or externally) by the WaiterModel class.
loader = session.get_component('data_loader')
waiter_model = loader.load_service_model(
service_name, 'waiters-2')
except UnknownServiceError:
# The service doesn't have waiters
continue
yield _validate_schema, validator, waiter_model
for waiter_name in client.waiter_names:
yield _lint_single_waiter, client, waiter_name, service_model
def _lint_single_waiter(client, waiter_name, service_model):
try:
waiter = client.get_waiter(waiter_name)
# The 'acceptors' property is dynamic and will create
# the acceptor configs when first accessed. This is still
# considered a failure to construct the waiter which is
# why it's in this try/except block.
# This catches things like:
# * jmespath expression compiles
# * matcher has a known value
acceptors = waiter.config.acceptors
except Exception as e:
raise AssertionError("Could not create waiter '%s': %s"
% (waiter_name, e))
operation_name = waiter.config.operation
# Needs to reference an existing operation name.
if operation_name not in service_model.operation_names:
raise AssertionError("Waiter config references unknown "
"operation: %s" % operation_name)
# Needs to have at least one acceptor.
if not waiter.config.acceptors:
raise AssertionError("Waiter config must have at least "
"one acceptor state: %s" % waiter.name)
op_model = service_model.operation_model(operation_name)
for acceptor in acceptors:
_validate_acceptor(acceptor, op_model, waiter.name)
if not waiter.name.isalnum():
raise AssertionError(
"Waiter name %s is not alphanumeric." % waiter_name
)
def _validate_schema(validator, waiter_json):
errors = list(e.message for e in validator.iter_errors(waiter_json))
if errors:
raise AssertionError('\n'.join(errors))
def _validate_acceptor(acceptor, op_model, waiter_name):
if acceptor.matcher.startswith('path'):
expression = acceptor.argument
# The JMESPath expression should have the potential to match something
# in the response shape.
output_shape = op_model.output_shape
assert output_shape is not None, (
"Waiter '%s' has JMESPath expression with no output shape: %s"
% (waiter_name, op_model))
# We want to check if the JMESPath expression makes sense.
# To do this, we'll generate sample output and evaluate the
# JMESPath expression against the output. We'll then
# check a few things about this returned search result.
search_result = _search_jmespath_expression(expression, op_model)
if not search_result:
raise AssertionError("JMESPath expression did not match "
"anything for waiter '%s': %s"
% (waiter_name, expression))
if acceptor.matcher in ['pathAll', 'pathAny']:
assert isinstance(search_result, list), \
("Attempted to use '%s' matcher in waiter '%s' "
"with non list result in JMESPath expression: %s"
% (acceptor.matcher, waiter_name, expression))
def _search_jmespath_expression(expression, op_model):
arg_gen = ArgumentGenerator(use_member_names=True)
sample_output = arg_gen.generate_skeleton(op_model.output_shape)
search_result = jmespath.search(expression, sample_output)
return search_result
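# For intuition about the helper above: with use_member_names=True the
# generated skeleton fills string members with their own names, so a
# DescribeTable-like output shape yields roughly
# {'Table': {'TableStatus': 'TableStatus', ...}} (illustrative). A waiter
# expression such as 'Table.TableStatus' then searches to a truthy value,
# while a mistyped expression searches to None and fails the lint above.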
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import BaseSessionTest, mock
from botocore.exceptions import ClientError
from botocore.config import Config
class TestRetry(BaseSessionTest):
def setUp(self):
super(TestRetry, self).setUp()
self.region = 'us-west-2'
self.sleep_patch = mock.patch('time.sleep')
self.sleep_patch.start()
def tearDown(self):
self.sleep_patch.stop()
def add_n_retryable_responses(self, mock_send, num_responses):
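# Queue up num_responses HTTP 500 responses on the mocked send().
# 500s are retryable for every service, so each response exercises the
# retry handler until the client gives up and surfaces a ClientError.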
responses = []
for _ in range(num_responses):
http_response = mock.Mock()
http_response.status_code = 500
http_response.headers = {}
http_response.content = b'{}'
responses.append(http_response)
mock_send.side_effect = responses
def assert_will_retry_n_times(self, method, num_retries):
num_responses = num_retries + 1
with mock.patch('botocore.endpoint.Session.send') as mock_send:
self.add_n_retryable_responses(mock_send, num_responses)
with self.assertRaisesRegexp(
ClientError, 'reached max retries: %s' % num_retries):
method()
self.assertEqual(mock_send.call_count, num_responses)
def test_can_override_max_attempts(self):
client = self.session.create_client(
'dynamodb', self.region, config=Config(
retries={'max_attempts': 1}))
self.assert_will_retry_n_times(client.list_tables, 1)
def test_do_not_attempt_retries(self):
client = self.session.create_client(
'dynamodb', self.region, config=Config(
retries={'max_attempts': 0}))
self.assert_will_retry_n_times(client.list_tables, 0)
def test_setting_max_attempts_does_not_set_for_other_clients(self):
# Make one client with max attempts configured.
self.session.create_client(
'codecommit', self.region, config=Config(
retries={'max_attempts': 1}))
# Make another client that has no custom retry configured.
client = self.session.create_client('codecommit', self.region)
# It should use the default max retries, which should be four retries
# for this service.
self.assert_will_retry_n_times(client.list_repositories, 4)
def test_service_specific_defaults_do_not_mutate_general_defaults(self):
# This tests for a bug where if you created a client for a service
# with specific retry configurations and then created a client for
# a service whose retry configurations fallback to the general
# defaults, the second client would actually use the defaults of
# the first client.
# Make a dynamodb client. It's a special case client that is
# configured to make a maximum of 10 requests (9 retries).
client = self.session.create_client('dynamodb', self.region)
self.assert_will_retry_n_times(client.list_tables, 9)
# A codecommit client is not a special case for retries. It will at
# most make 5 requests (4 retries) for its default.
client = self.session.create_client('codecommit', self.region)
self.assert_will_retry_n_times(client.list_repositories, 4)
def test_set_max_attempts_on_session(self):
self.session.set_default_client_config(
Config(retries={'max_attempts': 1}))
# Max attempts should be inherited from the session.
client = self.session.create_client('codecommit', self.region)
self.assert_will_retry_n_times(client.list_repositories, 1)
def test_can_clobber_max_attempts_on_session(self):
self.session.set_default_client_config(
Config(retries={'max_attempts': 1}))
# Max attempts should override the session's configured max attempts.
client = self.session.create_client(
'codecommit', self.region, config=Config(
retries={'max_attempts': 0}))
self.assert_will_retry_n_times(client.list_repositories, 0)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestClientMeta(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_region_name_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
self.assertEqual(client.meta.region_name, 'us-west-2')
def test_endpoint_url_on_meta(self):
client = self.session.create_client('s3', 'us-west-2',
endpoint_url='https://foo')
self.assertEqual(client.meta.endpoint_url, 'https://foo')
def test_client_has_standard_partition_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
self.assertEqual(client.meta.partition, 'aws')
def test_client_has_china_partition_on_meta(self):
client = self.session.create_client('s3', 'cn-north-1')
self.assertEqual(client.meta.partition, 'aws-cn')
def test_client_has_gov_partition_on_meta(self):
client = self.session.create_client('s3', 'us-gov-west-1')
self.assertEqual(client.meta.partition, 'aws-us-gov')
def test_client_has_no_partition_on_meta_if_custom_region(self):
client = self.session.create_client('s3', 'myregion')
self.assertEqual(client.meta.partition, None)
|
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import defaultdict
import mock
from botocore.session import Session
from botocore.exceptions import NoCredentialsError
from botocore import xform_name
REGIONS = defaultdict(lambda: 'us-east-1')
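# Operations that are callable without AWS credentials. Requests for
# these must be sent unsigned rather than failing with
# NoCredentialsError.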
PUBLIC_API_TESTS = {
"cognito-identity": {
"GetId": {"IdentityPoolId": "region:1234"},
"GetOpenIdToken": {"IdentityId": "region:1234"},
"UnlinkIdentity": {
"IdentityId": "region:1234", "Logins": {}, "LoginsToRemove": []},
"GetCredentialsForIdentity": {"IdentityId": "region:1234"},
},
"sts": {
"AssumeRoleWithSaml": {
"PrincipalArn": "a"*20, "RoleArn": "a"*20, "SAMLAssertion": "abcd",
},
"AssumeRoleWithWebIdentity": {
"RoleArn": "a"*20,
"RoleSessionName": "foo",
"WebIdentityToken": "abcd",
},
},
}
class EarlyExit(BaseException):
pass
def _test_public_apis_will_not_be_signed(func, kwargs):
with mock.patch('botocore.endpoint.Session.send') as _send:
_send.side_effect = EarlyExit("we don't care about response here")
try:
func(**kwargs)
except EarlyExit:
pass
except NoCredentialsError:
assert False, "NoCredentialsError should not be triggered"
request = _send.call_args[0][0]
sig_v2_disabled = 'SignatureVersion=2' not in request.url
assert sig_v2_disabled, "SigV2 is incorrectly enabled"
sig_v3_disabled = 'X-Amzn-Authorization' not in request.headers
assert sig_v3_disabled, "SigV3 is incorrectly enabled"
sig_v4_disabled = 'Authorization' not in request.headers
assert sig_v4_disabled, "SigV4 is incorrectly enabled"
def test_public_apis_will_not_be_signed():
session = Session()
# Mimic the scenario where the user does not have AWS credentials set up
session.get_credentials = mock.Mock(return_value=None)
for service_name in PUBLIC_API_TESTS:
client = session.create_client(service_name, REGIONS[service_name])
for operation_name in PUBLIC_API_TESTS[service_name]:
kwargs = PUBLIC_API_TESTS[service_name][operation_name]
method = getattr(client, xform_name(operation_name))
yield (_test_public_apis_will_not_be_signed, method, kwargs)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
from nose.tools import assert_true
from botocore.session import get_session
BLACKLIST = [
]
# Service names are limited to 50 characters here, as that seems like a
# reasonable limit in the general case. Services can be added to the
# blacklist above to be given an exception.
VALID_NAME_REGEX = re.compile(
(
'[a-z]' # Starts with a letter
'[a-z0-9]*' # Followed by any number of letters or digits
'(-[a-z0-9]+)*$' # Dashes are allowed as long as they aren't
# consecutive or at the end
), re.M)
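# Examples (illustrative): 'ec2', 's3', and 'cognito-identity' match;
# 'Route53', 'my--service', and 'service-' do not.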
VALID_NAME_EXPLANATION = (
'Service names must be made up entirely of lowercase alphanumeric '
'characters and dashes. The name must start with a letter and may not end '
'with a dash'
)
MIN_SERVICE_NAME_LENGTH = 2
MAX_SERVICE_NAME_LENGTH = 50
def _assert_name_length(service_name):
if service_name not in BLACKLIST:
service_name_length = len(service_name)
assert_true(service_name_length >= MIN_SERVICE_NAME_LENGTH,
'Service name must be greater than or equal to 2 '
'characters in length.')
assert_true(service_name_length <= MAX_SERVICE_NAME_LENGTH,
'Service name must be less than or equal to 50 '
'characters in length.')
def _assert_name_pattern(service_name):
if service_name not in BLACKLIST:
valid = VALID_NAME_REGEX.match(service_name) is not None
assert_true(valid, VALID_NAME_EXPLANATION)
def test_service_names_are_valid():
session = get_session()
loader = session.get_component('data_loader')
service_names = loader.list_available_services('service-2')
for service_name in service_names:
yield _assert_name_length, service_name
yield _assert_name_pattern, service_name
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import division
from math import ceil
from datetime import datetime
from nose.tools import assert_equal
from tests import random_chars
from tests import BaseSessionTest
from botocore.stub import Stubber, StubAssertionError
from botocore.paginate import TokenDecoder, TokenEncoder
from botocore.compat import six
class TestRDSPagination(BaseSessionTest):
def setUp(self):
super(TestRDSPagination, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'rds', self.region)
self.stubber = Stubber(self.client)
def test_can_specify_zero_marker(self):
service_response = {
'LogFileData': 'foo',
'Marker': '2',
'AdditionalDataPending': True
}
expected_params = {
'DBInstanceIdentifier': 'foo',
'LogFileName': 'bar',
'NumberOfLines': 2,
'Marker': '0'
}
function_name = 'download_db_log_file_portion'
# The stubber will assert that the function is called with the expected
# parameters.
self.stubber.add_response(
function_name, service_response, expected_params)
self.stubber.activate()
try:
paginator = self.client.get_paginator(function_name)
result = paginator.paginate(
DBInstanceIdentifier='foo',
LogFileName='bar',
NumberOfLines=2,
PaginationConfig={
'StartingToken': '0',
'MaxItems': 3
}).build_full_result()
self.assertEqual(result['LogFileData'], 'foo')
self.assertIn('NextToken', result)
except StubAssertionError as e:
self.fail(str(e))
class TestAutoscalingPagination(BaseSessionTest):
def setUp(self):
super(TestAutoscalingPagination, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'autoscaling', self.region, aws_secret_access_key='foo',
aws_access_key_id='bar', aws_session_token='baz'
)
self.stubber = Stubber(self.client)
self.stubber.activate()
def _setup_scaling_pagination(self, page_size=200, max_items=100,
total_items=600):
"""
Add to the stubber to test paginating describe_scaling_activities.
WARNING: This only handles cases where max_items cleanly divides
page_size.
"""
requests_per_page = page_size / max_items
if requests_per_page != ceil(requests_per_page):
raise NotImplementedError(
"This only handles setup where max_items is less than "
"page_size and where max_items evenly divides page_size."
)
requests_per_page = int(requests_per_page)
num_pages = int(ceil(total_items / page_size))
previous_next_token = None
for i in range(num_pages):
page = self.create_describe_scaling_response(page_size=page_size)
# Don't create a next_token for the final page
if i + 1 == num_pages:
next_token = None
else:
next_token = random_chars(10)
expected_args = {}
if previous_next_token:
expected_args['StartingToken'] = previous_next_token
# The same page may be accessed multiple times because we are
# truncating it at max_items
for _ in range(requests_per_page - 1):
# The page is copied because the paginator will modify the
# response object, causing issues when using the stubber.
self.stubber.add_response(
'describe_scaling_activities', page.copy()
)
if next_token is not None:
page['NextToken'] = next_token
# Copying the page here isn't necessary because it is about to
# be blown away anyway.
self.stubber.add_response(
'describe_scaling_activities', page
)
previous_next_token = next_token
def create_describe_scaling_response(self, page_size=200):
"""Create a valid describe_scaling_activities response."""
page = []
date = datetime.now()
for _ in range(page_size):
page.append({
'AutoScalingGroupName': 'test',
'ActivityId': random_chars(10),
'Cause': 'test',
'StartTime': date,
'StatusCode': '200',
})
return {'Activities': page}
def test_repeated_build_full_results(self):
        # This ensures that we can cleanly paginate repeatedly using
        # build_full_result.
max_items = 100
total_items = 600
self._setup_scaling_pagination(
max_items=max_items,
total_items=total_items,
page_size=200
)
paginator = self.client.get_paginator('describe_scaling_activities')
conf = {'MaxItems': max_items}
pagination_tokens = []
result = paginator.paginate(PaginationConfig=conf).build_full_result()
all_results = result['Activities']
while 'NextToken' in result:
starting_token = result['NextToken']
# We should never get a duplicate pagination token.
self.assertNotIn(starting_token, pagination_tokens)
pagination_tokens.append(starting_token)
conf['StartingToken'] = starting_token
pages = paginator.paginate(PaginationConfig=conf)
result = pages.build_full_result()
all_results.extend(result['Activities'])
self.assertEqual(len(all_results), total_items)
def test_token_encoding():
cases = [
{'foo': 'bar'},
{'foo': b'bar'},
{'foo': {'bar': b'baz'}},
{'foo': ['bar', b'baz']},
{'foo': b'\xff'},
{'foo': {'bar': b'baz', 'bin': [b'bam']}},
]
for token_dict in cases:
yield assert_token_encodes_and_decodes, token_dict
def assert_token_encodes_and_decodes(token_dict):
encoded = TokenEncoder().encode(token_dict)
assert isinstance(encoded, six.string_types)
decoded = TokenDecoder().decode(encoded)
assert_equal(decoded, token_dict)
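# A minimal sketch of the round-trip property exercised above, using only the
# standard library. Illustrative only: botocore's real TokenEncoder and
# TokenDecoder also handle the nested-bytes cases listed in
# test_token_encoding, which plain JSON cannot represent directly.
def _sketch_encode_token(token_dict):
    import base64
    import json
    serialized = json.dumps(token_dict).encode('utf-8')
    return base64.b64encode(serialized).decode('utf-8')


def _sketch_decode_token(encoded):
    import base64
    import json
    return json.loads(base64.b64decode(encoded).decode('utf-8'))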
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, temporary_file
import mock
import botocore.session
from botocore.exceptions import ProfileNotFound
class TestSession(unittest.TestCase):
def setUp(self):
self.environ = {}
self.env_patch = mock.patch('os.environ', self.environ)
self.env_patch.start()
self.session = botocore.session.get_session()
def tearDown(self):
self.env_patch.stop()
def test_profile_precedence(self):
self.environ['AWS_PROFILE'] = 'from_env_var'
self.session.set_config_variable('profile', 'from_session_instance')
self.assertEqual(self.session.profile, 'from_session_instance')
def test_credentials_with_profile_precedence(self):
self.environ['AWS_PROFILE'] = 'from_env_var'
self.session.set_config_variable('profile', 'from_session_instance')
        try:
            self.session.get_credentials()
        except ProfileNotFound as e:
            # The error should reference the profile set on the session
            # instance, not the one from the environment variable.
            self.assertNotIn('from_env_var', str(e))
            self.assertIn('from_session_instance', str(e))
        else:
            self.fail('Expected ProfileNotFound to be raised')
def test_session_profile_overrides_env_vars(self):
# If the ".profile" attribute is set then the associated
# creds for that profile take precedence over the environment
# variables.
with temporary_file('w') as f:
# We test this by creating creds in two places,
# env vars and a fake shared creds file. We ensure
# that if an explicit profile is set we pull creds
# from the shared creds file.
self.environ['AWS_ACCESS_KEY_ID'] = 'env_var_akid'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_var_sak'
self.environ['AWS_SHARED_CREDENTIALS_FILE'] = f.name
f.write(
'[from_session_instance]\n'
'aws_access_key_id=shared_creds_akid\n'
'aws_secret_access_key=shared_creds_sak\n'
)
f.flush()
self.session.set_config_variable('profile', 'from_session_instance')
creds = self.session.get_credentials()
self.assertEqual(creds.access_key, 'shared_creds_akid')
self.assertEqual(creds.secret_key, 'shared_creds_sak')
def test_profile_does_not_win_if_all_from_env_vars(self):
# Creds should be pulled from the env vars because
# if access_key/secret_key/profile are all specified on
# the same "level", then the explicit creds take
# precedence.
with temporary_file('w') as f:
self.environ['AWS_SHARED_CREDENTIALS_FILE'] = f.name
self.environ['AWS_PROFILE'] = 'myprofile'
# Even though we don't use the profile for credentials,
# if you have a profile configured in any way
# (env vars, set when creating a session, etc.) that profile
# must exist. So we need to create an empty profile
# matching the value from AWS_PROFILE.
f.write(
'[myprofile]\n'
)
f.flush()
self.environ['AWS_ACCESS_KEY_ID'] = 'env_var_akid'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_var_sak'
creds = self.session.get_credentials()
self.assertEqual(creds.access_key, 'env_var_akid')
self.assertEqual(creds.secret_key, 'env_var_sak')
def test_provides_available_regions_for_same_endpoint_prefix(self):
regions = self.session.get_available_regions('s3')
self.assertTrue(regions)
def test_provides_available_regions_for_different_endpoint_prefix(self):
regions = self.session.get_available_regions('elb')
self.assertTrue(regions)
def test_does_not_provide_regions_for_mismatch_service_name(self):
# elb's endpoint prefix is elasticloadbalancing, but users should
# still be using the service name when getting regions
regions = self.session.get_available_regions('elasticloadbalancing')
self.assertEqual(regions, [])
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from nose.tools import assert_equal
import botocore.session
REGION = 'us-east-1'
SERVICE_TO_CLASS_NAME = {
'autoscaling': 'AutoScaling',
'cloudformation': 'CloudFormation',
'cloudfront': 'CloudFront',
'cloudhsm': 'CloudHSM',
'cloudsearch': 'CloudSearch',
'cloudsearchdomain': 'CloudSearchDomain',
'cloudtrail': 'CloudTrail',
'cloudwatch': 'CloudWatch',
'codedeploy': 'CodeDeploy',
'cognito-identity': 'CognitoIdentity',
'cognito-sync': 'CognitoSync',
'config': 'ConfigService',
'datapipeline': 'DataPipeline',
'directconnect': 'DirectConnect',
'ds': 'DirectoryService',
'dynamodb': 'DynamoDB',
'ec2': 'EC2',
'ecs': 'ECS',
'efs': 'EFS',
'elasticache': 'ElastiCache',
'elasticbeanstalk': 'ElasticBeanstalk',
'elastictranscoder': 'ElasticTranscoder',
'elb': 'ElasticLoadBalancing',
'emr': 'EMR',
'glacier': 'Glacier',
'iam': 'IAM',
'importexport': 'ImportExport',
'kinesis': 'Kinesis',
'kms': 'KMS',
'lambda': 'Lambda',
'logs': 'CloudWatchLogs',
'machinelearning': 'MachineLearning',
'opsworks': 'OpsWorks',
'rds': 'RDS',
'redshift': 'Redshift',
'route53': 'Route53',
'route53domains': 'Route53Domains',
's3': 'S3',
'sdb': 'SimpleDB',
'ses': 'SES',
'sns': 'SNS',
'sqs': 'SQS',
'ssm': 'SSM',
'storagegateway': 'StorageGateway',
'sts': 'STS',
'support': 'Support',
'swf': 'SWF',
'workspaces': 'WorkSpaces'
}
def test_client_has_correct_class_name():
session = botocore.session.get_session()
for service_name in SERVICE_TO_CLASS_NAME:
client = session.create_client(service_name, REGION)
yield (_assert_class_name_matches_ref_class_name, client,
SERVICE_TO_CLASS_NAME[service_name])
def _assert_class_name_matches_ref_class_name(client, ref_class_name):
assert_equal(client.__class__.__name__, ref_class_name)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from datetime import datetime
import mock
from tests import BaseSessionTest
from tests import assert_url_equal
from botocore.stub import Stubber
class TestSTSPresignedUrl(BaseSessionTest):
def setUp(self):
super(TestSTSPresignedUrl, self).setUp()
self.client = self.session.create_client('sts', 'us-west-2')
# Makes sure that no requests will go through
self.stubber = Stubber(self.client)
self.stubber.activate()
def test_presigned_url_contains_no_content_type(self):
timestamp = datetime(2017, 3, 22, 0, 0)
with mock.patch('botocore.auth.datetime') as _datetime:
_datetime.datetime.utcnow.return_value = timestamp
url = self.client.generate_presigned_url('get_caller_identity', {})
# There should be no 'content-type' in x-amz-signedheaders
expected_url = (
'https://sts.amazonaws.com/?Action=GetCallerIdentity&'
'Version=2011-06-15&X-Amz-Algorithm=AWS4-HMAC-SHA256&'
'X-Amz-Credential=access_key%2F20170322%2Fus-east-1%2Fsts%2F'
'aws4_request&X-Amz-Date=20170322T000000Z&X-Amz-Expires=3600&'
'X-Amz-SignedHeaders=host&X-Amz-Signature=767845d2ee858069a598d5f'
'8b497b75c7d57356885b1b3dba46dbbc0fc62bf5a'
)
assert_url_equal(url, expected_url)
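        # Illustrative alternative check (not executed): the signed-headers
        # claim could also be verified by parsing the query string, e.g. with
        # the stdlib urlsplit/parse_qsl helpers:
        #
        #     query = dict(parse_qsl(urlsplit(url).query))
        #     assert query['X-Amz-SignedHeaders'] == 'host'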
|
import os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
    SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
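# For reference, a minimal demonstration of the checker on an in-memory
# snippet (illustrative only; the filename is hypothetical and is never
# read, because a compliant import raises nothing):
#
#     tree = ast.parse("from botocore.compat import six\n")
#     SixImportChecker('<string>').visit(tree)  # passes: not a bare import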
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from datetime import datetime
from tests import BaseSessionTest
class TestLex(BaseSessionTest):
def setUp(self):
super(TestLex, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client('lex-runtime', self.region)
def test_unsigned_payload(self):
params = {
'botName': 'foo',
'botAlias': 'bar',
'userId': 'baz',
'contentType': 'application/octet-stream',
'inputStream': b''
}
timestamp = datetime(2017, 3, 22, 0, 0)
with mock.patch('botocore.auth.datetime') as _datetime:
_datetime.datetime.utcnow.return_value = timestamp
with mock.patch('botocore.endpoint.Session.send') as _send:
_send.return_value = mock.Mock(
status_code=200, headers={}, content=b'{}')
self.client.post_content(**params)
request = _send.call_args[0][0]
# The payload gets added to the string to sign, and then part of the
# signature. The signature will be part of the authorization header.
# Since we don't have direct access to the payload signature,
# we compare the authorization instead.
authorization = request.headers.get('authorization')
expected_authorization = (
b'AWS4-HMAC-SHA256 '
b'Credential=access_key/20170322/us-west-2/lex/aws4_request, '
b'SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date,'
b' Signature='
b'7f93fde5c36163dce6ee116fcfebab13474ab903782fea04c00bb1dedc3fc4cc'
)
self.assertEqual(authorization, expected_authorization)
content_header = request.headers.get('x-amz-content-sha256')
self.assertEqual(content_header, b'UNSIGNED-PAYLOAD')
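        # Background: SigV4 normally includes SHA256(body) in the canonical
        # request. Streaming operations like this one instead sign the
        # literal string 'UNSIGNED-PAYLOAD' and send it in the
        # x-amz-content-sha256 header, which is what the fixed signature
        # above was computed against.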
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import json
from nose.tools import assert_equal
from botocore.session import get_session
# Several services have names that don't match for one reason or another.
SERVICE_RENAMES = {
'application-autoscaling': 'autoscaling',
'appstream': 'appstream2',
'autoscaling-plans': 'autoscaling',
'dynamodbstreams': 'streams.dynamodb',
'cloudwatch': 'monitoring',
'efs': 'elasticfilesystem',
'elb': 'elasticloadbalancing',
'elbv2': 'elasticloadbalancing',
'emr': 'elasticmapreduce',
'iot-data': 'data.iot',
'meteringmarketplace': 'metering.marketplace',
'opsworkscm': 'opsworks-cm',
'ses': 'email',
'stepfunctions': 'states',
'lex-runtime': 'runtime.lex',
'mturk': 'mturk-requester',
'resourcegroupstaggingapi': 'tagging',
'sagemaker-runtime': 'runtime.sagemaker',
'lex-models': 'models.lex',
'marketplace-entitlement': 'entitlement.marketplace',
'pricing': 'api.pricing',
'mediastore-data': 'data.mediastore',
'iot-jobs-data': 'data.jobs.iot',
'kinesis-video-media': 'kinesisvideo',
'kinesis-video-archived-media': 'kinesisvideo',
'alexaforbusiness': 'a4b',
}
BLACKLIST = [
'mobileanalytics',
]
def test_endpoint_matches_service():
backwards_renames = dict((v, k) for k, v in SERVICE_RENAMES.items())
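    # SERVICE_RENAMES maps several client names onto the same endpoint
    # prefix (e.g. both 'elb' and 'elbv2' -> 'elasticloadbalancing'), so
    # this reverse mapping keeps only one client name per prefix. That is
    # acceptable here because we only need some known client name for each
    # endpoint entry.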
session = get_session()
loader = session.get_component('data_loader')
expected_services = set(loader.list_available_services('service-2'))
pdir = os.path.dirname
endpoints_path = os.path.join(pdir(pdir(pdir(__file__))),
'botocore', 'data', 'endpoints.json')
with open(endpoints_path, 'r') as f:
data = json.loads(f.read())
for partition in data['partitions']:
for service in partition['services'].keys():
service = backwards_renames.get(service, service)
if service not in BLACKLIST:
yield _assert_endpoint_is_service, service, expected_services
def _assert_endpoint_is_service(service, expected_services):
assert service in expected_services
def test_service_name_matches_endpoint_prefix():
# Generates tests for each service to verify that the endpoint prefix
# matches the service name unless there is an explicit exception.
session = get_session()
loader = session.get_component('data_loader')
# Load the list of available services. The names here represent what
# will become the client names.
services = loader.list_available_services('service-2')
for service in services:
yield _assert_service_name_matches_endpoint_prefix, loader, service
def _assert_service_name_matches_endpoint_prefix(loader, service_name):
# Load the service model and grab its endpoint prefix
service_model = loader.load_service_model(service_name, 'service-2')
endpoint_prefix = service_model['metadata']['endpointPrefix']
# Handle known exceptions where we have renamed the service directory
# for one reason or another.
expected_endpoint_prefix = SERVICE_RENAMES.get(service_name, service_name)
assert_equal(
endpoint_prefix, expected_endpoint_prefix,
"Service name `%s` does not match expected endpoint "
"prefix `%s`, actual: `%s`" % (
service_name, expected_endpoint_prefix, endpoint_prefix))
|
"""
Regression test for six issue #98 (https://github.com/benjaminp/six/issues/98)
"""
from mock import patch
import sys
import threading
import time
from botocore.vendored import six
_original_setattr = six.moves.__class__.__setattr__
def _wrapped_setattr(key, value):
# Monkey patch six.moves.__setattr__ to simulate
# a poorly-timed thread context switch
time.sleep(0.1)
return _original_setattr(six.moves, key, value)
def _reload_six():
# Issue #98 is caused by a race condition in six._LazyDescr.__get__
# which is only called once per moved module. Reload six so all the
# moved modules are reset.
if sys.version_info < (3, 0):
reload(six)
elif sys.version_info < (3, 4):
import imp
imp.reload(six)
else:
import importlib
importlib.reload(six)
class _ExampleThread(threading.Thread):
def __init__(self):
super(_ExampleThread, self).__init__()
self.daemon = False
self.exc_info = None
def run(self):
try:
# Simulate use of six by
# botocore.configloader.raw_config_parse()
# Should raise AttributeError if six < 1.9.0
six.moves.configparser.RawConfigParser()
except Exception:
self.exc_info = sys.exc_info()
def test_six_thread_safety():
_reload_six()
with patch('botocore.vendored.six.moves.__class__.__setattr__',
wraps=_wrapped_setattr):
threads = []
for i in range(2):
t = _ExampleThread()
threads.append(t)
t.start()
while threads:
t = threads.pop()
t.join()
if t.exc_info:
six.reraise(*t.exc_info)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from contextlib import contextmanager
import botocore.session
from tests import BaseSessionTest
from botocore.stub import Stubber
from tests import unittest
class TestRDSPresignUrlInjection(BaseSessionTest):
def setUp(self):
super(TestRDSPresignUrlInjection, self).setUp()
self.client = self.session.create_client('rds', 'us-west-2')
@contextmanager
def patch_http_layer(self, response, status_code=200):
with mock.patch('botocore.endpoint.Session.send') as send:
send.return_value = mock.Mock(status_code=status_code,
headers={},
content=response)
yield send
def assert_presigned_url_injected_in_request(self, body):
self.assertIn('PreSignedUrl', body)
self.assertNotIn('SourceRegion', body)
def test_copy_snapshot(self):
params = {
'SourceDBSnapshotIdentifier': 'source-db',
'TargetDBSnapshotIdentifier': 'target-db',
'SourceRegion': 'us-east-1'
}
response_body = (
b'<CopyDBSnapshotResponse>'
b'<CopyDBSnapshotResult></CopyDBSnapshotResult>'
b'</CopyDBSnapshotResponse>'
)
with self.patch_http_layer(response_body) as send:
self.client.copy_db_snapshot(**params)
sent_request = send.call_args[0][0]
self.assert_presigned_url_injected_in_request(sent_request.body)
def test_create_db_instance_read_replica(self):
params = {
'SourceDBInstanceIdentifier': 'source-db',
'DBInstanceIdentifier': 'target-db',
'SourceRegion': 'us-east-1'
}
response_body = (
b'<CreateDBInstanceReadReplicaResponse>'
b'<CreateDBInstanceReadReplicaResult>'
b'</CreateDBInstanceReadReplicaResult>'
b'</CreateDBInstanceReadReplicaResponse>'
)
with self.patch_http_layer(response_body) as send:
self.client.create_db_instance_read_replica(**params)
sent_request = send.call_args[0][0]
self.assert_presigned_url_injected_in_request(sent_request.body)
class TestRDS(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('rds', 'us-west-2')
self.stubber = Stubber(self.client)
self.stubber.activate()
def test_generate_db_auth_token(self):
hostname = 'host.us-east-1.rds.amazonaws.com'
port = 3306
username = 'mySQLUser'
auth_token = self.client.generate_db_auth_token(
DBHostname=hostname, Port=port, DBUsername=username)
endpoint_url = 'host.us-east-1.rds.amazonaws.com:3306'
self.assertIn(endpoint_url, auth_token)
self.assertIn('Action=connect', auth_token)
# Asserts that there is no scheme in the url
self.assertTrue(auth_token.startswith(hostname))
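        # Rough shape of the token (values elided; illustrative only). It is
        # used directly as the database password, hence no scheme:
        #
        #     host.us-east-1.rds.amazonaws.com:3306/?Action=connect
        #         &DBUser=mySQLUser&X-Amz-Algorithm=AWS4-HMAC-SHA256&...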
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import uuid
_ORIGINAL = os.environ.copy()
# These are environment variables that allow users to control
# the location of config files used by botocore.
_CONFIG_FILE_ENV_VARS = [
'AWS_CONFIG_FILE',
'AWS_SHARED_CREDENTIALS_FILE',
'BOTO_CONFIG',
]
_CREDENTIAL_ENV_VARS = [
'AWS_ACCESS_KEY_ID',
'AWS_SECRET_ACCESS_KEY',
'AWS_SESSION_TOKEN',
]
def setup_package():
# We're using a random uuid to ensure we're pointing
# AWS_CONFIG_FILE and other env vars at a filename that
# does not exist.
random_file = str(uuid.uuid4())
for varname in _CONFIG_FILE_ENV_VARS:
# The reason we're doing this is to ensure we don't automatically pick
# up any credentials a developer might have configured on their local
# machine. Travis will not have any credentials available, so without
# this fixture setup, it's possible to have all the tests pass on your
# local machine (if you have credentials configured) but still fail on
# travis.
os.environ[varname] = random_file
for credvar in _CREDENTIAL_ENV_VARS:
os.environ.pop(credvar, None)
def teardown_package():
os.environ = _ORIGINAL
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.stub import Stubber
from tests import BaseSessionTest
class TestMturk(BaseSessionTest):
def setUp(self):
super(TestMturk, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'mturk', self.region)
self.stubber = Stubber(self.client)
self.stubber.activate()
def tearDown(self):
self.stubber.deactivate()
def test_list_hits_aliased(self):
self.stubber.add_response('list_hits_for_qualification_type', {})
self.stubber.add_response('list_hits_for_qualification_type', {})
params = {'QualificationTypeId': 'foo'}
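        # 'ListHITsForQualificationType' snake-cases to the awkward
        # 'list_hi_ts_for_qualification_type'; botocore also exposes the
        # readable alias, and each call below consumes one stubbed response.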
self.client.list_hi_ts_for_qualification_type(**params)
self.client.list_hits_for_qualification_type(**params)
self.stubber.assert_no_pending_responses()
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import create_session
import mock
from nose.tools import assert_equal, assert_raises
from botocore.client import ClientEndpointBridge
from botocore.exceptions import NoRegionError
# NOTE: sqs endpoint updated to be the CN in the SSL cert because
# a bug in python2.6 prevents subjectAltNames from being parsed
# and subsequently being used in cert validation.
# Same thing is needed for rds.
KNOWN_REGIONS = {
'ap-northeast-1': {
'apigateway': 'apigateway.ap-northeast-1.amazonaws.com',
'appstream': 'appstream.ap-northeast-1.amazonaws.com',
'autoscaling': 'autoscaling.ap-northeast-1.amazonaws.com',
'cloudformation': 'cloudformation.ap-northeast-1.amazonaws.com',
'cloudhsm': 'cloudhsm.ap-northeast-1.amazonaws.com',
'cloudsearch': 'cloudsearch.ap-northeast-1.amazonaws.com',
'cloudtrail': 'cloudtrail.ap-northeast-1.amazonaws.com',
'codedeploy': 'codedeploy.ap-northeast-1.amazonaws.com',
'cognito-identity': 'cognito-identity.ap-northeast-1.amazonaws.com',
'cognito-sync': 'cognito-sync.ap-northeast-1.amazonaws.com',
'config': 'config.ap-northeast-1.amazonaws.com',
'datapipeline': 'datapipeline.ap-northeast-1.amazonaws.com',
'directconnect': 'directconnect.ap-northeast-1.amazonaws.com',
'ds': 'ds.ap-northeast-1.amazonaws.com',
'dynamodb': 'dynamodb.ap-northeast-1.amazonaws.com',
'ec2': 'ec2.ap-northeast-1.amazonaws.com',
'ecs': 'ecs.ap-northeast-1.amazonaws.com',
'elasticache': 'elasticache.ap-northeast-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
'elasticmapreduce': 'ap-northeast-1.elasticmapreduce.amazonaws.com',
'elastictranscoder': 'elastictranscoder.ap-northeast-1.amazonaws.com',
'glacier': 'glacier.ap-northeast-1.amazonaws.com',
'iot': 'iot.ap-northeast-1.amazonaws.com',
'kinesis': 'kinesis.ap-northeast-1.amazonaws.com',
'kms': 'kms.ap-northeast-1.amazonaws.com',
'lambda': 'lambda.ap-northeast-1.amazonaws.com',
'logs': 'logs.ap-northeast-1.amazonaws.com',
'monitoring': 'monitoring.ap-northeast-1.amazonaws.com',
'rds': 'rds.ap-northeast-1.amazonaws.com',
'redshift': 'redshift.ap-northeast-1.amazonaws.com',
's3': 's3-ap-northeast-1.amazonaws.com',
'sdb': 'sdb.ap-northeast-1.amazonaws.com',
'sns': 'sns.ap-northeast-1.amazonaws.com',
'sqs': 'ap-northeast-1.queue.amazonaws.com',
'storagegateway': 'storagegateway.ap-northeast-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.ap-northeast-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.ap-northeast-1.amazonaws.com',
'workspaces': 'workspaces.ap-northeast-1.amazonaws.com'
},
'ap-southeast-1': {
'autoscaling': 'autoscaling.ap-southeast-1.amazonaws.com',
'cloudformation': 'cloudformation.ap-southeast-1.amazonaws.com',
'cloudhsm': 'cloudhsm.ap-southeast-1.amazonaws.com',
'cloudsearch': 'cloudsearch.ap-southeast-1.amazonaws.com',
'cloudtrail': 'cloudtrail.ap-southeast-1.amazonaws.com',
'config': 'config.ap-southeast-1.amazonaws.com',
'directconnect': 'directconnect.ap-southeast-1.amazonaws.com',
'ds': 'ds.ap-southeast-1.amazonaws.com',
'dynamodb': 'dynamodb.ap-southeast-1.amazonaws.com',
'ec2': 'ec2.ap-southeast-1.amazonaws.com',
'elasticache': 'elasticache.ap-southeast-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.ap-southeast-1.amazonaws.com',
'elasticmapreduce': 'ap-southeast-1.elasticmapreduce.amazonaws.com',
'elastictranscoder': 'elastictranscoder.ap-southeast-1.amazonaws.com',
'kinesis': 'kinesis.ap-southeast-1.amazonaws.com',
'kms': 'kms.ap-southeast-1.amazonaws.com',
'logs': 'logs.ap-southeast-1.amazonaws.com',
'monitoring': 'monitoring.ap-southeast-1.amazonaws.com',
'rds': 'rds.ap-southeast-1.amazonaws.com',
'redshift': 'redshift.ap-southeast-1.amazonaws.com',
's3': 's3-ap-southeast-1.amazonaws.com',
'sdb': 'sdb.ap-southeast-1.amazonaws.com',
'sns': 'sns.ap-southeast-1.amazonaws.com',
'sqs': 'ap-southeast-1.queue.amazonaws.com',
'storagegateway': 'storagegateway.ap-southeast-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.ap-southeast-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.ap-southeast-1.amazonaws.com',
'workspaces': 'workspaces.ap-southeast-1.amazonaws.com'
},
'ap-southeast-2': {
'autoscaling': 'autoscaling.ap-southeast-2.amazonaws.com',
'cloudformation': 'cloudformation.ap-southeast-2.amazonaws.com',
'cloudhsm': 'cloudhsm.ap-southeast-2.amazonaws.com',
'cloudsearch': 'cloudsearch.ap-southeast-2.amazonaws.com',
'cloudtrail': 'cloudtrail.ap-southeast-2.amazonaws.com',
'codedeploy': 'codedeploy.ap-southeast-2.amazonaws.com',
'config': 'config.ap-southeast-2.amazonaws.com',
'datapipeline': 'datapipeline.ap-southeast-2.amazonaws.com',
'directconnect': 'directconnect.ap-southeast-2.amazonaws.com',
'ds': 'ds.ap-southeast-2.amazonaws.com',
'dynamodb': 'dynamodb.ap-southeast-2.amazonaws.com',
'ec2': 'ec2.ap-southeast-2.amazonaws.com',
'ecs': 'ecs.ap-southeast-2.amazonaws.com',
'elasticache': 'elasticache.ap-southeast-2.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.ap-southeast-2.amazonaws.com',
'elasticmapreduce': 'ap-southeast-2.elasticmapreduce.amazonaws.com',
'glacier': 'glacier.ap-southeast-2.amazonaws.com',
'kinesis': 'kinesis.ap-southeast-2.amazonaws.com',
'kms': 'kms.ap-southeast-2.amazonaws.com',
'logs': 'logs.ap-southeast-2.amazonaws.com',
'monitoring': 'monitoring.ap-southeast-2.amazonaws.com',
'rds': 'rds.ap-southeast-2.amazonaws.com',
'redshift': 'redshift.ap-southeast-2.amazonaws.com',
's3': 's3-ap-southeast-2.amazonaws.com',
'sdb': 'sdb.ap-southeast-2.amazonaws.com',
'sns': 'sns.ap-southeast-2.amazonaws.com',
'sqs': 'ap-southeast-2.queue.amazonaws.com',
'storagegateway': 'storagegateway.ap-southeast-2.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.ap-southeast-2.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.ap-southeast-2.amazonaws.com',
'workspaces': 'workspaces.ap-southeast-2.amazonaws.com'
},
'aws-us-gov-global': {
'iam': 'iam.us-gov.amazonaws.com'
},
'cn-north-1': {
'autoscaling': 'autoscaling.cn-north-1.amazonaws.com.cn',
'cloudformation': 'cloudformation.cn-north-1.amazonaws.com.cn',
'cloudtrail': 'cloudtrail.cn-north-1.amazonaws.com.cn',
'directconnect': 'directconnect.cn-north-1.amazonaws.com.cn',
'dynamodb': 'dynamodb.cn-north-1.amazonaws.com.cn',
'ec2': 'ec2.cn-north-1.amazonaws.com.cn',
'elasticache': 'elasticache.cn-north-1.amazonaws.com.cn',
'elasticbeanstalk': 'elasticbeanstalk.cn-north-1.amazonaws.com.cn',
'elasticloadbalancing': 'elasticloadbalancing.cn-north-1.amazonaws.com.cn',
'elasticmapreduce': 'elasticmapreduce.cn-north-1.amazonaws.com.cn',
'glacier': 'glacier.cn-north-1.amazonaws.com.cn',
'iam': 'iam.cn-north-1.amazonaws.com.cn',
'kinesis': 'kinesis.cn-north-1.amazonaws.com.cn',
'monitoring': 'monitoring.cn-north-1.amazonaws.com.cn',
'rds': 'rds.cn-north-1.amazonaws.com.cn',
's3': 's3.cn-north-1.amazonaws.com.cn',
'sns': 'sns.cn-north-1.amazonaws.com.cn',
'sqs': 'cn-north-1.queue.amazonaws.com.cn',
'storagegateway': 'storagegateway.cn-north-1.amazonaws.com.cn',
'streams.dynamodb': 'streams.dynamodb.cn-north-1.amazonaws.com.cn',
'sts': 'sts.cn-north-1.amazonaws.com.cn',
'swf': 'swf.cn-north-1.amazonaws.com.cn'
},
'eu-central-1': {
'autoscaling': 'autoscaling.eu-central-1.amazonaws.com',
'cloudformation': 'cloudformation.eu-central-1.amazonaws.com',
'cloudhsm': 'cloudhsm.eu-central-1.amazonaws.com',
'cloudsearch': 'cloudsearch.eu-central-1.amazonaws.com',
'cloudtrail': 'cloudtrail.eu-central-1.amazonaws.com',
'codedeploy': 'codedeploy.eu-central-1.amazonaws.com',
'config': 'config.eu-central-1.amazonaws.com',
'directconnect': 'directconnect.eu-central-1.amazonaws.com',
'dynamodb': 'dynamodb.eu-central-1.amazonaws.com',
'ec2': 'ec2.eu-central-1.amazonaws.com',
'elasticache': 'elasticache.eu-central-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.eu-central-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.eu-central-1.amazonaws.com',
'elasticmapreduce': 'elasticmapreduce.eu-central-1.amazonaws.com',
'glacier': 'glacier.eu-central-1.amazonaws.com',
'kinesis': 'kinesis.eu-central-1.amazonaws.com',
'kms': 'kms.eu-central-1.amazonaws.com',
'logs': 'logs.eu-central-1.amazonaws.com',
'monitoring': 'monitoring.eu-central-1.amazonaws.com',
'rds': 'rds.eu-central-1.amazonaws.com',
'redshift': 'redshift.eu-central-1.amazonaws.com',
's3': 's3.eu-central-1.amazonaws.com',
'sns': 'sns.eu-central-1.amazonaws.com',
'sqs': 'eu-central-1.queue.amazonaws.com',
'storagegateway': 'storagegateway.eu-central-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.eu-central-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.eu-central-1.amazonaws.com'
},
'eu-west-1': {
'apigateway': 'apigateway.eu-west-1.amazonaws.com',
'autoscaling': 'autoscaling.eu-west-1.amazonaws.com',
'cloudformation': 'cloudformation.eu-west-1.amazonaws.com',
'cloudhsm': 'cloudhsm.eu-west-1.amazonaws.com',
'cloudsearch': 'cloudsearch.eu-west-1.amazonaws.com',
'cloudtrail': 'cloudtrail.eu-west-1.amazonaws.com',
'codedeploy': 'codedeploy.eu-west-1.amazonaws.com',
'cognito-identity': 'cognito-identity.eu-west-1.amazonaws.com',
'cognito-sync': 'cognito-sync.eu-west-1.amazonaws.com',
'config': 'config.eu-west-1.amazonaws.com',
'datapipeline': 'datapipeline.eu-west-1.amazonaws.com',
'directconnect': 'directconnect.eu-west-1.amazonaws.com',
'ds': 'ds.eu-west-1.amazonaws.com',
'dynamodb': 'dynamodb.eu-west-1.amazonaws.com',
'ec2': 'ec2.eu-west-1.amazonaws.com',
'ecs': 'ecs.eu-west-1.amazonaws.com',
'elasticache': 'elasticache.eu-west-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.eu-west-1.amazonaws.com',
'elasticmapreduce': 'eu-west-1.elasticmapreduce.amazonaws.com',
'elastictranscoder': 'elastictranscoder.eu-west-1.amazonaws.com',
'email': 'email.eu-west-1.amazonaws.com',
'glacier': 'glacier.eu-west-1.amazonaws.com',
'iot': 'iot.eu-west-1.amazonaws.com',
'kinesis': 'kinesis.eu-west-1.amazonaws.com',
'kms': 'kms.eu-west-1.amazonaws.com',
'lambda': 'lambda.eu-west-1.amazonaws.com',
'logs': 'logs.eu-west-1.amazonaws.com',
'machinelearning': 'machinelearning.eu-west-1.amazonaws.com',
'monitoring': 'monitoring.eu-west-1.amazonaws.com',
'rds': 'rds.eu-west-1.amazonaws.com',
'redshift': 'redshift.eu-west-1.amazonaws.com',
's3': 's3-eu-west-1.amazonaws.com',
'sdb': 'sdb.eu-west-1.amazonaws.com',
'sns': 'sns.eu-west-1.amazonaws.com',
'sqs': 'eu-west-1.queue.amazonaws.com',
'ssm': 'ssm.eu-west-1.amazonaws.com',
'storagegateway': 'storagegateway.eu-west-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.eu-west-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.eu-west-1.amazonaws.com',
'workspaces': 'workspaces.eu-west-1.amazonaws.com'
},
'fips-us-gov-west-1': {
's3': 's3-fips-us-gov-west-1.amazonaws.com'
},
'local': {
'dynamodb': 'localhost:8000'
},
's3-external-1': {
's3': 's3-external-1.amazonaws.com'
},
'sa-east-1': {
'autoscaling': 'autoscaling.sa-east-1.amazonaws.com',
'cloudformation': 'cloudformation.sa-east-1.amazonaws.com',
'cloudsearch': 'cloudsearch.sa-east-1.amazonaws.com',
'cloudtrail': 'cloudtrail.sa-east-1.amazonaws.com',
'config': 'config.sa-east-1.amazonaws.com',
'directconnect': 'directconnect.sa-east-1.amazonaws.com',
'dynamodb': 'dynamodb.sa-east-1.amazonaws.com',
'ec2': 'ec2.sa-east-1.amazonaws.com',
'elasticache': 'elasticache.sa-east-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.sa-east-1.amazonaws.com',
'elasticmapreduce': 'sa-east-1.elasticmapreduce.amazonaws.com',
'kms': 'kms.sa-east-1.amazonaws.com',
'monitoring': 'monitoring.sa-east-1.amazonaws.com',
'rds': 'rds.sa-east-1.amazonaws.com',
's3': 's3-sa-east-1.amazonaws.com',
'sdb': 'sdb.sa-east-1.amazonaws.com',
'sns': 'sns.sa-east-1.amazonaws.com',
'sqs': 'sa-east-1.queue.amazonaws.com',
'storagegateway': 'storagegateway.sa-east-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.sa-east-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.sa-east-1.amazonaws.com'
},
'us-east-1': {
'apigateway': 'apigateway.us-east-1.amazonaws.com',
'appstream': 'appstream.us-east-1.amazonaws.com',
'autoscaling': 'autoscaling.us-east-1.amazonaws.com',
'cloudformation': 'cloudformation.us-east-1.amazonaws.com',
'cloudfront': 'cloudfront.amazonaws.com',
'cloudhsm': 'cloudhsm.us-east-1.amazonaws.com',
'cloudsearch': 'cloudsearch.us-east-1.amazonaws.com',
'cloudtrail': 'cloudtrail.us-east-1.amazonaws.com',
'codecommit': 'codecommit.us-east-1.amazonaws.com',
'codedeploy': 'codedeploy.us-east-1.amazonaws.com',
'codepipeline': 'codepipeline.us-east-1.amazonaws.com',
'cognito-identity': 'cognito-identity.us-east-1.amazonaws.com',
'cognito-sync': 'cognito-sync.us-east-1.amazonaws.com',
'config': 'config.us-east-1.amazonaws.com',
'datapipeline': 'datapipeline.us-east-1.amazonaws.com',
'directconnect': 'directconnect.us-east-1.amazonaws.com',
'ds': 'ds.us-east-1.amazonaws.com',
'dynamodb': 'dynamodb.us-east-1.amazonaws.com',
'ec2': 'ec2.us-east-1.amazonaws.com',
'ecs': 'ecs.us-east-1.amazonaws.com',
'elasticache': 'elasticache.us-east-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.us-east-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.us-east-1.amazonaws.com',
'elasticmapreduce': 'elasticmapreduce.us-east-1.amazonaws.com',
'elastictranscoder': 'elastictranscoder.us-east-1.amazonaws.com',
'email': 'email.us-east-1.amazonaws.com',
'glacier': 'glacier.us-east-1.amazonaws.com',
'iam': 'iam.amazonaws.com',
'importexport': 'importexport.amazonaws.com',
'iot': 'iot.us-east-1.amazonaws.com',
'kinesis': 'kinesis.us-east-1.amazonaws.com',
'kms': 'kms.us-east-1.amazonaws.com',
'lambda': 'lambda.us-east-1.amazonaws.com',
'logs': 'logs.us-east-1.amazonaws.com',
'machinelearning': 'machinelearning.us-east-1.amazonaws.com',
'mobileanalytics': 'mobileanalytics.us-east-1.amazonaws.com',
'monitoring': 'monitoring.us-east-1.amazonaws.com',
'opsworks': 'opsworks.us-east-1.amazonaws.com',
'rds': 'rds.amazonaws.com',
'redshift': 'redshift.us-east-1.amazonaws.com',
'route53': 'route53.amazonaws.com',
'route53domains': 'route53domains.us-east-1.amazonaws.com',
's3': 's3.amazonaws.com',
'sdb': 'sdb.amazonaws.com',
'sns': 'sns.us-east-1.amazonaws.com',
'sqs': 'queue.amazonaws.com',
'ssm': 'ssm.us-east-1.amazonaws.com',
'storagegateway': 'storagegateway.us-east-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.us-east-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'support': 'support.us-east-1.amazonaws.com',
'swf': 'swf.us-east-1.amazonaws.com',
'workspaces': 'workspaces.us-east-1.amazonaws.com',
'waf': 'waf.amazonaws.com'
},
'us-gov-west-1': {
'autoscaling': 'autoscaling.us-gov-west-1.amazonaws.com',
'cloudformation': 'cloudformation.us-gov-west-1.amazonaws.com',
'cloudhsm': 'cloudhsm.us-gov-west-1.amazonaws.com',
'cloudtrail': 'cloudtrail.us-gov-west-1.amazonaws.com',
'dynamodb': 'dynamodb.us-gov-west-1.amazonaws.com',
'ec2': 'ec2.us-gov-west-1.amazonaws.com',
'elasticache': 'elasticache.us-gov-west-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.us-gov-west-1.amazonaws.com',
'elasticmapreduce': 'elasticmapreduce.us-gov-west-1.amazonaws.com',
'glacier': 'glacier.us-gov-west-1.amazonaws.com',
'iam': 'iam.us-gov.amazonaws.com',
'kms': 'kms.us-gov-west-1.amazonaws.com',
'monitoring': 'monitoring.us-gov-west-1.amazonaws.com',
'rds': 'rds.us-gov-west-1.amazonaws.com',
'redshift': 'redshift.us-gov-west-1.amazonaws.com',
's3': 's3-us-gov-west-1.amazonaws.com',
'sns': 'sns.us-gov-west-1.amazonaws.com',
'sqs': 'us-gov-west-1.queue.amazonaws.com',
'sts': 'sts.us-gov-west-1.amazonaws.com',
'swf': 'swf.us-gov-west-1.amazonaws.com'
},
'us-west-1': {
'autoscaling': 'autoscaling.us-west-1.amazonaws.com',
'cloudformation': 'cloudformation.us-west-1.amazonaws.com',
'cloudsearch': 'cloudsearch.us-west-1.amazonaws.com',
'cloudtrail': 'cloudtrail.us-west-1.amazonaws.com',
'config': 'config.us-west-1.amazonaws.com',
'directconnect': 'directconnect.us-west-1.amazonaws.com',
'dynamodb': 'dynamodb.us-west-1.amazonaws.com',
'ec2': 'ec2.us-west-1.amazonaws.com',
'ecs': 'ecs.us-west-1.amazonaws.com',
'elasticache': 'elasticache.us-west-1.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.us-west-1.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.us-west-1.amazonaws.com',
'elasticmapreduce': 'us-west-1.elasticmapreduce.amazonaws.com',
'elastictranscoder': 'elastictranscoder.us-west-1.amazonaws.com',
'glacier': 'glacier.us-west-1.amazonaws.com',
'kinesis': 'kinesis.us-west-1.amazonaws.com',
'kms': 'kms.us-west-1.amazonaws.com',
'logs': 'logs.us-west-1.amazonaws.com',
'monitoring': 'monitoring.us-west-1.amazonaws.com',
'rds': 'rds.us-west-1.amazonaws.com',
's3': 's3-us-west-1.amazonaws.com',
'sdb': 'sdb.us-west-1.amazonaws.com',
'sns': 'sns.us-west-1.amazonaws.com',
'sqs': 'us-west-1.queue.amazonaws.com',
'storagegateway': 'storagegateway.us-west-1.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.us-west-1.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.us-west-1.amazonaws.com'
},
'us-west-2': {
'apigateway': 'apigateway.us-west-2.amazonaws.com',
'autoscaling': 'autoscaling.us-west-2.amazonaws.com',
'cloudformation': 'cloudformation.us-west-2.amazonaws.com',
'cloudhsm': 'cloudhsm.us-west-2.amazonaws.com',
'cloudsearch': 'cloudsearch.us-west-2.amazonaws.com',
'cloudtrail': 'cloudtrail.us-west-2.amazonaws.com',
'codedeploy': 'codedeploy.us-west-2.amazonaws.com',
'codepipeline': 'codepipeline.us-west-2.amazonaws.com',
'config': 'config.us-west-2.amazonaws.com',
'datapipeline': 'datapipeline.us-west-2.amazonaws.com',
'devicefarm': 'devicefarm.us-west-2.amazonaws.com',
'directconnect': 'directconnect.us-west-2.amazonaws.com',
'ds': 'ds.us-west-2.amazonaws.com',
'dynamodb': 'dynamodb.us-west-2.amazonaws.com',
'ec2': 'ec2.us-west-2.amazonaws.com',
'ecs': 'ecs.us-west-2.amazonaws.com',
'elasticache': 'elasticache.us-west-2.amazonaws.com',
'elasticbeanstalk': 'elasticbeanstalk.us-west-2.amazonaws.com',
'elasticfilesystem': 'elasticfilesystem.us-west-2.amazonaws.com',
'elasticloadbalancing': 'elasticloadbalancing.us-west-2.amazonaws.com',
'elasticmapreduce': 'us-west-2.elasticmapreduce.amazonaws.com',
'elastictranscoder': 'elastictranscoder.us-west-2.amazonaws.com',
'email': 'email.us-west-2.amazonaws.com',
'glacier': 'glacier.us-west-2.amazonaws.com',
'iot': 'iot.us-west-2.amazonaws.com',
'kinesis': 'kinesis.us-west-2.amazonaws.com',
'kms': 'kms.us-west-2.amazonaws.com',
'lambda': 'lambda.us-west-2.amazonaws.com',
'logs': 'logs.us-west-2.amazonaws.com',
'monitoring': 'monitoring.us-west-2.amazonaws.com',
'rds': 'rds.us-west-2.amazonaws.com',
'redshift': 'redshift.us-west-2.amazonaws.com',
's3': 's3-us-west-2.amazonaws.com',
'sdb': 'sdb.us-west-2.amazonaws.com',
'sns': 'sns.us-west-2.amazonaws.com',
'sqs': 'us-west-2.queue.amazonaws.com',
'ssm': 'ssm.us-west-2.amazonaws.com',
'storagegateway': 'storagegateway.us-west-2.amazonaws.com',
'streams.dynamodb': 'streams.dynamodb.us-west-2.amazonaws.com',
'sts': 'sts.amazonaws.com',
'swf': 'swf.us-west-2.amazonaws.com',
'workspaces': 'workspaces.us-west-2.amazonaws.com'
}
}
# Lists the services in the aws partition that do not require a region
# when resolving an endpoint because these services have partition-wide
# endpoints.
KNOWN_AWS_PARTITION_WIDE = {
'importexport': 'https://importexport.amazonaws.com',
'cloudfront': 'https://cloudfront.amazonaws.com',
'waf': 'https://waf.amazonaws.com',
'route53': 'https://route53.amazonaws.com',
's3': 'https://s3.amazonaws.com',
'sts': 'https://sts.amazonaws.com',
'iam': 'https://iam.amazonaws.com'
}
def _get_patched_session():
with mock.patch('os.environ') as environ:
environ['AWS_ACCESS_KEY_ID'] = 'access_key'
environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
session = create_session()
return session
def test_known_endpoints():
# Verify the actual values from the partition files. While
# TestEndpointHeuristics verified the generic functionality given any
# endpoints file, this test actually verifies the partition data against a
# fixed list of known endpoints. This list doesn't need to be kept 100% up
# to date, but serves as a basis for regressions as the endpoint data
# logic evolves.
resolver = _get_patched_session().get_component('endpoint_resolver')
for region_name, service_dict in KNOWN_REGIONS.items():
for service_name, endpoint in service_dict.items():
yield (_test_single_service_region, service_name,
region_name, endpoint, resolver)
def _test_single_service_region(service_name, region_name,
expected_endpoint, resolver):
bridge = ClientEndpointBridge(resolver, None, None)
result = bridge.resolve(service_name, region_name)
expected = 'https://%s' % expected_endpoint
assert_equal(result['endpoint_url'], expected)
# Ensure that all S3 regions use s3v4 instead of v4
def test_all_s3_endpoints_have_s3v4():
session = _get_patched_session()
partitions = session.get_available_partitions()
resolver = session.get_component('endpoint_resolver')
for partition_name in partitions:
        for region_name in session.get_available_regions('s3', partition_name):
            resolved = resolver.construct_endpoint('s3', region_name)
assert 's3v4' in resolved['signatureVersions']
assert 'v4' not in resolved['signatureVersions']
def test_known_partition_wide_endpoints():
    # Note: this must not be named test_known_endpoints, since redefining
    # that name would shadow the generator above and nose would silently
    # skip it.
resolver = _get_patched_session().get_component('endpoint_resolver')
for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
yield (_test_single_service_partition_endpoint, service_name,
endpoint, resolver)
def _test_single_service_partition_endpoint(service_name, expected_endpoint,
resolver):
bridge = ClientEndpointBridge(resolver)
result = bridge.resolve(service_name)
assert_equal(result['endpoint_url'], expected_endpoint)
def test_non_partition_endpoint_requires_region():
resolver = _get_patched_session().get_component('endpoint_resolver')
assert_raises(NoRegionError, resolver.construct_endpoint, 'ec2')
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import threading
import os
import math
import time
import mock
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from botocore.vendored import requests
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
from tests import unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import temporary_file
from botocore.credentials import EnvProvider, ContainerProvider
from botocore.credentials import InstanceMetadataProvider
from botocore.credentials import Credentials, ReadOnlyCredentials
from botocore.credentials import AssumeRoleProvider
from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
class TestCredentialRefreshRaces(unittest.TestCase):
def assert_consistent_credentials_seen(self, creds, func):
collected = []
threads = []
for _ in range(20):
threads.append(threading.Thread(target=func, args=(collected,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
        for frozen_creds in collected:
            # During testing, the refresher uses its current
# refresh count as the values for the access, secret, and
# token value. This means that at any given point in time,
# the credentials should be something like:
#
# ReadOnlyCredentials('1', '1', '1')
# ReadOnlyCredentials('2', '2', '2')
# ...
# ReadOnlyCredentials('30', '30', '30')
#
# This makes it really easy to verify we see a consistent
# set of credentials from the same time period. We just
# check if all the credential values are the same. If
# we ever see something like:
#
# ReadOnlyCredentials('1', '2', '1')
#
# We fail. This is because we're using the access_key
# from the first refresh ('1'), the secret key from
# the second refresh ('2'), and the token from the
# first refresh ('1').
            self.assertTrue(
                frozen_creds[0] == frozen_creds[1] == frozen_creds[2],
                frozen_creds)
def test_has_no_race_conditions(self):
creds = IntegerRefresher(
creds_last_for=2,
advisory_refresh=1,
mandatory_refresh=0
)
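        # IntegerRefresher times are in seconds: these credentials last for
        # 2 seconds, with a refresh advised 1 second before expiry and
        # mandatory only at expiry. The short lifetime maximizes the chance
        # that racing threads would observe mixed credentials if a race
        # existed.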
def _run_in_thread(collected):
for _ in range(4000):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
start = time.time()
self.assert_consistent_credentials_seen(creds, _run_in_thread)
end = time.time()
# creds_last_for = 2 seconds (from above)
        # So, for example, if execution took 6.1 seconds, we should see
        # at most ceil(6.1 / 2.0) + 1 = 5 refreshes.
max_calls_allowed = math.ceil((end - start) / 2.0) + 1
self.assertTrue(creds.refresh_counter <= max_calls_allowed,
"Too many cred refreshes, max: %s, actual: %s, "
"time_delta: %.4f" % (max_calls_allowed,
creds.refresh_counter,
(end - start)))
def test_no_race_for_immediate_advisory_expiration(self):
creds = IntegerRefresher(
creds_last_for=1,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(100):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
self.assert_consistent_credentials_seen(creds, _run_in_thread)
class TestAssumeRole(BaseEnvVar):
def setUp(self):
super(TestAssumeRole, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.config_file = os.path.join(self.tempdir, 'config')
self.environ['AWS_CONFIG_FILE'] = self.config_file
self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
self.env_provider = self.mock_provider(EnvProvider)
self.container_provider = self.mock_provider(ContainerProvider)
def mock_provider(self, provider_cls):
mock_instance = mock.Mock(spec=provider_cls)
mock_instance.load.return_value = None
mock_instance.METHOD = provider_cls.METHOD
mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME
return mock_instance
def tearDown(self):
shutil.rmtree(self.tempdir)
def create_session(self, profile=None):
session = Session(profile=profile)
        # We have to set bogus credentials here; otherwise we'd trigger
        # an early credential chain resolution.
sts = session.create_client(
'sts',
aws_access_key_id='spam',
aws_secret_access_key='eggs',
)
stubber = Stubber(sts)
stubber.activate()
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=lambda *args, **kwargs: sts,
cache={},
profile_name=profile,
credential_sourcer=CanonicalNameCredentialSourcer([
self.env_provider, self.container_provider,
self.metadata_provider
])
)
component_name = 'credential_provider'
resolver = session.get_component(component_name)
available_methods = [p.METHOD for p in resolver.providers]
replacements = {
'env': self.env_provider,
'iam-role': self.metadata_provider,
'container-role': self.container_provider,
'assume-role': assume_role_provider
}
for name, provider in replacements.items():
try:
index = available_methods.index(name)
except ValueError:
# The provider isn't in the session
continue
resolver.providers[index] = provider
session.register_component(
'credential_provider', resolver
)
return session, stubber
def create_assume_role_response(self, credentials, expiration=None):
if expiration is None:
expiration = self.some_future_time()
response = {
'Credentials': {
'AccessKeyId': credentials.access_key,
'SecretAccessKey': credentials.secret_key,
'SessionToken': credentials.token,
'Expiration': expiration
},
'AssumedRoleUser': {
'AssumedRoleId': 'myroleid',
'Arn': 'arn:aws:iam::1234567890:user/myuser'
}
}
return response
def create_random_credentials(self):
return Credentials(
'fake-%s' % random_chars(15),
'fake-%s' % random_chars(35),
'fake-%s' % random_chars(45)
)
def some_future_time(self):
timeobj = datetime.now(tzlocal())
return timeobj + timedelta(hours=24)
def write_config(self, config):
with open(self.config_file, 'w') as f:
f.write(config)
def assert_creds_equal(self, c1, c2):
c1_frozen = c1
if not isinstance(c1_frozen, ReadOnlyCredentials):
c1_frozen = c1.get_frozen_credentials()
c2_frozen = c2
if not isinstance(c2_frozen, ReadOnlyCredentials):
c2_frozen = c2.get_frozen_credentials()
self.assertEqual(c1_frozen, c2_frozen)
def test_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def test_environment_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Environment\n'
)
self.write_config(config)
environment_creds = self.create_random_credentials()
self.env_provider.load.return_value = environment_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.env_provider.load.call_count, 1)
def test_instance_metadata_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Ec2InstanceMetadata\n'
)
self.write_config(config)
metadata_creds = self.create_random_credentials()
self.metadata_provider.load.return_value = metadata_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.metadata_provider.load.call_count, 1)
def test_container_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = EcsContainer\n'
)
self.write_config(config)
container_creds = self.create_random_credentials()
self.container_provider.load.return_value = container_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.container_provider.load.call_count, 1)
def test_invalid_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = CustomInvalidProvider\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_misconfigured_source_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'credential_process = command\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_b_creds = self.create_random_credentials()
profile_b_response = self.create_assume_role_response(profile_b_creds)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_b_response)
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_recursive_assume_role_stops_at_static_creds(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_infinitely_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
)
self.write_config(config)
with self.assertRaises(InfiniteLoopConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_self_referential_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
class TestProcessProvider(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_process = os.path.join(
current_dir, 'utils', 'credentialprocess.py'
)
self.credential_process = '%s %s' % (
sys.executable, credential_process
)
self.environ = os.environ.copy()
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
def tearDown(self):
self.environ_patch.stop()
def test_credential_process(self):
config = (
'[profile processcreds]\n'
'credential_process = %s\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
credentials = Session(profile='processcreds').get_credentials()
self.assertEqual(credentials.access_key, 'spam')
self.assertEqual(credentials.secret_key, 'eggs')
def test_credential_process_returns_error(self):
config = (
'[profile processcreds]\n'
'credential_process = %s --raise-error\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
session = Session(profile='processcreds')
# This regex validates that there is no substring: b'
# We want to validate that because it proves that stderr was
# actually decoded, so that in exceptional cases the error is
# properly formatted.
# As for how the regex works:
# `(?!b').` is a negative lookahead, meaning that it will only
# match if it is not followed by the pattern `b'`. Since it is
# followed by a `.` it will match any character not followed by
# that pattern. `((?!b').)*` does that zero or more times. The
# final pattern adds `^` and `$` to anchor the beginning and end
# of the string so we know the whole string is consumed.
# Finally `(?s)` at the beginning makes dots match newlines so
# we can handle a multi-line string.
reg = r"(?s)^((?!b').)*$"
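# As a quick illustration (hypothetical strings, not used by the
# test): "Error: command failed" matches the regex, while
# "Error: b'command failed'" does not because of the b' substring.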
with self.assertRaisesRegexp(CredentialRetrievalError, reg):
session.get_credentials()
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
from botocore import loaders
from tests import unittest, temporary_file
class TestLoaderAllowsDataPathOverride(unittest.TestCase):
def create_file(self, f, contents, name):
f.write(contents)
f.flush()
dirname = os.path.dirname(os.path.abspath(f.name))
override_name = os.path.join(dirname, name)
shutil.copy(f.name, override_name)
return override_name
def test_can_override_session(self):
with temporary_file('w') as f:
# We're going to override _retry.json in
# botocore/data by setting our own data directory.
override_name = self.create_file(
f, contents='{"foo": "bar"}', name='_retry.json')
new_data_path = os.path.dirname(override_name)
loader = loaders.create_loader(search_path_string=new_data_path)
new_content = loader.load_data('_retry')
# This should contain the content we just created.
self.assertEqual(new_content, {"foo": "bar"})
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
def _test_model_is_not_lost(service_name, type_name,
previous_version, latest_version):
# Make sure that if a paginator and/or waiter exists in the previous
# version, a successor also exists in the latest version.
loader = Loader()
try:
previous = loader.load_service_model(
service_name, type_name, previous_version)
except DataNotFoundError:
pass
else:
try:
latest = loader.load_service_model(
service_name, type_name, latest_version)
except DataNotFoundError as e:
raise AssertionError(
"%s must exist for %s: %s" % (type_name, service_name, e))
def test_paginators_and_waiters_are_not_lost_in_new_version():
for service_name in Session().get_available_services():
versions = Loader().list_api_versions(service_name, 'service-2')
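# list_api_versions returns the version strings in sorted order,
# so versions[-2] and versions[-1] are the two most recent APIs.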
if len(versions) > 1:
for type_name in ['paginators-1', 'waiters-2']:
yield (_test_model_is_not_lost, service_name,
type_name, versions[-2], versions[-1])
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from botocore.stub import Stubber
class TestRoute53Pagination(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('route53', 'us-west-2')
self.stubber = Stubber(self.client)
# response has required fields
self.response = {
'HostedZones': [],
'Marker': '',
'IsTruncated': True,
'MaxItems': '1'
}
self.operation_name = 'list_hosted_zones'
def test_paginate_with_max_items_int(self):
# Route53 has a string type for MaxItems. We need to ensure that this
# still works with integers as the cli auto converts the page size
# argument to an integer.
self.stubber.add_response(self.operation_name, self.response)
paginator = self.client.get_paginator('list_hosted_zones')
with self.stubber:
config = {'PageSize': 1}
results = list(paginator.paginate(PaginationConfig=config))
self.assertTrue(len(results) >= 0)
def test_paginate_with_max_items_str(self):
# Route53 has a string type for MaxItems. We need to ensure that this
# still works with strings as that's the expected type for this key.
self.stubber.add_response(self.operation_name, self.response)
paginator = self.client.get_paginator('list_hosted_zones')
with self.stubber:
config = {'PageSize': '1'}
results = list(paginator.paginate(PaginationConfig=config))
self.assertTrue(len(results) >= 0)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import string
import jmespath
from jmespath.exceptions import JMESPathError
import botocore.session
KNOWN_PAGE_KEYS = set(
['input_token', 'py_input_token', 'output_token', 'result_key',
'limit_key', 'more_results', 'non_aggregate_keys'])
MEMBER_NAME_CHARS = set(string.ascii_letters + string.digits)
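# A representative paginator entry that passes these lint checks might
# look like the following (a hypothetical sketch, not loaded from disk):
#
#     "ListObjects": {
#         "input_token": "Marker",
#         "output_token": "NextMarker",
#         "limit_key": "MaxKeys",
#         "result_key": "Contents"
#     }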
def test_lint_pagination_configs():
session = botocore.session.get_session()
loader = session.get_component('data_loader')
services = loader.list_available_services('paginators-1')
for service_name in services:
service_model = session.get_service_model(service_name)
page_config = loader.load_service_model(service_name,
'paginators-1',
service_model.api_version)
for op_name, single_config in page_config['pagination'].items():
yield (
_lint_single_paginator,
op_name,
single_config,
service_model
)
def _lint_single_paginator(operation_name, page_config,
service_model):
_validate_known_pagination_keys(page_config)
_validate_result_key_exists(page_config)
_validate_referenced_operation_exists(operation_name, service_model)
_validate_operation_has_output(operation_name, service_model)
_validate_input_keys_match(operation_name, page_config, service_model)
_validate_output_keys_match(operation_name, page_config, service_model)
def _validate_known_pagination_keys(page_config):
for key in page_config:
if key not in KNOWN_PAGE_KEYS:
raise AssertionError("Unknown key '%s' in pagination config: %s"
% (key, page_config))
def _validate_result_key_exists(page_config):
if 'result_key' not in page_config:
raise AssertionError("Required key 'result_key' is missing "
"from pagination config: %s" % page_config)
def _validate_referenced_operation_exists(operation_name, service_model):
if operation_name not in service_model.operation_names:
raise AssertionError("Pagination config refers to operation that "
"does not exist: %s" % operation_name)
def _validate_operation_has_output(operation_name, service_model):
op_model = service_model.operation_model(operation_name)
output = op_model.output_shape
if output is None or not output.members:
raise AssertionError("Pagination config refers to operation "
"that does not have any output: %s"
% operation_name)
def _validate_input_keys_match(operation_name, page_config, service_model):
input_tokens = page_config['input_token']
if not isinstance(input_tokens, list):
input_tokens = [input_tokens]
valid_input_names = service_model.operation_model(
operation_name).input_shape.members
for token in input_tokens:
if token not in valid_input_names:
raise AssertionError("input_token '%s' refers to a non existent "
"input member for operation: %s"
% (token, operation_name))
if 'limit_key' in page_config:
limit_key = page_config['limit_key']
if limit_key not in valid_input_names:
raise AssertionError("limit_key '%s' refers to a non existent "
"input member for operation: %s"
% (limit_key, operation_name))
def _validate_output_keys_match(operation_name, page_config, service_model):
# NOTE: The original version of this function from translate.py had logic
# to ensure that the entire set of output_members was accounted for in the
# union of 'result_key', 'output_token', 'more_results', and
# 'non_aggregate_keys'.
# There's enough state drift (especially with non_aggregate_keys) that
# this is no longer a realistic thing to check. Someone would have to
# backport the missing keys to all the paginators.
output_shape = service_model.operation_model(operation_name).output_shape
output_members = output_shape.members
for key_name, output_key in _get_all_page_output_keys(page_config):
if _looks_like_jmespath(output_key):
_validate_jmespath_compiles(output_key)
else:
if output_key not in output_members:
raise AssertionError("Pagination key '%s' refers to an output "
"member that does not exist: %s" % (
key_name, output_key))
def _looks_like_jmespath(expression):
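# For example, 'Contents' contains only member-name characters and is
# treated as a plain member name, while 'Contents[-1].Key' contains
# punctuation and is treated as a JMESPath expression.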
if all(ch in MEMBER_NAME_CHARS for ch in expression):
return False
return True
def _validate_jmespath_compiles(expression):
try:
jmespath.compile(expression)
except JMESPathError as e:
raise AssertionError("Invalid JMESPath expression used "
"in pagination config: %s\nerror: %s"
% (expression, e))
def _get_all_page_output_keys(page_config):
for key in _get_list_value(page_config, 'result_key'):
yield 'result_key', key
for key in _get_list_value(page_config, 'output_token'):
yield 'output_token', key
if 'more_results' in page_config:
yield 'more_results', page_config['more_results']
for key in page_config.get('non_aggregate_keys', []):
yield 'non_aggregate_keys', key
def _get_list_value(page_config, key):
# Some pagination config values can be a scalar value or a list of scalars.
# This function will always return a list of scalar values, converting as
# necessary.
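# For example, _get_list_value({'result_key': 'Items'}, 'result_key')
# returns ['Items']; a value that is already a list comes back as-is.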
value = page_config[key]
if not isinstance(value, list):
value = [value]
return value
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from tests import BaseSessionTest
class TestMachineLearning(BaseSessionTest):
def setUp(self):
super(TestMachineLearning, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'machinelearning', self.region)
def test_predict(self):
with mock.patch('botocore.endpoint.Session.send') as \
http_session_send_patch:
http_response = mock.Mock()
http_response.status_code = 200
http_response.content = b'{}'
http_response.headers = {}
http_session_send_patch.return_value = http_response
custom_endpoint = 'https://myendpoint.amazonaws.com/'
self.client.predict(
MLModelId='ml-foo',
Record={'Foo': 'Bar'},
PredictEndpoint=custom_endpoint
)
sent_request = http_session_send_patch.call_args[0][0]
self.assertEqual(sent_request.url, custom_endpoint)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.session
from botocore.handlers import SERVICE_NAME_ALIASES
def test_can_use_service_alias():
session = botocore.session.get_session()
for (alias, name) in SERVICE_NAME_ALIASES.items():
yield _instantiates_the_same_client, session, name, alias
def _instantiates_the_same_client(session, service_name, service_alias):
client_kwargs = {
'region_name': 'us-east-1',
'aws_access_key_id': 'foo',
'aws_secret_access_key': 'bar',
}
original_client = session.create_client(service_name, **client_kwargs)
aliased_client = session.create_client(service_alias, **client_kwargs)
original_model_name = original_client.meta.service_model.service_name
aliased_model_name = aliased_client.meta.service_model.service_name
assert original_model_name == aliased_model_name
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
class TestCloudFormationDocs(BaseDocsFunctionalTest):
def test_get_template_response_documented_as_dict(self):
content = self.get_docstring_for_method('cloudformation', 'get_template')
# String return type should be gone
self.assert_not_contains_line(
"(*string*) --", content)
# Check for template body returning a dict
self.assert_contains_line(
"(*dict*) --", content)
# Check the specifics of the returned dict
self.assert_contains_line('{}', content)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from tests import BaseSessionTest
class TestCloudsearchdomain(BaseSessionTest):
def setUp(self):
super(TestCloudsearchdomain, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'cloudsearchdomain', self.region)
def test_search(self):
with mock.patch('botocore.endpoint.Session.send') as _send:
_send.return_value = mock.Mock(
status_code=200, headers={}, content=b'{}')
self.client.search(query='foo')
sent_request = _send.call_args[0][0]
self.assertEqual(sent_request.method, 'POST')
self.assertEqual(
sent_request.headers.get('Content-Type'),
b'application/x-www-form-urlencoded')
self.assertIn('q=foo', sent_request.body)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.session
from botocore.stub import Stubber
from botocore.exceptions import ParamValidationError
ALIAS_CASES = [
{
'service': 'ec2',
'operation': 'describe_flow_logs',
'original_name': 'Filter',
'new_name': 'Filters',
'parameter_value': [{'Name': 'traffic-type', 'Values': ['ACCEPT']}]
},
{
'service': 'cloudsearchdomain',
'operation': 'search',
'original_name': 'return',
'new_name': 'returnFields',
'parameter_value': '_all_fields',
'extra_args': {'query': 'foo'}
},
{
'service': 'logs',
'operation': 'create_export_task',
'original_name': 'from',
'new_name': 'fromTime',
'parameter_value': 0,
'extra_args': {
'logGroupName': 'name',
'to': 10,
'destination': 'mybucket'
}
}
]
def test_can_use_alias():
session = botocore.session.get_session()
for case in ALIAS_CASES:
yield _can_use_parameter_in_client_call, session, case
def test_can_use_original_name():
session = botocore.session.get_session()
for case in ALIAS_CASES:
yield _can_use_parameter_in_client_call, session, case, False
def _can_use_parameter_in_client_call(session, case, use_alias=True):
client = session.create_client(
case['service'], region_name='us-east-1',
aws_access_key_id='foo', aws_secret_access_key='bar')
stubber = Stubber(client)
stubber.activate()
operation = case['operation']
params = case.get('extra_args', {})
params = params.copy()
param_name = case['original_name']
if use_alias:
param_name = case['new_name']
params[param_name] = case['parameter_value']
stubbed_response = case.get('stubbed_response', {})
stubber.add_response(operation, stubbed_response)
try:
getattr(client, operation)(**params)
except ParamValidationError as e:
raise AssertionError(
'Expecting %s to be valid parameter for %s.%s but received '
'%s.' % (
case['new_name'], case['service'], case['operation'], e)
)
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from botocore.stub import Stubber
from tests import BaseSessionTest
class TestApiGateway(BaseSessionTest):
def setUp(self):
super(TestApiGateway, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
'apigateway', self.region)
self.stubber = Stubber(self.client)
def test_get_export(self):
params = {
'restApiId': 'foo',
'stageName': 'bar',
'exportType': 'swagger',
'accepts': 'application/yaml'
}
with mock.patch('botocore.endpoint.Session.send') as _send:
_send.return_value = mock.Mock(
status_code=200, headers={}, content=b'{}')
self.client.get_export(**params)
sent_request = _send.call_args[0][0]
self.assertEqual(sent_request.method, 'GET')
self.assertEqual(
sent_request.headers.get('Accept'), b'application/yaml')
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.config
from tests import unittest
import botocore
import botocore.session
import botocore.stub as stub
from botocore.stub import Stubber
from botocore.exceptions import StubResponseError, ClientError, \
StubAssertionError, UnStubbedResponseError
from botocore.exceptions import ParamValidationError
import botocore.client
import botocore.retryhandler
import botocore.translate
class TestStubber(unittest.TestCase):
def setUp(self):
session = botocore.session.get_session()
config = botocore.config.Config(
signature_version=botocore.UNSIGNED,
s3={'addressing_style': 'path'}
)
self.client = session.create_client(
's3', region_name='us-east-1', config=config)
self.stubber = Stubber(self.client)
def test_stubber_returns_response(self):
service_response = {'ResponseMetadata': {'foo': 'bar'}}
self.stubber.add_response('list_objects', service_response)
self.stubber.activate()
response = self.client.list_objects(Bucket='foo')
self.assertEqual(response, service_response)
def test_context_manager_returns_response(self):
service_response = {'ResponseMetadata': {'foo': 'bar'}}
self.stubber.add_response('list_objects', service_response)
with self.stubber:
response = self.client.list_objects(Bucket='foo')
self.assertEqual(response, service_response)
def test_activated_stubber_errors_with_no_registered_stubs(self):
self.stubber.activate()
# Params one per line for readability.
with self.assertRaisesRegexp(UnStubbedResponseError,
"Unexpected API Call"):
self.client.list_objects(
Bucket='asdfasdfasdfasdf',
Delimiter='asdfasdfasdfasdf',
Prefix='asdfasdfasdfasdf',
EncodingType='url')
def test_stubber_errors_when_stubs_are_used_up(self):
self.stubber.add_response('list_objects', {})
self.stubber.activate()
self.client.list_objects(Bucket='foo')
with self.assertRaises(UnStubbedResponseError):
self.client.list_objects(Bucket='foo')
def test_client_error_response(self):
error_code = "AccessDenied"
error_message = "Access Denied"
self.stubber.add_client_error(
'list_objects', error_code, error_message)
self.stubber.activate()
with self.assertRaises(ClientError):
self.client.list_objects(Bucket='foo')
def test_can_add_expected_params_to_client_error(self):
self.stubber.add_client_error(
'list_objects', 'Error', 'error',
expected_params={'Bucket': 'foo'}
)
self.stubber.activate()
with self.assertRaises(ClientError):
self.client.list_objects(Bucket='foo')
def test_can_expected_param_fails_in_client_error(self):
self.stubber.add_client_error(
'list_objects', 'Error', 'error',
expected_params={'Bucket': 'foo'}
)
self.stubber.activate()
# We expect an AssertionError instead of a ClientError
# because we're calling the operation with the wrong
# param value.
with self.assertRaises(AssertionError):
self.client.list_objects(Bucket='wrong-argument-value')
def test_expected_params_success(self):
service_response = {}
expected_params = {'Bucket': 'foo'}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# This should be called successfully with no errors being thrown
# for mismatching expected params.
response = self.client.list_objects(Bucket='foo')
self.assertEqual(response, service_response)
def test_expected_params_fail(self):
service_response = {}
expected_params = {'Bucket': 'bar'}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# This call should raise an error for mismatching expected params.
with self.assertRaisesRegexp(StubResponseError,
"{'Bucket': 'bar'},\n"):
self.client.list_objects(Bucket='foo')
def test_expected_params_mixed_with_errors_responses(self):
# Add an error response
error_code = "AccessDenied"
error_message = "Access Denied"
self.stubber.add_client_error(
'list_objects', error_code, error_message)
# Add a response with incorrect expected params
service_response = {}
expected_params = {'Bucket': 'bar'}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# The first call should throw an error as expected.
with self.assertRaises(ClientError):
self.client.list_objects(Bucket='foo')
# The second call should throw an error for unexpected parameters
with self.assertRaisesRegexp(StubResponseError, 'Expected parameters'):
self.client.list_objects(Bucket='foo')
def test_can_continue_to_call_after_expected_params_fail(self):
service_response = {}
expected_params = {'Bucket': 'bar'}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# Throw an error for unexpected parameters
with self.assertRaises(StubResponseError):
self.client.list_objects(Bucket='foo')
# The stubber should still have the responses queued up
# even though the original parameters did not match the expected ones.
self.client.list_objects(Bucket='bar')
self.stubber.assert_no_pending_responses()
def test_still_relies_on_param_validation_with_expected_params(self):
service_response = {}
expected_params = {'Buck': 'bar'}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# Throw an error for invalid parameters
with self.assertRaises(ParamValidationError):
self.client.list_objects(Buck='bar')
def test_any_ignores_param_for_validation(self):
service_response = {}
expected_params = {'Bucket': stub.ANY}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.add_response(
'list_objects', service_response, expected_params)
try:
with self.stubber:
self.client.list_objects(Bucket='foo')
self.client.list_objects(Bucket='bar')
except StubAssertionError:
self.fail("stub.ANY failed to ignore parameter for validation.")
def test_mixed_any_and_concrete_params(self):
service_response = {}
expected_params = {'Bucket': stub.ANY, 'Key': 'foo.txt'}
self.stubber.add_response(
'head_object', service_response, expected_params)
self.stubber.add_response(
'head_object', service_response, expected_params)
try:
with self.stubber:
self.client.head_object(Bucket='foo', Key='foo.txt')
self.client.head_object(Bucket='bar', Key='foo.txt')
except StubAssertionError:
self.fail("stub.ANY failed to ignore parameter for validation.")
def test_nested_any_param(self):
service_response = {}
expected_params = {
'Bucket': 'foo',
'Key': 'bar.txt',
'Metadata': {
'MyMeta': stub.ANY,
}
}
self.stubber.add_response(
'put_object', service_response, expected_params)
self.stubber.add_response(
'put_object', service_response, expected_params)
try:
with self.stubber:
self.client.put_object(
Bucket='foo',
Key='bar.txt',
Metadata={
'MyMeta': 'Foo',
}
)
self.client.put_object(
Bucket='foo',
Key='bar.txt',
Metadata={
'MyMeta': 'Bar',
}
)
except StubAssertionError:
self.fail(
"stub.ANY failed to ignore nested parameter for validation.")
def test_ANY_repr(self):
self.assertEqual(repr(stub.ANY), '<ANY>')
def test_none_param(self):
service_response = {}
expected_params = {'Buck': None}
self.stubber.add_response(
'list_objects', service_response, expected_params)
self.stubber.activate()
# Throw an error for invalid parameters
with self.assertRaises(StubAssertionError):
self.client.list_objects(Buck='bar')
def test_many_expected_params(self):
service_response = {}
expected_params = {
'Bucket': 'mybucket',
'Prefix': 'myprefix',
'Delimiter': '/',
'EncodingType': 'url'
}
self.stubber.add_response(
'list_objects', service_response, expected_params)
try:
with self.stubber:
self.client.list_objects(**expected_params)
except StubAssertionError:
self.fail(
"Stubber inappropriately raised error for same parameters.")
def test_no_stub_for_presign_url(self):
try:
with self.stubber:
url = self.client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'mybucket',
'Key': 'mykey'
}
)
self.assertEqual(
url, 'https://s3.amazonaws.com/mybucket/mykey')
except StubResponseError:
self.fail(
'Stubbed responses should not be required for generating '
'presigned requests'
)
def test_can_stub_with_presign_url_mixed_in(self):
desired_response = {}
expected_params = {
'Bucket': 'mybucket',
'Prefix': 'myprefix',
}
self.stubber.add_response(
'list_objects', desired_response, expected_params)
with self.stubber:
url = self.client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'myotherbucket',
'Key': 'myotherkey'
}
)
self.assertEqual(
url, 'https://s3.amazonaws.com/myotherbucket/myotherkey')
actual_response = self.client.list_objects(**expected_params)
self.assertEqual(desired_response, actual_response)
self.stubber.assert_no_pending_responses()
|
from contextlib import contextmanager
import mock
from tests import BaseSessionTest
from botocore.history import BaseHistoryHandler
from botocore.history import get_global_history_recorder
class RecordingHandler(BaseHistoryHandler):
def __init__(self):
self.recorded_calls = []
def emit(self, event_type, payload, source):
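# Each call is recorded as an (event_type, payload, source) tuple,
# e.g. ('API_CALL', {'service': 's3', ...}, 'BOTOCORE'); the tests
# below assert on these pieces individually.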
self.recorded_calls.append((event_type, payload, source))
class TestRecordStatementsInjections(BaseSessionTest):
def setUp(self):
super(TestRecordStatementsInjections, self).setUp()
self.client = self.session.create_client('s3', 'us-west-2')
self.s3_response_body = (
'<ListAllMyBucketsResult '
' xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
' <Owner>'
' <ID>d41d8cd98f00b204e9800998ecf8427e</ID>'
' <DisplayName>foo</DisplayName>'
' </Owner>'
' <Buckets>'
' <Bucket>'
' <Name>bar</Name>'
' <CreationDate>1912-06-23T22:57:02.000Z</CreationDate>'
' </Bucket>'
' </Buckets>'
'</ListAllMyBucketsResult>'
).encode('utf-8')
self.recording_handler = RecordingHandler()
history_recorder = get_global_history_recorder()
history_recorder.enable()
history_recorder.add_handler(self.recording_handler)
def _get_all_events_of_type(self, event_type):
recorded_calls = self.recording_handler.recorded_calls
matching = [call for call in recorded_calls
if call[0] == event_type]
return matching
@contextmanager
def patch_http_layer(self, response, status_code=200):
with mock.patch('botocore.endpoint.Session.send') as send:
send.return_value = mock.Mock(status_code=status_code,
headers={},
content=response)
yield send
def test_does_record_api_call(self):
with self.patch_http_layer(self.s3_response_body):
self.client.list_buckets()
api_call_events = self._get_all_events_of_type('API_CALL')
self.assertEqual(len(api_call_events), 1)
event = api_call_events[0]
event_type, payload, source = event
self.assertEqual(payload, {
'operation': u'ListBuckets',
'params': {},
'service': 's3'
})
self.assertEqual(source, 'BOTOCORE')
def test_does_record_http_request(self):
with self.patch_http_layer(self.s3_response_body):
self.client.list_buckets()
http_request_events = self._get_all_events_of_type('HTTP_REQUEST')
self.assertEqual(len(http_request_events), 1)
event = http_request_events[0]
event_type, payload, source = event
method = payload['method']
self.assertEqual(method, u'GET')
# The header values vary too much per request to verify them here.
# Instead, just check the presence of each expected header.
headers = payload['headers']
for expected_header in ['Authorization', 'User-Agent', 'X-Amz-Date',
'X-Amz-Content-SHA256']:
self.assertIn(expected_header, headers)
body = payload['body']
self.assertIsNone(body)
streaming = payload['streaming']
self.assertEqual(streaming, False)
url = payload['url']
self.assertEqual(url, 'https://s3.us-west-2.amazonaws.com/')
self.assertEqual(source, 'BOTOCORE')
def test_does_record_http_response(self):
with self.patch_http_layer(self.s3_response_body):
self.client.list_buckets()
http_response_events = self._get_all_events_of_type('HTTP_RESPONSE')
self.assertEqual(len(http_response_events), 1)
event = http_response_events[0]
event_type, payload, source = event
self.assertEqual(payload, {
'status_code': 200,
'headers': {},
'streaming': False,
'body': self.s3_response_body
}
)
self.assertEqual(source, 'BOTOCORE')
def test_does_record_parsed_response(self):
with self.patch_http_layer(self.s3_response_body):
self.client.list_buckets()
parsed_response_events = self._get_all_events_of_type(
'PARSED_RESPONSE')
self.assertEqual(len(parsed_response_events), 1)
event = parsed_response_events[0]
event_type, payload, source = event
# Given that the request contains headers with a user agent string,
# a date, and a signature, we need to disassemble the call and
# manually assert the interesting bits, since mock can only assert
# if the args all match exactly.
owner = payload['Owner']
self.assertEqual(owner, {
'DisplayName': 'foo',
'ID': 'd41d8cd98f00b204e9800998ecf8427e'
})
buckets = payload['Buckets']
self.assertEqual(len(buckets), 1)
bucket = buckets[0]
self.assertEqual(bucket['Name'], 'bar')
metadata = payload['ResponseMetadata']
self.assertEqual(metadata, {
'HTTPHeaders': {},
'HTTPStatusCode': 200,
'RetryAttempts': 0
})
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from botocore.stub import Stubber, ANY
import botocore.session
class TestIdempotencyToken(unittest.TestCase):
def setUp(self):
self.function_name = 'purchase_scheduled_instances'
self.region = 'us-west-2'
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', self.region)
self.stubber = Stubber(self.client)
self.service_response = {}
self.params_seen = []
# Record all the parameters that get seen
self.client.meta.events.register_first(
'before-call.*.*',
self.collect_params,
unique_id='TestIdempotencyToken')
def collect_params(self, model, params, *args, **kwargs):
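# The 'before-call' payload includes the serialized request; keeping
# just the top-level keys of its 'body' is enough to check whether a
# ClientToken was injected.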
self.params_seen.extend(params['body'].keys())
def test_provided_idempotency_token(self):
expected_params = {
'PurchaseRequests': [
{'PurchaseToken': 'foo',
'InstanceCount': 123}],
'ClientToken': ANY
}
self.stubber.add_response(
self.function_name, self.service_response, expected_params)
with self.stubber:
self.client.purchase_scheduled_instances(
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}],
ClientToken='foobar')
self.assertIn('ClientToken', self.params_seen)
def test_insert_idempotency_token(self):
expected_params = {
'PurchaseRequests': [
{'PurchaseToken': 'foo',
'InstanceCount': 123}],
}
self.stubber.add_response(
self.function_name, self.service_response, expected_params)
with self.stubber:
self.client.purchase_scheduled_instances(
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}])
self.assertIn('ClientToken', self.params_seen)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, mock, BaseSessionTest, create_session
from nose.tools import assert_equal
import botocore.session
from botocore.config import Config
from botocore.exceptions import ParamValidationError
from botocore import UNSIGNED
class TestS3BucketValidation(unittest.TestCase):
def test_invalid_bucket_name_raises_error(self):
session = botocore.session.get_session()
s3 = session.create_client('s3')
with self.assertRaises(ParamValidationError):
s3.put_object(Bucket='adfgasdfadfs/bucket/name',
Key='foo', Body=b'asdf')
class BaseS3OperationTest(BaseSessionTest):
def setUp(self):
super(BaseS3OperationTest, self).setUp()
self.region = 'us-west-2'
self.client = self.session.create_client(
's3', self.region)
self.session_send_patch = mock.patch('botocore.endpoint.Session.send')
self.http_session_send_mock = self.session_send_patch.start()
def tearDown(self):
super(BaseS3OperationTest, self).tearDown()
self.session_send_patch.stop()
class TestOnlyAsciiCharsAllowed(BaseS3OperationTest):
def test_validates_non_ascii_chars_trigger_validation_error(self):
self.http_session_send_mock.return_value = mock.Mock(status_code=200,
headers={},
content=b'')
with self.assertRaises(ParamValidationError):
self.client.put_object(
Bucket='foo', Key='bar', Metadata={
'goodkey': 'good', 'non-ascii': u'\u2713'})
class TestS3GetBucketLifecycle(BaseS3OperationTest):
def test_multiple_transitions_returns_one(self):
http_response = mock.Mock()
http_response.status_code = 200
http_response.content = (
'<?xml version="1.0" ?>'
'<LifecycleConfiguration xmlns="http://s3.amazonaws.'
'com/doc/2006-03-01/">'
' <Rule>'
' <ID>transitionRule</ID>'
' <Prefix>foo</Prefix>'
' <Status>Enabled</Status>'
' <Transition>'
' <Days>40</Days>'
' <StorageClass>STANDARD_IA</StorageClass>'
' </Transition>'
' <Transition>'
' <Days>70</Days>'
' <StorageClass>GLACIER</StorageClass>'
' </Transition>'
' </Rule>'
' <Rule>'
' <ID>noncurrentVersionRule</ID>'
' <Prefix>bar</Prefix>'
' <Status>Enabled</Status>'
' <NoncurrentVersionTransition>'
' <NoncurrentDays>40</NoncurrentDays>'
' <StorageClass>STANDARD_IA</StorageClass>'
' </NoncurrentVersionTransition>'
' <NoncurrentVersionTransition>'
' <NoncurrentDays>70</NoncurrentDays>'
' <StorageClass>GLACIER</StorageClass>'
' </NoncurrentVersionTransition>'
' </Rule>'
'</LifecycleConfiguration>'
).encode('utf-8')
http_response.headers = {}
self.http_session_send_mock.return_value = http_response
s3 = self.session.create_client('s3')
response = s3.get_bucket_lifecycle(Bucket='mybucket')
# Each Transition member should have at least one of the
# transitions provided.
self.assertEqual(
response['Rules'][0]['Transition'],
{'Days': 40, 'StorageClass': 'STANDARD_IA'}
)
self.assertEqual(
response['Rules'][1]['NoncurrentVersionTransition'],
{'NoncurrentDays': 40, 'StorageClass': 'STANDARD_IA'}
)
class TestS3PutObject(BaseS3OperationTest):
def test_500_error_with_non_xml_body(self):
# Note: This exact test case may not be applicable from
# an integration standpoint if the issue is fixed in the future.
#
# The issue is that:
# S3 returns a 200 response but the received response from urllib3 has
# a 500 status code and the headers are in the body of
# the response. Botocore will try to parse out the error body as xml,
# but the body is invalid xml because it is full of headers.
# So instead of blowing up on an XML parsing error, we
# should at least use the 500 status code because that can be
# retried.
#
# We are unsure of what exactly causes the response to be mangled,
# but we suspect it is related to how 100-continue responses are
# handled.
non_xml_content = (
'x-amz-id-2: foo\r\n'
'x-amz-request-id: bar\n'
'Date: Tue, 06 Oct 2015 03:20:38 GMT\r\n'
'ETag: "a6d856bc171fc6aa1b236680856094e2"\r\n'
'Content-Length: 0\r\n'
'Server: AmazonS3\r\n'
).encode('utf-8')
http_500_response = mock.Mock()
http_500_response.status_code = 500
http_500_response.content = non_xml_content
http_500_response.headers = {}
success_response = mock.Mock()
success_response.status_code = 200
success_response.content = b''
success_response.headers = {}
self.http_session_send_mock.side_effect = [
http_500_response, success_response
]
s3 = self.session.create_client('s3')
response = s3.put_object(Bucket='mybucket', Key='mykey', Body=b'foo')
# The first response should have been retried even though the xml is
# invalid and eventually return the 200 response.
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
class TestS3SigV4(BaseS3OperationTest):
def setUp(self):
super(TestS3SigV4, self).setUp()
self.client = self.session.create_client(
's3', self.region, config=Config(signature_version='s3v4'))
self.response_mock = mock.Mock()
self.response_mock.content = b''
self.response_mock.headers = {}
self.response_mock.status_code = 200
self.http_session_send_mock.return_value = self.response_mock
def get_sent_headers(self):
return self.http_session_send_mock.mock_calls[0][1][0].headers
def test_content_md5_set(self):
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
self.assertIn('content-md5', self.get_sent_headers())
def test_content_sha256_set_if_config_value_is_true(self):
config = Config(signature_version='s3v4', s3={
'payload_signing_enabled': True
})
self.client = self.session.create_client(
's3', self.region, config=config)
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
sha_header = sent_headers.get('x-amz-content-sha256')
self.assertNotEqual(sha_header, b'UNSIGNED-PAYLOAD')
def test_content_sha256_not_set_if_config_value_is_false(self):
config = Config(signature_version='s3v4', s3={
'payload_signing_enabled': False
})
self.client = self.session.create_client(
's3', self.region, config=config)
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
sha_header = sent_headers.get('x-amz-content-sha256')
self.assertEqual(sha_header, b'UNSIGNED-PAYLOAD')
def test_content_sha256_set_if_md5_is_unavailable(self):
with mock.patch('botocore.auth.MD5_AVAILABLE', False):
with mock.patch('botocore.handlers.MD5_AVAILABLE', False):
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
unsigned = 'UNSIGNED-PAYLOAD'
self.assertNotEqual(sent_headers['x-amz-content-sha256'], unsigned)
self.assertNotIn('content-md5', sent_headers)
class TestCanSendIntegerHeaders(BaseSessionTest):
def test_int_values_with_sigv4(self):
s3 = self.session.create_client(
's3', config=Config(signature_version='s3v4'))
with mock.patch('botocore.endpoint.Session.send') as mock_send:
mock_send.return_value = mock.Mock(status_code=200,
content=b'',
headers={})
s3.upload_part(Bucket='foo', Key='bar', Body=b'foo',
UploadId='bar', PartNumber=1, ContentLength=3)
headers = mock_send.call_args[0][0].headers
# Verify that the request integer value of 3 has been converted to
# string '3'. This also means we've made it pass the signer which
# expects string values in order to sign properly.
self.assertEqual(headers['Content-Length'], '3')
class TestRegionRedirect(BaseS3OperationTest):
def setUp(self):
super(TestRegionRedirect, self).setUp()
self.client = self.session.create_client(
's3', 'us-west-2', config=Config(signature_version='s3v4'))
self.redirect_response = mock.Mock()
self.redirect_response.headers = {
'x-amz-bucket-region': 'eu-central-1'
}
self.redirect_response.status_code = 301
self.redirect_response.content = (
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<Error>'
b' <Code>PermanentRedirect</Code>'
b' <Message>The bucket you are attempting to access must be '
b' addressed using the specified endpoint. Please send all '
b' future requests to this endpoint.'
b' </Message>'
b' <Bucket>foo</Bucket>'
b' <Endpoint>foo.s3.eu-central-1.amazonaws.com</Endpoint>'
b'</Error>')
self.success_response = mock.Mock()
self.success_response.headers = {}
self.success_response.status_code = 200
self.success_response.content = (
b'<?xml version="1.0" encoding="UTF-8"?>\n'
b'<ListBucketResult>'
b' <Name>foo</Name>'
b' <Prefix></Prefix>'
b' <Marker></Marker>'
b' <MaxKeys>1000</MaxKeys>'
b' <EncodingType>url</EncodingType>'
b' <IsTruncated>false</IsTruncated>'
b'</ListBucketResult>')
def test_region_redirect(self):
self.http_session_send_mock.side_effect = [
self.redirect_response, self.success_response]
response = self.client.list_objects(Bucket='foo')
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertEqual(self.http_session_send_mock.call_count, 2)
calls = [c[0][0] for c in self.http_session_send_mock.call_args_list]
initial_url = ('https://s3.us-west-2.amazonaws.com/foo'
'?encoding-type=url')
self.assertEqual(calls[0].url, initial_url)
fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo'
'?encoding-type=url')
self.assertEqual(calls[1].url, fixed_url)
def test_region_redirect_cache(self):
self.http_session_send_mock.side_effect = [
self.redirect_response, self.success_response,
self.success_response]
first_response = self.client.list_objects(Bucket='foo')
self.assertEqual(
first_response['ResponseMetadata']['HTTPStatusCode'], 200)
second_response = self.client.list_objects(Bucket='foo')
self.assertEqual(
second_response['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertEqual(self.http_session_send_mock.call_count, 3)
calls = [c[0][0] for c in self.http_session_send_mock.call_args_list]
initial_url = ('https://s3.us-west-2.amazonaws.com/foo'
'?encoding-type=url')
self.assertEqual(calls[0].url, initial_url)
fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo'
'?encoding-type=url')
self.assertEqual(calls[1].url, fixed_url)
self.assertEqual(calls[2].url, fixed_url)
class TestGeneratePresigned(BaseS3OperationTest):
def test_generate_unauthed_url(self):
config = Config(signature_version=botocore.UNSIGNED)
client = self.session.create_client('s3', self.region, config=config)
url = client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'foo',
'Key': 'bar'
})
self.assertEqual(url, 'https://foo.s3.amazonaws.com/bar')
def test_generate_unauthed_post(self):
config = Config(signature_version=botocore.UNSIGNED)
client = self.session.create_client('s3', self.region, config=config)
parts = client.generate_presigned_post(Bucket='foo', Key='bar')
expected = {
'fields': {'key': 'bar'},
'url': 'https://foo.s3.amazonaws.com/'
}
self.assertEqual(parts, expected)
def test_default_presign_uses_sigv2(self):
url = self.client.generate_presigned_url(ClientMethod='list_buckets')
self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url)
def test_sigv4_presign(self):
config = Config(signature_version='s3v4')
client = self.session.create_client('s3', self.region, config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
def test_sigv2_presign(self):
config = Config(signature_version='s3')
client = self.session.create_client('s3', self.region, config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url)
def test_uses_sigv4_for_unknown_region(self):
client = self.session.create_client('s3', 'us-west-88')
url = client.generate_presigned_url(ClientMethod='list_buckets')
self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
def test_default_presign_sigv4_in_sigv4_only_region(self):
client = self.session.create_client('s3', 'us-east-2')
url = client.generate_presigned_url(ClientMethod='list_buckets')
self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
def test_presign_unsigned(self):
config = Config(signature_version=botocore.UNSIGNED)
client = self.session.create_client('s3', 'us-east-2', config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
self.assertEqual(
'https://s3.us-east-2.amazonaws.com/', url)
def test_presign_url_with_ssec(self):
config = Config(signature_version='s3')
client = self.session.create_client('s3', 'us-east-1', config=config)
url = client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'mybucket',
'Key': 'mykey',
'SSECustomerKey': 'a' * 32,
'SSECustomerAlgorithm': 'AES256'
}
)
# The md5 of the sse-c key will be injected when parameters are
# built so it should show up in the presigned url as well.
self.assertIn(
'x-amz-server-side-encryption-customer-key-md5=', url
)
def test_presign_s3_accelerate(self):
config = Config(signature_version=botocore.UNSIGNED,
s3={'use_accelerate_endpoint': True})
client = self.session.create_client('s3', 'us-east-1', config=config)
url = client.generate_presigned_url(
ClientMethod='get_object',
Params={'Bucket': 'mybucket', 'Key': 'mykey'}
)
# The url should be the accelerate endpoint
self.assertEqual(
'https://mybucket.s3-accelerate.amazonaws.com/mykey', url)
def test_presign_post_s3_accelerate(self):
config = Config(signature_version=botocore.UNSIGNED,
s3={'use_accelerate_endpoint': True})
client = self.session.create_client('s3', 'us-east-1', config=config)
parts = client.generate_presigned_post(
Bucket='mybucket', Key='mykey')
# The url should be the accelerate endpoint
expected = {
'fields': {'key': 'mykey'},
'url': 'https://mybucket.s3-accelerate.amazonaws.com/'
}
self.assertEqual(parts, expected)
def test_correct_url_used_for_s3():
# Test that given various sets of config options and bucket names,
# we construct the expected endpoint url.
t = S3AddressingCases(_verify_expected_endpoint_url)
# The default behavior for sigv2. DNS compatible buckets
yield t.case(region='us-west-2', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
yield t.case(region='us-east-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
yield t.case(region='us-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
yield t.case(region='us-west-1', bucket='bucket', key='key',
signature_version='s3', is_secure=False,
expected_url='http://bucket.s3.amazonaws.com/key')
# The default behavior for sigv4. DNS compatible buckets still get path
# style addresses.
yield t.case(region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://s3.us-west-2.amazonaws.com/bucket/key'))
yield t.case(region='us-east-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url='https://s3.amazonaws.com/bucket/key')
yield t.case(region='us-west-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://s3.us-west-1.amazonaws.com/bucket/key'))
yield t.case(region='us-west-1', bucket='bucket', key='key',
signature_version='s3v4', is_secure=False,
expected_url=(
'http://s3.us-west-1.amazonaws.com/bucket/key'))
# Regions outside of the 'aws' partition.
# We're expecting path style because this is the default with
# 's3v4'.
yield t.case(region='cn-north-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://s3.cn-north-1.amazonaws.com.cn/bucket/key'))
# This isn't actually supported because cn-north-1 is sigv4 only,
# but we'll still double check that our internal logic is correct
# when building the expected url.
yield t.case(region='cn-north-1', bucket='bucket', key='key',
signature_version='s3',
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
# If the request is unsigned, we should have the default
# fix_s3_host behavior which is to use virtual hosting where
# possible but fall back to path style when needed.
yield t.case(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
yield t.case(region='cn-north-1', bucket='bucket.dot', key='key',
signature_version=UNSIGNED,
expected_url=(
'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key'))
# And of course you can explicitly specify which style to use.
virtual_hosting = {'addressing_style': 'virtual'}
yield t.case(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
s3_config=virtual_hosting,
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
path_style = {'addressing_style': 'path'}
yield t.case(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
s3_config=path_style,
expected_url=(
'https://s3.cn-north-1.amazonaws.com.cn/bucket/key'))
# If you don't have a DNS compatible bucket, we use path style.
yield t.case(
region='us-west-2', bucket='bucket.dot', key='key',
expected_url='https://s3.us-west-2.amazonaws.com/bucket.dot/key')
yield t.case(
region='us-east-1', bucket='bucket.dot', key='key',
expected_url='https://s3.amazonaws.com/bucket.dot/key')
# Custom endpoint url should always be used.
yield t.case(
customer_provided_endpoint='https://my-custom-s3/',
bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
yield t.case(
customer_provided_endpoint='https://my-custom-s3/',
bucket='bucket.dots', key='bar',
expected_url='https://my-custom-s3/bucket.dots/bar')
# Doesn't matter what region you specify, a custom endpoint url always
# wins.
yield t.case(
customer_provided_endpoint='https://my-custom-s3/',
region='us-west-2', bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
# Explicitly configuring "virtual" addressing_style.
virtual_hosting = {'addressing_style': 'virtual'}
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.amazonaws.com/key')
yield t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
yield t.case(
region='eu-central-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.eu-central-1.amazonaws.com/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
customer_provided_endpoint='https://foo.amazonaws.com',
expected_url='https://bucket.foo.amazonaws.com/key')
# Test us-gov with virtual addressing.
yield t.case(
region='us-gov-west-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key')
    # Test that restricted regions do not use virtual hosting by default.
yield t.case(
region='us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://s3.us-gov-west-1.amazonaws.com/bucket/key')
yield t.case(
region='fips-us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://s3-fips-us-gov-west-1.amazonaws.com/bucket/key')
# Test path style addressing.
path_style = {'addressing_style': 'path'}
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
expected_url='https://s3.amazonaws.com/bucket/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
customer_provided_endpoint='https://foo.amazonaws.com/',
expected_url='https://foo.amazonaws.com/bucket/key')
# S3 accelerate
use_accelerate = {'use_accelerate_endpoint': True}
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
yield t.case(
# region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# Provided endpoints still get recognized as accelerate endpoints.
yield t.case(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='http://s3-accelerate.amazonaws.com',
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate, is_secure=False,
# Note we're using http:// because is_secure=False.
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# s3-accelerate must be the first part of the url.
customer_provided_endpoint='https://foo.s3-accelerate.amazonaws.com',
expected_url='https://foo.s3-accelerate.amazonaws.com/bucket/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# The endpoint must be an Amazon endpoint.
customer_provided_endpoint='https://s3-accelerate.notamazon.com',
expected_url='https://s3-accelerate.notamazon.com/bucket/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config={'use_accelerate_endpoint': True,
'addressing_style': 'path'},
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# S3 dual stack endpoints.
use_dualstack = {'use_dualstack_endpoint': True}
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
yield t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://s3.dualstack.us-east-1.amazonaws.com/bucket/key')
yield t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://s3.dualstack.us-west-2.amazonaws.com/bucket/key')
# Non DNS compatible buckets use path style for dual stack.
yield t.case(
region='us-west-2', bucket='bucket.dot', key='key',
s3_config=use_dualstack,
        # Falls back to path style since the bucket is not DNS compatible.
expected_url=(
'https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key'))
# Supports is_secure (use_ssl=False in create_client()).
yield t.case(
region='us-west-2', bucket='bucket.dot', key='key', is_secure=False,
s3_config=use_dualstack,
        # Falls back to path style since the bucket is not DNS compatible.
expected_url=(
'http://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key'))
    # If path style is requested, we should use it, even if the bucket is
    # DNS compatible.
force_path_style = {
'use_dualstack_endpoint': True,
'addressing_style': 'path',
}
yield t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=force_path_style,
        # Path style is used because it was explicitly requested.
expected_url='https://s3.dualstack.us-west-2.amazonaws.com/bucket/key')
# Accelerate + dual stack
use_accelerate_dualstack = {
'use_accelerate_endpoint': True,
'use_dualstack_endpoint': True,
}
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
yield t.case(
# Region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
# Only s3-accelerate overrides a customer endpoint.
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack,
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url=(
'https://bucket.s3-accelerate.amazonaws.com/key'))
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# Dualstack is whitelisted.
customer_provided_endpoint=(
'https://s3-accelerate.dualstack.amazonaws.com'),
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# Even whitelisted parts cannot be duplicated.
customer_provided_endpoint=(
'https://s3-accelerate.dualstack.dualstack.amazonaws.com'),
expected_url=(
'https://s3-accelerate.dualstack.dualstack'
'.amazonaws.com/bucket/key'))
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# More than two extra parts is not allowed.
customer_provided_endpoint=(
'https://s3-accelerate.dualstack.dualstack.dualstack'
'.amazonaws.com'),
expected_url=(
'https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com'
'/bucket/key'))
yield t.case(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack, is_secure=False,
# Note we're using http:// because is_secure=False.
expected_url=(
'http://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
use_accelerate_dualstack['addressing_style'] = 'path'
yield t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
class S3AddressingCases(object):
def __init__(self, verify_function):
self._verify = verify_function
def case(self, region=None, bucket='bucket', key='key',
s3_config=None, is_secure=True, customer_provided_endpoint=None,
expected_url=None, signature_version=None):
return (
self._verify, region, bucket, key, s3_config, is_secure,
customer_provided_endpoint, expected_url, signature_version
)
def _verify_expected_endpoint_url(region, bucket, key, s3_config,
is_secure=True,
customer_provided_endpoint=None,
expected_url=None, signature_version=None):
http_response = mock.Mock()
http_response.status_code = 200
http_response.headers = {}
http_response.content = b''
environ = {}
with mock.patch('os.environ', environ):
environ['AWS_ACCESS_KEY_ID'] = 'access_key'
environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
session = create_session()
session.config_filename = 'no-exist-foo'
config = Config(
signature_version=signature_version,
s3=s3_config
)
s3 = session.create_client('s3', region_name=region, use_ssl=is_secure,
config=config,
endpoint_url=customer_provided_endpoint)
with mock.patch('botocore.endpoint.Session.send') as mock_send:
mock_send.return_value = http_response
s3.put_object(Bucket=bucket,
Key=key, Body=b'bar')
request_sent = mock_send.call_args[0][0]
assert_equal(request_sent.url, expected_url)
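# A minimal sketch of how nose consumes the yielded cases above
# (illustrative only; the test runner performs this unpacking for us):
#
#   case = t.case(region='us-west-2', bucket='bucket', key='key',
#                 signature_version='s3v4',
#                 expected_url='https://s3.us-west-2.amazonaws.com/bucket/key')
#   verify_func, args = case[0], case[1:]
#   verify_func(*args)  # runs _verify_expected_endpoint_url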
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
from tests import unittest, mock, BaseSessionTest
from botocore.exceptions import UnsupportedTLSVersionWarning
@unittest.skipIf(sys.version_info[:2] == (2, 6),
("py26 is unable to detect openssl version"))
class TestOpensslVersion(BaseSessionTest):
def test_incompatible_openssl_version(self):
with mock.patch('ssl.OPENSSL_VERSION_INFO', new=(0, 9, 8, 11, 15)):
with mock.patch('warnings.warn') as mock_warn:
self.session.create_client('iot-data', 'us-east-1')
call_args = mock_warn.call_args[0]
warning_message = call_args[0]
warning_type = call_args[1]
# We should say something specific about the service.
self.assertIn('iot-data', warning_message)
self.assertEqual(warning_type, UnsupportedTLSVersionWarning)
def test_compatible_openssl_version(self):
with mock.patch('ssl.OPENSSL_VERSION_INFO', new=(1, 0, 1, 1, 1)):
with mock.patch('warnings.warn') as mock_warn:
self.session.create_client('iot-data', 'us-east-1')
self.assertFalse(mock_warn.called)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import mock
from nose.tools import assert_false
from tests import create_session
def test_unsigned_operations():
operation_params = {
'change_password': {
'PreviousPassword': 'myoldbadpassword',
'ProposedPassword': 'mynewgoodpassword',
'AccessToken': 'foobar'
},
'confirm_forgot_password': {
'ClientId': 'foo',
'Username': 'myusername',
'ConfirmationCode': 'thisismeforreal',
'Password': 'whydowesendpasswordsviaemail'
},
'confirm_sign_up': {
'ClientId': 'foo',
'Username': 'myusername',
'ConfirmationCode': 'ireallydowanttosignup'
},
'delete_user': {
'AccessToken': 'foobar'
},
'delete_user_attributes': {
'UserAttributeNames': ['myattribute'],
'AccessToken': 'foobar'
},
'forgot_password': {
'ClientId': 'foo',
'Username': 'myusername'
},
'get_user': {
'AccessToken': 'foobar'
},
'get_user_attribute_verification_code': {
'AttributeName': 'myattribute',
'AccessToken': 'foobar'
},
'resend_confirmation_code': {
'ClientId': 'foo',
'Username': 'myusername'
},
'set_user_settings': {
'AccessToken': 'randomtoken',
'MFAOptions': [{
'DeliveryMedium': 'SMS',
'AttributeName': 'someattributename'
}]
},
'sign_up': {
'ClientId': 'foo',
'Username': 'bar',
'Password': 'mysupersecurepassword',
},
'update_user_attributes': {
'UserAttributes': [{
'Name': 'someattributename',
'Value': 'newvalue'
}],
'AccessToken': 'foobar'
},
'verify_user_attribute': {
'AttributeName': 'someattributename',
'Code': 'someverificationcode',
'AccessToken': 'foobar'
},
}
environ = {
'AWS_ACCESS_KEY_ID': 'access_key',
'AWS_SECRET_ACCESS_KEY': 'secret_key',
'AWS_CONFIG_FILE': 'no-exist-foo',
}
with mock.patch('os.environ', environ):
session = create_session()
session.config_filename = 'no-exist-foo'
client = session.create_client('cognito-idp', 'us-west-2')
for operation, params in operation_params.items():
test_case = UnsignedOperationTestCase(client, operation, params)
yield test_case.run
class UnsignedOperationTestCase(object):
def __init__(self, client, operation_name, parameters):
self._client = client
self._operation_name = operation_name
self._parameters = parameters
def run(self):
operation = getattr(self._client, self._operation_name)
with mock.patch('botocore.endpoint.Session.send') as _send:
_send.return_value = mock.Mock(
status_code=200, headers={}, content=b'{}')
operation(**self._parameters)
request = _send.call_args[0][0]
assert_false(
'authorization' in request.headers,
'authorization header found in unsigned operation'
)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This is a dummy implementation of a credential provider process."""
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--raise-error', action='store_true', help=(
'If set, this will cause the process to return a non-zero exit code '
'and print to stderr.'
))
args = parser.parse_args()
if args.raise_error:
raise Exception('Failed to fetch credentials.')
print(json.dumps({
'AccessKeyId': 'spam',
'SecretAccessKey': 'eggs',
'Version': 1
}))
if __name__ == "__main__":
main()
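# For reference, a profile can invoke this script via the
# ``credential_process`` config setting (the path below is hypothetical):
#
#   [profile dev]
#   credential_process = python /path/to/this/script.py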
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
class TestLexDocs(BaseDocsFunctionalTest):
TYPE_STRING = '{...}|[...]|123|123.4|\'string\'|True|None'
def test_jsonheader_docs(self):
docs = self.get_docstring_for_method('lex-runtime', 'post_content')
self.assert_contains_lines_in_order([
'**Request Syntax**',
'sessionAttributes=%s,' % self.TYPE_STRING,
':type sessionAttributes: JSON serializable',
'**Response Syntax**',
'\'slots\': %s,' % self.TYPE_STRING,
'\'sessionAttributes\': %s' % self.TYPE_STRING,
'**slots** (JSON serializable)',
'**sessionAttributes** (JSON serializable)'
], docs)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from botocore.session import get_session
from botocore.docs.service import ServiceDocumenter
class BaseDocsFunctionalTest(unittest.TestCase):
def setUp(self):
self._session = get_session()
def assert_contains_line(self, line, contents):
contents = contents.decode('utf-8')
self.assertIn(line, contents)
def assert_contains_lines_in_order(self, lines, contents):
contents = contents.decode('utf-8')
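        # Advance a cursor past each match so subsequent lines must
        # appear later in the remaining contents; this asserts ordering,
        # not just membership.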
for line in lines:
self.assertIn(line, contents)
beginning = contents.find(line)
contents = contents[(beginning + len(line)):]
def assert_not_contains_line(self, line, contents):
contents = contents.decode('utf-8')
self.assertNotIn(line, contents)
def assert_not_contains_lines(self, lines, contents):
contents = contents.decode('utf-8')
for line in lines:
self.assertNotIn(line, contents)
def get_method_document_block(self, operation_name, contents):
contents = contents.decode('utf-8')
start_method_document = ' .. py:method:: %s(' % operation_name
start_index = contents.find(start_method_document)
self.assertNotEqual(start_index, -1, 'Method is not found in contents')
contents = contents[start_index:]
end_index = contents.find(
' .. py:method::', len(start_method_document))
contents = contents[:end_index]
return contents.encode('utf-8')
def get_parameter_document_block(self, param_name, contents):
contents = contents.decode('utf-8')
start_param_document = ' :type %s:' % param_name
start_index = contents.find(start_param_document)
self.assertNotEqual(start_index, -1, 'Param is not found in contents')
contents = contents[start_index:]
end_index = contents.find(' :type', len(start_param_document))
contents = contents[:end_index]
return contents.encode('utf-8')
def get_parameter_documentation_from_service(
self, service_name, method_name, param_name):
contents = ServiceDocumenter(
service_name, self._session).document_service()
method_contents = self.get_method_document_block(
method_name, contents)
return self.get_parameter_document_block(
param_name, method_contents)
def get_docstring_for_method(self, service_name, method_name):
contents = ServiceDocumenter(
service_name, self._session).document_service()
method_contents = self.get_method_document_block(
method_name, contents)
return method_contents
def assert_is_documented_as_autopopulated_param(
self, service_name, method_name, param_name, doc_string=None):
contents = ServiceDocumenter(
service_name, self._session).document_service()
method_contents = self.get_method_document_block(
method_name, contents)
# Ensure it is not in the example.
self.assert_not_contains_line('%s=\'string\'' % param_name,
method_contents)
# Ensure it is in the params.
param_contents = self.get_parameter_document_block(
param_name, method_contents)
# Ensure it is not labeled as required.
self.assert_not_contains_line('REQUIRED', param_contents)
# Ensure the note about autopopulation was added.
if doc_string is None:
doc_string = 'Please note that this parameter is automatically'
self.assert_contains_line(doc_string, param_contents)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
class TestGlacierDocs(BaseDocsFunctionalTest):
def test_account_id(self):
self.assert_is_documented_as_autopopulated_param(
service_name='glacier',
method_name='abort_multipart_upload',
param_name='accountId',
doc_string='Note: this parameter is set to "-"')
def test_checksum(self):
self.assert_is_documented_as_autopopulated_param(
service_name='glacier',
method_name='upload_archive',
param_name='checksum')
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import botocore.session
from botocore.model import OperationNotFoundError
from botocore.utils import parse_timestamp
def test_lint_shared_example_configs():
session = botocore.session.Session()
loader = session.get_component('data_loader')
services = loader.list_available_services('examples-1')
for service in services:
service_model = session.get_service_model(service)
example_config = loader.load_service_model(
service, 'examples-1', service_model.api_version
)
examples = example_config.get("examples", {})
for operation, operation_examples in examples.items():
for example in operation_examples:
yield _lint_single_example, operation, example, service_model
def _lint_single_example(operation_name, example_config, service_model):
# The operation should actually exist
assert_operation_exists(service_model, operation_name)
operation_model = service_model.operation_model(operation_name)
assert_valid_values(service_model.service_name, operation_model,
example_config)
def assert_valid_values(service_name, operation_model, example_config):
example_input = example_config.get('input')
input_shape = operation_model.input_shape
example_id = example_config['id']
if input_shape is None and example_input:
raise AssertionError(
"Input found in example for %s from %s with id %s, but no input "
"shape is defined." % (
operation_model.name, service_name, example_id
))
example_output = example_config.get('output')
output_shape = operation_model.output_shape
if output_shape is None and example_output:
raise AssertionError(
"Output found in example for %s from %s with id %s, but no output "
"shape is defined." % (
operation_model.name, service_name, example_id
))
try:
if example_input is not None and input_shape is not None:
_assert_valid_values(
input_shape, example_input, [input_shape.name])
if example_output is not None and output_shape is not None:
_assert_valid_values(
output_shape, example_output, [output_shape.name])
except AssertionError as e:
raise AssertionError(
"Invalid value in example for %s from %s with id %s: %s" % (
operation_model.name, service_name, example_id, e
))
def _assert_valid_values(shape, example_value, path):
if shape.type_name == 'timestamp':
_assert_valid_timestamp(example_value, path)
elif shape.type_name == 'structure':
_assert_valid_structure_values(shape, example_value, path)
elif shape.type_name == 'list':
_assert_valid_list_values(shape, example_value, path)
elif shape.type_name == 'map':
_assert_valid_map_values(shape, example_value, path)
def _assert_valid_structure_values(shape, example_dict, path):
invalid_members = [k for k in example_dict.keys()
if k not in shape.members]
if invalid_members:
dotted_path = '.'.join(path)
raise AssertionError(
"Invalid members found for %s: %s" % (dotted_path, invalid_members)
)
for member_name, example_value in example_dict.items():
member = shape.members[member_name]
_assert_valid_values(member, example_value, path + [member_name])
def _assert_valid_list_values(shape, example_values, path):
member = shape.member
for i, value in enumerate(example_values):
name = "%s[%s]" % (path[-1], i)
_assert_valid_values(member, value, path[:-1] + [name])
def _assert_valid_map_values(shape, example_value, path):
for key, value in example_value.items():
name = '%s["%s"]' % (path[-1], key)
_assert_valid_values(shape.value, value, path[:-1] + [name])
def _assert_valid_timestamp(timestamp, path):
try:
parse_timestamp(timestamp).timetuple()
except Exception as e:
dotted_path = '.'.join(path)
raise AssertionError('Failed to parse timestamp %s for %s: %s' % (
timestamp, dotted_path, e))
def assert_operation_exists(service_model, operation_name):
try:
service_model.operation_model(operation_name)
except OperationNotFoundError:
raise AssertionError(
"Examples found in %s for operation %s that does not exist." % (
service_model.service_name, operation_name))
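# For reference, the example config loaded above has roughly this shape
# (abbreviated; files live at data/<service>/<api-version>/examples-1.json):
#
#   {'examples': {'ListUsers': [
#       {'id': 'to-list-users-1',
#        'input': {...},
#        'output': {...}}]}}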
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore import xform_name
from tests.functional.docs import BaseDocsFunctionalTest
from botocore.docs.service import ServiceDocumenter
class TestStreamingBodyDocumentation(BaseDocsFunctionalTest):
def test_all_streaming_body_are_properly_documented(self):
for service in self._session.get_available_services():
client = self._session.create_client(
service, region_name='us-east-1',
aws_access_key_id='foo', aws_secret_access_key='bar')
service_model = client.meta.service_model
for operation in service_model.operation_names:
operation_model = service_model.operation_model(operation)
if operation_model.has_streaming_output:
self.assert_streaming_body_is_properly_documented(
service, xform_name(operation))
def assert_streaming_body_is_properly_documented(self, service, operation):
service_docs = ServiceDocumenter(
service, self._session).document_service()
method_docs = self.get_method_document_block(operation, service_docs)
self.assert_contains_line('StreamingBody', method_docs)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
class TestAutoscalingDocs(BaseDocsFunctionalTest):
def test_documents_encoding_of_user_data(self):
docs = self.get_parameter_documentation_from_service(
'autoscaling', 'create_launch_configuration', 'UserData')
self.assertIn('base64 encoded automatically', docs.decode('utf-8'))
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
from tests.functional.test_alias import ALIAS_CASES
class TestAliasesDocumented(BaseDocsFunctionalTest):
def test_all_aliases_are_documented_correctly(self):
for case in ALIAS_CASES:
content = self.get_docstring_for_method(
case['service'], case['operation']).decode('utf-8')
new_name = case['new_name']
original_name = case['original_name']
param_name_template = ':param %s:'
param_type_template = ':type %s:'
param_example_template = '%s='
# Make sure the new parameters are in the documentation
# but the old names are not.
self.assertIn(param_name_template % new_name, content)
self.assertIn(param_type_template % new_name, content)
self.assertIn(param_example_template % new_name, content)
self.assertNotIn(param_name_template % original_name, content)
self.assertNotIn(param_type_template % original_name, content)
self.assertNotIn(param_example_template % original_name, content)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
class TestEc2Docs(BaseDocsFunctionalTest):
def test_documents_encoding_of_user_data(self):
docs = self.get_parameter_documentation_from_service(
'ec2', 'run_instances', 'UserData')
self.assertIn('base64 encoded automatically', docs.decode('utf-8'))
def test_copy_snapshot_presigned_url_is_autopopulated(self):
self.assert_is_documented_as_autopopulated_param(
service_name='ec2',
method_name='copy_snapshot',
param_name='PresignedUrl')
def test_copy_snapshot_destination_region_is_autopopulated(self):
self.assert_is_documented_as_autopopulated_param(
service_name='ec2',
method_name='copy_snapshot',
param_name='DestinationRegion')
def test_idempotency_documented(self):
content = self.get_docstring_for_method('ec2', 'purchase_scheduled_instances')
        # Client token should have had the idempotency autopopulated doc
        # appended.
self.assert_contains_line('This field is autopopulated if not provided',
content)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTest
from botocore.docs.service import ServiceDocumenter
class TestS3Docs(BaseDocsFunctionalTest):
def test_auto_populates_sse_customer_key_md5(self):
self.assert_is_documented_as_autopopulated_param(
service_name='s3',
method_name='put_object',
param_name='SSECustomerKeyMD5')
def test_auto_populates_copy_source_sse_customer_key_md5(self):
self.assert_is_documented_as_autopopulated_param(
service_name='s3',
method_name='copy_object',
param_name='CopySourceSSECustomerKeyMD5')
def test_hides_content_md5_when_impossible_to_provide(self):
modified_methods = ['delete_objects', 'put_bucket_acl',
'put_bucket_cors', 'put_bucket_lifecycle',
'put_bucket_logging', 'put_bucket_policy',
'put_bucket_notification', 'put_bucket_tagging',
'put_bucket_replication', 'put_bucket_website',
'put_bucket_request_payment', 'put_object_acl',
'put_bucket_versioning']
service_contents = ServiceDocumenter(
's3', self._session).document_service()
for method_name in modified_methods:
method_contents = self.get_method_document_block(
method_name, service_contents)
self.assertNotIn('ContentMD5=\'string\'',
method_contents.decode('utf-8'))
def test_copy_source_documented_as_union_type(self):
content = self.get_docstring_for_method('s3', 'copy_object')
dict_form = (
"{'Bucket': 'string', 'Key': 'string', 'VersionId': 'string'}")
self.assert_contains_line(
"CopySource='string' or %s" % dict_form, content)
def test_copy_source_param_docs_also_modified(self):
content = self.get_docstring_for_method('s3', 'copy_object')
param_docs = self.get_parameter_document_block('CopySource', content)
# We don't want to overspecify the test, so I've picked
# an arbitrary line from the customized docs.
self.assert_contains_line(
"You can also provide this value as a dictionary", param_docs)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import BaseClientDriverTest
class TestDoesNotLeakMemory(BaseClientDriverTest):
# The user doesn't need to have credentials configured
# in order to run the functional tests for resource leaks.
# If we don't set this value and a user doesn't have creds
# configured, each create_client() call will have to go through
# the EC2 Instance Metadata provider's timeout, which can add
# a substantial amount of time to the total test run time.
INJECT_DUMMY_CREDS = True
# We're making up numbers here, but let's say arbitrarily
# that the memory can't increase by more than 10MB.
MAX_GROWTH_BYTES = 10 * 1024 * 1024
def test_create_single_client_memory_constant(self):
self.cmd('create_client', 's3')
self.cmd('free_clients')
self.record_memory()
for _ in range(100):
self.cmd('create_client', 's3')
self.cmd('free_clients')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
def test_create_memory_clients_in_loop(self):
        # We need to first create clients and free them before
        # recording our memory samples, for two reasons:
        # 1. Caching. Some of the botocore internals will cache data, so
        #    the first client created will consume more memory than subsequent
        #    clients. We're interested in growing memory, not total
        #    memory usage (for now), so we care about the memory in the
        #    steady state case.
        # 2. Python memory allocation. Due to how python allocates memory
        #    via its small object allocator, arenas aren't freed until the
        #    entire 256kb arena is no longer in use. If a single allocation
        #    in a single pool in a single arena is still in use, the arena
        #    is not freed. This case is easy to hit, and pretty much any
        #    fragmentation guarantees this case is hit. The best we can
        #    do is verify that memory that's released back to python's
        #    allocator (but not to the OS) is at least reused in subsequent
        #    requests to create botocore clients.
self.cmd('create_multiple_clients', '200', 's3')
self.cmd('free_clients')
self.record_memory()
# 500 clients in batches of 50.
for _ in range(10):
self.cmd('create_multiple_clients', '50', 's3')
self.cmd('free_clients')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
def test_create_single_waiter_memory_constant(self):
self.cmd('create_waiter', 's3', 'bucket_exists')
self.cmd('free_waiters')
self.record_memory()
for _ in range(100):
self.cmd('create_waiter', 's3', 'bucket_exists')
self.cmd('free_waiters')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
def test_create_memory_waiters_in_loop(self):
# See ``test_create_memory_clients_in_loop`` to understand why
# waiters are first initialized and then freed. Same reason applies.
self.cmd('create_multiple_waiters', '200', 's3', 'bucket_exists')
self.cmd('free_waiters')
self.record_memory()
# 500 waiters in batches of 50.
for _ in range(10):
self.cmd(
'create_multiple_waiters', '50', 's3', 'bucket_exists')
self.cmd('free_waiters')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
def test_create_single_paginator_memory_constant(self):
self.cmd('create_paginator', 's3', 'list_objects')
self.cmd('free_paginators')
self.record_memory()
for _ in range(100):
self.cmd('create_paginator', 's3', 'list_objects')
self.cmd('free_paginators')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
def test_create_memory_paginators_in_loop(self):
# See ``test_create_memory_clients_in_loop`` to understand why
# paginators are first initialized and then freed. Same reason applies.
self.cmd('create_multiple_paginators', '200', 's3', 'list_objects')
self.cmd('free_paginators')
self.record_memory()
        # 500 paginators in batches of 50.
for _ in range(10):
self.cmd(
'create_multiple_paginators', '50', 's3', 'list_objects')
self.cmd('free_paginators')
self.record_memory()
start, end = self.memory_samples
self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
|
# -*- coding: utf-8 -*-
#
# botocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 2 07:26:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from botocore.session import get_session
from botocore.docs import generate_docs
generate_docs(os.path.dirname(os.path.abspath(__file__)), get_session())
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'botocore'
copyright = u'2013, Mitch Garnaat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8.'
# The full version, including alpha/beta/rc tags.
release = '1.8.41'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_show_sourcelink = False
html_sidebars = {
'**': ['logo-text.html',
'globaltoc.html',
'localtoc.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'botocoredoc'
import guzzle_sphinx_theme
extensions.append("guzzle_sphinx_theme")
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# hack to add tracking
"google_analytics_account": os.getenv('TRACKING', False),
"base_url": "http://docs.aws.amazon.com/aws-sdk-php/guide/latest/"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'botocore.tex', u'botocore Documentation',
u'Mitch Garnaat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'botocore', u'botocore Documentation',
[u'Mitch Garnaat'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'botocore', u'botocore Documentation',
u'Mitch Garnaat', 'botocore', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import functools
import logging
from binascii import crc32
from botocore.vendored.requests import ConnectionError, Timeout
from botocore.vendored.requests.packages.urllib3.exceptions import ClosedPoolError
from botocore.exceptions import ChecksumError, EndpointConnectionError
logger = logging.getLogger(__name__)
# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests' generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
EXCEPTION_MAP = {
'GENERAL_CONNECTION_ERROR': [
ConnectionError, ClosedPoolError, Timeout,
EndpointConnectionError
],
}
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: %s" % base)
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
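# Worked examples of the formula above (values are illustrative):
#
#   delay_exponential(base=1, growth_factor=2, attempts=1)  # 1 * 2**0 == 1
#   delay_exponential(base=1, growth_factor=2, attempts=3)  # 1 * 2**2 == 4
#   delay_exponential(base=3, growth_factor=2, attempts=4)  # 3 * 2**3 == 24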
def create_exponential_delay_function(base, growth_factor):
"""Create an exponential delay function based on the attempts.
This is used so that you only have to pass it the attempts
parameter to calculate the delay.
"""
return functools.partial(
delay_exponential, base=base, growth_factor=growth_factor)
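# e.g. ``delay = create_exponential_delay_function(base=1, growth_factor=2)``
# yields ``delay(attempts=3) == 4``.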
def create_retry_handler(config, operation_name=None):
checker = create_checker_from_retry_config(
config, operation_name=operation_name)
action = create_retry_action_from_config(
config, operation_name=operation_name)
return RetryHandler(checker=checker, action=action)
def create_retry_action_from_config(config, operation_name=None):
# The spec has the possibility of supporting per policy
# actions, but right now, we assume this comes from the
# default section, which means that delay functions apply
# for every policy in the retry config (per service).
delay_config = config['__default__']['delay']
if delay_config['type'] == 'exponential':
return create_exponential_delay_function(
base=delay_config['base'],
growth_factor=delay_config['growth_factor'])
def create_checker_from_retry_config(config, operation_name=None):
checkers = []
max_attempts = None
retryable_exceptions = []
if '__default__' in config:
policies = config['__default__'].get('policies', [])
max_attempts = config['__default__']['max_attempts']
for key in policies:
current_config = policies[key]
checkers.append(_create_single_checker(current_config))
retry_exception = _extract_retryable_exception(current_config)
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if operation_name is not None and config.get(operation_name) is not None:
operation_policies = config[operation_name]['policies']
for key in operation_policies:
checkers.append(_create_single_checker(operation_policies[key]))
retry_exception = _extract_retryable_exception(
operation_policies[key])
if retry_exception is not None:
retryable_exceptions.extend(retry_exception)
if len(checkers) == 1:
# Don't need to use a MultiChecker
return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
else:
multi_checker = MultiChecker(checkers)
return MaxAttemptsDecorator(
multi_checker, max_attempts=max_attempts,
retryable_exceptions=tuple(retryable_exceptions))
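# For reference, the retry config consumed here has roughly this shape
# (abbreviated from botocore's bundled _retry.json data):
#
#   {'__default__': {
#       'max_attempts': 5,
#       'delay': {'type': 'exponential', 'base': 'rand',
#                 'growth_factor': 2},
#       'policies': {
#           'general_socket_errors': {'applies_when': {
#               'socket_errors': ['GENERAL_CONNECTION_ERROR']}},
#           'general_server_error': {'applies_when': {
#               'response': {'http_status_code': 500}}}}}}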
def _create_single_checker(config):
if 'response' in config['applies_when']:
return _create_single_response_checker(
config['applies_when']['response'])
elif 'socket_errors' in config['applies_when']:
return ExceptionRaiser()
def _create_single_response_checker(response):
if 'service_error_code' in response:
checker = ServiceErrorCodeChecker(
status_code=response['http_status_code'],
error_code=response['service_error_code'])
elif 'http_status_code' in response:
checker = HTTPStatusCodeChecker(
status_code=response['http_status_code'])
elif 'crc32body' in response:
checker = CRC32Checker(header=response['crc32body'])
else:
# TODO: send a signal.
raise ValueError("Unknown retry policy: %s" % config)
return checker
def _extract_retryable_exception(config):
applies_when = config['applies_when']
if 'crc32body' in applies_when.get('response', {}):
return [ChecksumError]
elif 'socket_errors' in applies_when:
exceptions = []
for name in applies_when['socket_errors']:
exceptions.extend(EXCEPTION_MAP[name])
return exceptions
class RetryHandler(object):
"""Retry handler.
    The retry handler takes two params, a ``checker`` object
    and an ``action`` object.
    The ``checker`` object must be a callable that, based on a response
    and an attempt number, determines whether or not sufficient criteria
    for a retry have been met. If this is the case, then the ``action``
    object (which is also a callable) determines what needs to happen in
    the event of a retry.
"""
def __init__(self, checker, action):
self._checker = checker
self._action = action
def __call__(self, attempts, response, caught_exception, **kwargs):
"""Handler for a retry.
Intended to be hooked up to an event handler (hence the **kwargs),
this will process retries appropriately.
"""
if self._checker(attempts, response, caught_exception):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
logger.debug("No retry needed.")
class BaseChecker(object):
"""Base class for retry checkers.
    Each class is responsible for checking a single criterion that determines
    whether or not a retry should happen.
"""
def __call__(self, attempt_number, response, caught_exception):
"""Determine if retry criteria matches.
Note that either ``response`` is not None and ``caught_exception`` is
None or ``response`` is None and ``caught_exception`` is not None.
:type attempt_number: int
:param attempt_number: The total number of times we've attempted
to send the request.
:param response: The HTTP response (if one was received).
:type caught_exception: Exception
        :param caught_exception: Any exception that was caught while trying to
            send the HTTP request.
        :return: True if the retry criteria matches (and therefore a retry
            should occur); False if the criteria does not match.
"""
# The default implementation allows subclasses to not have to check
# whether or not response is None or not.
if response is not None:
return self._check_response(attempt_number, response)
elif caught_exception is not None:
return self._check_caught_exception(
attempt_number, caught_exception)
else:
raise ValueError("Both response and caught_exception are None.")
def _check_response(self, attempt_number, response):
pass
def _check_caught_exception(self, attempt_number, caught_exception):
pass
class MaxAttemptsDecorator(BaseChecker):
"""Allow retries up to a maximum number of attempts.
This will pass through calls to the decorated retry checker, provided
that the number of attempts does not exceed max_attempts. It will
    also catch any retryable_exceptions passed in. Once max_attempts has
    been exceeded, False will be returned, or the retryable exceptions
    that were previously being caught will be raised.
"""
def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._checker = checker
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
def __call__(self, attempt_number, response, caught_exception):
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
if attempt_number >= self._max_attempts:
# explicitly set MaxAttemptsReached
if response is not None and 'ResponseMetadata' in response[1]:
response[1]['ResponseMetadata']['MaxAttemptsReached'] = True
logger.debug("Reached the maximum number of retry "
"attempts: %s", attempt_number)
return False
else:
return should_retry
else:
return False
def _should_retry(self, attempt_number, response, caught_exception):
if self._retryable_exceptions and \
attempt_number < self._max_attempts:
try:
return self._checker(attempt_number, response, caught_exception)
except self._retryable_exceptions as e:
logger.debug("retry needed, retryable exception caught: %s",
e, exc_info=True)
return True
else:
# If we've exceeded the max attempts we just let the exception
            # propagate if one has occurred.
return self._checker(attempt_number, response, caught_exception)
class HTTPStatusCodeChecker(BaseChecker):
def __init__(self, status_code):
self._status_code = status_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
logger.debug(
"retry needed: retryable HTTP status code received: %s",
self._status_code)
return True
else:
return False
class ServiceErrorCodeChecker(BaseChecker):
def __init__(self, status_code, error_code):
self._status_code = status_code
self._error_code = error_code
def _check_response(self, attempt_number, response):
if response[0].status_code == self._status_code:
actual_error_code = response[1].get('Error', {}).get('Code')
if actual_error_code == self._error_code:
logger.debug(
"retry needed: matching HTTP status and error code seen: "
"%s, %s", self._status_code, self._error_code)
return True
return False
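# Illustration (not part of the original module): driving the checker
# above with a hand-rolled response tuple. The namedtuple stands in for
# the http response object; only its ``status_code`` is consulted.
from collections import namedtuple

_FakeHTTPResponse = namedtuple('_FakeHTTPResponse', ['status_code'])
_throttle_checker = ServiceErrorCodeChecker(status_code=400,
                                            error_code='Throttling')
assert _throttle_checker(
    1, (_FakeHTTPResponse(400), {'Error': {'Code': 'Throttling'}}),
    None) is True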
class MultiChecker(BaseChecker):
def __init__(self, checkers):
self._checkers = checkers
def __call__(self, attempt_number, response, caught_exception):
for checker in self._checkers:
checker_response = checker(attempt_number, response,
caught_exception)
if checker_response:
return checker_response
return False
class CRC32Checker(BaseChecker):
def __init__(self, header):
# The header where the expected crc32 is located.
self._header_name = header
def _check_response(self, attempt_number, response):
http_response = response[0]
expected_crc = http_response.headers.get(self._header_name)
if expected_crc is None:
logger.debug("crc32 check skipped, the %s header is not "
"in the http response.", self._header_name)
else:
actual_crc32 = crc32(response[0].content) & 0xffffffff
if not actual_crc32 == int(expected_crc):
logger.debug(
"retry needed: crc32 check failed, expected != actual: "
"%s != %s", int(expected_crc), actual_crc32)
raise ChecksumError(checksum_type='crc32',
expected_checksum=int(expected_crc),
actual_checksum=actual_crc32)
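# Note (illustration, not part of the original module): the
# ``& 0xffffffff`` mask above matters because crc32 can return a signed
# integer on Python 2; masking normalizes it to the unsigned value that
# services report in the header. The standalone equivalent:
import zlib

_unsigned_crc = zlib.crc32(b'example payload') & 0xffffffff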
class ExceptionRaiser(BaseChecker):
"""Raise any caught exceptions.
This class will raise any non-None ``caught_exception``.
"""
def _check_caught_exception(self, attempt_number, caught_exception):
# This is implementation specific, but this class is meant to be
# used in coordination with the MaxAttemptsDecorator.
# The MaxAttemptsDecorator has a list of exceptions it should catch
# and retry, but something needs to come along and actually raise the
# caught_exception. That's what this class is being used for. If
# the MaxAttemptsDecorator is not interested in retrying the exception
# then this exception just propagates out past the retry code.
raise caught_exception
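# Illustration (not part of the original module): a sketch of how the
# checkers above typically compose. The real retry handler assembles a
# similar graph from the retry config JSON; the exact pieces and their
# ordering here are assumptions for demonstration only, and
# ConnectionError stands in for a retryable exception type.
_retry_policy = MaxAttemptsDecorator(
    checker=MultiChecker([
        HTTPStatusCodeChecker(status_code=500),
        ServiceErrorCodeChecker(status_code=400, error_code='Throttling'),
        ExceptionRaiser(),
    ]),
    max_attempts=5,
    retryable_exceptions=(ConnectionError,),
)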
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Protocol input serializes.
This module contains classes that implement input serialization
for the various AWS protocol types.
These classes essentially take user input and a model object that
represents what the expected input should look like, and return
a dictionary that contains the various parts of a request. A few
high level design decisions:
* Each protocol type maps to a separate class, all inherit from
``Serializer``.
* ``serialize_to_request`` (the main entry point) returns a
dictionary that represents a request. This will have keys like
``url_path``, ``query_string``, etc. This is done so that it's
a) easy to test and b) not tied to a particular HTTP library.
See the ``serialize_to_request`` docstring for more details.
Unicode
-------
The input to the serializers should be text (str/unicode), not bytes,
with the exception of blob types. Those are assumed to be binary,
and if a str/unicode type is passed in, it will be encoded as utf-8.
"""
import re
import base64
from xml.etree import ElementTree
import calendar
from botocore.compat import six
from botocore.compat import json, formatdate
from botocore.utils import parse_to_aware_datetime
from botocore.utils import percent_encode
from botocore.utils import is_json_value_header
from botocore import validate
# From the spec, the default timestamp format if not specified is iso8601.
DEFAULT_TIMESTAMP_FORMAT = 'iso8601'
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
# Same as ISO8601, but with microsecond precision.
ISO8601_MICRO = '%Y-%m-%dT%H:%M:%S.%fZ'
def create_serializer(protocol_name, include_validation=True):
# TODO: Unknown protocols.
serializer = SERIALIZERS[protocol_name]()
if include_validation:
validator = validate.ParamValidator()
serializer = validate.ParamValidationDecorator(validator, serializer)
return serializer
class Serializer(object):
DEFAULT_METHOD = 'POST'
# Clients can change this to a different MutableMapping
# (e.g. OrderedDict) if they want. This is used in the
# compliance test to match the hash ordering used in the
# tests.
MAP_TYPE = dict
DEFAULT_ENCODING = 'utf-8'
def serialize_to_request(self, parameters, operation_model):
"""Serialize parameters into an HTTP request.
This method takes user provided parameters and a shape
model and serializes the parameters to an HTTP request.
More specifically, this method returns information about the
parts of the HTTP request; it does not enforce a particular
interface or standard for an HTTP request. It instead returns
a dictionary of:
* 'url_path'
* 'query_string'
* 'headers'
* 'body'
* 'method'
It is then up to consumers to decide how to map this to a Request
object of their HTTP library of choice. Below is an example
return value::
{'body': {'Action': 'OperationName',
'Bar': 'val2',
'Foo': 'val1',
'Version': '2014-01-01'},
'headers': {},
'method': 'POST',
'query_string': '',
'url_path': '/'}
:param parameters: The dictionary input parameters for the
operation (i.e. the user input).
:param operation_model: The OperationModel object that describes
the operation.
"""
raise NotImplementedError("serialize_to_request")
def _create_default_request(self):
# Creates a boilerplate default request dict that subclasses
# can use as a starting point.
serialized = {
'url_path': '/',
'query_string': '',
'method': self.DEFAULT_METHOD,
'headers': {},
# An empty body is represented as an empty byte string.
'body': b''
}
return serialized
# Some extra utility methods subclasses can use.
def _timestamp_iso8601(self, value):
if value.microsecond > 0:
timestamp_format = ISO8601_MICRO
else:
timestamp_format = ISO8601
return value.strftime(timestamp_format)
def _timestamp_unixtimestamp(self, value):
return int(calendar.timegm(value.timetuple()))
def _timestamp_rfc822(self, value):
return formatdate(value, usegmt=True)
def _convert_timestamp_to_str(self, value):
datetime_obj = parse_to_aware_datetime(value)
converter = getattr(
self, '_timestamp_%s' % self.TIMESTAMP_FORMAT.lower())
final_value = converter(datetime_obj)
return final_value
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
return shape.serialization.get('name', default_name)
def _get_base64(self, value):
# Returns the base64-encoded version of value, handling
# both strings and bytes. The returned value is a string
# via the default encoding.
if isinstance(value, six.text_type):
value = value.encode(self.DEFAULT_ENCODING)
return base64.b64encode(value).strip().decode(
self.DEFAULT_ENCODING)
class QuerySerializer(Serializer):
TIMESTAMP_FORMAT = 'iso8601'
def serialize_to_request(self, parameters, operation_model):
shape = operation_model.input_shape
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
}
# The query serializer only deals with body params so
# that's what we hand off to the _serialize_* methods.
body_params = self.MAP_TYPE()
body_params['Action'] = operation_model.name
body_params['Version'] = operation_model.metadata['apiVersion']
if shape is not None:
self._serialize(body_params, parameters, shape)
serialized['body'] = body_params
return serialized
def _serialize(self, serialized, value, shape, prefix=''):
# serialized: The dict that is incrementally added to with the
# final serialized parameters.
# value: The current user input value.
# shape: The shape object that describes the structure of the
# input.
# prefix: The incrementally built up prefix for the serialized
# key (e.g. Foo.bar.member.1).
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, prefix=prefix)
def _serialize_type_structure(self, serialized, value, shape, prefix=''):
members = shape.members
for key, value in value.items():
member_shape = members[key]
member_prefix = self._get_serialized_name(member_shape, key)
if prefix:
member_prefix = '%s.%s' % (prefix, member_prefix)
self._serialize(serialized, value, member_shape, member_prefix)
def _serialize_type_list(self, serialized, value, shape, prefix=''):
if not value:
# The query protocol serializes empty lists.
serialized[prefix] = ''
return
if self._is_shape_flattened(shape):
list_prefix = prefix
if shape.member.serialization.get('name'):
name = self._get_serialized_name(shape.member, default_name='')
# Replace '.Original' with '.{name}'.
list_prefix = '.'.join(prefix.split('.')[:-1] + [name])
else:
list_name = shape.member.serialization.get('name', 'member')
list_prefix = '%s.%s' % (prefix, list_name)
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (list_prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
def _serialize_type_map(self, serialized, value, shape, prefix=''):
if self._is_shape_flattened(shape):
full_prefix = prefix
else:
full_prefix = '%s.entry' % prefix
template = full_prefix + '.{i}.{suffix}'
key_shape = shape.key
value_shape = shape.value
key_suffix = self._get_serialized_name(key_shape, default_name='key')
value_suffix = self._get_serialized_name(value_shape, 'value')
for i, key in enumerate(value, 1):
key_prefix = template.format(i=i, suffix=key_suffix)
value_prefix = template.format(i=i, suffix=value_suffix)
self._serialize(serialized, key, key_shape, key_prefix)
self._serialize(serialized, value[key], value_shape, value_prefix)
def _serialize_type_blob(self, serialized, value, shape, prefix=''):
# Blob args must be base64 encoded.
serialized[prefix] = self._get_base64(value)
def _serialize_type_timestamp(self, serialized, value, shape, prefix=''):
serialized[prefix] = self._convert_timestamp_to_str(value)
def _serialize_type_boolean(self, serialized, value, shape, prefix=''):
if value:
serialized[prefix] = 'true'
else:
serialized[prefix] = 'false'
def _default_serialize(self, serialized, value, shape, prefix=''):
serialized[prefix] = value
def _is_shape_flattened(self, shape):
return shape.serialization.get('flattened')
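# Illustration (not part of the original module): hand-rolled stand-in
# shapes showing the dotted key names the query serializer produces.
# Real shapes come from botocore's model classes; _FakeShape only mimics
# the attributes consulted above (type_name, serialization, members,
# member).
class _FakeShape(object):
    def __init__(self, type_name, serialization=None, members=None,
                 member=None):
        self.type_name = type_name
        self.serialization = serialization or {}
        self.members = members or {}
        self.member = member

_string_shape = _FakeShape('string')
_list_shape = _FakeShape('list', member=_string_shape)
_struct_shape = _FakeShape('structure', members={'Names': _list_shape})
_query_params = {}
QuerySerializer()._serialize(_query_params, {'Names': ['a', 'b']},
                             _struct_shape)
assert _query_params == {'Names.member.1': 'a', 'Names.member.2': 'b'}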
class EC2Serializer(QuerySerializer):
"""EC2 specific customizations to the query protocol serializers.
The EC2 model is almost, but not exactly, the same as the query
protocol serializer. This class encapsulates those differences. The
model will be marked with a ``protocol`` of ``ec2``, so you don't need
to worry about wiring this class up correctly.
"""
def _get_serialized_name(self, shape, default_name):
# Returns the serialized name for the shape if it exists.
# Otherwise it will return the passed in default_name.
if 'queryName' in shape.serialization:
return shape.serialization['queryName']
elif 'name' in shape.serialization:
# A locationName is always capitalized
# on input for the ec2 protocol.
name = shape.serialization['name']
return name[0].upper() + name[1:]
else:
return default_name
def _serialize_type_list(self, serialized, value, shape, prefix=''):
for i, element in enumerate(value, 1):
element_prefix = '%s.%s' % (prefix, i)
element_shape = shape.member
self._serialize(serialized, element, element_shape, element_prefix)
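# Illustration (not part of the original module): the EC2 capitalization
# rule from _get_serialized_name, shown with the _FakeShape helper from
# the QuerySerializer sketch above.
_ec2_shape = _FakeShape('string', serialization={'name': 'instanceId'})
assert EC2Serializer()._get_serialized_name(_ec2_shape, 'x') == 'InstanceId'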
class JSONSerializer(Serializer):
TIMESTAMP_FORMAT = 'unixtimestamp'
def serialize_to_request(self, parameters, operation_model):
target = '%s.%s' % (operation_model.metadata['targetPrefix'],
operation_model.name)
json_version = operation_model.metadata['jsonVersion']
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
serialized['headers'] = {
'X-Amz-Target': target,
'Content-Type': 'application/x-amz-json-%s' % json_version,
}
body = {}
input_shape = operation_model.input_shape
if input_shape is not None:
self._serialize(body, parameters, input_shape)
serialized['body'] = json.dumps(body).encode(self.DEFAULT_ENCODING)
return serialized
def _serialize(self, serialized, value, shape, key=None):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(serialized, value, shape, key)
def _serialize_type_structure(self, serialized, value, shape, key):
if key is not None:
# If a key is provided, this is a result of a recursive
# call so we need to add a new child dict as the value
# of the passed in serialized dict. We'll then add
# all the structure members as key/vals in the new serialized
# dictionary we just created.
new_serialized = self.MAP_TYPE()
serialized[key] = new_serialized
serialized = new_serialized
members = shape.members
for member_key, member_value in value.items():
member_shape = members[member_key]
if 'name' in member_shape.serialization:
member_key = member_shape.serialization['name']
self._serialize(serialized, member_value, member_shape, member_key)
def _serialize_type_map(self, serialized, value, shape, key):
map_obj = self.MAP_TYPE()
serialized[key] = map_obj
for sub_key, sub_value in value.items():
self._serialize(map_obj, sub_value, shape.value, sub_key)
def _serialize_type_list(self, serialized, value, shape, key):
list_obj = []
serialized[key] = list_obj
for list_item in value:
wrapper = {}
# The JSON list serialization is the only case where we aren't
# setting a key on a dict. We handle this by using
# a __current__ key on a wrapper dict to serialize each
# list item before appending it to the serialized list.
self._serialize(wrapper, list_item, shape.member, "__current__")
list_obj.append(wrapper["__current__"])
def _default_serialize(self, serialized, value, shape, key):
serialized[key] = value
def _serialize_type_timestamp(self, serialized, value, shape, key):
serialized[key] = self._convert_timestamp_to_str(value)
def _serialize_type_blob(self, serialized, value, shape, key):
serialized[key] = self._get_base64(value)
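# Illustration (not part of the original module): the same stand-in
# shapes from the QuerySerializer sketch above, run through the JSON
# protocol, produce a native JSON structure rather than dotted keys.
_json_body = {}
JSONSerializer()._serialize(_json_body, {'Names': ['a', 'b']},
                            _struct_shape)
assert _json_body == {'Names': ['a', 'b']}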
class BaseRestSerializer(Serializer):
"""Base class for rest protocols.
The only difference between the various rest protocols is the
way that the body is serialized. All other aspects (headers, uri, etc.)
are the same, and the logic for serializing those aspects lives here.
Subclasses must implement the ``_serialize_body_params`` method.
"""
# This is a list of known values for the "location" key in the
# serialization dict. The location key tells us where on the request
# to put the serialized value.
KNOWN_LOCATIONS = ['uri', 'querystring', 'header', 'headers']
def serialize_to_request(self, parameters, operation_model):
serialized = self._create_default_request()
serialized['method'] = operation_model.http.get('method',
self.DEFAULT_METHOD)
shape = operation_model.input_shape
if shape is None:
serialized['url_path'] = operation_model.http['requestUri']
return serialized
shape_members = shape.members
# While the ``serialized`` dict holds the final serialized request
# data, we need interim dicts for the various locations of the
# request. We need this for the uri_path_kwargs and the
# query_string_kwargs because they are templated, so we need
# to gather all the needed data for the string template,
# then we render the template. The body_kwargs is needed
# because once we've collected them all, we run them through
# _serialize_body_params, which for rest-json, creates JSON,
# and for rest-xml, will create XML. This is what the
# ``partitioned`` dict below is for.
partitioned = {
'uri_path_kwargs': self.MAP_TYPE(),
'query_string_kwargs': self.MAP_TYPE(),
'body_kwargs': self.MAP_TYPE(),
'headers': self.MAP_TYPE(),
}
for param_name, param_value in parameters.items():
if param_value is None:
# Don't serialize any parameter with a None value.
continue
self._partition_parameters(partitioned, param_name, param_value,
shape_members)
serialized['url_path'] = self._render_uri_template(
operation_model.http['requestUri'],
partitioned['uri_path_kwargs'])
# Note that we lean on the http implementation to handle the case
# where the requestUri path already has query parameters.
# The bundled http client, requests, already supports this.
serialized['query_string'] = partitioned['query_string_kwargs']
if partitioned['headers']:
serialized['headers'] = partitioned['headers']
self._serialize_payload(partitioned, parameters,
serialized, shape, shape_members)
return serialized
def _render_uri_template(self, uri_template, params):
# We need to handle two cases:
#
# /{Bucket}/foo
# /{Key+}/bar
# A label ending with '+' is greedy. There can only
# be one greedy key.
encoded_params = {}
for template_param in re.findall(r'{(.*?)}', uri_template):
if template_param.endswith('+'):
encoded_params[template_param] = percent_encode(
params[template_param[:-1]], safe='/~')
else:
encoded_params[template_param] = percent_encode(
params[template_param])
return uri_template.format(**encoded_params)
def _serialize_payload(self, partitioned, parameters,
serialized, shape, shape_members):
# partitioned - The user input params partitioned by location.
# parameters - The user input params.
# serialized - The final serialized request dict.
# shape - Describes the expected input shape
# shape_members - The members of the input struct shape
payload_member = shape.serialization.get('payload')
if payload_member is not None and \
shape_members[payload_member].type_name in ['blob', 'string']:
# If it's streaming, then the body is just the
# value of the payload.
body_payload = parameters.get(payload_member, b'')
body_payload = self._encode_payload(body_payload)
serialized['body'] = body_payload
elif payload_member is not None:
# If there's a payload member, we serialize that
# member to the body.
body_params = parameters.get(payload_member)
if body_params is not None:
serialized['body'] = self._serialize_body_params(
body_params,
shape_members[payload_member])
elif partitioned['body_kwargs']:
serialized['body'] = self._serialize_body_params(
partitioned['body_kwargs'], shape)
def _encode_payload(self, body):
if isinstance(body, six.text_type):
return body.encode(self.DEFAULT_ENCODING)
return body
def _partition_parameters(self, partitioned, param_name,
param_value, shape_members):
# This takes a single user-provided input parameter (its name and
# value) and figures out where it goes in the request dict.
# Some params are HTTP headers, some are used in the URI, some
# are in the request body. This method deals with this.
member = shape_members[param_name]
location = member.serialization.get('location')
key_name = member.serialization.get('name', param_name)
if location == 'uri':
partitioned['uri_path_kwargs'][key_name] = param_value
elif location == 'querystring':
if isinstance(param_value, dict):
partitioned['query_string_kwargs'].update(param_value)
elif isinstance(param_value, bool):
partitioned['query_string_kwargs'][
key_name] = str(param_value).lower()
else:
partitioned['query_string_kwargs'][key_name] = param_value
elif location == 'header':
shape = shape_members[param_name]
value = self._convert_header_value(shape, param_value)
partitioned['headers'][key_name] = str(value)
elif location == 'headers':
# 'headers' is a bit of an oddball. The ``key_name``
# is actually a prefix for the header names:
header_prefix = key_name
# The value provided by the user is a dict so we'll be
# creating multiple header key/val pairs. The key
# name to use for each header is the header_prefix (``key_name``)
# plus the key provided by the user.
self._do_serialize_header_map(header_prefix,
partitioned['headers'],
param_value)
else:
partitioned['body_kwargs'][param_name] = param_value
def _do_serialize_header_map(self, header_prefix, headers, user_input):
for key, val in user_input.items():
full_key = header_prefix + key
headers[full_key] = val
def _serialize_body_params(self, params, shape):
raise NotImplementedError('_serialize_body_params')
def _convert_header_value(self, shape, value):
if shape.type_name == 'timestamp':
datetime_obj = parse_to_aware_datetime(value)
timestamp = calendar.timegm(datetime_obj.utctimetuple())
return self._timestamp_rfc822(timestamp)
elif is_json_value_header(shape):
# Serialize with no spaces after separators to save space in
# the header.
return self._get_base64(json.dumps(value, separators=(',', ':')))
else:
return value
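# Illustration (not part of the original module): URI template rendering
# via _render_uri_template above. This assumes percent_encode escapes
# like urllib's quote, so the greedy '{Key+}' label keeps '/' unescaped
# while encoding the space.
_rendered = BaseRestSerializer()._render_uri_template(
    '/{Bucket}/{Key+}', {'Bucket': 'my-bucket', 'Key': 'a/b c'})
# _rendered == '/my-bucket/a/b%20c'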
class RestJSONSerializer(BaseRestSerializer, JSONSerializer):
def _serialize_body_params(self, params, shape):
serialized_body = self.MAP_TYPE()
self._serialize(serialized_body, params, shape)
return json.dumps(serialized_body).encode(self.DEFAULT_ENCODING)
class RestXMLSerializer(BaseRestSerializer):
TIMESTAMP_FORMAT = 'iso8601'
def _serialize_body_params(self, params, shape):
root_name = shape.serialization['name']
pseudo_root = ElementTree.Element('')
self._serialize(shape, params, pseudo_root, root_name)
real_root = list(pseudo_root)[0]
return ElementTree.tostring(real_root, encoding=self.DEFAULT_ENCODING)
def _serialize(self, shape, params, xmlnode, name):
method = getattr(self, '_serialize_type_%s' % shape.type_name,
self._default_serialize)
method(xmlnode, params, shape, name)
def _serialize_type_structure(self, xmlnode, params, shape, name):
structure_node = ElementTree.SubElement(xmlnode, name)
if 'xmlNamespace' in shape.serialization:
namespace_metadata = shape.serialization['xmlNamespace']
attribute_name = 'xmlns'
if namespace_metadata.get('prefix'):
attribute_name += ':%s' % namespace_metadata['prefix']
structure_node.attrib[attribute_name] = namespace_metadata['uri']
for key, value in params.items():
member_shape = shape.members[key]
member_name = member_shape.serialization.get('name', key)
# We need to special case member shapes that are marked as an
# xmlAttribute. Rather than serializing into an XML child node,
# we instead serialize the shape to an XML attribute of the
# *current* node.
if value is None:
# Don't serialize any param whose value is None.
return
if member_shape.serialization.get('xmlAttribute'):
# xmlAttributes must have a serialization name.
xml_attribute_name = member_shape.serialization['name']
structure_node.attrib[xml_attribute_name] = value
continue
self._serialize(member_shape, value, structure_node, member_name)
def _serialize_type_list(self, xmlnode, params, shape, name):
member_shape = shape.member
if shape.serialization.get('flattened'):
element_name = name
list_node = xmlnode
else:
element_name = member_shape.serialization.get('name', 'member')
list_node = ElementTree.SubElement(xmlnode, name)
for item in params:
self._serialize(member_shape, item, list_node, element_name)
def _serialize_type_map(self, xmlnode, params, shape, name):
# Given the ``name`` of MyMap, and input of {"key1": "val1"}
# we serialize this as:
# <MyMap>
# <entry>
# <key>key1</key>
# <value>val1</value>
# </entry>
# </MyMap>
node = ElementTree.SubElement(xmlnode, name)
# TODO: handle flattened maps.
for key, value in params.items():
entry_node = ElementTree.SubElement(node, 'entry')
key_name = self._get_serialized_name(shape.key, default_name='key')
val_name = self._get_serialized_name(shape.value,
default_name='value')
self._serialize(shape.key, key, entry_node, key_name)
self._serialize(shape.value, value, entry_node, val_name)
def _serialize_type_boolean(self, xmlnode, params, shape, name):
# For scalar types, the 'params' attr is actually just a scalar
# value representing the data we need to serialize as a boolean.
# It will either be 'true' or 'false'
node = ElementTree.SubElement(xmlnode, name)
if params:
str_value = 'true'
else:
str_value = 'false'
node.text = str_value
def _serialize_type_blob(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._get_base64(params)
def _serialize_type_timestamp(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = self._convert_timestamp_to_str(params)
def _default_serialize(self, xmlnode, params, shape, name):
node = ElementTree.SubElement(xmlnode, name)
node.text = six.text_type(params)
SERIALIZERS = {
'ec2': EC2Serializer,
'query': QuerySerializer,
'json': JSONSerializer,
'rest-json': RestJSONSerializer,
'rest-xml': RestXMLSerializer,
}
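# Illustration (not part of the original module): protocol lookup via
# create_serializer. With include_validation=True (the default), the
# serializer comes back wrapped in a ParamValidationDecorator, as
# defined above.
_xml_serializer = create_serializer('rest-xml', include_validation=False)
assert isinstance(_xml_serializer, RestXMLSerializer)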
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import datetime
from hashlib import sha256
from hashlib import sha1
import hmac
import logging
from email.utils import formatdate
from operator import itemgetter
import functools
import time
import calendar
import json
from botocore.exceptions import NoCredentialsError
from botocore.utils import normalize_url_path, percent_encode_sequence
from botocore.compat import HTTPHeaders
from botocore.compat import quote, unquote, urlsplit, parse_qs
from botocore.compat import urlunsplit
from botocore.compat import encodebytes
from botocore.compat import six
from botocore.compat import json
from botocore.compat import MD5_AVAILABLE
from botocore.compat import ensure_unicode
logger = logging.getLogger(__name__)
EMPTY_SHA256_HASH = (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# This is the buffer size used when calculating sha256 checksums.
# Experimenting with various buffer sizes showed that this value generally
# gave the best result (in terms of performance).
PAYLOAD_BUFFER = 1024 * 1024
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
SIGV4_TIMESTAMP = '%Y%m%dT%H%M%SZ'
SIGNED_HEADERS_BLACKLIST = [
'expect',
'user-agent',
'x-amzn-trace-id',
]
UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
class BaseSigner(object):
REQUIRES_REGION = False
def add_auth(self, request):
raise NotImplementedError("add_auth")
class SigV2Auth(BaseSigner):
"""
Sign a request with Signature V2.
"""
def __init__(self, credentials):
self.credentials = credentials
def calc_signature(self, request, params):
logger.debug("Calculating signature using v2 auth.")
split = urlsplit(request.url)
path = split.path
if len(path) == 0:
path = '/'
string_to_sign = '%s\n%s\n%s\n' % (request.method,
split.netloc,
path)
lhmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
pairs = []
for key in sorted(params):
# Any previous signature should not be a part of this
# one, so we skip that particular key. This prevents
# issues during retries.
if key == 'Signature':
continue
value = six.text_type(params[key])
pairs.append(quote(key.encode('utf-8'), safe='') + '=' +
quote(value.encode('utf-8'), safe='-_~'))
qs = '&'.join(pairs)
string_to_sign += qs
logger.debug('String to sign: %s', string_to_sign)
lhmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(lhmac.digest()).strip().decode('utf-8')
return (qs, b64)
def add_auth(self, request):
# The auth handler is the last thing called in the
# preparation phase of a prepared request.
# Because of this we have to parse the query params
# from the request body so we can update them with
# the sigv2 auth params.
if self.credentials is None:
raise NoCredentialsError
if request.data:
# POST
params = request.data
else:
# GET
params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Timestamp'] = time.strftime(ISO8601, time.gmtime())
if self.credentials.token:
params['SecurityToken'] = self.credentials.token
qs, signature = self.calc_signature(request, params)
params['Signature'] = signature
return request
class SigV3Auth(BaseSigner):
def __init__(self, credentials):
self.credentials = credentials
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
if 'Date' in request.headers:
del request.headers['Date']
request.headers['Date'] = formatdate(usegmt=True)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha256)
new_hmac.update(request.headers['Date'].encode('utf-8'))
encoded_signature = encodebytes(new_hmac.digest()).strip()
signature = ('AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s' %
(self.credentials.access_key, 'HmacSHA256',
encoded_signature.decode('utf-8')))
if 'X-Amzn-Authorization' in request.headers:
del request.headers['X-Amzn-Authorization']
request.headers['X-Amzn-Authorization'] = signature
class SigV4Auth(BaseSigner):
"""
Sign a request with Signature V4.
"""
REQUIRES_REGION = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
# We initialize these values here so the unit tests can have
# valid values. But these will get overridden in ``add_auth``
# later for real requests.
self._region_name = region_name
self._service_name = service_name
def _sign(self, key, msg, hex=False):
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
header_map = HTTPHeaders()
for name, value in request.headers.items():
lname = name.lower()
if lname not in SIGNED_HEADERS_BLACKLIST:
header_map[lname] = value
if 'host' not in header_map:
header_map['host'] = self._canonical_host(request.url)
return header_map
def _canonical_host(self, url):
url_parts = urlsplit(url)
default_ports = {
'http': 80,
'https': 443
}
if any(url_parts.scheme == scheme and url_parts.port == port
for scheme, port in default_ports.items()):
# No need to include the port if it's the default port.
return url_parts.hostname
# Strip out auth if it's present in the netloc.
return url_parts.netloc.rsplit('@', 1)[-1]
def canonical_query_string(self, request):
# The query string can come from two parts. One is the
# params attribute of the request. The other is from the request
# url (in which case we have to re-split the url into its components
# and parse out the query string component).
if request.params:
return self._canonical_query_string_params(request.params)
else:
return self._canonical_query_string_url(urlsplit(request.url))
def _canonical_query_string_params(self, params):
l = []
for param in sorted(params):
value = str(params[param])
l.append('%s=%s' % (quote(param, safe='-_.~'),
quote(value, safe='-_.~')))
cqs = '&'.join(l)
return cqs
def _canonical_query_string_url(self, parts):
canonical_query_string = ''
if parts.query:
# [(key, value), (key2, value2)]
key_val_pairs = []
for pair in parts.query.split('&'):
key, _, value = pair.partition('=')
key_val_pairs.append((key, value))
sorted_key_vals = []
# Sort by the key names, and in the case of
# repeated keys, sort by the value.
for key, value in sorted(key_val_pairs):
sorted_key_vals.append('%s=%s' % (key, value))
canonical_query_string = '&'.join(sorted_key_vals)
return canonical_query_string
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
headers = []
sorted_header_names = sorted(set(headers_to_sign))
for key in sorted_header_names:
value = ','.join(self._header_value(v) for v in
sorted(headers_to_sign.get_all(key)))
headers.append('%s:%s' % (key, ensure_unicode(value)))
return '\n'.join(headers)
def _header_value(self, value):
# From the sigv4 docs:
# Lowercase(HeaderName) + ':' + Trimall(HeaderValue)
#
# The Trimall function removes excess white space before and after
# values, and converts sequential spaces to a single space.
return ' '.join(value.split())
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in set(headers_to_sign)]
l = sorted(l)
return ';'.join(l)
def payload(self, request):
if not self._should_sha256_sign_payload(request):
# When payload signing is disabled, we use this static string in
# place of the payload checksum.
return UNSIGNED_PAYLOAD
if request.body and hasattr(request.body, 'seek'):
position = request.body.tell()
read_chunksize = functools.partial(request.body.read,
PAYLOAD_BUFFER)
checksum = sha256()
for chunk in iter(read_chunksize, b''):
checksum.update(chunk)
hex_checksum = checksum.hexdigest()
request.body.seek(position)
return hex_checksum
elif request.body:
# The request serialization has ensured that
# request.body is a bytes() type.
return sha256(request.body).hexdigest()
else:
return EMPTY_SHA256_HASH
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def canonical_request(self, request):
cr = [request.method.upper()]
path = self._normalize_url_path(urlsplit(request.url).path)
cr.append(path)
cr.append(self.canonical_query_string(request))
headers_to_sign = self.headers_to_sign(request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
if 'X-Amz-Content-SHA256' in request.headers:
body_checksum = request.headers['X-Amz-Content-SHA256']
else:
body_checksum = self.payload(request)
cr.append(body_checksum)
return '\n'.join(cr)
def _normalize_url_path(self, path):
normalized_path = quote(normalize_url_path(path), safe='/~')
return normalized_path
def scope(self, request):
scope = [self.credentials.access_key]
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, request):
scope = []
scope.append(request.context['timestamp'][0:8])
scope.append(self._region_name)
scope.append(self._service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, request, canonical_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(request.context['timestamp'])
sts.append(self.credential_scope(request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, string_to_sign, request):
key = self.credentials.secret_key
k_date = self._sign(('AWS4' + key).encode('utf-8'),
request.context['timestamp'][0:8])
k_region = self._sign(k_date, self._region_name)
k_service = self._sign(k_region, self._service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
# This could be a retry. Make sure the previous
# authorization header is removed first.
self._modify_request_before_signing(request)
canonical_request = self.canonical_request(request)
logger.debug("Calculating signature using v4 auth.")
logger.debug('CanonicalRequest:\n%s', canonical_request)
string_to_sign = self.string_to_sign(request, canonical_request)
logger.debug('StringToSign:\n%s', string_to_sign)
signature = self.signature(string_to_sign, request)
logger.debug('Signature:\n%s', signature)
self._inject_signature_to_request(request, signature)
def _inject_signature_to_request(self, request, signature):
l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(request)]
headers_to_sign = self.headers_to_sign(request)
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
request.headers['Authorization'] = ', '.join(l)
return request
def _modify_request_before_signing(self, request):
if 'Authorization' in request.headers:
del request.headers['Authorization']
self._set_necessary_date_headers(request)
if self.credentials.token:
if 'X-Amz-Security-Token' in request.headers:
del request.headers['X-Amz-Security-Token']
request.headers['X-Amz-Security-Token'] = self.credentials.token
if not request.context.get('payload_signing_enabled', True):
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = UNSIGNED_PAYLOAD
def _set_necessary_date_headers(self, request):
# The spec allows for either the Date _or_ the X-Amz-Date value to be
# used so we check both. If there's a Date header, we use the date
# header. Otherwise we use the X-Amz-Date header.
if 'Date' in request.headers:
del request.headers['Date']
datetime_timestamp = datetime.datetime.strptime(
request.context['timestamp'], SIGV4_TIMESTAMP)
request.headers['Date'] = formatdate(
int(calendar.timegm(datetime_timestamp.timetuple())))
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
else:
if 'X-Amz-Date' in request.headers:
del request.headers['X-Amz-Date']
request.headers['X-Amz-Date'] = request.context['timestamp']
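# Illustration (not part of the original module): the SigV4 signing-key
# derivation chain performed by SigV4Auth.signature above, written
# standalone. All input values here are hypothetical.
def _derive_sigv4_key(secret_key, date_stamp, region, service):
    def _h(key, msg):
        return hmac.new(key, msg.encode('utf-8'), sha256).digest()
    k_date = _h(('AWS4' + secret_key).encode('utf-8'), date_stamp)
    k_region = _h(k_date, region)
    k_service = _h(k_region, service)
    return _h(k_service, 'aws4_request')

_signing_key = _derive_sigv4_key('EXAMPLE-SECRET-KEY', '20140909',
                                 'us-west-2', 's3')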
class S3SigV4Auth(SigV4Auth):
def __init__(self, credentials, service_name, region_name):
super(S3SigV4Auth, self).__init__(
credentials, service_name, region_name)
self._default_region_name = region_name
def add_auth(self, request):
# If we ever decide to share auth sessions, this could potentially be
# a source of concurrency bugs.
signing_context = request.context.get('signing', {})
self._region_name = signing_context.get(
'region', self._default_region_name)
super(S3SigV4Auth, self).add_auth(request)
def _modify_request_before_signing(self, request):
super(S3SigV4Auth, self)._modify_request_before_signing(request)
if 'X-Amz-Content-SHA256' in request.headers:
del request.headers['X-Amz-Content-SHA256']
request.headers['X-Amz-Content-SHA256'] = self.payload(request)
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both content-md5 be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# content-md5 is sufficiently secure and durable for us to be
# confident in the request without body signing.
if not request.url.startswith('https') or \
'Content-MD5' not in request.headers:
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super(S3SigV4Auth, self)._should_sha256_sign_payload(request)
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
class SigV4QueryAuth(SigV4Auth):
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, service_name, region_name,
expires=DEFAULT_EXPIRES):
super(SigV4QueryAuth, self).__init__(credentials, service_name,
region_name)
self._expires = expires
def _modify_request_before_signing(self, request):
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
blacklisted_content_type = (
'application/x-www-form-urlencoded; charset=utf-8'
)
if content_type == blacklisted_content_type:
del request.headers['content-type']
# Note that we're not including X-Amz-Signature.
# From the docs: "The Canonical Query String must include all the query
# parameters from the preceding table except for X-Amz-Signature.
signed_headers = self.signed_headers(self.headers_to_sign(request))
auth_params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': self.scope(request),
'X-Amz-Date': request.context['timestamp'],
'X-Amz-Expires': self._expires,
'X-Amz-SignedHeaders': signed_headers,
}
if self.credentials.token is not None:
auth_params['X-Amz-Security-Token'] = self.credentials.token
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_dict = dict(
[(k, v[0]) for k, v in
parse_qs(url_parts.query, keep_blank_values=True).items()])
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e. you can't
# just do:
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
operation_params = ''
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(self._get_body_as_dict(request))
request.data = ''
if query_dict:
operation_params = percent_encode_sequence(query_dict) + '&'
new_query_string = (operation_params +
percent_encode_sequence(auth_params))
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _get_body_as_dict(self, request):
# For query services, request.data is form-encoded and is already a
# dict, but for other services such as rest-json it could be a json
# string or bytes. In those cases we attempt to load the data as a
# dict.
data = request.data
if isinstance(data, six.binary_type):
data = json.loads(data.decode('utf-8'))
elif isinstance(data, six.string_types):
data = json.loads(data)
return data
def _inject_signature_to_request(self, request, signature):
# Rather than calculating an "Authorization" header, for query
# param auth we just append an 'X-Amz-Signature' param to the end
# of the query string.
request.url += '&X-Amz-Signature=%s' % signature
class S3SigV4QueryAuth(SigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e. a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
def _normalize_url_path(self, path):
# For S3, we do not normalize the path.
return path
def payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return UNSIGNED_PAYLOAD
class S3SigV4PostAuth(SigV4Auth):
"""
Presigns an S3 POST request.
Implementation doc here:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
"""
def add_auth(self, request):
datetime_now = datetime.datetime.utcnow()
request.context['timestamp'] = datetime_now.strftime(SIGV4_TIMESTAMP)
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['x-amz-algorithm'] = 'AWS4-HMAC-SHA256'
fields['x-amz-credential'] = self.scope(request)
fields['x-amz-date'] = request.context['timestamp']
conditions.append({'x-amz-algorithm': 'AWS4-HMAC-SHA256'})
conditions.append({'x-amz-credential': self.scope(request)})
conditions.append({'x-amz-date': request.context['timestamp']})
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['x-amz-signature'] = self.signature(fields['policy'], request)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
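# Illustration (not part of the original module): the POST policy is
# signed in its base64-encoded JSON form, exactly as add_auth does
# above. The policy contents here are hypothetical.
_demo_policy = {'conditions': [{'x-amz-algorithm': 'AWS4-HMAC-SHA256'}]}
_encoded_policy = base64.b64encode(
    json.dumps(_demo_policy).encode('utf-8')).decode('utf-8')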
class HmacV1Auth(BaseSigner):
# List of Query String Arguments of Interest
QSAOfInterest = ['accelerate', 'acl', 'cors', 'defaultObjectAcl',
'location', 'logging', 'partNumber', 'policy',
'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore', 'storageClass', 'notification',
'replication', 'analytics', 'metrics',
'inventory']
def __init__(self, credentials, service_name=None, region_name=None):
self.credentials = credentials
def sign_string(self, string_to_sign):
new_hmac = hmac.new(self.credentials.secret_key.encode('utf-8'),
digestmod=sha1)
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).strip().decode('utf-8')
def canonical_standard_headers(self, headers):
interesting_headers = ['content-md5', 'content-type', 'date']
hoi = []
if 'Date' in headers:
del headers['Date']
headers['Date'] = self._get_date()
for ih in interesting_headers:
found = False
for key in headers:
lk = key.lower()
if headers[key] is not None and lk == ih:
hoi.append(headers[key].strip())
found = True
if not found:
hoi.append('')
return '\n'.join(hoi)
def canonical_custom_headers(self, headers):
hoi = []
custom_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None:
if lk.startswith('x-amz-'):
custom_headers[lk] = ','.join(v.strip() for v in
headers.get_all(key))
sorted_header_keys = sorted(custom_headers.keys())
for key in sorted_header_keys:
hoi.append("%s:%s" % (key, custom_headers[key]))
return '\n'.join(hoi)
def unquote_v(self, nv):
"""
TODO: Do we need this?
"""
if len(nv) == 1:
return nv
else:
return (nv[0], unquote(nv[1]))
def canonical_resource(self, split, auth_path=None):
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
# NOTE:
# The path in the canonical resource should always be the
# full path including the bucket name, even for virtual-hosting
# style addressing. The ``auth_path`` keeps track of the full
# path for the canonical resource and would be passed in if
# the client was using virtual-hosting style.
if auth_path is not None:
buf = auth_path
else:
buf = split.path
if split.query:
qsa = split.query.split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [self.unquote_v(a) for a in qsa
if a[0] in self.QSAOfInterest]
if len(qsa) > 0:
qsa.sort(key=itemgetter(0))
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def canonical_string(self, method, split, headers, expires=None,
auth_path=None):
cs = method.upper() + '\n'
cs += self.canonical_standard_headers(headers) + '\n'
custom_headers = self.canonical_custom_headers(headers)
if custom_headers:
cs += custom_headers + '\n'
cs += self.canonical_resource(split, auth_path=auth_path)
return cs
def get_signature(self, method, split, headers, expires=None,
auth_path=None):
if self.credentials.token:
del headers['x-amz-security-token']
headers['x-amz-security-token'] = self.credentials.token
string_to_sign = self.canonical_string(method,
split,
headers,
auth_path=auth_path)
logger.debug('StringToSign:\n%s', string_to_sign)
return self.sign_string(string_to_sign)
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError
logger.debug("Calculating signature using hmacv1 auth.")
split = urlsplit(request.url)
logger.debug('HTTP request method: %s', request.method)
signature = self.get_signature(request.method, split,
request.headers,
auth_path=request.auth_path)
self._inject_signature(request, signature)
def _get_date(self):
return formatdate(usegmt=True)
def _inject_signature(self, request, signature):
if 'Authorization' in request.headers:
# We have to do this because request.headers is not a
# normal dictionary. It has the (unintuitive) behavior
# of aggregating repeated assignments for the same
# key. For example:
# headers['foo'] = 'a'; headers['foo'] = 'b'
# list(headers) will be ['foo', 'foo'].
del request.headers['Authorization']
request.headers['Authorization'] = (
"AWS %s:%s" % (self.credentials.access_key, signature))
class HmacV1QueryAuth(HmacV1Auth):
"""
Generates a presigned request for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
#RESTAuthenticationQueryStringAuth
"""
DEFAULT_EXPIRES = 3600
def __init__(self, credentials, expires=DEFAULT_EXPIRES):
self.credentials = credentials
self._expires = expires
def _get_date(self):
return str(int(time.time() + int(self._expires)))
def _inject_signature(self, request, signature):
query_dict = {}
query_dict['AWSAccessKeyId'] = self.credentials.access_key
query_dict['Signature'] = signature
for header_key in request.headers:
lk = header_key.lower()
# For query string requests, Expires is used instead of the
# Date header.
if header_key == 'Date':
query_dict['Expires'] = request.headers['Date']
# We only want to include relevant headers in the query string.
# These can be anything that starts with x-amz, is Content-MD5,
# or is Content-Type.
elif lk.startswith('x-amz-') or lk in ['content-md5',
'content-type']:
query_dict[lk] = request.headers[lk]
# Combine all of the identified headers into an encoded
# query string
new_query_string = percent_encode_sequence(query_dict)
# Create a new url with the presigned url.
p = urlsplit(request.url)
if p[3]:
# If there was a pre-existing query string, we should
# add that back before injecting the new query string.
new_query_string = '%s&%s' % (p[3], new_query_string)
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
class HmacV1PostAuth(HmacV1Auth):
"""
Generates a presigned post for s3.
Spec from this document:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingHTTPPOST.html
"""
def add_auth(self, request):
fields = {}
if request.context.get('s3-presign-post-fields', None) is not None:
fields = request.context['s3-presign-post-fields']
policy = {}
conditions = []
if request.context.get('s3-presign-post-policy', None) is not None:
policy = request.context['s3-presign-post-policy']
if policy.get('conditions', None) is not None:
conditions = policy['conditions']
policy['conditions'] = conditions
fields['AWSAccessKeyId'] = self.credentials.access_key
if self.credentials.token is not None:
fields['x-amz-security-token'] = self.credentials.token
conditions.append({'x-amz-security-token': self.credentials.token})
# Dump the base64 encoded policy into the fields dictionary.
fields['policy'] = base64.b64encode(
json.dumps(policy).encode('utf-8')).decode('utf-8')
fields['signature'] = self.sign_string(fields['policy'])
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
# Defined at the bottom instead of the top of the module because the Auth
# classes weren't defined yet.
AUTH_TYPE_MAPS = {
'v2': SigV2Auth,
'v4': SigV4Auth,
'v4-query': SigV4QueryAuth,
'v3': SigV3Auth,
'v3https': SigV3Auth,
's3': HmacV1Auth,
's3-query': HmacV1QueryAuth,
's3-presign-post': HmacV1PostAuth,
's3v4': S3SigV4Auth,
's3v4-query': S3SigV4QueryAuth,
's3v4-presign-post': S3SigV4PostAuth,
}
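# Illustration (not part of the original module): a sketch of resolving
# a signature-version string to a signer instance using the map above.
# This wiring is an assumption for demonstration; in botocore the
# resolution is done by the request signing machinery, not here.
def _make_signer(signature_version, credentials,
                 service_name=None, region_name=None):
    auth_cls = AUTH_TYPE_MAPS[signature_version]
    if auth_cls.REQUIRES_REGION:
        return auth_cls(credentials, service_name, region_name)
    return auth_cls(credentials)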
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from itertools import tee
from botocore.compat import six
import jmespath
import json
import base64
import logging
from botocore.exceptions import PaginationError
from botocore.compat import zip
from botocore.utils import set_value_from_jmespath, merge_dicts
log = logging.getLogger(__name__)
class TokenEncoder(object):
"""Encodes dictionaries into opaque strings.
This is, for the most part, json dumps + base64 encoding, but it also
supports having bytes in the dictionary in addition to the types that
json can handle by default.
This is intended for use in encoding pagination tokens, which in some
cases can be complex structures and/or contain bytes.
"""
def encode(self, token):
"""Encodes a dictionary to an opaque string.
:type token: dict
:param token: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
:rtype: str
:returns: An opaque string
"""
try:
# Try just using json dumps first to avoid having to traverse
# and encode the dict. In 99.9999% of cases this will work.
json_string = json.dumps(token)
except (TypeError, UnicodeDecodeError):
# If normal dumping failed, go through and base64 encode all bytes.
encoded_token, encoded_keys = self._encode(token, [])
# Save the list of all the encoded key paths. We can safely
# assume that no service will ever use this key.
encoded_token['boto_encoded_keys'] = encoded_keys
# Now that the bytes are all encoded, dump the json.
json_string = json.dumps(encoded_token)
# base64 encode the json string to produce an opaque token string.
return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
def _encode(self, data, path):
"""Encode bytes in given data, keeping track of the path traversed."""
if isinstance(data, dict):
return self._encode_dict(data, path)
elif isinstance(data, list):
return self._encode_list(data, path)
elif isinstance(data, six.binary_type):
return self._encode_bytes(data, path)
else:
return data, []
def _encode_list(self, data, path):
"""Encode any bytes in a list, noting the index of what is encoded."""
new_data = []
encoded = []
for i, value in enumerate(data):
new_path = path + [i]
new_value, new_encoded = self._encode(value, new_path)
new_data.append(new_value)
encoded.extend(new_encoded)
return new_data, encoded
def _encode_dict(self, data, path):
"""Encode any bytes in a dict, noting the index of what is encoded."""
new_data = {}
encoded = []
for key, value in data.items():
new_path = path + [key]
new_value, new_encoded = self._encode(value, new_path)
new_data[key] = new_value
encoded.extend(new_encoded)
return new_data, encoded
def _encode_bytes(self, data, path):
"""Base64 encode a byte string."""
return base64.b64encode(data).decode('utf-8'), [path]
class TokenDecoder(object):
"""Decodes token strings back into dictionaries.
This performs the inverse operation to the TokenEncoder, accepting
opaque strings and decoding them into a usable form.
"""
def decode(self, token):
"""Decodes an opaque string to a dictionary.
:type token: str
:param token: A token string given by the botocore pagination
interface.
:rtype: dict
:returns: A dictionary containing pagination information,
particularly the service pagination token(s) but also other boto
metadata.
"""
json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')
decoded_token = json.loads(json_string)
# Remove the encoding metadata as it is read since it will no longer
# be needed.
encoded_keys = decoded_token.pop('boto_encoded_keys', None)
if encoded_keys is None:
return decoded_token
else:
return self._decode(decoded_token, encoded_keys)
def _decode(self, token, encoded_keys):
"""Find each encoded value and decode it."""
for key in encoded_keys:
encoded = self._path_get(token, key)
decoded = base64.b64decode(encoded.encode('utf-8'))
self._path_set(token, key, decoded)
return token
def _path_get(self, data, path):
"""Return the nested data at the given path.
For instance:
data = {'foo': ['bar', 'baz']}
path = ['foo', 0]
==> 'bar'
"""
# jmespath isn't used here because it would be difficult to actually
# create the jmespath query when taking all of the unknowns of key
# structure into account. Gross though this is, it is simple and not
# very error prone.
d = data
for step in path:
d = d[step]
return d
def _path_set(self, data, path, value):
"""Set the value of a key in the given data.
Example:
data = {'foo': ['bar', 'baz']}
path = ['foo', 1]
value = 'bin'
==> data = {'foo': ['bar', 'bin']}
"""
container = self._path_get(data, path[:-1])
container[path[-1]] = value
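# A hedged, minimal sketch of the round trip through the two classes
# above. The key names ('Marker', 'Blob') are invented for illustration;
# real tokens are produced by the pagination machinery below.
def _example_token_roundtrip():
    token = {'Marker': 'page-2', 'Blob': b'\x00\x01'}
    opaque = TokenEncoder().encode(token)
    # The opaque string is base64-encoded JSON; the bytes value survives
    # because its path is recorded under 'boto_encoded_keys'.
    assert TokenDecoder().decode(opaque) == token
    return opaque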
class PaginatorModel(object):
def __init__(self, paginator_config):
self._paginator_config = paginator_config['pagination']
def get_paginator(self, operation_name):
try:
single_paginator_config = self._paginator_config[operation_name]
except KeyError:
raise ValueError("Paginator for operation does not exist: %s"
% operation_name)
return single_paginator_config
class PageIterator(object):
def __init__(self, method, input_token, output_token, more_results,
result_keys, non_aggregate_keys, limit_key, max_items,
starting_token, page_size, op_kwargs):
self._method = method
self._input_token = input_token
self._output_token = output_token
self._more_results = more_results
self._result_keys = result_keys
self._max_items = max_items
self._limit_key = limit_key
self._starting_token = starting_token
self._page_size = page_size
self._op_kwargs = op_kwargs
self._resume_token = None
self._non_aggregate_key_exprs = non_aggregate_keys
self._non_aggregate_part = {}
self._token_encoder = TokenEncoder()
self._token_decoder = TokenDecoder()
@property
def result_keys(self):
return self._result_keys
@property
def resume_token(self):
"""Token to specify to resume pagination."""
return self._resume_token
@resume_token.setter
def resume_token(self, value):
if not isinstance(value, dict):
raise ValueError("Bad starting token: %s" % value)
if 'boto_truncate_amount' in value:
token_keys = sorted(self._input_token + ['boto_truncate_amount'])
else:
token_keys = sorted(self._input_token)
dict_keys = sorted(value.keys())
if token_keys == dict_keys:
self._resume_token = self._token_encoder.encode(value)
else:
raise ValueError("Bad starting token: %s" % value)
@property
def non_aggregate_part(self):
return self._non_aggregate_part
def __iter__(self):
current_kwargs = self._op_kwargs
previous_next_token = None
next_token = dict((key, None) for key in self._input_token)
if self._starting_token is not None:
# If the starting token exists, populate the next_token with the
# values inside it. This ensures that we have the service's
# pagination token on hand if we need to truncate after the
# first response.
next_token = self._parse_starting_token()[0]
# The number of items from result_key we've seen so far.
total_items = 0
first_request = True
primary_result_key = self.result_keys[0]
starting_truncation = 0
self._inject_starting_params(current_kwargs)
while True:
response = self._make_request(current_kwargs)
parsed = self._extract_parsed_response(response)
if first_request:
# The first request is handled differently. We could
# possibly have a resume/starting token that tells us where
# to index into the retrieved page.
if self._starting_token is not None:
starting_truncation = self._handle_first_request(
parsed, primary_result_key, starting_truncation)
first_request = False
self._record_non_aggregate_key_values(parsed)
else:
# If this isn't the first request, we have already sliced into
# the first request and had to make additional requests after.
# We no longer need to add this to truncation.
starting_truncation = 0
current_response = primary_result_key.search(parsed)
if current_response is None:
current_response = []
num_current_response = len(current_response)
truncate_amount = 0
if self._max_items is not None:
truncate_amount = (total_items + num_current_response) \
- self._max_items
if truncate_amount > 0:
self._truncate_response(parsed, primary_result_key,
truncate_amount, starting_truncation,
next_token)
yield response
break
else:
yield response
total_items += num_current_response
next_token = self._get_next_token(parsed)
if all(t is None for t in next_token.values()):
break
if self._max_items is not None and \
total_items == self._max_items:
# We're on a page boundary so we can set the current
# next token to be the resume token.
self.resume_token = next_token
break
if previous_next_token is not None and \
previous_next_token == next_token:
message = ("The same next token was received "
"twice: %s" % next_token)
raise PaginationError(message=message)
self._inject_token_into_kwargs(current_kwargs, next_token)
previous_next_token = next_token
def search(self, expression):
"""Applies a JMESPath expression to a paginator
Each page of results is searched using the provided JMESPath
expression. If the result is not a list, it is yielded
directly. If the result is a list, each element in the result
is yielded individually (essentially implementing a flatmap in
which the JMESPath search is the mapping function).
:type expression: str
:param expression: JMESPath expression to apply to each page.
:return: Returns an iterator that yields the individual
elements of applying a JMESPath expression to each page of
results.
"""
compiled = jmespath.compile(expression)
for page in self:
results = compiled.search(page)
if isinstance(results, list):
for element in results:
yield element
else:
# Yield result directly if it is not a list.
yield results
def _make_request(self, current_kwargs):
return self._method(**current_kwargs)
def _extract_parsed_response(self, response):
return response
def _record_non_aggregate_key_values(self, response):
non_aggregate_keys = {}
for expression in self._non_aggregate_key_exprs:
result = expression.search(response)
set_value_from_jmespath(non_aggregate_keys,
expression.expression,
result)
self._non_aggregate_part = non_aggregate_keys
def _inject_starting_params(self, op_kwargs):
# If the user has specified a starting token we need to
# inject that into the operation's kwargs.
if self._starting_token is not None:
# Parse the starting token to pull out the service pagination
# values to inject.
next_token = self._parse_starting_token()[0]
self._inject_token_into_kwargs(op_kwargs, next_token)
if self._page_size is not None:
# Pass the page size as the parameter name for limiting
# page size, also known as the limit_key.
op_kwargs[self._limit_key] = self._page_size
def _inject_token_into_kwargs(self, op_kwargs, next_token):
for name, token in next_token.items():
if (token is not None) and (token != 'None'):
op_kwargs[name] = token
elif name in op_kwargs:
del op_kwargs[name]
def _handle_first_request(self, parsed, primary_result_key,
starting_truncation):
# If the payload is an array or string, we need to slice into it
# and only return the truncated amount.
starting_truncation = self._parse_starting_token()[1]
all_data = primary_result_key.search(parsed)
if isinstance(all_data, (list, six.string_types)):
data = all_data[starting_truncation:]
else:
data = None
set_value_from_jmespath(
parsed,
primary_result_key.expression,
data
)
# We also need to truncate any secondary result keys
# because they were not truncated in the previous
# response.
for token in self.result_keys:
if token == primary_result_key:
continue
sample = token.search(parsed)
if isinstance(sample, list):
empty_value = []
elif isinstance(sample, six.string_types):
empty_value = ''
elif isinstance(sample, (int, float)):
empty_value = 0
else:
empty_value = None
set_value_from_jmespath(parsed, token.expression, empty_value)
return starting_truncation
def _truncate_response(self, parsed, primary_result_key, truncate_amount,
starting_truncation, next_token):
original = primary_result_key.search(parsed)
if original is None:
original = []
amount_to_keep = len(original) - truncate_amount
truncated = original[:amount_to_keep]
set_value_from_jmespath(
parsed,
primary_result_key.expression,
truncated
)
# The issue here is that even though we know how much we've truncated
# we need to account for this globally including any starting
# left truncation. For example:
# Raw response: [0,1,2,3]
# Starting index: 1
# Max items: 1
# Starting left truncation: [1, 2, 3]
# End right truncation for max items: [1]
# However, even though we only kept 1, this is post
# left truncation so the next starting index should be 2, not 1
# (left_truncation + amount_to_keep).
next_token['boto_truncate_amount'] = \
amount_to_keep + starting_truncation
self.resume_token = next_token
def _get_next_token(self, parsed):
if self._more_results is not None:
if not self._more_results.search(parsed):
return {}
next_tokens = {}
for output_token, input_key in \
zip(self._output_token, self._input_token):
next_token = output_token.search(parsed)
# We do not want to include any empty strings as actual tokens.
# Treat them as None.
if next_token:
next_tokens[input_key] = next_token
else:
next_tokens[input_key] = None
return next_tokens
def result_key_iters(self):
teed_results = tee(self, len(self.result_keys))
return [ResultKeyIterator(i, result_key) for i, result_key
in zip(teed_results, self.result_keys)]
def build_full_result(self):
complete_result = {}
for response in self:
page = response
# We want to try to catch operation object pagination
# and format correctly for those. They come in the form
# of a tuple of two elements: (http_response, parsed_response).
# We want the parsed_response as that is what the page iterator
# uses. We can remove this check once operation objects are removed.
if isinstance(response, tuple) and len(response) == 2:
page = response[1]
# We're incrementally building the full response page
# by page. For each page in the response we need to
# inject the necessary components from the page
# into the complete_result.
for result_expression in self.result_keys:
# In order to incrementally update a result key
# we need to search the existing value from complete_result,
# then we need to search the _current_ page for the
# current result key value. Then we append the current
# value onto the existing value, and re-set that value
# as the new value.
result_value = result_expression.search(page)
if result_value is None:
continue
existing_value = result_expression.search(complete_result)
if existing_value is None:
# Set the initial result
set_value_from_jmespath(
complete_result, result_expression.expression,
result_value)
continue
# Now both result_value and existing_value contain something
if isinstance(result_value, list):
existing_value.extend(result_value)
elif isinstance(result_value, (int, float, six.string_types)):
# Modify the existing result with the sum or concatenation
set_value_from_jmespath(
complete_result, result_expression.expression,
existing_value + result_value)
merge_dicts(complete_result, self.non_aggregate_part)
if self.resume_token is not None:
complete_result['NextToken'] = self.resume_token
return complete_result
def _parse_starting_token(self):
if self._starting_token is None:
return None
# The starting token is a dict passed as a base64 encoded string.
next_token = self._starting_token
try:
next_token = self._token_decoder.decode(next_token)
index = 0
if 'boto_truncate_amount' in next_token:
index = next_token.get('boto_truncate_amount')
del next_token['boto_truncate_amount']
except (ValueError, TypeError):
next_token, index = self._parse_starting_token_deprecated()
return next_token, index
def _parse_starting_token_deprecated(self):
"""
This handles parsing of old style starting tokens, and attempts to
coerce them into the new style.
"""
log.debug("Attempting to fall back to old starting token parser. For "
"token: %s" % self._starting_token)
if self._starting_token is None:
return None
parts = self._starting_token.split('___')
next_token = []
index = 0
if len(parts) == len(self._input_token) + 1:
try:
index = int(parts.pop())
except ValueError:
raise ValueError("Bad starting token: %s" %
self._starting_token)
for part in parts:
if part == 'None':
next_token.append(None)
else:
next_token.append(part)
return self._convert_deprecated_starting_token(next_token), index
def _convert_deprecated_starting_token(self, deprecated_token):
"""
This attempts to convert a deprecated starting token into the new
style.
"""
len_deprecated_token = len(deprecated_token)
len_input_token = len(self._input_token)
if len_deprecated_token > len_input_token:
raise ValueError("Bad starting token: %s" % self._starting_token)
elif len_deprecated_token < len_input_token:
log.debug("Old format starting token does not contain all input "
"tokens. Setting the rest, in order, as None.")
for i in range(len_input_token - len_deprecated_token):
deprecated_token.append(None)
return dict(zip(self._input_token, deprecated_token))
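# Hedged usage sketch for PageIterator.search(). The client, operation
# name, and JMESPath expression below are assumptions for illustration;
# any paginated operation would work the same way.
def _example_search(client):
    paginator = client.get_paginator('list_objects')
    page_iterator = paginator.paginate(Bucket='my-bucket')
    # search() flat-maps the expression over every page, yielding each
    # key individually instead of one list per page.
    return list(page_iterator.search('Contents[].Key'))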
class Paginator(object):
PAGE_ITERATOR_CLS = PageIterator
def __init__(self, method, pagination_config, model):
self._model = model
self._method = method
self._pagination_cfg = pagination_config
self._output_token = self._get_output_tokens(self._pagination_cfg)
self._input_token = self._get_input_tokens(self._pagination_cfg)
self._more_results = self._get_more_results_token(self._pagination_cfg)
self._non_aggregate_keys = self._get_non_aggregate_keys(
self._pagination_cfg)
self._result_keys = self._get_result_keys(self._pagination_cfg)
self._limit_key = self._get_limit_key(self._pagination_cfg)
@property
def result_keys(self):
return self._result_keys
def _get_non_aggregate_keys(self, config):
keys = []
for key in config.get('non_aggregate_keys', []):
keys.append(jmespath.compile(key))
return keys
def _get_output_tokens(self, config):
output = []
output_token = config['output_token']
if not isinstance(output_token, list):
output_token = [output_token]
for token in output_token:
output.append(jmespath.compile(token))
return output
def _get_input_tokens(self, config):
input_token = self._pagination_cfg['input_token']
if not isinstance(input_token, list):
input_token = [input_token]
return input_token
def _get_more_results_token(self, config):
more_results = config.get('more_results')
if more_results is not None:
return jmespath.compile(more_results)
def _get_result_keys(self, config):
result_key = config.get('result_key')
if result_key is not None:
if not isinstance(result_key, list):
result_key = [result_key]
result_key = [jmespath.compile(rk) for rk in result_key]
return result_key
def _get_limit_key(self, config):
return config.get('limit_key')
def paginate(self, **kwargs):
"""Create paginator object for an operation.
This returns an iterable object. Iterating over
this object will yield a single page of a response
at a time.
"""
page_params = self._extract_paging_params(kwargs)
return self.PAGE_ITERATOR_CLS(
self._method, self._input_token,
self._output_token, self._more_results,
self._result_keys, self._non_aggregate_keys,
self._limit_key,
page_params['MaxItems'],
page_params['StartingToken'],
page_params['PageSize'],
kwargs)
def _extract_paging_params(self, kwargs):
pagination_config = kwargs.pop('PaginationConfig', {})
max_items = pagination_config.get('MaxItems', None)
if max_items is not None:
max_items = int(max_items)
page_size = pagination_config.get('PageSize', None)
if page_size is not None:
if self._limit_key is None:
raise PaginationError(
message="PageSize parameter is not supported for the "
"pagination interface for this operation.")
input_members = self._model.input_shape.members
limit_key_shape = input_members.get(self._limit_key)
if limit_key_shape.type_name == 'string':
if not isinstance(page_size, six.string_types):
page_size = str(page_size)
else:
page_size = int(page_size)
return {
'MaxItems': max_items,
'StartingToken': pagination_config.get('StartingToken', None),
'PageSize': page_size,
}
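# Hedged sketch of _extract_paging_params in action: PaginationConfig is
# popped from the call kwargs before they are forwarded to the operation.
# MaxItems caps the total number of results, PageSize is passed through
# as the service's limit_key, and StartingToken resumes from a previous
# run's resume_token. The names below are illustrative assumptions.
def _example_paginate_with_config(client, token=None):
    paginator = client.get_paginator('list_objects')
    return paginator.paginate(
        Bucket='my-bucket',
        PaginationConfig={
            'MaxItems': 100,
            'PageSize': 25,
            'StartingToken': token,
        })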
class ResultKeyIterator(object):
"""Iterates over the results of paginated responses.
Each iterator is associated with a single result key.
Iterating over this object will give you each element in
the result key list.
:param pages_iterator: An iterator that will give you
pages of results (a ``PageIterator`` class).
:param result_key: The JMESPath expression representing
the result key.
"""
def __init__(self, pages_iterator, result_key):
self._pages_iterator = pages_iterator
self.result_key = result_key
def __iter__(self):
for page in self._pages_iterator:
results = self.result_key.search(page)
if results is None:
results = []
for result in results:
yield result
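# Hedged example: ResultKeyIterator only needs an iterable of parsed
# pages and a compiled JMESPath expression, so it can be demonstrated
# with plain dicts and no service call. The page shape here is made up.
def _example_result_key_iterator():
    pages = [{'Users': [{'Name': 'a'}, {'Name': 'b'}]},
             {'Users': [{'Name': 'c'}]}]
    iterator = ResultKeyIterator(iter(pages), jmespath.compile('Users'))
    return [user['Name'] for user in iterator]  # ['a', 'b', 'c']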
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import logging
from collections import defaultdict, deque, namedtuple
from botocore.compat import accepts_kwargs, six
logger = logging.getLogger(__name__)
_NodeList = namedtuple('NodeList', ['first', 'middle', 'last'])
_FIRST = 0
_MIDDLE = 1
_LAST = 2
class NodeList(_NodeList):
def __copy__(self):
first_copy = copy.copy(self.first)
middle_copy = copy.copy(self.middle)
last_copy = copy.copy(self.last)
copied = NodeList(first_copy, middle_copy, last_copy)
return copied
def first_non_none_response(responses, default=None):
"""Find first non None response in a list of tuples.
This function can be used to find the first non None response from
handlers connected to an event. This is useful if you are interested
in the returned responses from event handlers. Example usage::
print(first_non_none_response([(func1, None), (func2, 'foo'),
(func3, 'bar')]))
# This will print 'foo'
:type responses: list of tuples
:param responses: The responses from the ``EventHooks.emit`` method.
This is a list of tuples, and each tuple is
(handler, handler_response).
:param default: If no non-None responses are found, then this default
value will be returned.
:return: The first non-None response in the list of tuples.
"""
for response in responses:
if response[1] is not None:
return response[1]
return default
class BaseEventHooks(object):
def emit(self, event_name, **kwargs):
"""Call all handlers subscribed to an event.
:type event_name: str
:param event_name: The name of the event to emit.
:type **kwargs: dict
:param **kwargs: Arbitrary kwargs to pass through to the
subscribed handlers. The ``event_name`` will be injected
into the kwargs so it's not necessary to add this to **kwargs.
:rtype: list of tuples
:return: A list of ``(handler_func, handler_func_return_value)``
"""
return []
def register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler for a given event.
If a ``unique_id`` is given, the handler will not be registered
if a handler with the ``unique_id`` has already been registered.
Handlers are called in the order they have been registered.
Note handlers can also be registered with ``register_first()``
and ``register_last()``. All handlers registered with
``register_first()`` are called before handlers registered
with ``register()`` which are called before handlers registered
with ``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register,
unique_id_uses_count=unique_id_uses_count)
def register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called first for an event.
All event handlers registered with ``register_first()`` will
be called before handlers registered with ``register()`` and
``register_last()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_first,
unique_id_uses_count=unique_id_uses_count)
def register_last(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
"""Register an event handler to be called last for an event.
All event handlers registered with ``register_last()`` will be called
after handlers registered with ``register_first()`` and ``register()``.
"""
self._verify_and_register(event_name, handler, unique_id,
register_method=self._register_last,
unique_id_uses_count=unique_id_uses_count)
def _verify_and_register(self, event_name, handler, unique_id,
register_method, unique_id_uses_count):
self._verify_is_callable(handler)
self._verify_accept_kwargs(handler)
register_method(event_name, handler, unique_id, unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
"""Unregister an event handler for a given event.
If no ``unique_id`` was given during registration, then the
first instance of the event handler is removed (if the event
handler has been registered multiple times).
"""
pass
def _verify_is_callable(self, func):
if not six.callable(func):
raise ValueError("Event handler %s must be callable." % func)
def _verify_accept_kwargs(self, func):
"""Verifies a callable accepts kwargs
:type func: callable
:param func: A callable object.
:returns: True, if ``func`` accepts kwargs, otherwise False.
"""
try:
if not accepts_kwargs(func):
raise ValueError("Event handler %s must accept keyword "
"arguments (**kwargs)" % func)
except TypeError:
return False
class HierarchicalEmitter(BaseEventHooks):
def __init__(self):
# We keep a reference to the handlers for quick
# read only access (we never modify self._handlers).
# A cache of event name to handler list.
self._lookup_cache = {}
self._handlers = _PrefixTrie()
# This is used to ensure that unique_id's are only
# registered once.
self._unique_id_handlers = {}
def _emit(self, event_name, kwargs, stop_on_response=False):
"""
Emit an event with optional keyword arguments.
:type event_name: string
:param event_name: Name of the event
:type kwargs: dict
:param kwargs: Arguments to be passed to the handler functions.
:type stop_on_response: boolean
:param stop_on_response: Whether to stop on the first non-None
response. If False, then all handlers
will be called. Stopping early is
especially useful for handlers which
mutate data and then want to stop
propagation of the event.
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
responses = []
# Invoke the event handlers from most specific
# to least specific, each time stripping off a dot.
handlers_to_call = self._lookup_cache.get(event_name)
if handlers_to_call is None:
handlers_to_call = self._handlers.prefix_search(event_name)
self._lookup_cache[event_name] = handlers_to_call
elif not handlers_to_call:
# Short circuit and return an empty response if we have
# no handlers to call. This is the common case where
# for the majority of signals, nothing is listening.
return []
kwargs['event_name'] = event_name
responses = []
for handler in handlers_to_call:
logger.debug('Event %s: calling handler %s', event_name, handler)
response = handler(**kwargs)
responses.append((handler, response))
if stop_on_response and response is not None:
return responses
return responses
def emit(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args.
>>> responses = emitter.emit(
... 'my-event.service.operation', arg1='one', arg2='two')
:rtype: list
:return: List of (handler, response) tuples from all processed
handlers.
"""
return self._emit(event_name, kwargs)
def emit_until_response(self, event_name, **kwargs):
"""
Emit an event by name with arguments passed as keyword args,
stopping at the first non-``None`` response. Handlers after
that response are not invoked.
>>> handler, response = emitter.emit_until_response(
...     'my-event.service.operation', arg1='one', arg2='two')
:rtype: tuple
:return: The first (handler, response) tuple where the response
is not ``None``, otherwise (``None``, ``None``).
"""
responses = self._emit(event_name, kwargs, stop_on_response=True)
if responses:
return responses[-1]
else:
return (None, None)
def _register(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_MIDDLE)
def _register_first(self, event_name, handler, unique_id=None,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_FIRST)
def _register_last(self, event_name, handler, unique_id,
unique_id_uses_count=False):
self._register_section(event_name, handler, unique_id,
unique_id_uses_count, section=_LAST)
def _register_section(self, event_name, handler, unique_id,
unique_id_uses_count, section):
if unique_id is not None:
if unique_id in self._unique_id_handlers:
# We've already registered a handler using this unique_id
# so we don't need to register it again.
count = self._unique_id_handlers[unique_id].get('count', None)
if unique_id_uses_count:
if not count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to use a counter. Subsequent register "
"calls to unique id must specify use of a counter "
"as well." % unique_id)
else:
self._unique_id_handlers[unique_id]['count'] += 1
else:
if count:
raise ValueError(
"Initial registration of unique id %s was "
"specified to not use a counter. Subsequent "
"register calls to unique id must specify not to "
"use a counter as well." % unique_id)
return
else:
# Note that the trie knows nothing about the unique
# id. We track uniqueness in this class via the
# _unique_id_handlers.
self._handlers.append_item(event_name, handler,
section=section)
unique_id_handler_item = {'handler': handler}
if unique_id_uses_count:
unique_id_handler_item['count'] = 1
self._unique_id_handlers[unique_id] = unique_id_handler_item
else:
self._handlers.append_item(event_name, handler, section=section)
# Super simple caching strategy for now: if the registrations
# change, clear the cache. This leaves room for smarter
# invalidation later.
self._lookup_cache = {}
def unregister(self, event_name, handler=None, unique_id=None,
unique_id_uses_count=False):
if unique_id is not None:
try:
count = self._unique_id_handlers[unique_id].get('count', None)
except KeyError:
# There's no handler matching that unique_id so we have
# nothing to unregister.
return
if unique_id_uses_count:
if count is None:
raise ValueError(
"Initial registration of unique id %s was specified to "
"use a counter. Subsequent unregister calls to unique "
"id must specify use of a counter as well." % unique_id)
elif count == 1:
handler = self._unique_id_handlers.pop(unique_id)['handler']
else:
self._unique_id_handlers[unique_id]['count'] -= 1
return
else:
if count:
raise ValueError(
"Initial registration of unique id %s was specified "
"to not use a counter. Subsequent unregister calls "
"to unique id must specify not to use a counter as "
"well." % unique_id)
handler = self._unique_id_handlers.pop(unique_id)['handler']
try:
self._handlers.remove_item(event_name, handler)
self._lookup_cache = {}
except ValueError:
pass
def __copy__(self):
new_instance = self.__class__()
new_state = self.__dict__.copy()
new_state['_handlers'] = copy.copy(self._handlers)
new_state['_unique_id_handlers'] = copy.copy(self._unique_id_handlers)
new_instance.__dict__ = new_state
return new_instance
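# Hedged usage sketch for HierarchicalEmitter. The dotted event name is
# invented; any name works. Handlers must accept **kwargs (enforced by
# BaseEventHooks) and are invoked most specific first.
def _example_emitter():
    emitter = HierarchicalEmitter()
    emitter.register('before-call.myservice.MyOperation',
                     lambda **kwargs: None)
    emitter.register('before-call.*.MyOperation',
                     lambda **kwargs: 'wildcard-matched')
    # Returns a list of (handler, response) tuples, one per handler
    # that matched the emitted name.
    return emitter.emit('before-call.myservice.MyOperation', params={})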
class _PrefixTrie(object):
"""Specialized prefix trie that handles wildcards.
The prefixes in this case are based on dot separated
names so 'foo.bar.baz' is::
foo -> bar -> baz
Wildcard support just means that having a key such as 'foo.bar.*.baz' will
be matched with a call to ``prefix_search('foo.bar.ANYTHING.baz')``.
You can think of this prefix trie as the equivalent of defaultdict(list),
except that it can do prefix searches:
foo.bar.baz -> A
foo.bar -> B
foo -> C
Calling ``prefix_search('foo.bar.baz')`` will return [A, B, C], from
most specific to least specific.
"""
def __init__(self):
# Each dictionary can be thought of as a node, where a node
# has values associated with the node, and children is a link
# to more nodes. So 'foo.bar' would have a 'foo' node with
# a 'bar' node as a child of foo.
# {'foo': {'children': {'bar': {...}}}}.
self._root = {'chunk': None, 'children': {}, 'values': None}
def append_item(self, key, value, section=_MIDDLE):
"""Add an item to a key.
If a value is already associated with that key, the new
value is appended to the list for the key.
"""
key_parts = key.split('.')
current = self._root
for part in key_parts:
if part not in current['children']:
new_child = {'chunk': part, 'values': None, 'children': {}}
current['children'][part] = new_child
current = new_child
else:
current = current['children'][part]
if current['values'] is None:
current['values'] = NodeList([], [], [])
current['values'][section].append(value)
def prefix_search(self, key):
"""Collect all items that are prefixes of key.
Prefixes in this case are delineated by '.' characters, so
'foo.bar.baz' is a 3 chunk sequence of "prefixes"
("foo", "bar", and "baz").
"""
collected = deque()
key_parts = key.split('.')
current = self._root
self._get_items(current, key_parts, collected, 0)
return collected
def _get_items(self, starting_node, key_parts, collected, starting_index):
stack = [(starting_node, starting_index)]
key_parts_len = len(key_parts)
# Traverse down the nodes, where at each level we add the
# next part from key_parts as well as the wildcard element '*'.
# This means for each node we see we potentially add two more
# elements to our stack.
while stack:
current_node, index = stack.pop()
if current_node['values']:
# We're using extendleft because we want
# the values associated with the node furthest
# from the root to come before nodes closer
# to the root. extendleft() also adds its items
# in right-left order so .extendleft([1, 2, 3])
# will result in final_list = [3, 2, 1], which is
# why we reverse the lists.
node_list = current_node['values']
complete_order = (node_list.first + node_list.middle +
node_list.last)
collected.extendleft(reversed(complete_order))
if not index == key_parts_len:
children = current_node['children']
directs = children.get(key_parts[index])
wildcard = children.get('*')
next_index = index + 1
if wildcard is not None:
stack.append((wildcard, next_index))
if directs is not None:
stack.append((directs, next_index))
def remove_item(self, key, value):
"""Remove an item associated with a key.
If the value is not associated with the key a ``ValueError``
will be raised. If the key does not exist in the trie, a
``ValueError`` will be raised.
"""
key_parts = key.split('.')
current = self._root
self._remove_item(current, key_parts, value, index=0)
def _remove_item(self, current_node, key_parts, value, index):
if current_node is None:
return
elif index < len(key_parts):
next_node = current_node['children'].get(key_parts[index])
if next_node is not None:
self._remove_item(next_node, key_parts, value, index + 1)
if index == len(key_parts) - 1:
node_list = next_node['values']
if value in node_list.first:
node_list.first.remove(value)
elif value in node_list.middle:
node_list.middle.remove(value)
elif value in node_list.last:
node_list.last.remove(value)
if not next_node['children'] and not next_node['values']:
# Then this is a leaf node with no values so
# we can just delete this link from the parent node.
# This makes subsequent search faster in the case
# where a key does not exist.
del current_node['children'][key_parts[index]]
else:
raise ValueError(
"key is not in trie: %s" % '.'.join(key_parts))
def __copy__(self):
# The fact that we're using a nested dict under the covers
# is an implementation detail, and the user shouldn't have
# to know that they'd normally need a deepcopy so we expose
# __copy__ instead of __deepcopy__.
new_copy = self.__class__()
copied_attrs = self._recursive_copy(self.__dict__)
new_copy.__dict__ = copied_attrs
return new_copy
def _recursive_copy(self, node):
# We can't use copy.deepcopy because we actually only want to copy
# the structure of the trie, not the handlers themselves.
# Each node has a chunk, children, and values.
copied_node = {}
for key, value in node.items():
if isinstance(value, NodeList):
copied_node[key] = copy.copy(value)
elif isinstance(value, dict):
copied_node[key] = self._recursive_copy(value)
else:
copied_node[key] = value
return copied_node
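# A hedged, minimal sketch of the ordering described in the docstring
# above: values attached to deeper (more specific) nodes come back
# before shallower ones, and '*' matches any single chunk.
def _example_prefix_trie():
    trie = _PrefixTrie()
    trie.append_item('foo', 'C')
    trie.append_item('foo.bar', 'B')
    trie.append_item('foo.*.baz', 'A')
    # Returns deque(['A', 'B', 'C']): most specific match first.
    return trie.prefix_search('foo.bar.baz')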
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.utils import merge_dicts
def build_retry_config(endpoint_prefix, retry_model, definitions,
client_retry_config=None):
service_config = retry_model.get(endpoint_prefix, {})
resolve_references(service_config, definitions)
# We want to merge the global defaults with the service specific
# defaults, with the service specific defaults taking precedence.
# So we use the global defaults as the base.
#
# A deepcopy is done on the retry defaults because it ensures the
# retry model has no chance of getting mutated when the service specific
# configuration or client retry config is merged in.
final_retry_config = {
'__default__': copy.deepcopy(retry_model.get('__default__', {}))
}
resolve_references(final_retry_config, definitions)
# Then merge the service specific config on top.
merge_dicts(final_retry_config, service_config)
if client_retry_config is not None:
_merge_client_retry_config(final_retry_config, client_retry_config)
return final_retry_config
def _merge_client_retry_config(retry_config, client_retry_config):
max_retry_attempts_override = client_retry_config.get('max_attempts')
if max_retry_attempts_override is not None:
# In the retry config, max_attempts refers to the maximum number
# of requests that will be made in total. However, for the client's
# retry config it refers to how many retry attempts will be made at
# most. So to translate this number from the client config, one is
# added to convert it to the maximum number of requests that will be
# made, as this includes the initial request.
#
# It is also important to note that if we ever support per operation
# configuration in the retry model via the client, we will need to
# revisit this logic to make sure max_attempts gets applied
# per operation.
retry_config['__default__'][
'max_attempts'] = max_retry_attempts_override + 1
def resolve_references(config, definitions):
"""Recursively replace $ref keys.
To cut down on duplication, common definitions can be declared
(and passed in via the ``definitions`` argument) and then
referenced as {"$ref": "name"}. When this happens, the reference
dict is replaced with the value from the ``definitions`` dict.
This is done recursively.
"""
for key, value in config.items():
if isinstance(value, dict):
if len(value) == 1 and list(value.keys())[0] == '$ref':
# Then we need to resolve this reference.
config[key] = definitions[list(value.values())[0]]
else:
resolve_references(value, definitions)
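# Hedged example of resolve_references(): a {"$ref": name} dict is
# swapped for the named entry in ``definitions``, recursively. The
# config shape below is invented; real inputs come from the retry
# JSON model.
def _example_resolve_references():
    definitions = {'throttling': {'applies_when': {'response': {}}}}
    config = {'__default__': {'policies': {'throttle': {'$ref': 'throttling'}}}}
    resolve_references(config, definitions)
    return config['__default__']['policies']['throttle']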
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.compat import OrderedDict
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import InvalidS3AddressingStyleError
from botocore.exceptions import InvalidRetryConfigurationError
from botocore.exceptions import InvalidMaxRetryAttemptsError
class Config(object):
"""Advanced configuration for Botocore clients.
:type region_name: str
:param region_name: The region to use in instantiating the client
:type signature_version: str
:param signature_version: The signature version when signing requests.
:type user_agent: str
:param user_agent: The value to use in the User-Agent header.
:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.
:type connect_timeout: int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection. The default is 60
seconds.
:type read_timeout: int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection. The default is
60 seconds.
:type parameter_validation: bool
:param parameter_validation: Whether parameter validation should occur
when serializing requests. The default is True. You can disable
parameter validation for performance reasons. Otherwise, it's
recommended to leave parameter validation enabled.
:type max_pool_connections: int
:param max_pool_connections: The maximum number of connections to
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type proxies: dict
:param proxies: A dictionary of proxy servers to use by protocol or
endpoint, e.g.:
{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
* 'use_accelerate_endpoint' -- Refers to whether to use the S3
Accelerate endpoint. The value must be a boolean. If True, the
client will use the S3 Accelerate endpoint. If the S3 Accelerate
endpoint is being used then the addressing style will always
be virtual.
* 'payload_signing_enabled' -- Refers to whether or not to SHA256
sign sigv4 payloads. By default, this is disabled for streaming
uploads (UploadPart and PutObject).
* 'addressing_style' -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals:
* auto -- Addressing style is chosen for the user. Depending
on the configuration of the client, the endpoint may be addressed
in the virtual or the path style. Note that this is the default
behavior if no style is specified.
* virtual -- Addressing style is always virtual. The name of the
bucket must be DNS compatible or an exception will be thrown.
Endpoints will be addressed as such: mybucket.s3.amazonaws.com
* path -- Addressing style is always by path. Endpoints will be
addressed as such: s3.amazonaws.com/mybucket
:type retries: dict
:param retries: A dictionary for retry specific configurations.
Valid keys are:
* 'max_attempts' -- An integer representing the maximum number of
retry attempts that will be made on a single request. For
example, setting this value to 2 will result in the request
being retried at most two times after the initial request. Setting
this value to 0 will result in no retries ever being attempted on
the initial request. If not provided, the number of retries will
default to whatever is modeled, which is typically four retries.
"""
OPTION_DEFAULTS = OrderedDict([
('region_name', None),
('signature_version', None),
('user_agent', None),
('user_agent_extra', None),
('connect_timeout', DEFAULT_TIMEOUT),
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('s3', None),
('retries', None)
])
def __init__(self, *args, **kwargs):
self._user_provided_options = self._record_user_provided_options(
args, kwargs)
# Merge the user_provided options onto the default options
config_vars = copy.copy(self.OPTION_DEFAULTS)
config_vars.update(self._user_provided_options)
# Set the attributes based on the config_vars
for key, value in config_vars.items():
setattr(self, key, value)
# Validate the s3 options
self._validate_s3_configuration(self.s3)
self._validate_retry_configuration(self.retries)
def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
# Iterate through the kwargs passed through to the constructor and
# map valid keys to the dictionary
for key, value in kwargs.items():
if key in self.OPTION_DEFAULTS:
user_provided_options[key] = value
# The key must exist in the available options
else:
raise TypeError(
'Got unexpected keyword argument \'%s\'' % key)
# The number of args should not exceed the number of allowed
# options
if len(args) > len(option_order):
raise TypeError(
'Takes at most %s arguments (%s given)' % (
len(option_order), len(args)))
# Iterate through the args passed through to the constructor and map
# them to appropriate keys.
for i, arg in enumerate(args):
# If a kwarg was specified for the arg, then error out
if option_order[i] in user_provided_options:
raise TypeError(
'Got multiple values for keyword argument \'%s\'' % (
option_order[i]))
user_provided_options[option_order[i]] = arg
return user_provided_options
def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style)
def _validate_retry_configuration(self, retries):
if retries is not None:
for key in retries:
if key not in ['max_attempts']:
raise InvalidRetryConfigurationError(
retry_config_option=key)
if key == 'max_attempts' and retries[key] < 0:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=retries[key]
)
def merge(self, other_config):
"""Merges the config object with another config object
This will merge in all non-default values from the provided config
and return a new config object
:type other_config: botocore.config.Config
:param other_config: Another config object to merge with. The values
in the provided config object will take precedence in the merging.
:returns: A config object built from the merged values of both
config objects.
"""
# Make a copy of the current attributes in the config object.
config_options = copy.copy(self._user_provided_options)
# Merge in the user provided options from the other config
config_options.update(other_config._user_provided_options)
# Return a new config object with the merged properties.
return Config(**config_options)
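# Hedged usage sketch for Config: options may be passed positionally
# (in OPTION_DEFAULTS order) or by keyword, and merge() layers another
# config's explicitly provided options on top of this one's. The values
# below are arbitrary.
def _example_config_merge():
    base = Config(connect_timeout=5, retries={'max_attempts': 2})
    override = Config(read_timeout=120)
    # The merged config keeps connect_timeout and retries from base
    # and gains read_timeout from override.
    return base.merge(override)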
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import datetime
import sys
import inspect
import warnings
import hashlib
import logging
import shlex
from math import floor
from botocore.vendored import six
from botocore.exceptions import MD5UnavailableError
from botocore.vendored.requests.packages.urllib3 import exceptions
logger = logging.getLogger(__name__)
if six.PY3:
from botocore.vendored.six.moves import http_client
class HTTPHeaders(http_client.HTTPMessage):
pass
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import unquote_plus
from urllib.parse import urlparse
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from urllib.parse import urljoin
from urllib.parse import parse_qsl
from urllib.parse import parse_qs
from http.client import HTTPResponse
from io import IOBase as _IOBase
from base64 import encodebytes
from email.utils import formatdate
from itertools import zip_longest
file_type = _IOBase
zip = zip
# In python3, unquote takes a str() object, url decodes it,
# then takes the bytestring and decodes it to utf-8.
# In Python 2 we'll have to do this ourselves (see below).
unquote_str = unquote_plus
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp.raw._sock.settimeout(timeout)
def accepts_kwargs(func):
# In python3.4.1, there are backwards incompatible
# changes when using getargspec with functools.partial objects.
return inspect.getfullargspec(func)[2]
def ensure_unicode(s, encoding=None, errors=None):
# NOOP in Python 3, because every string is already unicode
return s
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, str):
return s.encode(encoding, errors)
if isinstance(s, bytes):
return s
raise ValueError("Expected str or bytes, received %s." % type(s))
else:
from urllib import quote
from urllib import urlencode
from urllib import unquote
from urllib import unquote_plus
from urlparse import urlparse
from urlparse import urlsplit
from urlparse import urlunsplit
from urlparse import urljoin
from urlparse import parse_qsl
from urlparse import parse_qs
from email.message import Message
from email.Utils import formatdate
file_type = file
from itertools import izip as zip
from itertools import izip_longest as zip_longest
from httplib import HTTPResponse
from base64 import encodestring as encodebytes
class HTTPHeaders(Message):
# The __iter__ method is not available in python2.x, so we have
# to port the py3 version.
def __iter__(self):
for field, value in self._headers:
yield field
def unquote_str(value, encoding='utf-8'):
# In python2, unquote() gives us a string back that has the urldecoded
# bits, but not the unicode parts. We need to decode this manually.
# unquote has special logic in which if it receives a unicode object it
# will decode it to latin1. This is hard coded. To avoid this, we'll
# encode the string with the passed in encoding before trying to
# unquote it.
byte_string = value.encode(encoding)
return unquote_plus(byte_string).decode(encoding)
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp._sock.settimeout(timeout)
def accepts_kwargs(func):
return inspect.getargspec(func)[2]
def ensure_unicode(s, encoding='utf-8', errors='strict'):
if isinstance(s, six.text_type):
return s
return unicode(s, encoding, errors)
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, unicode):
return s.encode(encoding, errors)
if isinstance(s, str):
return s
raise ValueError("Expected str or unicode, received %s." % type(s))
try:
from collections import OrderedDict
except ImportError:
# Python2.6 we use the 3rd party back port.
from ordereddict import OrderedDict
if sys.version_info[:2] == (2, 6):
import simplejson as json
# In py26, invalid xml parsed by element tree
# will raise a plain old SyntaxError instead of
# a real exception, so we need to abstract this change.
XMLParseError = SyntaxError
# Handle https://github.com/shazow/urllib3/issues/497 for py2.6. In
# python2.6, there is a known issue where sometimes we cannot read the SAN
# from an SSL cert (http://bugs.python.org/issue13034). However, newer
# versions of urllib3 will warn you when there is no SAN. While we could
# just turn off this warning in urllib3 altogether, we _do_ want warnings
# when they're legitimate warnings. This method tries to scope the warning
# filter to be as specific as possible.
def filter_ssl_san_warnings():
warnings.filterwarnings(
'ignore',
message="Certificate has no.*subjectAltName.*",
category=exceptions.SecurityWarning,
module=r".*urllib3\.connection")
else:
import xml.etree.cElementTree
XMLParseError = xml.etree.cElementTree.ParseError
import json
def filter_ssl_san_warnings():
# Noop for non-py26 versions. We will parse the SAN
# appropriately.
pass
def filter_ssl_warnings():
# Ignore warnings related to SNI as it is not being used in validations.
warnings.filterwarnings(
'ignore',
message="A true SSLContext object is not available.*",
category=exceptions.InsecurePlatformWarning,
module=r".*urllib3\.util\.ssl_")
filter_ssl_san_warnings()
@classmethod
def from_dict(cls, d):
new_instance = cls()
for key, value in d.items():
new_instance[key] = value
return new_instance
@classmethod
def from_pairs(cls, pairs):
new_instance = cls()
for key, value in pairs:
new_instance[key] = value
return new_instance
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
def copy_kwargs(kwargs):
"""
There is a bug in Python versions < 2.6.5 that prevents you
from passing unicode keyword args (#4978). This function
takes a dictionary of kwargs and returns a copy. If you are
using Python < 2.6.5, it also encodes the keys to avoid this bug.
Oh, and version_info wasn't a namedtuple back then, either!
"""
vi = sys.version_info
# The micro version is at index 2 of the version_info tuple.
if vi[0] == 2 and vi[1] <= 6 and vi[2] < 5:
copy_kwargs = {}
for key in kwargs:
copy_kwargs[key.encode('utf-8')] = kwargs[key]
else:
copy_kwargs = copy.copy(kwargs)
return copy_kwargs
def total_seconds(delta):
"""
Returns the total seconds in a ``datetime.timedelta``.
Python 2.6 does not have ``timedelta.total_seconds()``, so we have
to calculate this ourselves. On 2.7 or better, we'll take advantage of the
built-in method.
The math was pulled from the ``datetime`` docs
(http://docs.python.org/2.7/library/datetime.html#datetime.timedelta.total_seconds).
:param delta: The timedelta object
:type delta: ``datetime.timedelta``
"""
if sys.version_info[:2] != (2, 6):
return delta.total_seconds()
day_in_seconds = delta.days * 24 * 3600.0
micro_in_seconds = delta.microseconds / 10.0**6
return day_in_seconds + delta.seconds + micro_in_seconds
# Checks to see if md5 is available on this system. A given system might not
# have access to it for various reasons, such as FIPS mode being enabled.
try:
hashlib.md5()
MD5_AVAILABLE = True
except ValueError:
MD5_AVAILABLE = False
def get_md5(*args, **kwargs):
"""
Attempts to get an md5 hashing object.
:param raise_error_if_unavailable: raise an error if md5 is unavailable on
this system. If False, None will be returned if it is unavailable.
:type raise_error_if_unavailable: bool
:param args: Args to pass to the MD5 constructor
:param kwargs: Key word arguments to pass to the MD5 constructor
:return: An MD5 hashing object if available. If it is unavailable, None
is returned if raise_error_if_unavailable is set to False.
"""
if MD5_AVAILABLE:
return hashlib.md5(*args, **kwargs)
else:
raise MD5UnavailableError()
def compat_shell_split(s, platform=None):
if platform is None:
platform = sys.platform
if platform == "win32":
return _windows_shell_split(s)
else:
return shlex.split(s)
def _windows_shell_split(s):
"""Splits up a windows command as the built-in command parser would.
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime (which is what python does
when you call popen) the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: The command string to split up into parts.
:return: A list of command components.
"""
if not s:
return []
components = []
buff = []
is_quoted = False
num_backslashes = 0
for character in s:
if character == '\\':
# We can't simply append backslashes because we don't know if
# they are being used as escape characters or not. Instead we
# keep track of how many we've encountered and handle them when
# we encounter a different character.
num_backslashes += 1
elif character == '"':
if num_backslashes > 0:
# The backslashes are in a chain leading up to a double
# quote, so they are escaping each other.
buff.append('\\' * int(floor(num_backslashes / 2)))
remainder = num_backslashes % 2
num_backslashes = 0
if remainder == 1:
# The number of backslashes is uneven, so they are also
# escaping the double quote, so it needs to be added to
# the current component buffer.
buff.append('"')
continue
# We've encountered a double quote that is not escaped,
# so we toggle is_quoted.
is_quoted = not is_quoted
# If there are quotes, then we may want an empty string. To be
# safe, we add an empty string to the buffer so that we make
# sure it sticks around if there's nothing else between quotes.
# If there is other stuff between quotes, the empty string will
# disappear during the joining process.
buff.append('')
elif character in [' ', '\t'] and not is_quoted:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
# Excess whitespace is ignored, so only add to the components
# list if there is anything in the buffer.
if buff:
components.append(''.join(buff))
buff = []
else:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
buff.append(character)
# Quotes must be terminated.
if is_quoted:
raise ValueError('No closing quotation in string: %s' % s)
# There may be some leftover backslashes, so we need to add them in.
# There's no quote so we add the exact number.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
# Add the final component in if there is anything in the buffer.
if buff:
components.append(''.join(buff))
return components
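# A hedged illustration of the backslash rules described above:
# backslashes are literal unless they lead up to a double quote, and
# an odd count escapes the quote itself.
def _example_windows_shell_split():
    # Backslashes not followed by a quote are kept as-is.
    assert _windows_shell_split(r'a\\b') == [r'a\\b']
    # Double quotes group space-separated words into one component.
    assert _windows_shell_split('"a b" c') == ['a b', 'c']
    # An odd number of backslashes escapes the quote it precedes.
    assert _windows_shell_split(r'\"a\"') == ['"a"']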
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import datetime
import logging
import os
import getpass
import threading
import json
import subprocess
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
from dateutil.parser import parse
from dateutil.tz import tzlocal
import botocore.configloader
import botocore.compat
from botocore.compat import total_seconds
from botocore.compat import compat_shell_split
from botocore.exceptions import UnknownCredentialError
from botocore.exceptions import PartialCredentialsError
from botocore.exceptions import ConfigNotFound
from botocore.exceptions import InvalidConfigError
from botocore.exceptions import InfiniteLoopConfigError
from botocore.exceptions import RefreshWithMFAUnsupportedError
from botocore.exceptions import MetadataRetrievalError
from botocore.exceptions import CredentialRetrievalError
from botocore.utils import InstanceMetadataFetcher, parse_key_val_file
from botocore.utils import ContainerMetadataFetcher
logger = logging.getLogger(__name__)
ReadOnlyCredentials = namedtuple('ReadOnlyCredentials',
['access_key', 'secret_key', 'token'])
def create_credential_resolver(session, cache=None):
"""Create a default credential resolver.
This creates a pre-configured credential resolver
that includes the default lookup chain for
credentials.
"""
profile_name = session.get_config_variable('profile') or 'default'
credential_file = session.get_config_variable('credentials_file')
config_file = session.get_config_variable('config_file')
metadata_timeout = session.get_config_variable('metadata_service_timeout')
num_attempts = session.get_config_variable('metadata_service_num_attempts')
if cache is None:
cache = {}
env_provider = EnvProvider()
container_provider = ContainerProvider()
instance_metadata_provider = InstanceMetadataProvider(
iam_role_fetcher=InstanceMetadataFetcher(
timeout=metadata_timeout,
num_attempts=num_attempts)
)
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=session.create_client,
cache=cache,
profile_name=profile_name,
credential_sourcer=CanonicalNameCredentialSourcer([
env_provider, container_provider, instance_metadata_provider
])
)
providers = [
env_provider,
assume_role_provider,
SharedCredentialProvider(
creds_filename=credential_file,
profile_name=profile_name
),
ProcessProvider(profile_name=profile_name,
load_config=lambda: session.full_config),
# The new config file has precedence over the legacy
# config file.
ConfigProvider(config_filename=config_file, profile_name=profile_name),
OriginalEC2Provider(),
BotoProvider(),
container_provider,
instance_metadata_provider
]
explicit_profile = session.get_config_variable('profile',
methods=('instance',))
if explicit_profile is not None:
# An explicitly provided profile will negate an EnvProvider.
# We will defer to providers that understand the "profile"
# concept to retrieve credentials.
        # The one edge case is if all three values are provided via
# env vars:
# export AWS_ACCESS_KEY_ID=foo
# export AWS_SECRET_ACCESS_KEY=bar
# export AWS_PROFILE=baz
# Then, just like our client() calls, the explicit credentials
# will take precedence.
#
# This precedence is enforced by leaving the EnvProvider in the chain.
# This means that the only way a "profile" would win is if the
# EnvProvider does not return credentials, which is what we want
# in this scenario.
providers.remove(env_provider)
logger.debug('Skipping environment variable credential check'
' because profile name was explicitly set.')
resolver = CredentialResolver(providers=providers)
return resolver
def get_credentials(session):
resolver = create_credential_resolver(session)
return resolver.load_credentials()
def _local_now():
return datetime.datetime.now(tzlocal())
def _parse_if_needed(value):
if isinstance(value, datetime.datetime):
return value
return parse(value)
def _serialize_if_needed(value, iso=False):
if isinstance(value, datetime.datetime):
if iso:
return value.isoformat()
return value.strftime('%Y-%m-%dT%H:%M:%S%Z')
return value
def create_assume_role_refresher(client, params):
def refresh():
response = client.assume_role(**params)
credentials = response['Credentials']
# We need to normalize the credential names to
# the values expected by the refresh creds.
return {
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['SessionToken'],
'expiry_time': _serialize_if_needed(credentials['Expiration']),
}
return refresh
def create_mfa_serial_refresher(actual_refresh):
class _Refresher(object):
def __init__(self, refresh):
self._refresh = refresh
self._has_been_called = False
def __call__(self):
if self._has_been_called:
# We can explore an option in the future to support
# reprompting for MFA, but for now we just error out
# when the temp creds expire.
raise RefreshWithMFAUnsupportedError()
self._has_been_called = True
return self._refresh()
return _Refresher(actual_refresh)
class JSONFileCache(object):
"""JSON file cache.
    This provides a dict-like interface that stores JSON-serializable
objects.
The objects are serialized to JSON and stored in a file. These
values can be retrieved at a later time.
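    For example (an illustrative key; files land under ``CACHE_DIR``)::
        cache = JSONFileCache()
        cache['my-key'] = {'foo': 'bar'}
        assert 'my-key' in cache
        assert cache['my-key'] == {'foo': 'bar'}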
"""
CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'boto', 'cache'))
def __init__(self, working_dir=CACHE_DIR):
self._working_dir = working_dir
def __contains__(self, cache_key):
actual_key = self._convert_cache_key(cache_key)
return os.path.isfile(actual_key)
def __getitem__(self, cache_key):
"""Retrieve value from a cache key."""
actual_key = self._convert_cache_key(cache_key)
try:
with open(actual_key) as f:
return json.load(f)
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
file_content = json.dumps(value, default=_serialize_if_needed)
except (TypeError, ValueError):
raise ValueError("Value cannot be cached, must be "
"JSON serializable: %s" % value)
if not os.path.isdir(self._working_dir):
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
full_path = os.path.join(self._working_dir, cache_key + '.json')
return full_path
class Credentials(object):
"""
Holds the credentials needed to authenticate requests.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
def __init__(self, access_key, secret_key, token=None,
method=None):
self.access_key = access_key
self.secret_key = secret_key
self.token = token
if method is None:
method = 'explicit'
self.method = method
self._normalize()
def _normalize(self):
# Keys would sometimes (accidentally) contain non-ascii characters.
# It would cause a confusing UnicodeDecodeError in Python 2.
        # We explicitly convert them into unicode to avoid such errors.
#
# Eventually the service will decide whether to accept the credential.
# This also complies with the behavior in Python 3.
self.access_key = botocore.compat.ensure_unicode(self.access_key)
self.secret_key = botocore.compat.ensure_unicode(self.secret_key)
def get_frozen_credentials(self):
return ReadOnlyCredentials(self.access_key,
self.secret_key,
self.token)
class RefreshableCredentials(Credentials):
"""
Holds the credentials needed to authenticate requests. In addition, it
knows how to refresh itself.
:ivar access_key: The access key part of the credentials.
:ivar secret_key: The secret key part of the credentials.
:ivar token: The security token, valid only for session credentials.
:ivar method: A string which identifies where the credentials
were found.
"""
# The time at which we'll attempt to refresh, but not
# block if someone else is refreshing.
_advisory_refresh_timeout = 15 * 60
# The time at which all threads will block waiting for
# refreshed credentials.
_mandatory_refresh_timeout = 10 * 60
def __init__(self, access_key, secret_key, token,
expiry_time, refresh_using, method,
time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = access_key
self._secret_key = secret_key
self._token = token
self._expiry_time = expiry_time
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = ReadOnlyCredentials(
access_key, secret_key, token)
self._normalize()
def _normalize(self):
self._access_key = botocore.compat.ensure_unicode(self._access_key)
self._secret_key = botocore.compat.ensure_unicode(self._secret_key)
@classmethod
def create_from_metadata(cls, metadata, refresh_using, method):
instance = cls(
access_key=metadata['access_key'],
secret_key=metadata['secret_key'],
token=metadata['token'],
expiry_time=cls._expiry_datetime(metadata['expiry_time']),
method=method,
refresh_using=refresh_using
)
return instance
@property
def access_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._access_key
@access_key.setter
def access_key(self, value):
self._access_key = value
@property
def secret_key(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._secret_key
@secret_key.setter
def secret_key(self, value):
self._secret_key = value
@property
def token(self):
"""Warning: Using this property can lead to race conditions if you
access another property subsequently along the refresh boundary.
Please use get_frozen_credentials instead.
"""
self._refresh()
return self._token
@token.setter
def token(self, value):
self._token = value
def _seconds_remaining(self):
delta = self._expiry_time - self._time_fetcher()
return total_seconds(delta)
def refresh_needed(self, refresh_in=None):
"""Check if a refresh is needed.
A refresh is needed if the expiry time associated
with the temporary credentials is less than the
        provided ``refresh_in``. If ``refresh_in`` is not
        provided, ``self._advisory_refresh_timeout`` will be used.
For example, if your temporary credentials expire
in 10 minutes and the provided ``refresh_in`` is
``15 * 60``, then this function will return ``True``.
:type refresh_in: int
:param refresh_in: The number of seconds before the
credentials expire in which refresh attempts should
be made.
        :return: True if a refresh is needed, False otherwise.
"""
if self._expiry_time is None:
# No expiration, so assume we don't need to refresh.
return False
if refresh_in is None:
refresh_in = self._advisory_refresh_timeout
        # The credentials should be refreshed if they're going to expire
        # within the ``refresh_in`` window.
if self._seconds_remaining() >= refresh_in:
# There's enough time left. Don't refresh.
return False
logger.debug("Credentials need to be refreshed.")
return True
def _is_expired(self):
# Checks if the current credentials are expired.
return self.refresh_needed(refresh_in=0)
def _refresh(self):
# In the common case where we don't need a refresh, we
# can immediately exit and not require acquiring the
# refresh lock.
if not self.refresh_needed(self._advisory_refresh_timeout):
return
        # acquire() doesn't accept kwargs; passing False indicates
        # that we should not block if we can't acquire the lock.
# If we aren't able to acquire the lock, we'll trigger
# the else clause.
if self._refresh_lock.acquire(False):
try:
if not self.refresh_needed(self._advisory_refresh_timeout):
return
is_mandatory_refresh = self.refresh_needed(
self._mandatory_refresh_timeout)
self._protected_refresh(is_mandatory=is_mandatory_refresh)
return
finally:
self._refresh_lock.release()
elif self.refresh_needed(self._mandatory_refresh_timeout):
# If we're within the mandatory refresh window,
# we must block until we get refreshed credentials.
with self._refresh_lock:
if not self.refresh_needed(self._mandatory_refresh_timeout):
return
self._protected_refresh(is_mandatory=True)
def _protected_refresh(self, is_mandatory):
# precondition: this method should only be called if you've acquired
# the self._refresh_lock.
try:
metadata = self._refresh_using()
except Exception as e:
period_name = 'mandatory' if is_mandatory else 'advisory'
logger.warning("Refreshing temporary credentials failed "
"during %s refresh period.",
period_name, exc_info=True)
if is_mandatory:
# If this is a mandatory refresh, then
# all errors that occur when we attempt to refresh
# credentials are propagated back to the user.
raise
# Otherwise we'll just return.
# The end result will be that we'll use the current
# set of temporary credentials we have.
return
self._set_from_data(metadata)
if self._is_expired():
# We successfully refreshed credentials but for whatever
# reason, our refreshing function returned credentials
# that are still expired. In this scenario, the only
# thing we can do is let the user know and raise
# an exception.
msg = ("Credentials were refreshed, but the "
"refreshed credentials are still expired.")
logger.warning(msg)
raise RuntimeError(msg)
self._frozen_credentials = ReadOnlyCredentials(
self._access_key, self._secret_key, self._token)
@staticmethod
def _expiry_datetime(time_str):
return parse(time_str)
def _set_from_data(self, data):
self.access_key = data['access_key']
self.secret_key = data['secret_key']
self.token = data['token']
self._expiry_time = parse(data['expiry_time'])
logger.debug("Retrieved credentials will expire at: %s",
self._expiry_time)
self._normalize()
def get_frozen_credentials(self):
"""Return immutable credentials.
The ``access_key``, ``secret_key``, and ``token`` properties
on this class will always check and refresh credentials if
needed before returning the particular credentials.
This has an edge case where you can get inconsistent
credentials. Imagine this:
# Current creds are "t1"
tmp.access_key ---> expired? no, so return t1.access_key
# ---- time is now expired, creds need refreshing to "t2" ----
tmp.secret_key ---> expired? yes, refresh and return t2.secret_key
This means we're using the access key from t1 with the secret key
from t2. To fix this issue, you can request a frozen credential object
which is guaranteed not to change.
The frozen credentials returned from this method should be used
immediately and then discarded. The typical usage pattern would
be::
creds = RefreshableCredentials(...)
some_code = SomeSignerObject()
# I'm about to sign the request.
# The frozen credentials are only used for the
# duration of generate_presigned_url and will be
# immediately thrown away.
request = some_code.sign_some_request(
with_credentials=creds.get_frozen_credentials())
print("Signed request:", request)
"""
self._refresh()
return self._frozen_credentials
class DeferredRefreshableCredentials(RefreshableCredentials):
"""Refreshable credentials that don't require initial credentials.
refresh_using will be called upon first access.
"""
def __init__(self, refresh_using, method, time_fetcher=_local_now):
self._refresh_using = refresh_using
self._access_key = None
self._secret_key = None
self._token = None
self._expiry_time = None
self._time_fetcher = time_fetcher
self._refresh_lock = threading.Lock()
self.method = method
self._frozen_credentials = None
def refresh_needed(self, refresh_in=None):
if any(part is None for part in [self._access_key, self._secret_key]):
return True
return super(DeferredRefreshableCredentials, self).refresh_needed(
refresh_in
)
class CachedCredentialFetcher(object):
def __init__(self, cache=None, expiry_window_seconds=60 * 15):
if cache is None:
cache = {}
self._cache = cache
self._cache_key = self._create_cache_key()
self._expiry_window_seconds = expiry_window_seconds
def _create_cache_key(self):
raise NotImplementedError('_create_cache_key()')
def _make_file_safe(self, filename):
        # Replace ':', the OS path separator, and '/' so the string is
        # safe to use as a filename.
filename = filename.replace(':', '_').replace(os.path.sep, '_')
return filename.replace('/', '_')
def _get_credentials(self):
raise NotImplementedError('_get_credentials()')
def fetch_credentials(self):
return self._get_cached_credentials()
def _get_cached_credentials(self):
"""Get up-to-date credentials.
        This will check the cache for up-to-date credentials, calling
        ``_get_credentials()`` to fetch new ones if none are available.
"""
response = self._load_from_cache()
if response is None:
response = self._get_credentials()
self._write_to_cache(response)
else:
logger.debug("Credentials for role retrieved from cache.")
creds = response['Credentials']
expiration = _serialize_if_needed(creds['Expiration'], iso=True)
return {
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'token': creds['SessionToken'],
'expiry_time': expiration,
}
def _load_from_cache(self):
if self._cache_key in self._cache:
creds = deepcopy(self._cache[self._cache_key])
if not self._is_expired(creds):
return creds
else:
logger.debug(
"Credentials were found in cache, but they are expired."
)
return None
def _write_to_cache(self, response):
self._cache[self._cache_key] = deepcopy(response)
def _is_expired(self, credentials):
"""Check if credentials are expired."""
end_time = _parse_if_needed(credentials['Credentials']['Expiration'])
seconds = total_seconds(end_time - _local_now())
return seconds < self._expiry_window_seconds
class AssumeRoleCredentialFetcher(CachedCredentialFetcher):
def __init__(self, client_creator, source_credentials, role_arn,
extra_args=None, mfa_prompter=None, cache=None,
expiry_window_seconds=60 * 15):
"""
:type client_creator: callable
:param client_creator: A callable that creates a client taking
arguments like ``Session.create_client``.
:type source_credentials: Credentials
:param source_credentials: The credentials to use to create the
client for the call to AssumeRole.
:type role_arn: str
:param role_arn: The ARN of the role to be assumed.
:type extra_args: dict
:param extra_args: Any additional arguments to add to the assume
role request using the format of the botocore operation.
Possible keys include, but may not be limited to,
DurationSeconds, Policy, SerialNumber, ExternalId and
RoleSessionName.
:type mfa_prompter: callable
:param mfa_prompter: A callable that returns input provided by the
            user (e.g. ``raw_input``, ``getpass.getpass``).
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example of this is
the ``JSONFileCache`` class in aws-cli.
:type expiry_window_seconds: int
        :param expiry_window_seconds: The amount of time, in seconds,
            before the credentials' stated expiration at which they are
            considered expired and will be re-fetched.
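        A usage sketch (the role ARN and ``session`` are illustrative)::
            fetcher = AssumeRoleCredentialFetcher(
                client_creator=session.create_client,
                source_credentials=Credentials('access_key', 'secret_key'),
                role_arn='arn:aws:iam::123456789012:role/MyRole',
            )
            creds = fetcher.fetch_credentials()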
"""
self._client_creator = client_creator
self._source_credentials = source_credentials
self._role_arn = role_arn
if extra_args is None:
self._assume_kwargs = {}
else:
self._assume_kwargs = deepcopy(extra_args)
self._assume_kwargs['RoleArn'] = self._role_arn
self._role_session_name = self._assume_kwargs.get('RoleSessionName')
self._using_default_session_name = False
if not self._role_session_name:
self._role_session_name = 'botocore-session-%s' % (
int(time.time()))
self._assume_kwargs['RoleSessionName'] = self._role_session_name
self._using_default_session_name = True
self._mfa_prompter = mfa_prompter
if self._mfa_prompter is None:
self._mfa_prompter = getpass.getpass
super(AssumeRoleCredentialFetcher, self).__init__(
cache, expiry_window_seconds
)
def _create_cache_key(self):
"""Create a predictable cache key for the current configuration.
The cache key is intended to be compatible with file names.
"""
args = deepcopy(self._assume_kwargs)
# The role session name gets randomly generated, so we don't want it
# in the hash.
if self._using_default_session_name:
del args['RoleSessionName']
if 'Policy' in args:
# To have a predictable hash, the keys of the policy must be
# sorted, so we have to load it here to make sure it gets sorted
# later on.
args['Policy'] = json.loads(args['Policy'])
args = json.dumps(args, sort_keys=True)
argument_hash = sha1(args.encode('utf-8')).hexdigest()
return self._make_file_safe(argument_hash)
def _get_credentials(self):
"""Get credentials by calling assume role."""
kwargs = self._assume_role_kwargs()
client = self._create_client()
return client.assume_role(**kwargs)
def _assume_role_kwargs(self):
"""Get the arguments for assume role based on current configuration."""
assume_role_kwargs = self._assume_kwargs
mfa_serial = assume_role_kwargs.get('SerialNumber')
if mfa_serial is not None:
prompt = 'Enter MFA code for %s: ' % mfa_serial
token_code = self._mfa_prompter(prompt)
assume_role_kwargs = deepcopy(assume_role_kwargs)
assume_role_kwargs['TokenCode'] = token_code
return assume_role_kwargs
def _create_client(self):
"""Create an STS client using the source credentials."""
frozen_credentials = self._source_credentials.get_frozen_credentials()
return self._client_creator(
'sts',
aws_access_key_id=frozen_credentials.access_key,
aws_secret_access_key=frozen_credentials.secret_key,
aws_session_token=frozen_credentials.token,
)
class CredentialProvider(object):
# A short name to identify the provider within botocore.
METHOD = None
# A name to identify the provider for use in cross-sdk features like
# assume role's `credential_source` configuration option. These names
# are to be treated in a case-insensitive way. NOTE: any providers not
# implemented in botocore MUST prefix their canonical names with
# 'custom' or we DO NOT guarantee that it will work with any features
# that this provides.
CANONICAL_NAME = None
def __init__(self, session=None):
self.session = session
def load(self):
"""
Loads the credentials from their source & sets them on the object.
Subclasses should implement this method (by reading from disk, the
environment, the network or wherever), returning ``True`` if they were
found & loaded.
        If not found, this method should return ``False``, indicating that the
``CredentialResolver`` should fall back to the next available method.
The default implementation does nothing, assuming the user has set the
``access_key/secret_key/token`` themselves.
        :returns: Whether credentials were found & set
        :rtype: bool
"""
return True
def _extract_creds_from_mapping(self, mapping, *key_names):
found = []
for key_name in key_names:
try:
found.append(mapping[key_name])
except KeyError:
raise PartialCredentialsError(provider=self.METHOD,
cred_var=key_name)
return found
class ProcessProvider(CredentialProvider):
METHOD = 'custom-process'
def __init__(self, profile_name, load_config, popen=subprocess.Popen):
self._profile_name = profile_name
self._load_config = load_config
self._loaded_config = None
self._popen = popen
def load(self):
credential_process = self._credential_process
if credential_process is None:
return
creds_dict = self._retrieve_credentials_using(credential_process)
if creds_dict.get('expiry_time') is not None:
return RefreshableCredentials.create_from_metadata(
creds_dict,
lambda: self._retrieve_credentials_using(credential_process),
self.METHOD
)
return Credentials(
access_key=creds_dict['access_key'],
secret_key=creds_dict['secret_key'],
token=creds_dict.get('token'),
method=self.METHOD
)
def _retrieve_credentials_using(self, credential_process):
# We're not using shell=True, so we need to pass the
# command and all arguments as a list.
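        # The process is expected to print a JSON document of the form
        # (illustrative values; see the parsing below):
        #     {"Version": 1,
        #      "AccessKeyId": "AKID...",
        #      "SecretAccessKey": "...",
        #      "SessionToken": "...",
        #      "Expiration": "2018-05-23T21:06:52Z"}
        # SessionToken and Expiration are optional.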
process_list = compat_shell_split(credential_process)
p = self._popen(process_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise CredentialRetrievalError(
provider=self.METHOD, error_msg=stderr.decode('utf-8'))
parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
version = parsed.get('Version', '<Version key not provided>')
if version != 1:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg=("Unsupported version '%s' for credential process "
"provider, supported versions: 1" % version))
try:
return {
'access_key': parsed['AccessKeyId'],
'secret_key': parsed['SecretAccessKey'],
'token': parsed.get('SessionToken'),
'expiry_time': parsed.get('Expiration'),
}
except KeyError as e:
raise CredentialRetrievalError(
provider=self.METHOD,
error_msg="Missing required key in response: %s" % e
)
@property
def _credential_process(self):
if self._loaded_config is None:
self._loaded_config = self._load_config()
profile_config = self._loaded_config.get(
'profiles', {}).get(self._profile_name, {})
return profile_config.get('credential_process')
class InstanceMetadataProvider(CredentialProvider):
METHOD = 'iam-role'
CANONICAL_NAME = 'Ec2InstanceMetadata'
def __init__(self, iam_role_fetcher):
self._role_fetcher = iam_role_fetcher
def load(self):
fetcher = self._role_fetcher
# We do the first request, to see if we get useful data back.
# If not, we'll pass & move on to whatever's next in the credential
# chain.
metadata = fetcher.retrieve_iam_role_credentials()
if not metadata:
return None
logger.debug('Found credentials from IAM Role: %s',
metadata['role_name'])
# We manually set the data here, since we already made the request &
# have it. When the expiry is hit, the credentials will auto-refresh
# themselves.
creds = RefreshableCredentials.create_from_metadata(
metadata,
method=self.METHOD,
refresh_using=fetcher.retrieve_iam_role_credentials,
)
return creds
class EnvProvider(CredentialProvider):
METHOD = 'env'
CANONICAL_NAME = 'Environment'
ACCESS_KEY = 'AWS_ACCESS_KEY_ID'
SECRET_KEY = 'AWS_SECRET_ACCESS_KEY'
    # The token can come from either of these env vars.
# AWS_SESSION_TOKEN is what other AWS SDKs have standardized on.
TOKENS = ['AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN']
EXPIRY_TIME = 'AWS_CREDENTIAL_EXPIRATION'
def __init__(self, environ=None, mapping=None):
"""
:param environ: The environment variables (defaults to
``os.environ`` if no value is provided).
:param mapping: An optional mapping of variable names to
environment variable names. Use this if you want to
change the mapping of access_key->AWS_ACCESS_KEY_ID, etc.
            The dict can have up to 4 keys: ``access_key``, ``secret_key``,
            ``token``, and ``expiry_time``.
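        For example, to read credentials from non-default environment
        variables (the variable names are illustrative)::
            provider = EnvProvider(mapping={
                'access_key': 'MY_ACCESS_KEY',
                'secret_key': 'MY_SECRET_KEY',
            })
            credentials = provider.load()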
"""
if environ is None:
environ = os.environ
self.environ = environ
self._mapping = self._build_mapping(mapping)
def _build_mapping(self, mapping):
# Mapping of variable name to env var name.
var_mapping = {}
if mapping is None:
# Use the class var default.
var_mapping['access_key'] = self.ACCESS_KEY
var_mapping['secret_key'] = self.SECRET_KEY
var_mapping['token'] = self.TOKENS
var_mapping['expiry_time'] = self.EXPIRY_TIME
else:
var_mapping['access_key'] = mapping.get(
'access_key', self.ACCESS_KEY)
var_mapping['secret_key'] = mapping.get(
'secret_key', self.SECRET_KEY)
var_mapping['token'] = mapping.get(
'token', self.TOKENS)
if not isinstance(var_mapping['token'], list):
var_mapping['token'] = [var_mapping['token']]
var_mapping['expiry_time'] = mapping.get(
'expiry_time', self.EXPIRY_TIME)
return var_mapping
def load(self):
"""
Search for credentials in explicit environment variables.
"""
if self._mapping['access_key'] in self.environ:
logger.info('Found credentials in environment variables.')
fetcher = self._create_credentials_fetcher()
credentials = fetcher(require_expiry=False)
expiry_time = credentials['expiry_time']
if expiry_time is not None:
expiry_time = parse(expiry_time)
return RefreshableCredentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], expiry_time,
refresh_using=fetcher, method=self.METHOD
)
return Credentials(
credentials['access_key'], credentials['secret_key'],
credentials['token'], method=self.METHOD
)
else:
return None
def _create_credentials_fetcher(self):
mapping = self._mapping
method = self.METHOD
environ = self.environ
def fetch_credentials(require_expiry=True):
credentials = {}
access_key = environ.get(mapping['access_key'])
if access_key is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['access_key'])
credentials['access_key'] = access_key
secret_key = environ.get(mapping['secret_key'])
if secret_key is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['secret_key'])
credentials['secret_key'] = secret_key
token = None
for token_env_var in mapping['token']:
if token_env_var in environ:
token = environ[token_env_var]
break
credentials['token'] = token
expiry_time = environ.get(mapping['expiry_time'])
if require_expiry and expiry_time is None:
raise PartialCredentialsError(
provider=method, cred_var=mapping['expiry_time'])
credentials['expiry_time'] = expiry_time
return credentials
return fetch_credentials
class OriginalEC2Provider(CredentialProvider):
METHOD = 'ec2-credentials-file'
CANONICAL_NAME = 'Ec2Config'
CRED_FILE_ENV = 'AWS_CREDENTIAL_FILE'
ACCESS_KEY = 'AWSAccessKeyId'
SECRET_KEY = 'AWSSecretKey'
def __init__(self, environ=None, parser=None):
if environ is None:
environ = os.environ
if parser is None:
parser = parse_key_val_file
self._environ = environ
self._parser = parser
def load(self):
"""
Search for a credential file used by original EC2 CLI tools.
"""
if 'AWS_CREDENTIAL_FILE' in self._environ:
full_path = os.path.expanduser(
self._environ['AWS_CREDENTIAL_FILE'])
creds = self._parser(full_path)
if self.ACCESS_KEY in creds:
logger.info('Found credentials in AWS_CREDENTIAL_FILE.')
access_key = creds[self.ACCESS_KEY]
secret_key = creds[self.SECRET_KEY]
# EC2 creds file doesn't support session tokens.
return Credentials(access_key, secret_key, method=self.METHOD)
else:
return None
class SharedCredentialProvider(CredentialProvider):
METHOD = 'shared-credentials-file'
CANONICAL_NAME = 'SharedCredentials'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, creds_filename, profile_name=None, ini_parser=None):
self._creds_filename = creds_filename
if profile_name is None:
profile_name = 'default'
self._profile_name = profile_name
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._ini_parser = ini_parser
def load(self):
try:
available_creds = self._ini_parser(self._creds_filename)
except ConfigNotFound:
return None
if self._profile_name in available_creds:
config = available_creds[self._profile_name]
if self.ACCESS_KEY in config:
logger.info("Found credentials in shared credentials file: %s",
self._creds_filename)
access_key, secret_key = self._extract_creds_from_mapping(
config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
def _get_session_token(self, config):
for token_envvar in self.TOKENS:
if token_envvar in config:
return config[token_envvar]
class ConfigProvider(CredentialProvider):
"""INI based config provider with profile sections."""
METHOD = 'config-file'
CANONICAL_NAME = 'SharedConfig'
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
# Same deal as the EnvProvider above. Botocore originally supported
# aws_security_token, but the SDKs are standardizing on aws_session_token
# so we support both.
TOKENS = ['aws_security_token', 'aws_session_token']
def __init__(self, config_filename, profile_name, config_parser=None):
"""
:param config_filename: The session configuration scoped to the current
profile. This is available via ``session.config``.
:param profile_name: The name of the current profile.
:param config_parser: A config parser callable.
"""
self._config_filename = config_filename
self._profile_name = profile_name
if config_parser is None:
config_parser = botocore.configloader.load_config
self._config_parser = config_parser
def load(self):
"""
        If there are credentials in the configuration associated with
the session, use those.
"""
try:
full_config = self._config_parser(self._config_filename)
except ConfigNotFound:
return None
if self._profile_name in full_config['profiles']:
profile_config = full_config['profiles'][self._profile_name]
if self.ACCESS_KEY in profile_config:
logger.info("Credentials found in config file: %s",
self._config_filename)
access_key, secret_key = self._extract_creds_from_mapping(
profile_config, self.ACCESS_KEY, self.SECRET_KEY)
token = self._get_session_token(profile_config)
return Credentials(access_key, secret_key, token,
method=self.METHOD)
else:
return None
def _get_session_token(self, profile_config):
for token_name in self.TOKENS:
if token_name in profile_config:
return profile_config[token_name]
class BotoProvider(CredentialProvider):
METHOD = 'boto-config'
CANONICAL_NAME = 'Boto2Config'
BOTO_CONFIG_ENV = 'BOTO_CONFIG'
DEFAULT_CONFIG_FILENAMES = ['/etc/boto.cfg', '~/.boto']
ACCESS_KEY = 'aws_access_key_id'
SECRET_KEY = 'aws_secret_access_key'
def __init__(self, environ=None, ini_parser=None):
if environ is None:
environ = os.environ
if ini_parser is None:
ini_parser = botocore.configloader.raw_config_parse
self._environ = environ
self._ini_parser = ini_parser
def load(self):
"""
Look for credentials in boto config file.
"""
if self.BOTO_CONFIG_ENV in self._environ:
potential_locations = [self._environ[self.BOTO_CONFIG_ENV]]
else:
potential_locations = self.DEFAULT_CONFIG_FILENAMES
for filename in potential_locations:
try:
config = self._ini_parser(filename)
except ConfigNotFound:
# Move on to the next potential config file name.
continue
if 'Credentials' in config:
credentials = config['Credentials']
if self.ACCESS_KEY in credentials:
logger.info("Found credentials in boto config file: %s",
filename)
access_key, secret_key = self._extract_creds_from_mapping(
credentials, self.ACCESS_KEY, self.SECRET_KEY)
return Credentials(access_key, secret_key,
method=self.METHOD)
class AssumeRoleProvider(CredentialProvider):
METHOD = 'assume-role'
# The AssumeRole provider is logically part of the SharedConfig and
# SharedCredentials providers. Since the purpose of the canonical name
# is to provide cross-sdk compatibility, calling code will need to be
# aware that either of those providers should be tied to the AssumeRole
# provider as much as possible.
CANONICAL_NAME = None
ROLE_CONFIG_VAR = 'role_arn'
    # Credentials are considered expired (and will be refreshed) once the
    # remaining time until the credentials expire is less than
    # EXPIRY_WINDOW_SECONDS.
EXPIRY_WINDOW_SECONDS = 60 * 15
def __init__(self, load_config, client_creator, cache, profile_name,
prompter=getpass.getpass, credential_sourcer=None):
"""
:type load_config: callable
:param load_config: A function that accepts no arguments, and
when called, will return the full configuration dictionary
for the session (``session.full_config``).
:type client_creator: callable
:param client_creator: A factory function that will create
a client when called. Has the same interface as
``botocore.session.Session.create_client``.
:type cache: dict
:param cache: An object that supports ``__getitem__``,
``__setitem__``, and ``__contains__``. An example
of this is the ``JSONFileCache`` class in the CLI.
:type profile_name: str
:param profile_name: The name of the profile.
:type prompter: callable
:param prompter: A callable that returns input provided
            by the user (e.g. ``raw_input``, ``getpass.getpass``).
:type credential_sourcer: CanonicalNameCredentialSourcer
:param credential_sourcer: A credential provider that takes a
configuration, which is used to provide the source credentials
for the STS call.
"""
#: The cache used to first check for assumed credentials.
#: This is checked before making the AssumeRole API
#: calls and can be useful if you have short lived
#: scripts and you'd like to avoid calling AssumeRole
#: until the credentials are expired.
self.cache = cache
self._load_config = load_config
        # client_creator is a callable that creates a client;
        # it's basically session.create_client.
self._client_creator = client_creator
self._profile_name = profile_name
self._prompter = prompter
# The _loaded_config attribute will be populated from the
# load_config() function once the configuration is actually
# loaded. The reason we go through all this instead of just
        # requiring that the loaded_config be passed to us is so that
        # we can defer loading the configuration until we actually try
# to load credentials (as opposed to when the object is
# instantiated).
self._loaded_config = {}
self._credential_sourcer = credential_sourcer
self._visited_profiles = [self._profile_name]
def load(self):
self._loaded_config = self._load_config()
profiles = self._loaded_config.get('profiles', {})
profile = profiles.get(self._profile_name, {})
if self._has_assume_role_config_vars(profile):
return self._load_creds_via_assume_role(self._profile_name)
def _has_assume_role_config_vars(self, profile):
return self.ROLE_CONFIG_VAR in profile
def _load_creds_via_assume_role(self, profile_name):
role_config = self._get_role_config(profile_name)
source_credentials = self._resolve_source_credentials(
role_config, profile_name
)
extra_args = {}
role_session_name = role_config.get('role_session_name')
if role_session_name is not None:
extra_args['RoleSessionName'] = role_session_name
external_id = role_config.get('external_id')
if external_id is not None:
extra_args['ExternalId'] = external_id
mfa_serial = role_config.get('mfa_serial')
if mfa_serial is not None:
extra_args['SerialNumber'] = mfa_serial
fetcher = AssumeRoleCredentialFetcher(
client_creator=self._client_creator,
source_credentials=source_credentials,
role_arn=role_config['role_arn'],
extra_args=extra_args,
mfa_prompter=self._prompter,
cache=self.cache,
)
refresher = fetcher.fetch_credentials
if mfa_serial is not None:
refresher = create_mfa_serial_refresher(refresher)
# The initial credentials are empty and the expiration time is set
# to now so that we can delay the call to assume role until it is
# strictly needed.
return DeferredRefreshableCredentials(
method=self.METHOD,
refresh_using=refresher,
time_fetcher=_local_now
)
def _get_role_config(self, profile_name):
"""Retrieves and validates the role configuration for the profile."""
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
source_profile = profile.get('source_profile')
role_arn = profile['role_arn']
credential_source = profile.get('credential_source')
mfa_serial = profile.get('mfa_serial')
external_id = profile.get('external_id')
role_session_name = profile.get('role_session_name')
role_config = {
'role_arn': role_arn,
'external_id': external_id,
'mfa_serial': mfa_serial,
'role_session_name': role_session_name,
'source_profile': source_profile,
'credential_source': credential_source
}
# Either the credential source or the source profile must be
# specified, but not both.
if credential_source is not None and source_profile is not None:
raise InvalidConfigError(
error_msg=(
'The profile "%s" contains both source_profile and '
'credential_source.' % profile_name
)
)
elif credential_source is None and source_profile is None:
raise PartialCredentialsError(
provider=self.METHOD,
cred_var='source_profile or credential_source'
)
elif credential_source is not None:
self._validate_credential_source(
profile_name, credential_source)
else:
self._validate_source_profile(profile_name, source_profile)
return role_config
def _validate_credential_source(self, parent_profile, credential_source):
if self._credential_sourcer is None:
raise InvalidConfigError(error_msg=(
'The credential_source "%s" is specified in profile "%s", '
'but no source provider was configured.' % (
credential_source, parent_profile)
))
if not self._credential_sourcer.is_supported(credential_source):
raise InvalidConfigError(error_msg=(
'The credential source "%s" referenced in profile "%s" is not '
'valid.' % (credential_source, parent_profile)
))
def _source_profile_has_credentials(self, profile):
return any([
self._has_static_credentials(profile),
self._has_assume_role_config_vars(profile),
])
def _validate_source_profile(self, parent_profile_name,
source_profile_name):
profiles = self._loaded_config.get('profiles', {})
if source_profile_name not in profiles:
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" referenced in '
'the profile "%s" does not exist.' % (
source_profile_name, parent_profile_name)
)
)
source_profile = profiles[source_profile_name]
# Ensure the profile has valid credential type
if not self._source_profile_has_credentials(source_profile):
raise InvalidConfigError(
error_msg=(
'The source_profile "%s" must specify either static '
'credentials or an assume role configuration' % (
source_profile_name)
)
)
# Make sure we aren't going into an infinite loop. If we haven't
# visited the profile yet, we're good.
if source_profile_name not in self._visited_profiles:
return
# If we have visited the profile and the profile isn't simply
# referencing itself, that's an infinite loop.
if source_profile_name != parent_profile_name:
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
# A profile is allowed to reference itself so that it can source
# static credentials and have configuration all in the same
# profile. This will only ever work for the top level assume
# role because the static credentials will otherwise take
# precedence.
if not self._has_static_credentials(source_profile):
raise InfiniteLoopConfigError(
source_profile=source_profile_name,
visited_profiles=self._visited_profiles
)
def _has_static_credentials(self, profile):
static_keys = ['aws_secret_access_key', 'aws_access_key_id']
return any(static_key in profile for static_key in static_keys)
def _resolve_source_credentials(self, role_config, profile_name):
credential_source = role_config.get('credential_source')
if credential_source is not None:
return self._resolve_credentials_from_source(
credential_source, profile_name
)
source_profile = role_config['source_profile']
self._visited_profiles.append(source_profile)
return self._resolve_credentials_from_profile(source_profile)
def _resolve_credentials_from_profile(self, profile_name):
profiles = self._loaded_config.get('profiles', {})
profile = profiles[profile_name]
if self._has_static_credentials(profile):
return self._resolve_static_credentials_from_profile(profile)
return self._load_creds_via_assume_role(profile_name)
def _resolve_static_credentials_from_profile(self, profile):
try:
return Credentials(
access_key=profile['aws_access_key_id'],
secret_key=profile['aws_secret_access_key'],
token=profile.get('aws_session_token')
)
except KeyError as e:
raise PartialCredentialsError(
provider=self.METHOD, cred_var=str(e))
def _resolve_credentials_from_source(self, credential_source,
profile_name):
credentials = self._credential_sourcer.source_credentials(
credential_source)
if credentials is None:
raise CredentialRetrievalError(
provider=credential_source,
error_msg=(
'No credentials found in credential_source referenced '
'in profile %s' % profile_name
)
)
return credentials
class CanonicalNameCredentialSourcer(object):
def __init__(self, providers):
self._providers = providers
def is_supported(self, source_name):
"""Validates a given source name.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: bool
:returns: True if the credential provider is supported,
False otherwise.
"""
return source_name in [p.CANONICAL_NAME for p in self._providers]
def source_credentials(self, source_name):
"""Loads source credentials based on the provided configuration.
:type source_name: str
:param source_name: The value of credential_source in the config
file. This is the canonical name of the credential provider.
:rtype: Credentials
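        For example (``EnvProvider``'s canonical name is
        ``Environment``)::
            sourcer = CanonicalNameCredentialSourcer([EnvProvider()])
            credentials = sourcer.source_credentials('Environment')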
"""
source = self._get_provider(source_name)
if isinstance(source, CredentialResolver):
return source.load_credentials()
return source.load()
def _get_provider(self, canonical_name):
"""Return a credential provider by its canonical name.
:type canonical_name: str
:param canonical_name: The canonical name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
provider = self._get_provider_by_canonical_name(canonical_name)
# The AssumeRole provider should really be part of the SharedConfig
# provider rather than being its own thing, but it is not. It is
# effectively part of both the SharedConfig provider and the
# SharedCredentials provider now due to the way it behaves.
# Therefore if we want either of those providers we should return
# the AssumeRole provider with it.
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
assume_role_provider = self._get_provider_by_method('assume-role')
if assume_role_provider is not None:
# The SharedConfig or SharedCredentials provider may not be
# present if it was removed for some reason, but the
# AssumeRole provider could still be present. In that case,
# return the assume role provider by itself.
if provider is None:
return assume_role_provider
# If both are present, return them both as a
# CredentialResolver so that calling code can treat them as
# a single entity.
return CredentialResolver([assume_role_provider, provider])
if provider is None:
raise UnknownCredentialError(name=canonical_name)
return provider
def _get_provider_by_canonical_name(self, canonical_name):
"""Return a credential provider by its canonical name.
This function is strict, it does not attempt to address
compatibility issues.
"""
for provider in self._providers:
name = provider.CANONICAL_NAME
# Canonical names are case-insensitive
if name and name.lower() == canonical_name.lower():
return provider
def _get_provider_by_method(self, method):
"""Return a credential provider by its METHOD name."""
for provider in self._providers:
if provider.METHOD == method:
return provider
class ContainerProvider(CredentialProvider):
METHOD = 'container-role'
CANONICAL_NAME = 'EcsContainer'
ENV_VAR = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ENV_VAR_FULL = 'AWS_CONTAINER_CREDENTIALS_FULL_URI'
ENV_VAR_AUTH_TOKEN = 'AWS_CONTAINER_AUTHORIZATION_TOKEN'
def __init__(self, environ=None, fetcher=None):
if environ is None:
environ = os.environ
if fetcher is None:
fetcher = ContainerMetadataFetcher()
self._environ = environ
self._fetcher = fetcher
def load(self):
        # This cred provider is only triggered if self.ENV_VAR is set,
# which only happens if you opt into this feature.
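        # For example (an illustrative value; ECS injects this for tasks
        # with a task role):
        #     AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/v2/credentials/<id>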
if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ:
return self._retrieve_or_fail()
def _retrieve_or_fail(self):
if self._provided_relative_uri():
full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
else:
full_uri = self._environ[self.ENV_VAR_FULL]
headers = self._build_headers()
fetcher = self._create_fetcher(full_uri, headers)
creds = fetcher()
return RefreshableCredentials(
access_key=creds['access_key'],
secret_key=creds['secret_key'],
token=creds['token'],
method=self.METHOD,
expiry_time=_parse_if_needed(creds['expiry_time']),
refresh_using=fetcher,
)
def _build_headers(self):
auth_token = self._environ.get(self.ENV_VAR_AUTH_TOKEN)
if auth_token is not None:
return {
'Authorization': auth_token
}
def _create_fetcher(self, full_uri, headers):
def fetch_creds():
try:
response = self._fetcher.retrieve_full_uri(
full_uri, headers=headers)
except MetadataRetrievalError as e:
logger.debug("Error retrieving container metadata: %s", e,
exc_info=True)
raise CredentialRetrievalError(provider=self.METHOD,
error_msg=str(e))
return {
'access_key': response['AccessKeyId'],
'secret_key': response['SecretAccessKey'],
'token': response['Token'],
'expiry_time': response['Expiration'],
}
return fetch_creds
def _provided_relative_uri(self):
return self.ENV_VAR in self._environ
class CredentialResolver(object):
def __init__(self, providers):
"""
:param providers: A list of ``CredentialProvider`` instances.
"""
self.providers = providers
def insert_before(self, name, credential_provider):
"""
Inserts a new instance of ``CredentialProvider`` into the chain that
will be tried before an existing one.
        :param name: The short name of the provider you'd like to insert the
            new provider before (e.g. ``env`` or ``config-file``). Existing
            names & ordering can be discovered via the ``METHOD`` attribute
            of each provider in ``self.providers``.
        :type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
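        For example, to try a custom provider before the env provider
        (``custom_provider`` stands for any ``CredentialProvider``
        instance)::
            resolver.insert_before('env', custom_provider)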
"""
try:
offset = [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
self.providers.insert(offset, credential_provider)
def insert_after(self, name, credential_provider):
"""
        Inserts a new instance of ``CredentialProvider`` into the chain that
        will be tried after an existing one.
        :param name: The short name of the provider you'd like to insert the
            new provider after (e.g. ``env`` or ``config-file``). Existing
            names & ordering can be discovered via the ``METHOD`` attribute
            of each provider in ``self.providers``.
        :type name: string
        :param credential_provider: An instance of the new
            ``CredentialProvider`` you'd like to add to the chain.
        :type credential_provider: A subclass of ``CredentialProvider``
"""
offset = self._get_provider_offset(name)
self.providers.insert(offset + 1, credential_provider)
def remove(self, name):
"""
        Removes a given ``CredentialProvider`` instance from the chain.
        :param name: The short name (``METHOD``) of the provider to remove.
:type name: string
"""
available_methods = [p.METHOD for p in self.providers]
if name not in available_methods:
# It's not present. Fail silently.
return
offset = available_methods.index(name)
self.providers.pop(offset)
def get_provider(self, name):
"""Return a credential provider by name.
:type name: str
:param name: The name of the provider.
:raises UnknownCredentialError: Raised if no
credential provider by the provided name
is found.
"""
return self.providers[self._get_provider_offset(name)]
def _get_provider_offset(self, name):
try:
return [p.METHOD for p in self.providers].index(name)
except ValueError:
raise UnknownCredentialError(name=name)
def load_credentials(self):
"""
Goes through the credentials chain, returning the first ``Credentials``
that could be loaded.
"""
# First provider to return a non-None response wins.
for provider in self.providers:
logger.debug("Looking for credentials via: %s", provider.METHOD)
creds = provider.load()
if creds is not None:
return creds
# If we got here, no credentials could be found.
# This feels like it should be an exception, but historically, ``None``
# is returned.
#
# +1
# -js
return None
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import weakref
import json
import base64
import botocore
import botocore.auth
from botocore.compat import six, OrderedDict
from botocore.awsrequest import create_request_object, prepare_request_dict
from botocore.exceptions import UnknownSignatureVersionError
from botocore.exceptions import UnknownClientMethodError
from botocore.exceptions import UnsupportedSignatureVersionError
from botocore.utils import fix_s3_host, datetime2timestamp
class RequestSigner(object):
"""
An object to sign requests before they go out over the wire using
one of the authentication mechanisms defined in ``auth.py``. This
class fires two events scoped to a service and operation name:
* choose-signer: Allows overriding the auth signer name.
* before-sign: Allows mutating the request before signing.
Together these events allow for customization of the request
signing pipeline, including overrides, request path manipulation,
and disabling signing per operation.
:type service_name: string
:param service_name: Name of the service, e.g. ``S3``
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:type credentials: :py:class:`~botocore.credentials.Credentials`
:param credentials: User credentials with which to sign requests.
:type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks`
:param event_emitter: Extension mechanism to fire events.
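    For example, a ``choose-signer`` handler can disable signing for an
    operation by returning ``botocore.UNSIGNED`` (a sketch; ``client`` is
    an already-created client)::
        def disable_signing(**kwargs):
            return botocore.UNSIGNED
        client.meta.events.register(
            'choose-signer.s3.ListBuckets', disable_signing)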
"""
def __init__(self, service_name, region_name, signing_name,
signature_version, credentials, event_emitter):
self._service_name = service_name
self._region_name = region_name
self._signing_name = signing_name
self._signature_version = signature_version
self._credentials = credentials
# We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6
self._event_emitter = weakref.proxy(event_emitter)
@property
def region_name(self):
return self._region_name
@property
def signature_version(self):
return self._signature_version
@property
def signing_name(self):
return self._signing_name
def handler(self, operation_name=None, request=None, **kwargs):
# This is typically hooked up to the "request-created" event
# from a client's event emitter. When a new request is created
# this method is invoked to sign the request.
# Don't call this method directly.
return self.sign(operation_name, request)
def sign(self, operation_name, request, region_name=None,
signing_type='standard', expires_in=None, signing_name=None):
"""Sign a request before it goes out over the wire.
:type operation_name: string
:param operation_name: The name of the current operation, e.g.
``ListBuckets``.
:type request: AWSRequest
:param request: The request object to be sent over the wire.
:type region_name: str
:param region_name: The region to sign the request for.
:type signing_type: str
:param signing_type: The type of signing to perform. This can be one of
three possible values:
* 'standard' - This should be used for most requests.
* 'presign-url' - This should be used when pre-signing a request.
* 'presign-post' - This should be used when pre-signing an S3 post.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. This parameter is only valid for signing type 'presign-url'.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
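        A direct-call sketch (``request`` is assumed to be an
        ``AWSRequest``)::
            signer.sign('ListBuckets', request)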
"""
if region_name is None:
region_name = self._region_name
if signing_name is None:
signing_name = self._signing_name
signature_version = self._choose_signer(
operation_name, signing_type, request.context)
# Allow mutating request before signing
self._event_emitter.emit(
'before-sign.{0}.{1}'.format(self._service_name, operation_name),
request=request, signing_name=signing_name,
region_name=self._region_name,
signature_version=signature_version, request_signer=self,
operation_name=operation_name
)
if signature_version != botocore.UNSIGNED:
kwargs = {
'signing_name': signing_name,
'region_name': region_name,
'signature_version': signature_version
}
if expires_in is not None:
kwargs['expires'] = expires_in
try:
auth = self.get_auth_instance(**kwargs)
except UnknownSignatureVersionError as e:
if signing_type != 'standard':
raise UnsupportedSignatureVersionError(
signature_version=signature_version)
else:
raise e
auth.add_auth(request)
def _choose_signer(self, operation_name, signing_type, context):
"""
Allow setting the signature version via the choose-signer event.
A value of `botocore.UNSIGNED` means no signing will be performed.
:param operation_name: The operation to sign.
:param signing_type: The type of signing that the signer is to be used
for.
:return: The signature version to sign with.
"""
signing_type_suffix_map = {
'presign-post': '-presign-post',
'presign-url': '-query'
}
suffix = signing_type_suffix_map.get(signing_type, '')
signature_version = self._signature_version
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
handler, response = self._event_emitter.emit_until_response(
'choose-signer.{0}.{1}'.format(self._service_name, operation_name),
signing_name=self._signing_name, region_name=self._region_name,
signature_version=signature_version, context=context)
if response is not None:
signature_version = response
# The suffix needs to be checked again in case we get an improper
# signature version from choose-signer.
if signature_version is not botocore.UNSIGNED and not \
signature_version.endswith(suffix):
signature_version += suffix
return signature_version
def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
"""
Get an auth instance which can be used to sign a request
using the given signature version.
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:rtype: :py:class:`~botocore.auth.BaseSigner`
:return: Auth instance to sign a request.
"""
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
        # If no credentials are provided (i.e. credentials is None),
# then we'll pass a value of "None" over to the auth classes,
# which already handle the cases where no credentials have
# been provided.
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
# Alias get_auth for backwards compatibility.
get_auth = get_auth_instance
def generate_presigned_url(self, request_dict, operation_name,
expires_in=3600, region_name=None,
signing_name=None):
"""Generates a presigned url
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type operation_name: str
:param operation_name: The operation being signed.
:type expires_in: int
:param expires_in: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type region_name: string
        :param region_name: The region name to use when signing the presigned
            url.
:type signing_name: str
:param signing_name: The name to use for the service when signing.
:returns: The presigned url
"""
request = create_request_object(request_dict)
self.sign(operation_name, request, region_name,
'presign-url', expires_in, signing_name)
request.prepare()
return request.url
class CloudFrontSigner(object):
    '''A signer to create a signed CloudFront URL.

    First you create a cloudfront signer based on a normalized RSA signer::

        import rsa
        def rsa_signer(message):
            private_key = open('private_key.pem', 'r').read()
            return rsa.sign(
                message,
                rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')),
                'SHA-1')  # CloudFront requires SHA-1 hash
        cf_signer = CloudFrontSigner(key_id, rsa_signer)

    To sign with a canned policy::

        signed_url = cf_signer.generate_presigned_url(
            url, date_less_than=datetime(2015, 12, 1))

    To sign with a custom policy::

        signed_url = cf_signer.generate_presigned_url(url, policy=my_policy)
    '''
def __init__(self, key_id, rsa_signer):
"""Create a CloudFrontSigner.
:type key_id: str
:param key_id: The CloudFront Key Pair ID
:type rsa_signer: callable
:param rsa_signer: An RSA signer.
Its only input parameter will be the message to be signed,
and its output will be the signed content as a binary string.
The hash algorithm needed by CloudFront is SHA-1.
"""
self.key_id = key_id
self.rsa_signer = rsa_signer
def generate_presigned_url(self, url, date_less_than=None, policy=None):
"""Creates a signed CloudFront URL based on given parameters.
:type url: str
:param url: The URL of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after that date and time
:type policy: str
:param policy: The custom policy, possibly built by self.build_policy()
:rtype: str
:return: The signed URL.
"""
        if ((date_less_than is not None and policy is not None) or
                (date_less_than is None and policy is None)):
            e = 'Need to provide either date_less_than or policy, but not both'
            raise ValueError(e)
if date_less_than is not None:
            # We still need to build a canned policy for signing purposes
policy = self.build_policy(url, date_less_than)
if isinstance(policy, six.text_type):
policy = policy.encode('utf8')
if date_less_than is not None:
params = ['Expires=%s' % int(datetime2timestamp(date_less_than))]
else:
params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')]
signature = self.rsa_signer(policy)
params.extend([
'Signature=%s' % self._url_b64encode(signature).decode('utf8'),
'Key-Pair-Id=%s' % self.key_id,
])
return self._build_url(url, params)
def _build_url(self, base_url, extra_params):
separator = '&' if '?' in base_url else '?'
return base_url + separator + '&'.join(extra_params)
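    # For example, _build_url('https://host/obj', ['Expires=1', 'Key-Pair-Id=K'])
    # yields 'https://host/obj?Expires=1&Key-Pair-Id=K'; a base URL that
    # already contains '?' gets the parameters appended with '&' instead.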
def build_policy(self, resource, date_less_than,
date_greater_than=None, ip_address=None):
"""A helper to build policy.
:type resource: str
:param resource: The URL or the stream filename of the protected object
:type date_less_than: datetime
:param date_less_than: The URL will expire after the time has passed
:type date_greater_than: datetime
:param date_greater_than: The URL will not be valid until this time
:type ip_address: str
:param ip_address: Use 'x.x.x.x' for an IP, or 'x.x.x.x/x' for a subnet
:rtype: str
:return: The policy in a compact string.
"""
# Note:
# 1. Order in canned policy is significant. Special care has been taken
# to ensure the output will match the order defined by the document.
# There is also a test case to ensure that order.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-policy-statement
        # 2. Although ordering in a custom policy is not required by
        #    CloudFront, we still use OrderedDict internally to ensure the
        #    result is stable and also matches the canned policy requirement.
# SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html
moment = int(datetime2timestamp(date_less_than))
condition = OrderedDict({"DateLessThan": {"AWS:EpochTime": moment}})
if ip_address:
if '/' not in ip_address:
ip_address += '/32'
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
if date_greater_than:
moment = int(datetime2timestamp(date_greater_than))
condition["DateGreaterThan"] = {"AWS:EpochTime": moment}
ordered_payload = [('Resource', resource), ('Condition', condition)]
custom_policy = {"Statement": [OrderedDict(ordered_payload)]}
return json.dumps(custom_policy, separators=(',', ':'))
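    # For reference, a canned policy built by build_policy for the resource
    # 'http://example.com/x' with an epoch expiry of 1449014400 serializes
    # compactly (no whitespace) to:
    #   {"Statement":[{"Resource":"http://example.com/x",
    #    "Condition":{"DateLessThan":{"AWS:EpochTime":1449014400}}}]}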
def _url_b64encode(self, data):
# Required by CloudFront. See also:
# http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html
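        # e.g. base64 output b'+/=' becomes b'-~_' after the substitutions.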
return base64.b64encode(
data).replace(b'+', b'-').replace(b'=', b'_').replace(b'/', b'~')
def add_generate_db_auth_token(class_attributes, **kwargs):
class_attributes['generate_db_auth_token'] = generate_db_auth_token
def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
"""Generates an auth token used to connect to a db with IAM credentials.
:type DBHostname: str
:param DBHostname: The hostname of the database to connect to.
:type Port: int
:param Port: The port number the database is listening on.
:type DBUsername: str
:param DBUsername: The username to log in as.
:type Region: str
:param Region: The region the database is in. If None, the client
region will be used.
:return: A presigned url which can be used as an auth token.
"""
region = Region
if region is None:
region = self.meta.region_name
params = {
'Action': 'connect',
'DBUser': DBUsername,
}
request_dict = {
'url_path': '/',
'query_string': '',
'headers': {},
'body': params,
'method': 'GET'
}
# RDS requires that the scheme not be set when sent over. This can cause
# issues when signing because the Python url parsing libraries follow
# RFC 1808 closely, which states that a netloc must be introduced by `//`.
# Otherwise the url is presumed to be relative, and thus the whole
# netloc would be treated as a path component. To work around this we
# introduce https here and remove it once we're done processing it.
scheme = 'https://'
endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port)
prepare_request_dict(request_dict, endpoint_url)
presigned_url = self._request_signer.generate_presigned_url(
operation_name='connect', request_dict=request_dict,
region_name=region, expires_in=900, signing_name='rds-db'
)
return presigned_url[len(scheme):]
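# Illustrative usage sketch (the hostname and user below are hypothetical):
#
#     token = rds_client.generate_db_auth_token(
#         DBHostname='mydb.example.us-east-1.rds.amazonaws.com',
#         Port=3306, DBUsername='iam_user')
#
# The returned token is then supplied as the password when connecting to
# the database as an IAM-enabled user.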
class S3PostPresigner(object):
def __init__(self, request_signer):
self._request_signer = request_signer
def generate_presigned_post(self, request_dict, fields=None,
conditions=None, expires_in=3600,
region_name=None):
"""Generates the url and the form fields used for a presigned s3 post
:type request_dict: dict
:param request_dict: The prepared request dictionary returned by
``botocore.awsrequest.prepare_request_dict()``
:type fields: dict
:param fields: A dictionary of prefilled form fields to build on top
of.
:type conditions: list
:param conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
{"bucket": "mybucket"},
["starts-with", "$key", "mykey"]
]
:type expires_in: int
:param expires_in: The number of seconds the presigned post is valid
for.
:type region_name: string
        :param region_name: The region name to use when signing the presigned
            post.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
            {'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
if fields is None:
fields = {}
if conditions is None:
conditions = []
# Create the policy for the post.
policy = {}
# Create an expiration date for the policy
datetime_now = datetime.datetime.utcnow()
expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
# Append all of the conditions that the user supplied.
policy['conditions'] = []
for condition in conditions:
policy['conditions'].append(condition)
# Store the policy and the fields in the request for signing
request = create_request_object(request_dict)
request.context['s3-presign-post-fields'] = fields
request.context['s3-presign-post-policy'] = policy
self._request_signer.sign(
'PutObject', request, region_name, 'presign-post')
        # Return the url and the fields for the form to post.
return {'url': request.url, 'fields': fields}
def add_generate_presigned_url(class_attributes, **kwargs):
class_attributes['generate_presigned_url'] = generate_presigned_url
def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600,
HttpMethod=None):
"""Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
:returns: The presigned url
"""
client_method = ClientMethod
params = Params
if params is None:
params = {}
expires_in = ExpiresIn
http_method = HttpMethod
context = {
'is_presign_request': True
}
request_signer = self._request_signer
serializer = self._serializer
try:
operation_name = self._PY_TO_OP_NAME[client_method]
except KeyError:
raise UnknownClientMethodError(method_name=client_method)
operation_model = self.meta.service_model.operation_model(
operation_name)
params = self._emit_api_params(params, operation_model, context)
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
params, operation_model)
# Switch out the http method if user specified it.
if http_method is not None:
request_dict['method'] = http_method
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url, context=context)
# Generate the presigned url.
return request_signer.generate_presigned_url(
request_dict=request_dict, expires_in=expires_in,
operation_name=operation_name)
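# Illustrative usage sketch (bucket and key names are hypothetical):
#
#     url = s3_client.generate_presigned_url(
#         'get_object', Params={'Bucket': 'mybucket', 'Key': 'mykey'},
#         ExpiresIn=300)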
def add_generate_presigned_post(class_attributes, **kwargs):
class_attributes['generate_presigned_post'] = generate_presigned_post
def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None,
ExpiresIn=3600):
"""Builds the url and the form fields used for a presigned s3 post
:type Bucket: string
:param Bucket: The name of the bucket to presign the post to. Note that
bucket related conditions should not be included in the
``conditions`` parameter.
:type Key: string
        :param Key: The key name. Optionally add ${filename} to the end to
            attach the submitted filename. Note that key related conditions
            and fields are filled out for you and should not be included in
            the ``Fields`` or ``Conditions`` parameters.
:type Fields: dict
:param Fields: A dictionary of prefilled form fields to build on top
of. Elements that may be included are acl, Cache-Control,
Content-Type, Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and x-amz-meta-.
Note that if a particular element is included in the fields
dictionary it will not be automatically added to the conditions
list. You must specify a condition for the element as well.
:type Conditions: list
:param Conditions: A list of conditions to include in the policy. Each
element can be either a list or a structure. For example:
[
{"acl": "public-read"},
["content-length-range", 2, 5],
["starts-with", "$success_action_redirect", ""]
]
Conditions that are included may pertain to acl,
content-length-range, Cache-Control, Content-Type,
Content-Disposition, Content-Encoding, Expires,
success_action_redirect, redirect, success_action_status,
and/or x-amz-meta-.
Note that if you include a condition, you must specify
            a valid value in the fields dictionary as well. A value will
not be added automatically to the fields dictionary based on the
conditions.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned post
is valid for.
:rtype: dict
:returns: A dictionary with two elements: ``url`` and ``fields``.
Url is the url to post to. Fields is a dictionary filled with
the form fields and respective values to use when submitting the
post. For example:
            {'url': 'https://mybucket.s3.amazonaws.com',
'fields': {'acl': 'public-read',
'key': 'mykey',
'signature': 'mysignature',
'policy': 'mybase64 encoded policy'}
}
"""
bucket = Bucket
key = Key
fields = Fields
conditions = Conditions
expires_in = ExpiresIn
if fields is None:
fields = {}
if conditions is None:
conditions = []
post_presigner = S3PostPresigner(self._request_signer)
serializer = self._serializer
# We choose the CreateBucket operation model because its url gets
# serialized to what a presign post requires.
operation_model = self.meta.service_model.operation_model(
'CreateBucket')
# Create a request dict based on the params to serialize.
request_dict = serializer.serialize_to_request(
{'Bucket': bucket}, operation_model)
# Prepare the request dict by including the client's endpoint url.
prepare_request_dict(
request_dict, endpoint_url=self.meta.endpoint_url)
    # Append the bucket name to the list of conditions.
conditions.append({'bucket': bucket})
    # If the key ends with ${filename}, the only constraint that can be
    # imposed is that it starts with the specified prefix.
if key.endswith('${filename}'):
conditions.append(["starts-with", '$key', key[:-len('${filename}')]])
else:
conditions.append({'key': key})
# Add the key to the fields.
fields['key'] = key
return post_presigner.generate_presigned_post(
request_dict=request_dict, fields=fields, conditions=conditions,
expires_in=expires_in)
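# Illustrative usage sketch (bucket and key are hypothetical):
#
#     post = s3_client.generate_presigned_post(
#         Bucket='mybucket', Key='uploads/${filename}',
#         Conditions=[['content-length-range', 1, 10485760]])
#
# The returned dict's 'url' and 'fields' are then used to build an HTML
# POST form for browser-based uploads.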
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Resolves regions and endpoints.
This module implements endpoint resolution, including resolving endpoints for a
given service and region and resolving the available endpoints for a service
in a specific AWS partition.
"""
import logging
import re
from botocore.exceptions import NoRegionError
LOG = logging.getLogger(__name__)
DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}'
DEFAULT_SERVICE_DATA = {'endpoints': {}}
class BaseEndpointResolver(object):
"""Resolves regions and endpoints. Must be subclassed."""
def construct_endpoint(self, service_name, region_name=None):
"""Resolves an endpoint for a service and region combination.
:type service_name: string
:param service_name: Name of the service to resolve an endpoint for
(e.g., s3)
        :type region_name: string
        :param region_name: Region/endpoint name to resolve (e.g., us-east-1).
            If no region is provided, the first partition-wide endpoint
            found will be used, if available.
:rtype: dict
:return: Returns a dict containing the following keys:
- partition: (string, required) Resolved partition name
- endpointName: (string, required) Resolved endpoint name
- hostname: (string, required) Hostname to use for this endpoint
- sslCommonName: (string) sslCommonName to use for this endpoint.
- credentialScope: (dict) Signature version 4 credential scope
- region: (string) region name override when signing.
- service: (string) service name override when signing.
- signatureVersions: (list<string>) A list of possible signature
versions, including s3, v4, v2, and s3v4
- protocols: (list<string>) A list of supported protocols
(e.g., http, https)
- ...: Other keys may be included as well based on the metadata
"""
raise NotImplementedError
def get_available_partitions(self):
"""Lists the partitions available to the endpoint resolver.
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"]).
"""
raise NotImplementedError
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoint for (e.g., s3)
:type partition_name: string
        :param partition_name: Name of the partition to limit endpoints to
            (e.g., aws for the public AWS endpoints, aws-cn for AWS China
            endpoints, aws-us-gov for AWS GovCloud (US) endpoints, etc.).
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
raise NotImplementedError
class EndpointResolver(BaseEndpointResolver):
"""Resolves endpoints based on partition endpoint metadata"""
def __init__(self, endpoint_data):
"""
:param endpoint_data: A dict of partition data.
"""
if 'partitions' not in endpoint_data:
raise ValueError('Missing "partitions" in endpoint data')
self._endpoint_data = endpoint_data
def get_available_partitions(self):
result = []
for partition in self._endpoint_data['partitions']:
result.append(partition['partition'])
return result
def get_available_endpoints(self, service_name, partition_name='aws',
allow_non_regional=False):
result = []
for partition in self._endpoint_data['partitions']:
if partition['partition'] != partition_name:
continue
services = partition['services']
if service_name not in services:
continue
for endpoint_name in services[service_name]['endpoints']:
if allow_non_regional or endpoint_name in partition['regions']:
result.append(endpoint_name)
return result
def construct_endpoint(self, service_name, region_name=None):
# Iterate over each partition until a match is found.
for partition in self._endpoint_data['partitions']:
result = self._endpoint_for_partition(
partition, service_name, region_name)
if result:
return result
def _endpoint_for_partition(self, partition, service_name, region_name):
# Get the service from the partition, or an empty template.
service_data = partition['services'].get(
service_name, DEFAULT_SERVICE_DATA)
# Use the partition endpoint if no region is supplied.
if region_name is None:
if 'partitionEndpoint' in service_data:
region_name = service_data['partitionEndpoint']
else:
raise NoRegionError()
# Attempt to resolve the exact region for this partition.
if region_name in service_data['endpoints']:
return self._resolve(
partition, service_name, service_data, region_name)
# Check to see if the endpoint provided is valid for the partition.
if self._region_match(partition, region_name):
# Use the partition endpoint if set and not regionalized.
partition_endpoint = service_data.get('partitionEndpoint')
is_regionalized = service_data.get('isRegionalized', True)
if partition_endpoint and not is_regionalized:
LOG.debug('Using partition endpoint for %s, %s: %s',
service_name, region_name, partition_endpoint)
return self._resolve(
partition, service_name, service_data, partition_endpoint)
LOG.debug('Creating a regex based endpoint for %s, %s',
service_name, region_name)
return self._resolve(
partition, service_name, service_data, region_name)
def _region_match(self, partition, region_name):
if region_name in partition['regions']:
return True
if 'regionRegex' in partition:
return re.compile(partition['regionRegex']).match(region_name)
return False
def _resolve(self, partition, service_name, service_data, endpoint_name):
result = service_data['endpoints'].get(endpoint_name, {})
result['partition'] = partition['partition']
result['endpointName'] = endpoint_name
# Merge in the service defaults then the partition defaults.
self._merge_keys(service_data.get('defaults', {}), result)
self._merge_keys(partition.get('defaults', {}), result)
        hostname = result.get('hostname', DEFAULT_URI_TEMPLATE)
        result['hostname'] = self._expand_template(
            partition, hostname, service_name, endpoint_name)
if 'sslCommonName' in result:
result['sslCommonName'] = self._expand_template(
partition, result['sslCommonName'], service_name,
endpoint_name)
result['dnsSuffix'] = partition['dnsSuffix']
return result
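    # With the default template '{service}.{region}.{dnsSuffix}' in the 'aws'
    # partition, resolving service 's3' in region 'us-west-2' expands the
    # hostname to 's3.us-west-2.amazonaws.com'.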
def _merge_keys(self, from_data, result):
for key in from_data:
if key not in result:
result[key] = from_data[key]
def _expand_template(self, partition, template, service_name,
endpoint_name):
return template.format(
service=service_name, region=endpoint_name,
dnsSuffix=partition['dnsSuffix'])
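# Illustrative usage sketch: real endpoint data comes from botocore's bundled
# endpoints.json; the minimal partition dict below is hand-written for this
# example only.
#
#     resolver = EndpointResolver({'partitions': [{
#         'partition': 'aws', 'dnsSuffix': 'amazonaws.com',
#         'regions': {'us-east-1': {}},
#         'defaults': {'hostname': DEFAULT_URI_TEMPLATE},
#         'services': {'s3': {'endpoints': {'us-east-1': {}}}}}]})
#     endpoint = resolver.construct_endpoint('s3', 'us-east-1')
#     # endpoint['hostname'] == 's3.us-east-1.amazonaws.com'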