Skip to content

Commit

Permalink
strip chunk signatures for S3 content md5 checks (localstack#775)
Browse files Browse the repository at this point in the history
  • Loading branch information
whummer authored May 22, 2018
1 parent fe1bc23 commit 582f3cd
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 50 deletions.
5 changes: 5 additions & 0 deletions localstack/ext/java/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,11 @@

<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.7.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
package cloud.localstack.docker;

import cloud.localstack.LocalstackTestRunner;
import cloud.localstack.ServiceName;
import cloud.localstack.docker.command.RegexStream;
import cloud.localstack.docker.exception.LocalstackDockerException;
Expand Down
5 changes: 4 additions & 1 deletion localstack/services/install.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def install_elasticsearch():
mkdir(INSTALL_DIR_INFRA)
# download and extract archive
download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
run('cd %s && mv elasticsearch* elasticsearch' % (INSTALL_DIR_INFRA))
elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
if not elasticsearch_dir:
raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)

for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
Expand Down
101 changes: 53 additions & 48 deletions localstack/services/s3/s3_listener.py
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ def strip_chunk_signatures(data):


def check_content_md5(data, headers):
actual = md5(data)
actual = md5(strip_chunk_signatures(data))
expected = headers['Content-MD5']
try:
expected = to_str(codecs.encode(base64.b64decode(expected), 'hex'))
Expand Down Expand Up @@ -354,6 +354,56 @@ def get_bucket_name(path, headers):
return bucket_name


def handle_notification_request(bucket, method, data):
    """Serve the S3 ``?notification`` bucket sub-resource.

    GET renders the notification configuration stored for ``bucket`` in
    ``S3_NOTIFICATIONS`` as a ``<NotificationConfiguration>`` XML document;
    PUT parses ``data`` (the request body XML) and replaces the stored
    configuration for ``bucket``.

    :param bucket: bucket name extracted from the request path
    :param method: HTTP method of the request ('GET' or 'PUT')
    :param data: raw request body (only read for PUT)
    :return: a ``Response`` with status 200 (XML body for GET, empty otherwise)
    """
    response = Response()
    response.status_code = 200
    response._content = ''

    if method == 'GET':
        # TODO check if bucket exists
        fragments = ['<NotificationConfiguration xmlns="%s">' % XMLNS_S3]
        if bucket in S3_NOTIFICATIONS:
            stored = S3_NOTIFICATIONS[bucket]
            for dest_type in NOTIFICATION_DESTINATION_TYPES:
                if dest_type not in stored:
                    continue
                # key insertion order drives element order in the XML output
                fragments.append(xmltodict.unparse({
                    '%sConfiguration' % dest_type: {
                        'Id': uuid.uuid4(),
                        dest_type: stored[dest_type],
                        'Event': stored['Event'],
                        'Filter': stored['Filter']
                    }
                }, full_document=False))
        fragments.append('</NotificationConfiguration>')
        response._content = ''.join(fragments)

    if method == 'PUT':
        notif_config = xmltodict.parse(data).get('NotificationConfiguration')
        # drop any previously stored configuration for this bucket
        S3_NOTIFICATIONS.pop(bucket, None)
        for dest_type in NOTIFICATION_DESTINATION_TYPES:
            config = notif_config.get('%sConfiguration' % (dest_type))
            if not config:
                continue
            events = config.get('Event')
            if isinstance(events, six.string_types):
                events = [events]
            event_filter = config.get('Filter', {})
            # make sure FilterRule is an array
            s3_filter = _get_s3_filter(event_filter)
            if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                s3_filter['FilterRule'] = [s3_filter['FilterRule']]
            # TODO: what if we have multiple destinations - would we overwrite the config?
            S3_NOTIFICATIONS[bucket] = clone({
                'Id': config.get('Id'),
                'Event': events,
                dest_type: config.get(dest_type),
                'Filter': event_filter
            })

    return response


class ProxyListenerS3(ProxyListener):

def forward_request(self, method, path, data, headers):
Expand Down Expand Up @@ -400,53 +450,8 @@ def forward_request(self, method, path, data, headers):
bucket = path.split('/')[1]
query_map = urlparse.parse_qs(query)
if query == 'notification' or 'notification' in query_map:
response = Response()
response.status_code = 200
if method == 'GET':
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notif = S3_NOTIFICATIONS[bucket]
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
'%sConfiguration' % dest: {
'Id': uuid.uuid4(),
dest: notif[dest],
'Event': notif['Event'],
'Filter': notif['Filter']
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += '</NotificationConfiguration>'
response._content = result

if method == 'PUT':
parsed = xmltodict.parse(data)
notif_config = parsed.get('NotificationConfiguration')
S3_NOTIFICATIONS.pop(bucket, None)
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get('%sConfiguration' % (dest))
if config:
events = config.get('Event')
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get('Filter', {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
s3_filter['FilterRule'] = [s3_filter['FilterRule']]
# create final details dict
notification_details = {
'Id': config.get('Id'),
'Event': events,
dest: config.get(dest),
'Filter': event_filter
}
# TODO: what if we have multiple destinations - would we overwrite the config?
S3_NOTIFICATIONS[bucket] = clone(notification_details)

# return response for ?notification request
# handle and return response for ?notification request
response = handle_notification_request(bucket, method, data)
return response

if query == 'cors' or 'cors' in query_map:
Expand Down

0 comments on commit 582f3cd

Please sign in to comment.