|
| 1 | +#!/usr/bin/env python |
# -*- coding: utf-8 -*-
| 3 | +import time |
| 4 | +import boto |
| 5 | +import boto.s3.connection |
| 6 | +import string |
| 7 | +import random |
| 8 | +import argparse |
| 9 | +from cStringIO import StringIO |
| 10 | +from nose.tools import eq_ as eq |
| 11 | +import os |
| 12 | + |
def transfer_part(bucket, mp_id, mp_keyname, i, part):
    """Upload a single part of an in-progress multipart upload.

    Rebuilds a MultiPartUpload handle from its id and key name rather
    than sharing one object, so this function can safely be invoked
    from parallel workers. Part numbers on the wire are 1-based, hence
    the ``i + 1``.
    """
    upload = boto.s3.multipart.MultiPartUpload(bucket)
    upload.key_name = mp_keyname
    upload.id = mp_id
    upload.upload_part_from_file(StringIO(part), i + 1)
| 21 | + |
def generate_random(size, part_size=5*1024*1024):
    """
    Yield ``size`` characters of random ASCII data, in pieces of at most
    ``part_size`` characters.

    Each piece is a freshly generated random 1 KB string repeated to
    fill the piece (so only the first KB of every piece is truly
    random); the last piece is truncated so the total is exactly
    ``size``. Yields nothing when ``size`` is 0.
    """
    chunk = 1024
    allowed = string.ascii_letters
    for offset in range(0, size, part_size):
        # One random kilobyte to be repeated across the whole piece.
        strpart = ''.join(random.choice(allowed) for _ in range(chunk))
        this_part_size = min(size - offset, part_size)
        # Whole repetitions plus a truncated tail.  Using repetition +
        # slicing avoids the quadratic cost of the original repeated
        # string concatenation; the original's trailing
        # `if (x == size): return` was unreachable (range(0, size, step)
        # never yields `size`) and has been dropped.
        whole, remainder = divmod(this_part_size, chunk)
        yield strpart * whole + strpart[:remainder]
| 41 | + |
def _multipart_upload(bucket, s3_key_name, size, part_size=5*1024*1024, do_list=None, headers=None, metadata=None, resend_parts=None):
    """
    Run a multipart upload of ``size`` bytes of random data to
    ``s3_key_name`` in ``bucket``.

    :param do_list: if not None, also call list_multipart_uploads()
        while the upload is in progress (result is discarded; the call
        exists purely to exercise the server).
    :param resend_parts: iterable of zero-based part indexes to upload
        a second time, simulating client retries.
    :return: tuple of (upload descriptor, full uploaded payload string)
    """
    # Default was a shared mutable list ([]); None sentinel is the safe
    # equivalent and is backward-compatible for all callers.
    if resend_parts is None:
        resend_parts = []
    upload = bucket.initiate_multipart_upload(s3_key_name, headers=headers, metadata=metadata)
    parts = []
    for i, part in enumerate(generate_random(size, part_size)):
        parts.append(part)
        transfer_part(bucket, upload.id, upload.key_name, i, part)
        if i in resend_parts:
            # Re-send the same part; S3 keeps the last copy received.
            transfer_part(bucket, upload.id, upload.key_name, i, part)

    if do_list is not None:
        # Force full iteration of the listing; result intentionally unused.
        list(bucket.list_multipart_uploads())

    # ''.join avoids the original quadratic `s += part` accumulation.
    return (upload, ''.join(parts))
| 61 | + |
if __name__ == "__main__":
    # Credentials come from the environment; --key/--secret merely
    # default to them and may override on the command line.
    s3_access_key = os.environ['S3_ACCESS_KEY_ID']
    s3_secret_key = os.environ['S3_SECRET_ACCESS_KEY']

    parser = argparse.ArgumentParser(description='test_copy')
    parser.add_argument('--port', type=int, action='store', default=8000)
    parser.add_argument('--key', type=str, action='store', default=s3_access_key)
    parser.add_argument('--secret', type=str, action='store', default=s3_secret_key)
    parser.add_argument('--num', type=int, action='store', default=100)
    parser.add_argument('--bucket', type=str, action='store', default='bucket1')

    args = parser.parse_args()

    # BUG FIX: the connection previously used the environment variables
    # directly, silently ignoring any --key/--secret passed on the
    # command line.
    s3_conn = boto.connect_s3(
        aws_access_key_id=args.key,
        aws_secret_access_key=args.secret,
        host='localhost',
        port=args.port,
        is_secure=False,
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )

    print('creating bucket')
    bucket = s3_conn.create_bucket(args.bucket)

    content_type = 'text/bla'
    for i in range(args.num):
        # str(i) replaces the backtick-repr syntax (removed in Python 3);
        # also fixes the "mulitpart" typo in the progress message.
        print('creating multipart obj' + str(i))
        objlen = 30 * 1024 * 1024
        key_name = 'obj' + str(i)
        (upload, data) = _multipart_upload(bucket, key_name, objlen, headers={'Content-Type': content_type}, metadata={'foo': 'bar'})
        upload.complete_upload()
        key = bucket.get_key(key_name)
        print(key)
0 commit comments