| Index: boto/s3/key.py
|
| diff --git a/boto/s3/key.py b/boto/s3/key.py
|
| index c7e77f4c6dd6b55e525f7ae48f4f6a91f072598d..18829c2b91e094eb4c94f302fc67f5f4e9755cd6 100644
|
| --- a/boto/s3/key.py
|
| +++ b/boto/s3/key.py
|
| @@ -1,4 +1,5 @@
|
| # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
|
| +# Copyright (c) 2011, Nexenta Systems Inc.
|
| #
|
| # Permission is hereby granted, free of charge, to any person obtaining a
|
| # copy of this software and associated documentation files (the
|
| @@ -14,13 +15,14 @@
|
| # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
| # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
|
| # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
| -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
| +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
| # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
| # IN THE SOFTWARE.
|
|
|
| import mimetypes
|
| import os
|
| +import re
|
| import rfc822
|
| import StringIO
|
| import base64
|
| @@ -62,6 +64,7 @@ class Key(object):
|
| self.version_id = None
|
| self.source_version_id = None
|
| self.delete_marker = False
|
| + self.encrypted = None
|
|
|
| def __repr__(self):
|
| if self.bucket:
|
| @@ -91,7 +94,7 @@ class Key(object):
|
| if self.bucket.connection:
|
| provider = self.bucket.connection.provider
|
| return provider
|
| -
|
| +
|
| def get_md5_from_hexdigest(self, md5_hexdigest):
|
| """
|
| A utility function to create the 2-tuple (md5hexdigest, base64md5)
|
| @@ -103,7 +106,14 @@ class Key(object):
|
| if base64md5[-1] == '\n':
|
| base64md5 = base64md5[0:-1]
|
| return (md5_hexdigest, base64md5)
|
| -
|
| +
|
| + def handle_encryption_headers(self, resp):
|
| + provider = self.bucket.connection.provider
|
| + if provider.server_side_encryption_header:
|
| + self.encrypted = resp.getheader(provider.server_side_encryption_header, None)
|
| + else:
|
| + self.encrypted = None
|
| +
|
| def handle_version_headers(self, resp, force=False):
|
| provider = self.bucket.connection.provider
|
| # If the Key object already has a version_id attribute value, it
|
| @@ -113,7 +123,8 @@ class Key(object):
|
| # overwrite the version_id in this Key object. Comprende?
|
| if self.version_id is None or force:
|
| self.version_id = resp.getheader(provider.version_id, None)
|
| - self.source_version_id = resp.getheader(provider.copy_source_version_id, None)
|
| + self.source_version_id = resp.getheader(provider.copy_source_version_id,
|
| + None)
|
| if resp.getheader(provider.delete_marker, 'false') == 'true':
|
| self.delete_marker = True
|
| else:
|
| @@ -123,13 +134,13 @@ class Key(object):
|
| override_num_retries=None, response_headers=None):
|
| """
|
| Open this key for reading
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: Headers to pass in the web request
|
| -
|
| +
|
| :type query_args: string
|
| :param query_args: Arguments to pass in the query string (ie, 'torrent')
|
| -
|
| +
|
| :type override_num_retries: int
|
| :param override_num_retries: If not None will override configured
|
| num_retries parameter for underlying GET.
|
| @@ -142,7 +153,7 @@ class Key(object):
|
| """
|
| if self.resp == None:
|
| self.mode = 'r'
|
| -
|
| +
|
| provider = self.bucket.connection.provider
|
| self.resp = self.bucket.connection.make_request(
|
| 'GET', self.bucket.name, self.name, headers,
|
| @@ -156,8 +167,15 @@ class Key(object):
|
| self.metadata = boto.utils.get_aws_metadata(response_headers,
|
| provider)
|
| for name,value in response_headers.items():
|
| - if name.lower() == 'content-length':
|
| + # To get correct size for Range GETs, use Content-Range
|
| + # header if one was returned. If not, use Content-Length
|
| + # header.
|
| + if (name.lower() == 'content-length' and
|
| + 'Content-Range' not in response_headers):
|
| self.size = int(value)
|
| + elif name.lower() == 'content-range':
|
| + end_range = re.sub('.*/(.*)', '\\1', value)
|
| + self.size = int(end_range)
|
| elif name.lower() == 'etag':
|
| self.etag = value
|
| elif name.lower() == 'content-type':
|
| @@ -169,12 +187,13 @@ class Key(object):
|
| elif name.lower() == 'cache-control':
|
| self.cache_control = value
|
| self.handle_version_headers(self.resp)
|
| + self.handle_encryption_headers(self.resp)
|
|
|
| def open_write(self, headers=None, override_num_retries=None):
|
| """
|
| - Open this key for writing.
|
| + Open this key for writing.
|
| Not yet implemented
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: Headers to pass in the write request
|
|
|
| @@ -204,7 +223,7 @@ class Key(object):
|
| self.resp = None
|
| self.mode = None
|
| self.closed = True
|
| -
|
| +
|
| def next(self):
|
| """
|
| By providing a next method, the key object supports use as an iterator.
|
| @@ -223,10 +242,11 @@ class Key(object):
|
| return data
|
|
|
| def read(self, size=0):
|
| - if size == 0:
|
| - size = self.BufferSize
|
| self.open_read()
|
| - data = self.resp.read(size)
|
| + if size == 0:
|
| + data = self.resp.read()
|
| + else:
|
| + data = self.resp.read(size)
|
| if not data:
|
| self.close()
|
| return data
|
| @@ -250,7 +270,7 @@ class Key(object):
|
| :param dst_bucket: The name of a destination bucket. If not
|
| provided the current bucket of the key
|
| will be used.
|
| -
|
| +
|
| """
|
| if new_storage_class == 'STANDARD':
|
| return self.copy(self.bucket.name, self.name,
|
| @@ -263,7 +283,8 @@ class Key(object):
|
| new_storage_class)
|
|
|
| def copy(self, dst_bucket, dst_key, metadata=None,
|
| - reduced_redundancy=False, preserve_acl=False):
|
| + reduced_redundancy=False, preserve_acl=False,
|
| + encrypt_key=False):
|
| """
|
| Copy this Key to another bucket.
|
|
|
| @@ -272,7 +293,7 @@ class Key(object):
|
|
|
| :type dst_key: string
|
| :param dst_key: The name of the destination key
|
| -
|
| +
|
| :type metadata: dict
|
| :param metadata: Metadata to be associated with new key.
|
| If metadata is supplied, it will replace the
|
| @@ -303,6 +324,12 @@ class Key(object):
|
| of False will be significantly more
|
| efficient.
|
|
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| +
|
| :rtype: :class:`boto.s3.key.Key` or subclass
|
| :returns: An instance of the newly created key object
|
| """
|
| @@ -314,7 +341,8 @@ class Key(object):
|
| return dst_bucket.copy_key(dst_key, self.bucket.name,
|
| self.name, metadata,
|
| storage_class=storage_class,
|
| - preserve_acl=preserve_acl)
|
| + preserve_acl=preserve_acl,
|
| + encrypt_key=encrypt_key)
|
|
|
| def startElement(self, name, attrs, connection):
|
| if name == 'Owner':
|
| @@ -344,7 +372,7 @@ class Key(object):
|
| def exists(self):
|
| """
|
| Returns True if the key exists
|
| -
|
| +
|
| :rtype: bool
|
| :return: Whether the key exists on S3
|
| """
|
| @@ -364,7 +392,7 @@ class Key(object):
|
|
|
| def update_metadata(self, d):
|
| self.metadata.update(d)
|
| -
|
| +
|
| # convenience methods for setting/getting ACL
|
| def set_acl(self, acl_str, headers=None):
|
| if self.bucket != None:
|
| @@ -384,51 +412,56 @@ class Key(object):
|
|
|
| def set_canned_acl(self, acl_str, headers=None):
|
| return self.bucket.set_canned_acl(acl_str, self.name, headers)
|
| -
|
| +
|
| def make_public(self, headers=None):
|
| return self.bucket.set_canned_acl('public-read', self.name, headers)
|
|
|
| def generate_url(self, expires_in, method='GET', headers=None,
|
| - query_auth=True, force_http=False):
|
| + query_auth=True, force_http=False, response_headers=None):
|
| """
|
| Generate a URL to access this key.
|
| -
|
| +
|
| :type expires_in: int
|
| :param expires_in: How long the url is valid for, in seconds
|
| -
|
| +
|
| :type method: string
|
| - :param method: The method to use for retrieving the file (default is GET)
|
| -
|
| + :param method: The method to use for retrieving the file
|
| + (default is GET)
|
| +
|
| :type headers: dict
|
| :param headers: Any headers to pass along in the request
|
| -
|
| +
|
| :type query_auth: bool
|
| - :param query_auth:
|
| -
|
| + :param query_auth: If True, sign the URL with query-string authentication
|
| +
|
| :rtype: string
|
| :return: The URL to access the key
|
| """
|
| return self.bucket.connection.generate_url(expires_in, method,
|
| self.bucket.name, self.name,
|
| - headers, query_auth, force_http)
|
| + headers, query_auth,
|
| + force_http,
|
| + response_headers)
|
|
|
| - def send_file(self, fp, headers=None, cb=None, num_cb=10, query_args=None):
|
| + def send_file(self, fp, headers=None, cb=None, num_cb=10,
|
| + query_args=None, chunked_transfer=False):
|
| """
|
| Upload a file to a key into a bucket on S3.
|
| -
|
| +
|
| :type fp: file
|
| :param fp: The file pointer to upload
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: The headers to pass along with the PUT request
|
| -
|
| +
|
| :type cb: function
|
| :param cb: a callback function that will be called to report
|
| - progress on the upload. The callback should accept two integer
|
| - parameters, the first representing the number of bytes that have
|
| - been successfully transmitted to S3 and the second representing
|
| - the total number of bytes that need to be transmitted.
|
| -
|
| + progress on the upload. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted to S3 and the second representing the
|
| + size of the to be transmitted object.
|
| +
|
| :type num_cb: int
|
| :param num_cb: (optional) If a callback is specified with the cb
|
| parameter this parameter determines the granularity
|
| @@ -436,7 +469,7 @@ class Key(object):
|
| times the callback will be called during the file
|
| transfer. Providing a negative integer will cause
|
| your callback to be called with each buffer read.
|
| -
|
| +
|
| """
|
| provider = self.bucket.connection.provider
|
|
|
| @@ -445,12 +478,28 @@ class Key(object):
|
| for key in headers:
|
| http_conn.putheader(key, headers[key])
|
| http_conn.endheaders()
|
| - fp.seek(0)
|
| + if chunked_transfer:
|
| + # MD5 for the stream has to be calculated on the fly, as
|
| + # we don't know the size of the stream before hand.
|
| + m = md5()
|
| + else:
|
| + fp.seek(0)
|
| +
|
| save_debug = self.bucket.connection.debug
|
| self.bucket.connection.debug = 0
|
| - http_conn.set_debuglevel(0)
|
| + # If the debuglevel < 3 we don't want to show connection
|
| + # payload, so turn off HTTP connection-level debug output (to
|
| + # be restored below).
|
| + # Use the getattr approach to allow this to work in AppEngine.
|
| + if getattr(http_conn, 'debuglevel', 0) < 3:
|
| + http_conn.set_debuglevel(0)
|
| if cb:
|
| - if num_cb > 2:
|
| + if chunked_transfer:
|
| + # For chunked Transfer, we call the cb for every 1MB
|
| + # of data transferred.
|
| + cb_count = (1024 * 1024)/self.BufferSize
|
| + self.size = 0
|
| + elif num_cb > 2:
|
| cb_count = self.size / self.BufferSize / (num_cb-2)
|
| elif num_cb < 0:
|
| cb_count = -1
|
| @@ -460,24 +509,39 @@ class Key(object):
|
| cb(total_bytes, self.size)
|
| l = fp.read(self.BufferSize)
|
| while len(l) > 0:
|
| - http_conn.send(l)
|
| + if chunked_transfer:
|
| + http_conn.send('%x;\r\n' % len(l))
|
| + http_conn.send(l)
|
| + http_conn.send('\r\n')
|
| + else:
|
| + http_conn.send(l)
|
| if cb:
|
| total_bytes += len(l)
|
| i += 1
|
| if i == cb_count or cb_count == -1:
|
| cb(total_bytes, self.size)
|
| i = 0
|
| + if chunked_transfer:
|
| + m.update(l)
|
| l = fp.read(self.BufferSize)
|
| + if chunked_transfer:
|
| + http_conn.send('0\r\n')
|
| + http_conn.send('\r\n')
|
| + if cb:
|
| + self.size = total_bytes
|
| + # Get the md5 which is calculated on the fly.
|
| + self.md5 = m.hexdigest()
|
| + else:
|
| + fp.seek(0)
|
| if cb:
|
| cb(total_bytes, self.size)
|
| response = http_conn.getresponse()
|
| body = response.read()
|
| - fp.seek(0)
|
| http_conn.set_debuglevel(save_debug)
|
| self.bucket.connection.debug = save_debug
|
| - if response.status == 500 or response.status == 503 or \
|
| - response.getheader('location'):
|
| - # we'll try again
|
| + if ((response.status == 500 or response.status == 503 or
|
| + response.getheader('location')) and not chunked_transfer):
|
| + # we'll try again.
|
| return response
|
| elif response.status >= 200 and response.status <= 299:
|
| self.etag = response.getheader('etag')
|
| @@ -494,7 +558,8 @@ class Key(object):
|
| else:
|
| headers = headers.copy()
|
| headers['User-Agent'] = UserAgent
|
| - headers['Content-MD5'] = self.base64md5
|
| + if self.base64md5:
|
| + headers['Content-MD5'] = self.base64md5
|
| if self.storage_class != 'STANDARD':
|
| headers[provider.storage_class_header] = self.storage_class
|
| if headers.has_key('Content-Encoding'):
|
| @@ -508,7 +573,8 @@ class Key(object):
|
| headers['Content-Type'] = self.content_type
|
| else:
|
| headers['Content-Type'] = self.content_type
|
| - headers['Content-Length'] = str(self.size)
|
| + if not chunked_transfer:
|
| + headers['Content-Length'] = str(self.size)
|
| headers['Expect'] = '100-Continue'
|
| headers = boto.utils.merge_meta(headers, self.metadata, provider)
|
| resp = self.bucket.connection.make_request('PUT', self.bucket.name,
|
| @@ -520,9 +586,10 @@ class Key(object):
|
| def compute_md5(self, fp):
|
| """
|
| :type fp: file
|
| - :param fp: File pointer to the file to MD5 hash. The file pointer will be
|
| - reset to the beginning of the file before the method returns.
|
| -
|
| + :param fp: File pointer to the file to MD5 hash. The file pointer
|
| + will be reset to the beginning of the file before the
|
| + method returns.
|
| +
|
| :rtype: tuple
|
| :return: A tuple containing the hex digest version of the MD5 hash
|
| as the first element and the base64 encoded version of the
|
| @@ -542,19 +609,102 @@ class Key(object):
|
| fp.seek(0)
|
| return (hex_md5, base64md5)
|
|
|
| + def set_contents_from_stream(self, fp, headers=None, replace=True,
|
| + cb=None, num_cb=10, policy=None,
|
| + reduced_redundancy=False, query_args=None):
|
| + """
|
| + Store an object using the name of the Key object as the key in
|
| + cloud and the contents of the data stream pointed to by 'fp' as
|
| + the contents.
|
| + The stream object is not seekable and total size is not known.
|
| + This has the implication that we can't specify the Content-Size and
|
| + This has the implication that we can't specify the Content-Length and
|
| + MD5 is avoided but with a penalty of inability to verify the integrity
|
| + of the uploaded data.
|
| +
|
| + :type fp: file
|
| + :param fp: the file whose contents are to be uploaded
|
| +
|
| + :type headers: dict
|
| + :param headers: additional HTTP headers to be sent with the PUT request.
|
| +
|
| + :type replace: bool
|
| + :param replace: If this parameter is False, the method will first check
|
| + to see if an object exists in the bucket with the same key. If it
|
| + does, it won't overwrite it. The default value is True which will
|
| + overwrite the object.
|
| +
|
| + :type cb: function
|
| + :param cb: a callback function that will be called to report
|
| + progress on the upload. The callback should accept two integer
|
| + parameters, the first representing the number of bytes that have
|
| + been successfully transmitted to GS and the second representing the
|
| + total number of bytes that need to be transmitted.
|
| +
|
| + :type num_cb: int
|
| + :param num_cb: (optional) If a callback is specified with the cb
|
| + parameter, this parameter determines the granularity of the callback
|
| + by defining the maximum number of times the callback will be called
|
| + during the file transfer.
|
| +
|
| + :type policy: :class:`boto.gs.acl.CannedACLStrings`
|
| + :param policy: A canned ACL policy that will be applied to the new key
|
| + in GS.
|
| +
|
| + :type reduced_redundancy: bool
|
| + :param reduced_redundancy: If True, this will set the storage
|
| + class of the new Key to be
|
| + REDUCED_REDUNDANCY. The Reduced Redundancy
|
| + Storage (RRS) feature of S3, provides lower
|
| + redundancy at lower storage cost.
|
| + """
|
| +
|
| + provider = self.bucket.connection.provider
|
| + if not provider.supports_chunked_transfer():
|
| + raise BotoClientError('%s does not support chunked transfer'
|
| + % provider.get_provider_name())
|
| +
|
| + # Name of the Object should be specified explicitly for Streams.
|
| + if not self.name or self.name == '':
|
| + raise BotoClientError('Cannot determine the destination '
|
| + 'object name for the given stream')
|
| +
|
| + if headers is None:
|
| + headers = {}
|
| + if policy:
|
| + headers[provider.acl_header] = policy
|
| +
|
| + # Set the Transfer Encoding for Streams.
|
| + headers['Transfer-Encoding'] = 'chunked'
|
| +
|
| + if reduced_redundancy:
|
| + self.storage_class = 'REDUCED_REDUNDANCY'
|
| + if provider.storage_class_header:
|
| + headers[provider.storage_class_header] = self.storage_class
|
| +
|
| + if self.bucket != None:
|
| + if not replace:
|
| + k = self.bucket.lookup(self.name)
|
| + if k:
|
| + return
|
| + self.send_file(fp, headers, cb, num_cb, query_args,
|
| + chunked_transfer=True)
|
| +
|
| def set_contents_from_file(self, fp, headers=None, replace=True,
|
| cb=None, num_cb=10, policy=None, md5=None,
|
| - reduced_redundancy=False, query_args=None):
|
| + reduced_redundancy=False, query_args=None,
|
| + encrypt_key=False):
|
| """
|
| Store an object in S3 using the name of the Key object as the
|
| key in S3 and the contents of the file pointed to by 'fp' as the
|
| contents.
|
| -
|
| +
|
| :type fp: file
|
| :param fp: the file whose contents to upload
|
| -
|
| +
|
| :type headers: dict
|
| - :param headers: additional HTTP headers that will be sent with the PUT request.
|
| + :param headers: Additional HTTP headers that will be sent with
|
| + the PUT request.
|
|
|
| :type replace: bool
|
| :param replace: If this parameter is False, the method
|
| @@ -562,29 +712,36 @@ class Key(object):
|
| bucket with the same key. If it does, it won't
|
| overwrite it. The default value is True which will
|
| overwrite the object.
|
| -
|
| +
|
| :type cb: function
|
| :param cb: a callback function that will be called to report
|
| - progress on the upload. The callback should accept two integer
|
| - parameters, the first representing the number of bytes that have
|
| - been successfully transmitted to S3 and the second representing
|
| - the total number of bytes that need to be transmitted.
|
| -
|
| + progress on the upload. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted to S3 and the second representing the
|
| + size of the to be transmitted object.
|
| +
|
| :type cb: int
|
| - :param num_cb: (optional) If a callback is specified with the cb parameter
|
| - this parameter determines the granularity of the callback by defining
|
| - the maximum number of times the callback will be called during the file transfer.
|
| + :param num_cb: (optional) If a callback is specified with the cb
|
| + parameter this parameter determines the granularity
|
| + of the callback by defining the maximum number of
|
| + times the callback will be called during the
|
| + file transfer.
|
|
|
| :type policy: :class:`boto.s3.acl.CannedACLStrings`
|
| - :param policy: A canned ACL policy that will be applied to the new key in S3.
|
| -
|
| - :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
|
| - file as the first element and the Base64-encoded version of the plain
|
| - checksum as the second element. This is the same format returned by
|
| + :param policy: A canned ACL policy that will be applied to the
|
| + new key in S3.
|
| +
|
| + :type md5: A tuple containing the hexdigest version of the MD5
|
| + checksum of the file as the first element and the
|
| + Base64-encoded version of the plain checksum as the
|
| + second element. This is the same format returned by
|
| the compute_md5 method.
|
| - :param md5: If you need to compute the MD5 for any reason prior to upload,
|
| - it's silly to have to do it twice so this param, if present, will be
|
| - used as the MD5 values of the file. Otherwise, the checksum will be computed.
|
| + :param md5: If you need to compute the MD5 for any reason prior
|
| + to upload, it's silly to have to do it twice so this
|
| + param, if present, will be used as the MD5 values of
|
| + the file. Otherwise, the checksum will be computed.
|
| +
|
| :type reduced_redundancy: bool
|
| :param reduced_redundancy: If True, this will set the storage
|
| class of the new Key to be
|
| @@ -592,17 +749,25 @@ class Key(object):
|
| Storage (RRS) feature of S3, provides lower
|
| redundancy at lower storage cost.
|
|
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| """
|
| provider = self.bucket.connection.provider
|
| if headers is None:
|
| headers = {}
|
| if policy:
|
| headers[provider.acl_header] = policy
|
| + if encrypt_key:
|
| + headers[provider.server_side_encryption_header] = 'AES256'
|
| +
|
| if reduced_redundancy:
|
| self.storage_class = 'REDUCED_REDUNDANCY'
|
| if provider.storage_class_header:
|
| headers[provider.storage_class_header] = self.storage_class
|
| - # TODO - What if the provider doesn't support reduced reduncancy?
|
| + # TODO - What if provider doesn't support reduced redundancy?
|
| # What if different providers provide different classes?
|
| if hasattr(fp, 'name'):
|
| self.path = fp.name
|
| @@ -626,105 +791,137 @@ class Key(object):
|
|
|
| def set_contents_from_filename(self, filename, headers=None, replace=True,
|
| cb=None, num_cb=10, policy=None, md5=None,
|
| - reduced_redundancy=False):
|
| + reduced_redundancy=False,
|
| + encrypt_key=False):
|
| """
|
| Store an object in S3 using the name of the Key object as the
|
| key in S3 and the contents of the file named by 'filename'.
|
| See set_contents_from_file method for details about the
|
| parameters.
|
| -
|
| +
|
| :type filename: string
|
| :param filename: The name of the file that you want to put onto S3
|
| -
|
| +
|
| :type headers: dict
|
| - :param headers: Additional headers to pass along with the request to AWS.
|
| -
|
| + :param headers: Additional headers to pass along with the
|
| + request to AWS.
|
| +
|
| :type replace: bool
|
| - :param replace: If True, replaces the contents of the file if it already exists.
|
| -
|
| + :param replace: If True, replaces the contents of the file
|
| + if it already exists.
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to report
|
| - progress on the download. The callback should accept two integer
|
| - parameters, the first representing the number of bytes that have
|
| - been successfully transmitted from S3 and the second representing
|
| - the total number of bytes that need to be transmitted.
|
| -
|
| + :param cb: a callback function that will be called to report
|
| + progress on the upload. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted to S3 and the second representing the
|
| + size of the to be transmitted object.
|
| +
|
| :type cb: int
|
| - :param num_cb: (optional) If a callback is specified with the cb parameter
|
| - this parameter determines the granularity of the callback by defining
|
| - the maximum number of times the callback will be called during the file transfer.
|
| -
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
| +
|
| :type policy: :class:`boto.s3.acl.CannedACLStrings`
|
| - :param policy: A canned ACL policy that will be applied to the new key in S3.
|
| -
|
| - :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
|
| - file as the first element and the Base64-encoded version of the plain
|
| - checksum as the second element. This is the same format returned by
|
| + :param policy: A canned ACL policy that will be applied to the
|
| + new key in S3.
|
| +
|
| + :type md5: A tuple containing the hexdigest version of the MD5
|
| + checksum of the file as the first element and the
|
| + Base64-encoded version of the plain checksum as the
|
| + second element. This is the same format returned by
|
| the compute_md5 method.
|
| - :param md5: If you need to compute the MD5 for any reason prior to upload,
|
| - it's silly to have to do it twice so this param, if present, will be
|
| - used as the MD5 values of the file. Otherwise, the checksum will be computed.
|
| -
|
| + :param md5: If you need to compute the MD5 for any reason prior
|
| + to upload, it's silly to have to do it twice so this
|
| + param, if present, will be used as the MD5 values
|
| + of the file. Otherwise, the checksum will be computed.
|
| +
|
| :type reduced_redundancy: bool
|
| :param reduced_redundancy: If True, this will set the storage
|
| class of the new Key to be
|
| REDUCED_REDUNDANCY. The Reduced Redundancy
|
| Storage (RRS) feature of S3, provides lower
|
| redundancy at lower storage cost.
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| """
|
| fp = open(filename, 'rb')
|
| self.set_contents_from_file(fp, headers, replace, cb, num_cb,
|
| - policy, md5, reduced_redundancy)
|
| + policy, md5, reduced_redundancy,
|
| + encrypt_key=encrypt_key)
|
| fp.close()
|
|
|
| def set_contents_from_string(self, s, headers=None, replace=True,
|
| cb=None, num_cb=10, policy=None, md5=None,
|
| - reduced_redundancy=False):
|
| + reduced_redundancy=False,
|
| + encrypt_key=False):
|
| """
|
| Store an object in S3 using the name of the Key object as the
|
| key in S3 and the string 's' as the contents.
|
| See set_contents_from_file method for details about the
|
| parameters.
|
| -
|
| +
|
| :type headers: dict
|
| - :param headers: Additional headers to pass along with the request to AWS.
|
| -
|
| + :param headers: Additional headers to pass along with the
|
| + request to AWS.
|
| +
|
| :type replace: bool
|
| - :param replace: If True, replaces the contents of the file if it already exists.
|
| -
|
| + :param replace: If True, replaces the contents of the file if
|
| + it already exists.
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to report
|
| - progress on the download. The callback should accept two integer
|
| - parameters, the first representing the number of bytes that have
|
| - been successfully transmitted from S3 and the second representing
|
| - the total number of bytes that need to be transmitted.
|
| -
|
| + :param cb: a callback function that will be called to report
|
| + progress on the upload. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted to S3 and the second representing the
|
| + size of the to be transmitted object.
|
| +
|
| :type cb: int
|
| - :param num_cb: (optional) If a callback is specified with the cb parameter
|
| - this parameter determines the granularity of the callback by defining
|
| - the maximum number of times the callback will be called during the file transfer.
|
| -
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
| +
|
| :type policy: :class:`boto.s3.acl.CannedACLStrings`
|
| - :param policy: A canned ACL policy that will be applied to the new key in S3.
|
| -
|
| - :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
|
| - file as the first element and the Base64-encoded version of the plain
|
| - checksum as the second element. This is the same format returned by
|
| + :param policy: A canned ACL policy that will be applied to the
|
| + new key in S3.
|
| +
|
| + :type md5: A tuple containing the hexdigest version of the MD5
|
| + checksum of the file as the first element and the
|
| + Base64-encoded version of the plain checksum as the
|
| + second element. This is the same format returned by
|
| the compute_md5 method.
|
| - :param md5: If you need to compute the MD5 for any reason prior to upload,
|
| - it's silly to have to do it twice so this param, if present, will be
|
| - used as the MD5 values of the file. Otherwise, the checksum will be computed.
|
| -
|
| + :param md5: If you need to compute the MD5 for any reason prior
|
| + to upload, it's silly to have to do it twice so this
|
| + param, if present, will be used as the MD5 values
|
| + of the file. Otherwise, the checksum will be computed.
|
| +
|
| :type reduced_redundancy: bool
|
| :param reduced_redundancy: If True, this will set the storage
|
| class of the new Key to be
|
| REDUCED_REDUNDANCY. The Reduced Redundancy
|
| Storage (RRS) feature of S3, provides lower
|
| redundancy at lower storage cost.
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| """
|
| + if isinstance(s, unicode):
|
| + s = s.encode("utf-8")
|
| fp = StringIO.StringIO(s)
|
| r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
|
| - policy, md5, reduced_redundancy)
|
| + policy, md5, reduced_redundancy,
|
| + encrypt_key=encrypt_key)
|
| fp.close()
|
| return r
|
|
|
| @@ -733,26 +930,28 @@ class Key(object):
|
| response_headers=None):
|
| """
|
| Retrieves a file from an S3 Key
|
| -
|
| +
|
| :type fp: file
|
| :param fp: File pointer to put the data into
|
| -
|
| +
|
| :type headers: string
|
| :param: headers to send when retrieving the files
|
| -
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to report
|
| - progress on the download. The callback should accept two integer
|
| - parameters, the first representing the number of bytes that have
|
| - been successfully transmitted from S3 and the second representing
|
| - the total number of bytes that need to be transmitted.
|
| -
|
| -
|
| + :param cb: a callback function that will be called to report
|
| + progress on the download. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted from S3 and the second representing the
|
| + size of the to be transmitted object.
|
| +
|
| :type cb: int
|
| - :param num_cb: (optional) If a callback is specified with the cb parameter
|
| - this parameter determines the granularity of the callback by defining
|
| - the maximum number of times the callback will be called during the file transfer.
|
| -
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
| +
|
| :type torrent: bool
|
| :param torrent: Flag for whether to get a torrent for the file
|
|
|
| @@ -778,7 +977,7 @@ class Key(object):
|
| save_debug = self.bucket.connection.debug
|
| if self.bucket.connection.debug == 1:
|
| self.bucket.connection.debug = 0
|
| -
|
| +
|
| query_args = []
|
| if torrent:
|
| query_args.append('torrent')
|
| @@ -811,31 +1010,31 @@ class Key(object):
|
| def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
|
| """
|
| Get a torrent file (see to get_file)
|
| -
|
| +
|
| :type fp: file
|
| :param fp: The file pointer of where to put the torrent
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: Headers to be passed
|
| -
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to
|
| - report progress on the download. The callback should
|
| - accept two integer parameters, the first representing
|
| - the number of bytes that have been successfully
|
| - transmitted from S3 and the second representing the
|
| - total number of bytes that need to be transmitted.
|
| + :param cb: a callback function that will be called to report
|
| + progress on the download. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted from S3 and the second representing the
|
| + size of the object to be downloaded.
|
| +
|
| + :type num_cb: int
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
|
|
| - :type num_cb: int
|
| - :param num_cb: (optional) If a callback is specified with the
|
| - cb parameter this parameter determines the
|
| - granularity of the callback by defining the
|
| - maximum number of times the callback will be
|
| - called during the file transfer.
|
| -
|
| """
|
| return self.get_file(fp, headers, cb, num_cb, torrent=True)
|
| -
|
| +
|
| def get_contents_to_file(self, fp, headers=None,
|
| cb=None, num_cb=10,
|
| torrent=False,
|
| @@ -846,29 +1045,29 @@ class Key(object):
|
| Retrieve an object from S3 using the name of the Key object as the
|
| key in S3. Write the contents of the object to the file pointed
|
| to by 'fp'.
|
| -
|
| +
|
| :type fp: File -like object
|
| :param fp:
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: additional HTTP headers that will be sent with
|
| the GET request.
|
| -
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to
|
| - report progress on the download. The callback should
|
| - accept two integer parameters, the first representing
|
| - the number of bytes that have been successfully
|
| - transmitted from S3 and the second representing the
|
| - total number of bytes that need to be transmitted.
|
| + :param cb: a callback function that will be called to report
|
| + progress on the download. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted from S3 and the second representing the
|
| + size of the object to be downloaded.
|
| +
|
| + :type num_cb: int
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
|
|
| - :type num_cb: int
|
| - :param num_cb: (optional) If a callback is specified with the
|
| - cb parameter this parameter determines the
|
| - granularity of the callback by defining the
|
| - maximum number of times the callback will be
|
| - called during the file transfer.
|
| -
|
| :type torrent: bool
|
| :param torrent: If True, returns the contents of a torrent
|
| file as a string.
|
| @@ -904,28 +1103,28 @@ class Key(object):
|
| key in S3. Store contents of the object to a file named by 'filename'.
|
| See get_contents_to_file method for details about the
|
| parameters.
|
| -
|
| +
|
| :type filename: string
|
| :param filename: The filename of where to put the file contents
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: Any additional headers to send in the request
|
| -
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to
|
| - report progress on the download. The callback should
|
| - accept two integer parameters, the first representing
|
| - the number of bytes that have been successfully
|
| - transmitted from S3 and the second representing the
|
| - total number of bytes that need to be transmitted.
|
| + :param cb: a callback function that will be called to report
|
| + progress on the download. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted from S3 and the second representing the
|
| + size of the object to be downloaded.
|
| +
|
| + :type num_cb: int
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
|
|
| - :type num_cb: int
|
| - :param num_cb: (optional) If a callback is specified with the
|
| - cb parameter this parameter determines the
|
| - granularity of the callback by defining the
|
| - maximum number of times the callback will be
|
| - called during the file transfer.
|
| -
|
| :type torrent: bool
|
| :param torrent: If True, returns the contents of a torrent file
|
| as a string.
|
| @@ -964,35 +1163,35 @@ class Key(object):
|
| key in S3. Return the contents of the object as a string.
|
| See get_contents_to_file method for details about the
|
| parameters.
|
| -
|
| +
|
| :type headers: dict
|
| :param headers: Any additional headers to send in the request
|
| -
|
| +
|
| :type cb: function
|
| - :param cb: (optional) a callback function that will be called to
|
| - report progress on the download. The callback should
|
| - accept two integer parameters, the first representing
|
| - the number of bytes that have been successfully
|
| - transmitted from S3 and the second representing the
|
| - total number of bytes that need to be transmitted.
|
| + :param cb: a callback function that will be called to report
|
| + progress on the download. The callback should accept
|
| + two integer parameters, the first representing the
|
| + number of bytes that have been successfully
|
| + transmitted from S3 and the second representing the
|
| + size of the object to be downloaded.
|
| +
|
| + :type num_cb: int
|
| + :param num_cb: (optional) If a callback is specified with
|
| + the cb parameter this parameter determines the
|
| + granularity of the callback by defining
|
| + the maximum number of times the callback will
|
| + be called during the file transfer.
|
|
|
| - :type num_cb: int
|
| - :param num_cb: (optional) If a callback is specified with the
|
| - cb parameter this parameter determines the
|
| - granularity of the callback by defining the
|
| - maximum number of times the callback will be
|
| - called during the file transfer.
|
| -
|
| :type torrent: bool
|
| :param torrent: If True, returns the contents of a torrent file
|
| as a string.
|
| -
|
| +
|
| :type response_headers: dict
|
| :param response_headers: A dictionary containing HTTP headers/values
|
| that will override any headers associated with
|
| the stored object in the response.
|
| See http://goo.gl/EWOPb for details.
|
| -
|
| +
|
| :rtype: string
|
| :returns: The contents of the file as a string
|
| """
|
| @@ -1008,15 +1207,15 @@ class Key(object):
|
| to a key. This method retrieves the current ACL, creates a new
|
| grant based on the parameters passed in, adds that grant to the ACL
|
| and then PUT's the new ACL back to S3.
|
| -
|
| +
|
| :type permission: string
|
| :param permission: The permission being granted. Should be one of:
|
| (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
|
| -
|
| +
|
| :type email_address: string
|
| :param email_address: The email address associated with the AWS
|
| account your are granting the permission to.
|
| -
|
| +
|
| :type recursive: boolean
|
| :param recursive: A boolean value to controls whether the command
|
| will apply the grant to all keys within the bucket
|
| @@ -1030,30 +1229,27 @@ class Key(object):
|
| policy.acl.add_email_grant(permission, email_address)
|
| self.set_acl(policy, headers=headers)
|
|
|
| - def add_user_grant(self, permission, user_id, headers=None):
|
| + def add_user_grant(self, permission, user_id, headers=None,
|
| + display_name=None):
|
| """
|
| Convenience method that provides a quick way to add a canonical
|
| user grant to a key. This method retrieves the current ACL,
|
| creates a new grant based on the parameters passed in, adds that
|
| grant to the ACL and then PUT's the new ACL back to S3.
|
| -
|
| +
|
| :type permission: string
|
| :param permission: The permission being granted. Should be one of:
|
| (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
|
| -
|
| +
|
| :type user_id: string
|
| :param user_id: The canonical user id associated with the AWS
|
| account your are granting the permission to.
|
| -
|
| - :type recursive: boolean
|
| - :param recursive: A boolean value to controls whether the command
|
| - will apply the grant to all keys within the bucket
|
| - or not. The default value is False. By passing a
|
| - True value, the call will iterate through all keys
|
| - in the bucket and apply the same grant to each key.
|
| - CAUTION: If you have a lot of keys, this could take
|
| - a long time!
|
| +
|
| + :type display_name: string
|
| + :param display_name: An optional string containing the user's
|
| + Display Name. Only required on Walrus.
|
| """
|
| policy = self.get_acl()
|
| - policy.acl.add_user_grant(permission, user_id)
|
| + policy.acl.add_user_grant(permission, user_id,
|
| + display_name=display_name)
|
| self.set_acl(policy, headers=headers)
|
|
|