| Index: boto/s3/bucket.py
|
| diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
|
| index c1b38e983cf8904e611726bb76130222355d37d1..144edbff6d9272c86b8f458a74da04d2181aee8d 100644
|
| --- a/boto/s3/bucket.py
|
| +++ b/boto/s3/bucket.py
|
| @@ -23,13 +23,11 @@
|
|
|
| import boto
|
| from boto import handler
|
| -from boto.provider import Provider
|
| from boto.resultset import ResultSet
|
| -from boto.s3.acl import ACL, Policy, CannedACLStrings, Grant
|
| +from boto.s3.acl import Policy, CannedACLStrings, Grant
|
| from boto.s3.key import Key
|
| from boto.s3.prefix import Prefix
|
| from boto.s3.deletemarker import DeleteMarker
|
| -from boto.s3.user import User
|
| from boto.s3.multipart import MultiPartUpload
|
| from boto.s3.multipart import CompleteMultiPartUpload
|
| from boto.s3.bucketlistresultset import BucketListResultSet
|
| @@ -48,6 +46,7 @@ class S3WebsiteEndpointTranslate:
|
|
|
| trans_region['EU'] = 's3-website-eu-west-1'
|
| trans_region['us-west-1'] = 's3-website-us-west-1'
|
| + trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
|
| trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
|
|
|
| @classmethod
|
| @@ -106,7 +105,7 @@ class Bucket(object):
|
| return iter(BucketListResultSet(self))
|
|
|
| def __contains__(self, key_name):
|
| - return not (self.get_key(key_name) is None)
|
| + return not (self.get_key(key_name) is None)
|
|
|
| def startElement(self, name, attrs, connection):
|
| return None
|
| @@ -175,10 +174,19 @@ class Bucket(object):
|
| k.content_type = response.getheader('content-type')
|
| k.content_encoding = response.getheader('content-encoding')
|
| k.last_modified = response.getheader('last-modified')
|
| - k.size = int(response.getheader('content-length'))
|
| + # the following machinations are a workaround for the fact that
|
| + # apache/fastcgi omits the content-length header on HEAD
|
| + # requests when the content-length is zero.
|
| + # See http://goo.gl/0Tdax for more details.
|
| + clen = response.getheader('content-length')
|
| + if clen:
|
| + k.size = int(clen)
|
| + else:
|
| + k.size = 0
|
| k.cache_control = response.getheader('cache-control')
|
| k.name = key_name
|
| k.handle_version_headers(response)
|
| + k.handle_encryption_headers(response)
|
| return k
|
| else:
|
| if response.status == 404:
|
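| A quick sketch of what the Content-Length guard above buys: a HEAD
| response served through apache/fastcgi that omits the header now yields
| a zero-size key instead of a TypeError from int(None). Bucket and key
| names here are hypothetical:
|
|     import boto
|
|     conn = boto.connect_s3()
|     bucket = conn.get_bucket('mybucket')   # hypothetical bucket
|     key = bucket.get_key('empty-object')   # a zero-byte key
|     print key.size                         # now 0, not a TypeError
|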
| @@ -281,7 +289,7 @@ class Bucket(object):
|
| def _get_all(self, element_map, initial_query_string='',
|
| headers=None, **params):
|
| l = []
|
| - for k,v in params.items():
|
| + for k, v in params.items():
|
| k = k.replace('_', '-')
|
| if k == 'maxkeys':
|
| k = 'max-keys'
|
| @@ -294,7 +302,8 @@ class Bucket(object):
|
| else:
|
| s = initial_query_string
|
| response = self.connection.make_request('GET', self.name,
|
| - headers=headers, query_args=s)
|
| + headers=headers,
|
| + query_args=s)
|
| body = response.read()
|
| boto.log.debug(body)
|
| if response.status == 200:
|
| @@ -434,11 +443,12 @@ class Bucket(object):
|
| """
|
| return self.key_class(self, key_name)
|
|
|
| - def generate_url(self, expires_in, method='GET',
|
| - headers=None, force_http=False):
|
| + def generate_url(self, expires_in, method='GET', headers=None,
|
| + force_http=False, response_headers=None):
|
| return self.connection.generate_url(expires_in, method, self.name,
|
| headers=headers,
|
| - force_http=force_http)
|
| + force_http=force_http,
|
| + response_headers=response_headers)
|
|
|
| def delete_key(self, key_name, headers=None,
|
| version_id=None, mfa_token=None):
|
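| The new response_headers argument is passed straight through to the
| connection's generate_url, so a signed URL can ask S3 to override the
| headers it sends back. A minimal sketch (the filename is hypothetical):
|
|     url = bucket.generate_url(
|         expires_in=300,
|         response_headers={'response-content-disposition':
|                           'attachment; filename=report.csv'})
|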
| @@ -479,7 +489,8 @@ class Bucket(object):
|
|
|
| def copy_key(self, new_key_name, src_bucket_name,
|
| src_key_name, metadata=None, src_version_id=None,
|
| - storage_class='STANDARD', preserve_acl=False):
|
| + storage_class='STANDARD', preserve_acl=False,
|
| + encrypt_key=False):
|
| """
|
| Create a new key in the bucket by copying another existing key.
|
|
|
| @@ -524,21 +535,34 @@ class Bucket(object):
|
| of False will be significantly more
|
| efficient.
|
|
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| +
|
| :rtype: :class:`boto.s3.key.Key` or subclass
|
| :returns: An instance of the newly created key object
|
| """
|
| + headers = {}
|
| + provider = self.connection.provider
|
| + src_key_name = boto.utils.get_utf8_value(src_key_name)
|
| if preserve_acl:
|
| - acl = self.get_xml_acl(src_key_name)
|
| + if self.name == src_bucket_name:
|
| + src_bucket = self
|
| + else:
|
| + src_bucket = self.connection.get_bucket(src_bucket_name)
|
| + acl = src_bucket.get_xml_acl(src_key_name)
|
| + if encrypt_key:
|
| + headers[provider.server_side_encryption_header] = 'AES256'
|
| src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
|
| if src_version_id:
|
| src += '?version_id=%s' % src_version_id
|
| - provider = self.connection.provider
|
| - headers = {provider.copy_source_header : src}
|
| - if storage_class != 'STANDARD':
|
| - headers[provider.storage_class_header] = storage_class
|
| + headers[provider.copy_source_header] = str(src)
|
| + headers[provider.storage_class_header] = storage_class
|
| if metadata:
|
| headers[provider.metadata_directive_header] = 'REPLACE'
|
| - headers = boto.utils.merge_meta(headers, metadata)
|
| + headers = boto.utils.merge_meta(headers, metadata, provider)
|
| else:
|
| headers[provider.metadata_directive_header] = 'COPY'
|
| response = self.connection.make_request('PUT', self.name, new_key_name,
|
| @@ -555,7 +579,8 @@ class Bucket(object):
|
| self.set_xml_acl(acl, new_key_name)
|
| return key
|
| else:
|
| - raise provider.storage_response_error(response.status, response.reason, body)
|
| + raise provider.storage_response_error(response.status,
|
| + response.reason, body)
|
|
|
| def set_canned_acl(self, acl_str, key_name='', headers=None,
|
| version_id=None):
|
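| Taken together, the copy_key changes mean a server-side-encrypted copy
| that preserves the source ACL now works across buckets, because the ACL
| is fetched from the source bucket rather than the destination. A hedged
| sketch with hypothetical bucket and key names (conn is an open
| S3Connection):
|
|     dst = conn.get_bucket('backup-bucket')
|     dst.copy_key('report.csv', 'mybucket', 'report.csv',
|                  preserve_acl=True, encrypt_key=True)
|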
| @@ -566,7 +591,7 @@ class Bucket(object):
|
| else:
|
| headers={self.connection.provider.acl_header: acl_str}
|
|
|
| - query_args='acl'
|
| + query_args = 'acl'
|
| if version_id:
|
| query_args += '&versionId=%s' % version_id
|
| response = self.connection.make_request('PUT', self.name, key_name,
|
| @@ -594,7 +619,7 @@ class Bucket(object):
|
| if version_id:
|
| query_args += '&versionId=%s' % version_id
|
| response = self.connection.make_request('PUT', self.name, key_name,
|
| - data=acl_str,
|
| + data=acl_str.encode('ISO-8859-1'),
|
| query_args=query_args,
|
| headers=headers)
|
| body = response.read()
|
| @@ -627,6 +652,80 @@ class Bucket(object):
|
| raise self.connection.provider.storage_response_error(
|
| response.status, response.reason, body)
|
|
|
| + def set_subresource(self, subresource, value, key_name='', headers=None,
|
| + version_id=None):
|
| + """
|
| + Set a subresource for a bucket or key.
|
| +
|
| + :type subresource: string
|
| + :param subresource: The subresource to set.
|
| +
|
| + :type value: string
|
| + :param value: The value of the subresource.
|
| +
|
| + :type key_name: string
|
| + :param key_name: The key to operate on, or None to operate on the
|
| + bucket.
|
| +
|
| + :type headers: dict
|
| + :param headers: Additional HTTP headers to include in the request.
|
| +
|
| + :type version_id: string
|
| + :param version_id: Optional. The version id of the key to operate
|
| + on. If not specified, operate on the newest
|
| + version.
|
| + """
|
| + if not subresource:
|
| + raise TypeError('set_subresource called with subresource=None')
|
| + query_args = subresource
|
| + if version_id:
|
| + query_args += '&versionId=%s' % version_id
|
| + response = self.connection.make_request('PUT', self.name, key_name,
|
| + data=value.encode('UTF-8'),
|
| + query_args=query_args,
|
| + headers=headers)
|
| + body = response.read()
|
| + if response.status != 200:
|
| + raise self.connection.provider.storage_response_error(
|
| + response.status, response.reason, body)
|
| +
|
| + def get_subresource(self, subresource, key_name='', headers=None,
|
| + version_id=None):
|
| + """
|
| + Get a subresource for a bucket or key.
|
| +
|
| + :type subresource: string
|
| + :param subresource: The subresource to get.
|
| +
|
| + :type key_name: string
|
| + :param key_name: The key to operate on, or None to operate on the
|
| + bucket.
|
| +
|
| + :type headers: dict
|
| + :param headers: Additional HTTP headers to include in the request.
|
| +
|
| + :type version_id: string
|
| + :param version_id: Optional. The version id of the key to operate
|
| + on. If not specified, operate on the newest
|
| + version.
|
| +
|
| + :rtype: string
|
| + :returns: The value of the subresource.
|
| + """
|
| + if not subresource:
|
| + raise TypeError('get_subresource called with subresource=None')
|
| + query_args = subresource
|
| + if version_id:
|
| + query_args += '&versionId=%s' % version_id
|
| + response = self.connection.make_request('GET', self.name, key_name,
|
| + query_args=query_args,
|
| + headers=headers)
|
| + body = response.read()
|
| + if response.status != 200:
|
| + raise self.connection.provider.storage_response_error(
|
| + response.status, response.reason, body)
|
| + return body
|
| +
|
| def make_public(self, recursive=False, headers=None):
|
| self.set_canned_acl('public-read', headers=headers)
|
| if recursive:
|
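| The new subresource methods can drive any PUT/GET subresource. As one
| plausible use (not taken from this patch), the requestPayment document
| below flips a bucket to requester-pays:
|
|     payer_xml = ('<RequestPaymentConfiguration>'
|                  '<Payer>Requester</Payer>'
|                  '</RequestPaymentConfiguration>')
|     bucket.set_subresource('requestPayment', payer_xml)
|     print bucket.get_subresource('requestPayment')
|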
| @@ -668,8 +767,8 @@ class Bucket(object):
|
| for key in self:
|
| key.add_email_grant(permission, email_address, headers=headers)
|
|
|
| - def add_user_grant(self, permission, user_id,
|
| - recursive=False, headers=None):
|
| + def add_user_grant(self, permission, user_id, recursive=False,
|
| + headers=None, display_name=None):
|
| """
|
| Convenience method that provides a quick way to add a canonical
|
| user grant to a bucket. This method retrieves the current ACL,
|
| @@ -692,16 +791,22 @@ class Bucket(object):
|
| in the bucket and apply the same grant to each key.
|
| CAUTION: If you have a lot of keys, this could take
|
| a long time!
|
| +
|
| + :type display_name: string
|
| + :param display_name: An optional string containing the user's
|
| + Display Name. Only required on Walrus.
|
| """
|
| if permission not in S3Permissions:
|
| raise self.connection.provider.storage_permissions_error(
|
| 'Unknown Permission: %s' % permission)
|
| policy = self.get_acl(headers=headers)
|
| - policy.acl.add_user_grant(permission, user_id)
|
| + policy.acl.add_user_grant(permission, user_id,
|
| + display_name=display_name)
|
| self.set_acl(policy, headers=headers)
|
| if recursive:
|
| for key in self:
|
| - key.add_user_grant(permission, user_id, headers=headers)
|
| + key.add_user_grant(permission, user_id, headers=headers,
|
| + display_name=display_name)
|
|
|
| def list_grants(self, headers=None):
|
| policy = self.get_acl(headers=headers)
|
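| A short sketch of the Walrus-oriented display_name pass-through; the id
| and name below are placeholders, and per the docstring only Walrus
| requires the display name:
|
|     bucket.add_user_grant('READ', 'canonical-user-id-here',
|                           display_name='jsmith')
|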
| @@ -795,9 +900,9 @@ class Bucket(object):
|
| mfa_token=None, headers=None):
|
| """
|
| Configure versioning for this bucket.
|
| - Note: This feature is currently in beta release and is available
|
| - only in the Northern California region.
|
| -
|
| +
|
| + .. note:: This feature is currently in beta.
|
| +
|
| :type versioning: bool
|
| :param versioning: A boolean indicating whether version is
|
| enabled (True) or disabled (False).
|
| @@ -909,14 +1014,17 @@ class Bucket(object):
|
|
|
| :rtype: dict
|
| :returns: A dictionary containing a Python representation
|
| - of the XML response from S3. The overall structure is:
|
| + of the XML response from S3. The overall structure is:
|
|
|
| - * WebsiteConfiguration
|
| - * IndexDocument
|
| - * Suffix : suffix that is appended to request that
|
| - is for a "directory" on the website endpoint
|
| - * ErrorDocument
|
| - * Key : name of object to serve when an error occurs
|
| + * WebsiteConfiguration
|
| +
|
| + * IndexDocument
|
| +
|
| + * Suffix : suffix that is appended to a request that
|
| + is for a "directory" on the website endpoint
|
| + * ErrorDocument
|
| +
|
| + * Key : name of object to serve when an error occurs
|
| """
|
| response = self.connection.make_request('GET', self.name,
|
| query_args='website', headers=headers)
|
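| Given the structure documented above, a call to the surrounding method
| (get_website_configuration) would come back roughly like this (values
| are illustrative):
|
|     config = bucket.get_website_configuration()
|     # {'WebsiteConfiguration':
|     #     {'IndexDocument': {'Suffix': 'index.html'},
|     #      'ErrorDocument': {'Key': 'error.html'}}}
|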
| @@ -978,8 +1086,70 @@ class Bucket(object):
|
| raise self.connection.provider.storage_response_error(
|
| response.status, response.reason, body)
|
|
|
| - def initiate_multipart_upload(self, key_name, headers=None):
|
| + def delete_policy(self, headers=None):
|
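| + """
|
| + Delete any policy subresource attached to this bucket.
|
| + """
|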
| + response = self.connection.make_request('DELETE', self.name,
|
| + data='/?policy',
|
| + query_args='policy',
|
| + headers=headers)
|
| + body = response.read()
|
| + if response.status >= 200 and response.status <= 204:
|
| + return True
|
| + else:
|
| + raise self.connection.provider.storage_response_error(
|
| + response.status, response.reason, body)
|
| +
|
| +
|
| + def initiate_multipart_upload(self, key_name, headers=None,
|
| + reduced_redundancy=False,
|
| + metadata=None, encrypt_key=False):
|
| + """
|
| + Start a multipart upload operation.
|
| +
|
| + :type key_name: string
|
| + :param key_name: The name of the key that will ultimately result from
|
| + this multipart upload operation. This will be exactly
|
| + as the key appears in the bucket after the upload
|
| + process has been completed.
|
| +
|
| + :type headers: dict
|
| + :param headers: Additional HTTP headers to send and store with the
|
| + resulting key in S3.
|
| +
|
| + :type reduced_redundancy: boolean
|
| + :param reduced_redundancy: In multipart uploads, the storage class is
|
| + specified when initiating the upload,
|
| + not when uploading individual parts. So
|
| + if you want the resulting key to use the
|
| + reduced redundancy storage class set this
|
| + flag when you initiate the upload.
|
| +
|
| + :type metadata: dict
|
| + :param metadata: Any metadata that you would like to set on the key
|
| + that results from the multipart upload.
|
| +
|
| + :type encrypt_key: bool
|
| + :param encrypt_key: If True, the new copy of the object will
|
| + be encrypted on the server-side by S3 and
|
| + will be stored in an encrypted form while
|
| + at rest in S3.
|
| + """
|
| query_args = 'uploads'
|
| + provider = self.connection.provider
|
| + if headers is None:
|
| + headers = {}
|
| + if reduced_redundancy:
|
| + storage_class_header = provider.storage_class_header
|
| + if storage_class_header:
|
| + headers[storage_class_header] = 'REDUCED_REDUNDANCY'
|
| + # TODO: what if the provider doesn't support reduced redundancy?
|
| + # (see boto.s3.key.Key.set_contents_from_file)
|
| + if encrypt_key:
|
| + headers[provider.server_side_encryption_header] = 'AES256'
|
| + if metadata is None:
|
| + metadata = {}
|
| +
|
| + headers = boto.utils.merge_meta(headers, metadata,
|
| + self.connection.provider)
|
| response = self.connection.make_request('POST', self.name, key_name,
|
| query_args=query_args,
|
| headers=headers)
|
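| A sketch of the expanded initiate call, driving the existing
| MultiPartUpload part-upload API; the file name and metadata are
| hypothetical:
|
|     mp = bucket.initiate_multipart_upload('big-file.bin',
|                                           reduced_redundancy=True,
|                                           encrypt_key=True,
|                                           metadata={'origin': 'nightly'})
|     fp = open('big-file.bin', 'rb')
|     try:
|         mp.upload_part_from_file(fp, part_num=1)
|     finally:
|         fp.close()
|     mp.complete_upload()
|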
| @@ -996,6 +1166,9 @@ class Bucket(object):
|
|
|
| def complete_multipart_upload(self, key_name, upload_id,
|
| xml_body, headers=None):
|
| + """
|
| + Complete a multipart upload operation.
|
| + """
|
| query_args = 'uploadId=%s' % upload_id
|
| if headers is None:
|
| headers = {}
|
| @@ -1003,9 +1176,15 @@ class Bucket(object):
|
| response = self.connection.make_request('POST', self.name, key_name,
|
| query_args=query_args,
|
| headers=headers, data=xml_body)
|
| + contains_error = False
|
| body = response.read()
|
| + # Some errors will be reported in the body of the response
|
| + # even though the HTTP response code is 200. This check
|
| + # does a quick and dirty peek in the body for an error element.
|
| + if body.find('<Error>') > 0:
|
| + contains_error = True
|
| boto.log.debug(body)
|
| - if response.status == 200:
|
| + if response.status == 200 and not contains_error:
|
| resp = CompleteMultiPartUpload(self)
|
| h = handler.XmlHandler(resp, self)
|
| xml.sax.parseString(body, h)
|
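| Because S3 can answer CompleteMultipartUpload with HTTP 200 whose body
| is an <Error> document, callers now get a storage_response_error in
| that case as well. A naive caller-side retry (a sketch, not part of
| this patch; production code should back off and eventually re-raise):
|
|     from boto.exception import S3ResponseError
|
|     try:
|         mp.complete_upload()
|     except S3ResponseError:
|         mp.complete_upload()
|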
| @@ -1027,4 +1206,3 @@ class Bucket(object):
|
|
|
| def delete(self, headers=None):
|
| return self.connection.delete_bucket(self.name, headers=headers)
|
| -
|
|
|