OLD | NEW |
1 # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ | 1 # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ |
| 2 # Copyright (c) 2011, Nexenta Systems Inc. |
2 # | 3 # |
3 # Permission is hereby granted, free of charge, to any person obtaining a | 4 # Permission is hereby granted, free of charge, to any person obtaining a |
4 # copy of this software and associated documentation files (the | 5 # copy of this software and associated documentation files (the |
5 # "Software"), to deal in the Software without restriction, including | 6 # "Software"), to deal in the Software without restriction, including |
6 # without limitation the rights to use, copy, modify, merge, publish, dis- | 7 # without limitation the rights to use, copy, modify, merge, publish, dis- |
7 # tribute, sublicense, and/or sell copies of the Software, and to permit | 8 # tribute, sublicense, and/or sell copies of the Software, and to permit |
8 # persons to whom the Software is furnished to do so, subject to the fol- | 9 # persons to whom the Software is furnished to do so, subject to the fol- |
9 # lowing conditions: | 10 # lowing conditions: |
10 # | 11 # |
11 # The above copyright notice and this permission notice shall be included | 12 # The above copyright notice and this permission notice shall be included |
12 # in all copies or substantial portions of the Software. | 13 # in all copies or substantial portions of the Software. |
13 # | 14 # |
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- | 16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- |
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT | 17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT |
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | 18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, |
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | 19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 # IN THE SOFTWARE. | 21 # IN THE SOFTWARE. |
21 | 22 |
22 import mimetypes | 23 import mimetypes |
23 import os | 24 import os |
| 25 import re |
24 import rfc822 | 26 import rfc822 |
25 import StringIO | 27 import StringIO |
26 import base64 | 28 import base64 |
27 import boto.utils | 29 import boto.utils |
28 from boto.exception import BotoClientError | 30 from boto.exception import BotoClientError |
29 from boto.provider import Provider | 31 from boto.provider import Provider |
30 from boto.s3.user import User | 32 from boto.s3.user import User |
31 from boto import UserAgent | 33 from boto import UserAgent |
32 try: | 34 try: |
33 from hashlib import md5 | 35 from hashlib import md5 |
(...skipping 21 matching lines...) Expand all Loading... |
55 self.storage_class = 'STANDARD' | 57 self.storage_class = 'STANDARD' |
56 self.md5 = None | 58 self.md5 = None |
57 self.base64md5 = None | 59 self.base64md5 = None |
58 self.path = None | 60 self.path = None |
59 self.resp = None | 61 self.resp = None |
60 self.mode = None | 62 self.mode = None |
61 self.size = None | 63 self.size = None |
62 self.version_id = None | 64 self.version_id = None |
63 self.source_version_id = None | 65 self.source_version_id = None |
64 self.delete_marker = False | 66 self.delete_marker = False |
| 67 self.encrypted = None |
65 | 68 |
66 def __repr__(self): | 69 def __repr__(self): |
67 if self.bucket: | 70 if self.bucket: |
68 return '<Key: %s,%s>' % (self.bucket.name, self.name) | 71 return '<Key: %s,%s>' % (self.bucket.name, self.name) |
69 else: | 72 else: |
70 return '<Key: None,%s>' % self.name | 73 return '<Key: None,%s>' % self.name |
71 | 74 |
72 def __getattr__(self, name): | 75 def __getattr__(self, name): |
73 if name == 'key': | 76 if name == 'key': |
74 return self.name | 77 return self.name |
75 else: | 78 else: |
76 raise AttributeError | 79 raise AttributeError |
77 | 80 |
78 def __setattr__(self, name, value): | 81 def __setattr__(self, name, value): |
79 if name == 'key': | 82 if name == 'key': |
80 self.__dict__['name'] = value | 83 self.__dict__['name'] = value |
81 else: | 84 else: |
82 self.__dict__[name] = value | 85 self.__dict__[name] = value |
83 | 86 |
84 def __iter__(self): | 87 def __iter__(self): |
85 return self | 88 return self |
86 | 89 |
87 @property | 90 @property |
88 def provider(self): | 91 def provider(self): |
89 provider = None | 92 provider = None |
90 if self.bucket: | 93 if self.bucket: |
91 if self.bucket.connection: | 94 if self.bucket.connection: |
92 provider = self.bucket.connection.provider | 95 provider = self.bucket.connection.provider |
93 return provider | 96 return provider |
94 | 97 |
95 def get_md5_from_hexdigest(self, md5_hexdigest): | 98 def get_md5_from_hexdigest(self, md5_hexdigest): |
96 """ | 99 """ |
97 A utility function to create the 2-tuple (md5hexdigest, base64md5) | 100 A utility function to create the 2-tuple (md5hexdigest, base64md5) |
98 from just having a precalculated md5_hexdigest. | 101 from just having a precalculated md5_hexdigest. |
99 """ | 102 """ |
100 import binascii | 103 import binascii |
101 digest = binascii.unhexlify(md5_hexdigest) | 104 digest = binascii.unhexlify(md5_hexdigest) |
102 base64md5 = base64.encodestring(digest) | 105 base64md5 = base64.encodestring(digest) |
103 if base64md5[-1] == '\n': | 106 if base64md5[-1] == '\n': |
104 base64md5 = base64md5[0:-1] | 107 base64md5 = base64md5[0:-1] |
105 return (md5_hexdigest, base64md5) | 108 return (md5_hexdigest, base64md5) |
106 | 109 |
| 110 def handle_encryption_headers(self, resp): |
| 111 provider = self.bucket.connection.provider |
| 112 if provider.server_side_encryption_header: |
| 113 self.encrypted = resp.getheader(provider.server_side_encryption_head
er, None) |
| 114 else: |
| 115 self.encrypted = None |
| 116 |
107 def handle_version_headers(self, resp, force=False): | 117 def handle_version_headers(self, resp, force=False): |
108 provider = self.bucket.connection.provider | 118 provider = self.bucket.connection.provider |
109 # If the Key object already has a version_id attribute value, it | 119 # If the Key object already has a version_id attribute value, it |
110 # means that it represents an explicit version and the user is | 120 # means that it represents an explicit version and the user is |
111 # doing a get_contents_*(version_id=<foo>) to retrieve another | 121 # doing a get_contents_*(version_id=<foo>) to retrieve another |
112 # version of the Key. In that case, we don't really want to | 122 # version of the Key. In that case, we don't really want to |
113 # overwrite the version_id in this Key object. Comprende? | 123 # overwrite the version_id in this Key object. Comprende? |
114 if self.version_id is None or force: | 124 if self.version_id is None or force: |
115 self.version_id = resp.getheader(provider.version_id, None) | 125 self.version_id = resp.getheader(provider.version_id, None) |
116 self.source_version_id = resp.getheader(provider.copy_source_version_id,
None) | 126 self.source_version_id = resp.getheader(provider.copy_source_version_id, |
| 127 None) |
117 if resp.getheader(provider.delete_marker, 'false') == 'true': | 128 if resp.getheader(provider.delete_marker, 'false') == 'true': |
118 self.delete_marker = True | 129 self.delete_marker = True |
119 else: | 130 else: |
120 self.delete_marker = False | 131 self.delete_marker = False |
121 | 132 |
122 def open_read(self, headers=None, query_args=None, | 133 def open_read(self, headers=None, query_args=None, |
123 override_num_retries=None, response_headers=None): | 134 override_num_retries=None, response_headers=None): |
124 """ | 135 """ |
125 Open this key for reading | 136 Open this key for reading |
126 | 137 |
127 :type headers: dict | 138 :type headers: dict |
128 :param headers: Headers to pass in the web request | 139 :param headers: Headers to pass in the web request |
129 | 140 |
130 :type query_args: string | 141 :type query_args: string |
131 :param query_args: Arguments to pass in the query string (ie, 'torrent') | 142 :param query_args: Arguments to pass in the query string (ie, 'torrent') |
132 | 143 |
133 :type override_num_retries: int | 144 :type override_num_retries: int |
134 :param override_num_retries: If not None will override configured | 145 :param override_num_retries: If not None will override configured |
135 num_retries parameter for underlying GET. | 146 num_retries parameter for underlying GET. |
136 | 147 |
137 :type response_headers: dict | 148 :type response_headers: dict |
138 :param response_headers: A dictionary containing HTTP headers/values | 149 :param response_headers: A dictionary containing HTTP headers/values |
139 that will override any headers associated with | 150 that will override any headers associated with |
140 the stored object in the response. | 151 the stored object in the response. |
141 See http://goo.gl/EWOPb for details. | 152 See http://goo.gl/EWOPb for details. |
142 """ | 153 """ |
143 if self.resp == None: | 154 if self.resp == None: |
144 self.mode = 'r' | 155 self.mode = 'r' |
145 | 156 |
146 provider = self.bucket.connection.provider | 157 provider = self.bucket.connection.provider |
147 self.resp = self.bucket.connection.make_request( | 158 self.resp = self.bucket.connection.make_request( |
148 'GET', self.bucket.name, self.name, headers, | 159 'GET', self.bucket.name, self.name, headers, |
149 query_args=query_args, | 160 query_args=query_args, |
150 override_num_retries=override_num_retries) | 161 override_num_retries=override_num_retries) |
151 if self.resp.status < 199 or self.resp.status > 299: | 162 if self.resp.status < 199 or self.resp.status > 299: |
152 body = self.resp.read() | 163 body = self.resp.read() |
153 raise provider.storage_response_error(self.resp.status, | 164 raise provider.storage_response_error(self.resp.status, |
154 self.resp.reason, body) | 165 self.resp.reason, body) |
155 response_headers = self.resp.msg | 166 response_headers = self.resp.msg |
156 self.metadata = boto.utils.get_aws_metadata(response_headers, | 167 self.metadata = boto.utils.get_aws_metadata(response_headers, |
157 provider) | 168 provider) |
158 for name,value in response_headers.items(): | 169 for name,value in response_headers.items(): |
159 if name.lower() == 'content-length': | 170 # To get correct size for Range GETs, use Content-Range |
| 171 # header if one was returned. If not, use Content-Length |
| 172 # header. |
| 173 if (name.lower() == 'content-length' and |
| 174 'Content-Range' not in response_headers): |
160 self.size = int(value) | 175 self.size = int(value) |
| 176 elif name.lower() == 'content-range': |
| 177 end_range = re.sub('.*/(.*)', '\\1', value) |
| 178 self.size = int(end_range) |
161 elif name.lower() == 'etag': | 179 elif name.lower() == 'etag': |
162 self.etag = value | 180 self.etag = value |
163 elif name.lower() == 'content-type': | 181 elif name.lower() == 'content-type': |
164 self.content_type = value | 182 self.content_type = value |
165 elif name.lower() == 'content-encoding': | 183 elif name.lower() == 'content-encoding': |
166 self.content_encoding = value | 184 self.content_encoding = value |
167 elif name.lower() == 'last-modified': | 185 elif name.lower() == 'last-modified': |
168 self.last_modified = value | 186 self.last_modified = value |
169 elif name.lower() == 'cache-control': | 187 elif name.lower() == 'cache-control': |
170 self.cache_control = value | 188 self.cache_control = value |
171 self.handle_version_headers(self.resp) | 189 self.handle_version_headers(self.resp) |
| 190 self.handle_encryption_headers(self.resp) |
172 | 191 |
173 def open_write(self, headers=None, override_num_retries=None): | 192 def open_write(self, headers=None, override_num_retries=None): |
174 """ | 193 """ |
175 Open this key for writing. | 194 Open this key for writing. |
176 Not yet implemented | 195 Not yet implemented |
177 | 196 |
178 :type headers: dict | 197 :type headers: dict |
179 :param headers: Headers to pass in the write request | 198 :param headers: Headers to pass in the write request |
180 | 199 |
181 :type override_num_retries: int | 200 :type override_num_retries: int |
182 :param override_num_retries: If not None will override configured | 201 :param override_num_retries: If not None will override configured |
183 num_retries parameter for underlying PUT. | 202 num_retries parameter for underlying PUT. |
184 """ | 203 """ |
185 raise BotoClientError('Not Implemented') | 204 raise BotoClientError('Not Implemented') |
186 | 205 |
187 def open(self, mode='r', headers=None, query_args=None, | 206 def open(self, mode='r', headers=None, query_args=None, |
188 override_num_retries=None): | 207 override_num_retries=None): |
189 if mode == 'r': | 208 if mode == 'r': |
190 self.mode = 'r' | 209 self.mode = 'r' |
191 self.open_read(headers=headers, query_args=query_args, | 210 self.open_read(headers=headers, query_args=query_args, |
192 override_num_retries=override_num_retries) | 211 override_num_retries=override_num_retries) |
193 elif mode == 'w': | 212 elif mode == 'w': |
194 self.mode = 'w' | 213 self.mode = 'w' |
195 self.open_write(headers=headers, | 214 self.open_write(headers=headers, |
196 override_num_retries=override_num_retries) | 215 override_num_retries=override_num_retries) |
197 else: | 216 else: |
198 raise BotoClientError('Invalid mode: %s' % mode) | 217 raise BotoClientError('Invalid mode: %s' % mode) |
199 | 218 |
200 closed = False | 219 closed = False |
201 def close(self): | 220 def close(self): |
202 if self.resp: | 221 if self.resp: |
203 self.resp.read() | 222 self.resp.read() |
204 self.resp = None | 223 self.resp = None |
205 self.mode = None | 224 self.mode = None |
206 self.closed = True | 225 self.closed = True |
207 | 226 |
208 def next(self): | 227 def next(self): |
209 """ | 228 """ |
210 By providing a next method, the key object supports use as an iterator. | 229 By providing a next method, the key object supports use as an iterator. |
211 For example, you can now say: | 230 For example, you can now say: |
212 | 231 |
213 for bytes in key: | 232 for bytes in key: |
214 write bytes to a file or whatever | 233 write bytes to a file or whatever |
215 | 234 |
216 All of the HTTP connection stuff is handled for you. | 235 All of the HTTP connection stuff is handled for you. |
217 """ | 236 """ |
218 self.open_read() | 237 self.open_read() |
219 data = self.resp.read(self.BufferSize) | 238 data = self.resp.read(self.BufferSize) |
220 if not data: | 239 if not data: |
221 self.close() | 240 self.close() |
222 raise StopIteration | 241 raise StopIteration |
223 return data | 242 return data |
224 | 243 |
225 def read(self, size=0): | 244 def read(self, size=0): |
| 245 self.open_read() |
226 if size == 0: | 246 if size == 0: |
227 size = self.BufferSize | 247 data = self.resp.read() |
228 self.open_read() | 248 else: |
229 data = self.resp.read(size) | 249 data = self.resp.read(size) |
230 if not data: | 250 if not data: |
231 self.close() | 251 self.close() |
232 return data | 252 return data |
233 | 253 |
234 def change_storage_class(self, new_storage_class, dst_bucket=None): | 254 def change_storage_class(self, new_storage_class, dst_bucket=None): |
235 """ | 255 """ |
236 Change the storage class of an existing key. | 256 Change the storage class of an existing key. |
237 Depending on whether a different destination bucket is supplied | 257 Depending on whether a different destination bucket is supplied |
238 or not, this will either move the item within the bucket, preserving | 258 or not, this will either move the item within the bucket, preserving |
239 all metadata and ACL info bucket changing the storage class or it | 259 all metadata and ACL info bucket changing the storage class or it |
240 will copy the item to the provided destination bucket, also | 260 will copy the item to the provided destination bucket, also |
241 preserving metadata and ACL info. | 261 preserving metadata and ACL info. |
242 | 262 |
243 :type new_storage_class: string | 263 :type new_storage_class: string |
244 :param new_storage_class: The new storage class for the Key. | 264 :param new_storage_class: The new storage class for the Key. |
245 Possible values are: | 265 Possible values are: |
246 * STANDARD | 266 * STANDARD |
247 * REDUCED_REDUNDANCY | 267 * REDUCED_REDUNDANCY |
248 | 268 |
249 :type dst_bucket: string | 269 :type dst_bucket: string |
250 :param dst_bucket: The name of a destination bucket. If not | 270 :param dst_bucket: The name of a destination bucket. If not |
251 provided the current bucket of the key | 271 provided the current bucket of the key |
252 will be used. | 272 will be used. |
253 | 273 |
254 """ | 274 """ |
255 if new_storage_class == 'STANDARD': | 275 if new_storage_class == 'STANDARD': |
256 return self.copy(self.bucket.name, self.name, | 276 return self.copy(self.bucket.name, self.name, |
257 reduced_redundancy=False, preserve_acl=True) | 277 reduced_redundancy=False, preserve_acl=True) |
258 elif new_storage_class == 'REDUCED_REDUNDANCY': | 278 elif new_storage_class == 'REDUCED_REDUNDANCY': |
259 return self.copy(self.bucket.name, self.name, | 279 return self.copy(self.bucket.name, self.name, |
260 reduced_redundancy=True, preserve_acl=True) | 280 reduced_redundancy=True, preserve_acl=True) |
261 else: | 281 else: |
262 raise BotoClientError('Invalid storage class: %s' % | 282 raise BotoClientError('Invalid storage class: %s' % |
263 new_storage_class) | 283 new_storage_class) |
264 | 284 |
265 def copy(self, dst_bucket, dst_key, metadata=None, | 285 def copy(self, dst_bucket, dst_key, metadata=None, |
266 reduced_redundancy=False, preserve_acl=False): | 286 reduced_redundancy=False, preserve_acl=False, |
| 287 encrypt_key=False): |
267 """ | 288 """ |
268 Copy this Key to another bucket. | 289 Copy this Key to another bucket. |
269 | 290 |
270 :type dst_bucket: string | 291 :type dst_bucket: string |
271 :param dst_bucket: The name of the destination bucket | 292 :param dst_bucket: The name of the destination bucket |
272 | 293 |
273 :type dst_key: string | 294 :type dst_key: string |
274 :param dst_key: The name of the destination key | 295 :param dst_key: The name of the destination key |
275 | 296 |
276 :type metadata: dict | 297 :type metadata: dict |
277 :param metadata: Metadata to be associated with new key. | 298 :param metadata: Metadata to be associated with new key. |
278 If metadata is supplied, it will replace the | 299 If metadata is supplied, it will replace the |
279 metadata of the source key being copied. | 300 metadata of the source key being copied. |
280 If no metadata is supplied, the source key's | 301 If no metadata is supplied, the source key's |
281 metadata will be copied to the new key. | 302 metadata will be copied to the new key. |
282 | 303 |
283 :type reduced_redundancy: bool | 304 :type reduced_redundancy: bool |
284 :param reduced_redundancy: If True, this will force the storage | 305 :param reduced_redundancy: If True, this will force the storage |
285 class of the new Key to be | 306 class of the new Key to be |
(...skipping 10 matching lines...) Expand all Loading... |
296 will have the default ACL. | 317 will have the default ACL. |
297 Note that preserving the ACL in the | 318 Note that preserving the ACL in the |
298 new key object will require two | 319 new key object will require two |
299 additional API calls to S3, one to | 320 additional API calls to S3, one to |
300 retrieve the current ACL and one to | 321 retrieve the current ACL and one to |
301 set that ACL on the new object. If | 322 set that ACL on the new object. If |
302 you don't care about the ACL, a value | 323 you don't care about the ACL, a value |
303 of False will be significantly more | 324 of False will be significantly more |
304 efficient. | 325 efficient. |
305 | 326 |
| 327 :type encrypt_key: bool |
| 328 :param encrypt_key: If True, the new copy of the object will |
| 329 be encrypted on the server-side by S3 and |
| 330 will be stored in an encrypted form while |
| 331 at rest in S3. |
| 332 |
306 :rtype: :class:`boto.s3.key.Key` or subclass | 333 :rtype: :class:`boto.s3.key.Key` or subclass |
307 :returns: An instance of the newly created key object | 334 :returns: An instance of the newly created key object |
308 """ | 335 """ |
309 dst_bucket = self.bucket.connection.lookup(dst_bucket) | 336 dst_bucket = self.bucket.connection.lookup(dst_bucket) |
310 if reduced_redundancy: | 337 if reduced_redundancy: |
311 storage_class = 'REDUCED_REDUNDANCY' | 338 storage_class = 'REDUCED_REDUNDANCY' |
312 else: | 339 else: |
313 storage_class = self.storage_class | 340 storage_class = self.storage_class |
314 return dst_bucket.copy_key(dst_key, self.bucket.name, | 341 return dst_bucket.copy_key(dst_key, self.bucket.name, |
315 self.name, metadata, | 342 self.name, metadata, |
316 storage_class=storage_class, | 343 storage_class=storage_class, |
317 preserve_acl=preserve_acl) | 344 preserve_acl=preserve_acl, |
| 345 encrypt_key=encrypt_key) |
318 | 346 |
319 def startElement(self, name, attrs, connection): | 347 def startElement(self, name, attrs, connection): |
320 if name == 'Owner': | 348 if name == 'Owner': |
321 self.owner = User(self) | 349 self.owner = User(self) |
322 return self.owner | 350 return self.owner |
323 else: | 351 else: |
324 return None | 352 return None |
325 | 353 |
326 def endElement(self, name, value, connection): | 354 def endElement(self, name, value, connection): |
327 if name == 'Key': | 355 if name == 'Key': |
328 self.name = value.encode('utf-8') | 356 self.name = value.encode('utf-8') |
329 elif name == 'ETag': | 357 elif name == 'ETag': |
330 self.etag = value | 358 self.etag = value |
331 elif name == 'LastModified': | 359 elif name == 'LastModified': |
332 self.last_modified = value | 360 self.last_modified = value |
333 elif name == 'Size': | 361 elif name == 'Size': |
334 self.size = int(value) | 362 self.size = int(value) |
335 elif name == 'StorageClass': | 363 elif name == 'StorageClass': |
336 self.storage_class = value | 364 self.storage_class = value |
337 elif name == 'Owner': | 365 elif name == 'Owner': |
338 pass | 366 pass |
339 elif name == 'VersionId': | 367 elif name == 'VersionId': |
340 self.version_id = value | 368 self.version_id = value |
341 else: | 369 else: |
342 setattr(self, name, value) | 370 setattr(self, name, value) |
343 | 371 |
344 def exists(self): | 372 def exists(self): |
345 """ | 373 """ |
346 Returns True if the key exists | 374 Returns True if the key exists |
347 | 375 |
348 :rtype: bool | 376 :rtype: bool |
349 :return: Whether the key exists on S3 | 377 :return: Whether the key exists on S3 |
350 """ | 378 """ |
351 return bool(self.bucket.lookup(self.name)) | 379 return bool(self.bucket.lookup(self.name)) |
352 | 380 |
353 def delete(self): | 381 def delete(self): |
354 """ | 382 """ |
355 Delete this key from S3 | 383 Delete this key from S3 |
356 """ | 384 """ |
357 return self.bucket.delete_key(self.name, version_id=self.version_id) | 385 return self.bucket.delete_key(self.name, version_id=self.version_id) |
358 | 386 |
359 def get_metadata(self, name): | 387 def get_metadata(self, name): |
360 return self.metadata.get(name) | 388 return self.metadata.get(name) |
361 | 389 |
362 def set_metadata(self, name, value): | 390 def set_metadata(self, name, value): |
363 self.metadata[name] = value | 391 self.metadata[name] = value |
364 | 392 |
365 def update_metadata(self, d): | 393 def update_metadata(self, d): |
366 self.metadata.update(d) | 394 self.metadata.update(d) |
367 | 395 |
368 # convenience methods for setting/getting ACL | 396 # convenience methods for setting/getting ACL |
369 def set_acl(self, acl_str, headers=None): | 397 def set_acl(self, acl_str, headers=None): |
370 if self.bucket != None: | 398 if self.bucket != None: |
371 self.bucket.set_acl(acl_str, self.name, headers=headers) | 399 self.bucket.set_acl(acl_str, self.name, headers=headers) |
372 | 400 |
373 def get_acl(self, headers=None): | 401 def get_acl(self, headers=None): |
374 if self.bucket != None: | 402 if self.bucket != None: |
375 return self.bucket.get_acl(self.name, headers=headers) | 403 return self.bucket.get_acl(self.name, headers=headers) |
376 | 404 |
377 def get_xml_acl(self, headers=None): | 405 def get_xml_acl(self, headers=None): |
378 if self.bucket != None: | 406 if self.bucket != None: |
379 return self.bucket.get_xml_acl(self.name, headers=headers) | 407 return self.bucket.get_xml_acl(self.name, headers=headers) |
380 | 408 |
381 def set_xml_acl(self, acl_str, headers=None): | 409 def set_xml_acl(self, acl_str, headers=None): |
382 if self.bucket != None: | 410 if self.bucket != None: |
383 return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) | 411 return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) |
384 | 412 |
385 def set_canned_acl(self, acl_str, headers=None): | 413 def set_canned_acl(self, acl_str, headers=None): |
386 return self.bucket.set_canned_acl(acl_str, self.name, headers) | 414 return self.bucket.set_canned_acl(acl_str, self.name, headers) |
387 | 415 |
388 def make_public(self, headers=None): | 416 def make_public(self, headers=None): |
389 return self.bucket.set_canned_acl('public-read', self.name, headers) | 417 return self.bucket.set_canned_acl('public-read', self.name, headers) |
390 | 418 |
391 def generate_url(self, expires_in, method='GET', headers=None, | 419 def generate_url(self, expires_in, method='GET', headers=None, |
392 query_auth=True, force_http=False): | 420 query_auth=True, force_http=False, response_headers=None): |
393 """ | 421 """ |
394 Generate a URL to access this key. | 422 Generate a URL to access this key. |
395 | 423 |
396 :type expires_in: int | 424 :type expires_in: int |
397 :param expires_in: How long the url is valid for, in seconds | 425 :param expires_in: How long the url is valid for, in seconds |
398 | 426 |
399 :type method: string | 427 :type method: string |
400 :param method: The method to use for retrieving the file (default is GET
) | 428 :param method: The method to use for retrieving the file |
401 | 429 (default is GET) |
| 430 |
402 :type headers: dict | 431 :type headers: dict |
403 :param headers: Any headers to pass along in the request | 432 :param headers: Any headers to pass along in the request |
404 | 433 |
405 :type query_auth: bool | 434 :type query_auth: bool |
406 :param query_auth: | 435 :param query_auth: |
407 | 436 |
408 :rtype: string | 437 :rtype: string |
409 :return: The URL to access the key | 438 :return: The URL to access the key |
410 """ | 439 """ |
411 return self.bucket.connection.generate_url(expires_in, method, | 440 return self.bucket.connection.generate_url(expires_in, method, |
412 self.bucket.name, self.name, | 441 self.bucket.name, self.name, |
413 headers, query_auth, force_ht
tp) | 442 headers, query_auth, |
| 443 force_http, |
| 444 response_headers) |
414 | 445 |
415 def send_file(self, fp, headers=None, cb=None, num_cb=10, query_args=None): | 446 def send_file(self, fp, headers=None, cb=None, num_cb=10, |
| 447 query_args=None, chunked_transfer=False): |
416 """ | 448 """ |
417 Upload a file to a key into a bucket on S3. | 449 Upload a file to a key into a bucket on S3. |
418 | 450 |
419 :type fp: file | 451 :type fp: file |
420 :param fp: The file pointer to upload | 452 :param fp: The file pointer to upload |
421 | 453 |
422 :type headers: dict | 454 :type headers: dict |
423 :param headers: The headers to pass along with the PUT request | 455 :param headers: The headers to pass along with the PUT request |
424 | 456 |
425 :type cb: function | 457 :type cb: function |
426 :param cb: a callback function that will be called to report | 458 :param cb: a callback function that will be called to report |
427 progress on the upload. The callback should accept two inte
ger | 459 progress on the upload. The callback should accept |
428 parameters, the first representing the number of bytes that
have | 460 two integer parameters, the first representing the |
429 been successfully transmitted to S3 and the second represent
ing | 461 number of bytes that have been successfully |
430 the total number of bytes that need to be transmitted. | 462 transmitted to S3 and the second representing the |
431 | 463 size of the to be transmitted object. |
| 464 |
432 :type num_cb: int | 465 :type num_cb: int |
433 :param num_cb: (optional) If a callback is specified with the cb | 466 :param num_cb: (optional) If a callback is specified with the cb |
434 parameter this parameter determines the granularity | 467 parameter this parameter determines the granularity |
435 of the callback by defining the maximum number of | 468 of the callback by defining the maximum number of |
436 times the callback will be called during the file | 469 times the callback will be called during the file |
437 transfer. Providing a negative integer will cause | 470 transfer. Providing a negative integer will cause |
438 your callback to be called with each buffer read. | 471 your callback to be called with each buffer read. |
439 | 472 |
440 """ | 473 """ |
441 provider = self.bucket.connection.provider | 474 provider = self.bucket.connection.provider |
442 | 475 |
443 def sender(http_conn, method, path, data, headers): | 476 def sender(http_conn, method, path, data, headers): |
444 http_conn.putrequest(method, path) | 477 http_conn.putrequest(method, path) |
445 for key in headers: | 478 for key in headers: |
446 http_conn.putheader(key, headers[key]) | 479 http_conn.putheader(key, headers[key]) |
447 http_conn.endheaders() | 480 http_conn.endheaders() |
448 fp.seek(0) | 481 if chunked_transfer: |
| 482 # MD5 for the stream has to be calculated on the fly, as |
| 483 # we don't know the size of the stream before hand. |
| 484 m = md5() |
| 485 else: |
| 486 fp.seek(0) |
| 487 |
449 save_debug = self.bucket.connection.debug | 488 save_debug = self.bucket.connection.debug |
450 self.bucket.connection.debug = 0 | 489 self.bucket.connection.debug = 0 |
451 http_conn.set_debuglevel(0) | 490 # If the debuglevel < 3 we don't want to show connection |
| 491 # payload, so turn off HTTP connection-level debug output (to |
| 492 # be restored below). |
| 493 # Use the getattr approach to allow this to work in AppEngine. |
| 494 if getattr(http_conn, 'debuglevel', 0) < 3: |
| 495 http_conn.set_debuglevel(0) |
452 if cb: | 496 if cb: |
453 if num_cb > 2: | 497 if chunked_transfer: |
| 498 # For chunked Transfer, we call the cb for every 1MB |
| 499 # of data transferred. |
| 500 cb_count = (1024 * 1024)/self.BufferSize |
| 501 self.size = 0 |
| 502 elif num_cb > 2: |
454 cb_count = self.size / self.BufferSize / (num_cb-2) | 503 cb_count = self.size / self.BufferSize / (num_cb-2) |
455 elif num_cb < 0: | 504 elif num_cb < 0: |
456 cb_count = -1 | 505 cb_count = -1 |
457 else: | 506 else: |
458 cb_count = 0 | 507 cb_count = 0 |
459 i = total_bytes = 0 | 508 i = total_bytes = 0 |
460 cb(total_bytes, self.size) | 509 cb(total_bytes, self.size) |
461 l = fp.read(self.BufferSize) | 510 l = fp.read(self.BufferSize) |
462 while len(l) > 0: | 511 while len(l) > 0: |
463 http_conn.send(l) | 512 if chunked_transfer: |
| 513 http_conn.send('%x;\r\n' % len(l)) |
| 514 http_conn.send(l) |
| 515 http_conn.send('\r\n') |
| 516 else: |
| 517 http_conn.send(l) |
464 if cb: | 518 if cb: |
465 total_bytes += len(l) | 519 total_bytes += len(l) |
466 i += 1 | 520 i += 1 |
467 if i == cb_count or cb_count == -1: | 521 if i == cb_count or cb_count == -1: |
468 cb(total_bytes, self.size) | 522 cb(total_bytes, self.size) |
469 i = 0 | 523 i = 0 |
| 524 if chunked_transfer: |
| 525 m.update(l) |
470 l = fp.read(self.BufferSize) | 526 l = fp.read(self.BufferSize) |
| 527 if chunked_transfer: |
| 528 http_conn.send('0\r\n') |
| 529 http_conn.send('\r\n') |
| 530 if cb: |
| 531 self.size = total_bytes |
| 532 # Get the md5 which is calculated on the fly. |
| 533 self.md5 = m.hexdigest() |
| 534 else: |
| 535 fp.seek(0) |
471 if cb: | 536 if cb: |
472 cb(total_bytes, self.size) | 537 cb(total_bytes, self.size) |
473 response = http_conn.getresponse() | 538 response = http_conn.getresponse() |
474 body = response.read() | 539 body = response.read() |
475 fp.seek(0) | |
476 http_conn.set_debuglevel(save_debug) | 540 http_conn.set_debuglevel(save_debug) |
477 self.bucket.connection.debug = save_debug | 541 self.bucket.connection.debug = save_debug |
478 if response.status == 500 or response.status == 503 or \ | 542 if ((response.status == 500 or response.status == 503 or |
479 response.getheader('location'): | 543 response.getheader('location')) and not chunked_transfer): |
480 # we'll try again | 544 # we'll try again. |
481 return response | 545 return response |
482 elif response.status >= 200 and response.status <= 299: | 546 elif response.status >= 200 and response.status <= 299: |
483 self.etag = response.getheader('etag') | 547 self.etag = response.getheader('etag') |
484 if self.etag != '"%s"' % self.md5: | 548 if self.etag != '"%s"' % self.md5: |
485 raise provider.storage_data_error( | 549 raise provider.storage_data_error( |
486 'ETag from S3 did not match computed MD5') | 550 'ETag from S3 did not match computed MD5') |
487 return response | 551 return response |
488 else: | 552 else: |
489 raise provider.storage_response_error( | 553 raise provider.storage_response_error( |
490 response.status, response.reason, body) | 554 response.status, response.reason, body) |
491 | 555 |
492 if not headers: | 556 if not headers: |
493 headers = {} | 557 headers = {} |
494 else: | 558 else: |
495 headers = headers.copy() | 559 headers = headers.copy() |
496 headers['User-Agent'] = UserAgent | 560 headers['User-Agent'] = UserAgent |
497 headers['Content-MD5'] = self.base64md5 | 561 if self.base64md5: |
| 562 headers['Content-MD5'] = self.base64md5 |
498 if self.storage_class != 'STANDARD': | 563 if self.storage_class != 'STANDARD': |
499 headers[provider.storage_class_header] = self.storage_class | 564 headers[provider.storage_class_header] = self.storage_class |
500 if headers.has_key('Content-Encoding'): | 565 if headers.has_key('Content-Encoding'): |
501 self.content_encoding = headers['Content-Encoding'] | 566 self.content_encoding = headers['Content-Encoding'] |
502 if headers.has_key('Content-Type'): | 567 if headers.has_key('Content-Type'): |
503 self.content_type = headers['Content-Type'] | 568 self.content_type = headers['Content-Type'] |
504 elif self.path: | 569 elif self.path: |
505 self.content_type = mimetypes.guess_type(self.path)[0] | 570 self.content_type = mimetypes.guess_type(self.path)[0] |
506 if self.content_type == None: | 571 if self.content_type == None: |
507 self.content_type = self.DefaultContentType | 572 self.content_type = self.DefaultContentType |
508 headers['Content-Type'] = self.content_type | 573 headers['Content-Type'] = self.content_type |
509 else: | 574 else: |
510 headers['Content-Type'] = self.content_type | 575 headers['Content-Type'] = self.content_type |
511 headers['Content-Length'] = str(self.size) | 576 if not chunked_transfer: |
| 577 headers['Content-Length'] = str(self.size) |
512 headers['Expect'] = '100-Continue' | 578 headers['Expect'] = '100-Continue' |
513 headers = boto.utils.merge_meta(headers, self.metadata, provider) | 579 headers = boto.utils.merge_meta(headers, self.metadata, provider) |
514 resp = self.bucket.connection.make_request('PUT', self.bucket.name, | 580 resp = self.bucket.connection.make_request('PUT', self.bucket.name, |
515 self.name, headers, | 581 self.name, headers, |
516 sender=sender, | 582 sender=sender, |
517 query_args=query_args) | 583 query_args=query_args) |
518 self.handle_version_headers(resp, force=True) | 584 self.handle_version_headers(resp, force=True) |
519 | 585 |
520 def compute_md5(self, fp): | 586 def compute_md5(self, fp): |
521 """ | 587 """ |
522 :type fp: file | 588 :type fp: file |
523 :param fp: File pointer to the file to MD5 hash. The file pointer will
be | 589 :param fp: File pointer to the file to MD5 hash. The file pointer |
524 reset to the beginning of the file before the method returns. | 590 will be reset to the beginning of the file before the |
525 | 591 method returns. |
| 592 |
526 :rtype: tuple | 593 :rtype: tuple |
527 :return: A tuple containing the hex digest version of the MD5 hash | 594 :return: A tuple containing the hex digest version of the MD5 hash |
528 as the first element and the base64 encoded version of the | 595 as the first element and the base64 encoded version of the |
529 plain digest as the second element. | 596 plain digest as the second element. |
530 """ | 597 """ |
531 m = md5() | 598 m = md5() |
532 fp.seek(0) | 599 fp.seek(0) |
533 s = fp.read(self.BufferSize) | 600 s = fp.read(self.BufferSize) |
534 while s: | 601 while s: |
535 m.update(s) | 602 m.update(s) |
536 s = fp.read(self.BufferSize) | 603 s = fp.read(self.BufferSize) |
537 hex_md5 = m.hexdigest() | 604 hex_md5 = m.hexdigest() |
538 base64md5 = base64.encodestring(m.digest()) | 605 base64md5 = base64.encodestring(m.digest()) |
539 if base64md5[-1] == '\n': | 606 if base64md5[-1] == '\n': |
540 base64md5 = base64md5[0:-1] | 607 base64md5 = base64md5[0:-1] |
541 self.size = fp.tell() | 608 self.size = fp.tell() |
542 fp.seek(0) | 609 fp.seek(0) |
543 return (hex_md5, base64md5) | 610 return (hex_md5, base64md5) |
544 | 611 |
| 612 def set_contents_from_stream(self, fp, headers=None, replace=True, |
| 613 cb=None, num_cb=10, policy=None, |
| 614 reduced_redundancy=False, query_args=None): |
| 615 """ |
| 616 Store an object using the name of the Key object as the key in |
| 617 cloud and the contents of the data stream pointed to by 'fp' as |
| 618 the contents. |
| 619 The stream object is not seekable and total size is not known. |
 | 620 This has the implication that we can't specify the Content-Length and |
| 621 Content-MD5 in the header. So for huge uploads, the delay in calculating |
| 622 MD5 is avoided but with a penalty of inability to verify the integrity |
| 623 of the uploaded data. |
| 624 |
| 625 :type fp: file |
| 626 :param fp: the file whose contents are to be uploaded |
| 627 |
| 628 :type headers: dict |
| 629 :param headers: additional HTTP headers to be sent with the PUT request. |
| 630 |
| 631 :type replace: bool |
| 632 :param replace: If this parameter is False, the method will first check |
| 633 to see if an object exists in the bucket with the same key. If it |
| 634 does, it won't overwrite it. The default value is True which will |
| 635 overwrite the object. |
| 636 |
| 637 :type cb: function |
| 638 :param cb: a callback function that will be called to report |
| 639 progress on the upload. The callback should accept two integer |
| 640 parameters, the first representing the number of bytes that have |
| 641 been successfully transmitted to GS and the second representing the |
| 642 total number of bytes that need to be transmitted. |
| 643 |
| 644 :type num_cb: int |
| 645 :param num_cb: (optional) If a callback is specified with the cb |
| 646 parameter, this parameter determines the granularity of the callback |
| 647 by defining the maximum number of times the callback will be called |
| 648 during the file transfer. |
| 649 |
| 650 :type policy: :class:`boto.gs.acl.CannedACLStrings` |
| 651 :param policy: A canned ACL policy that will be applied to the new key |
| 652 in GS. |
| 653 |
| 654 :type reduced_redundancy: bool |
| 655 :param reduced_redundancy: If True, this will set the storage |
| 656 class of the new Key to be |
| 657 REDUCED_REDUNDANCY. The Reduced Redundancy |
| 658 Storage (RRS) feature of S3, provides lower |
| 659 redundancy at lower storage cost. |
| 660 """ |
| 661 |
| 662 provider = self.bucket.connection.provider |
| 663 if not provider.supports_chunked_transfer(): |
| 664 raise BotoClientError('%s does not support chunked transfer' |
| 665 % provider.get_provider_name()) |
| 666 |
| 667 # Name of the Object should be specified explicitly for Streams. |
| 668 if not self.name or self.name == '': |
| 669 raise BotoClientError('Cannot determine the destination ' |
| 670 'object name for the given stream') |
| 671 |
| 672 if headers is None: |
| 673 headers = {} |
| 674 if policy: |
| 675 headers[provider.acl_header] = policy |
| 676 |
| 677 # Set the Transfer Encoding for Streams. |
| 678 headers['Transfer-Encoding'] = 'chunked' |
| 679 |
| 680 if reduced_redundancy: |
| 681 self.storage_class = 'REDUCED_REDUNDANCY' |
| 682 if provider.storage_class_header: |
| 683 headers[provider.storage_class_header] = self.storage_class |
| 684 |
| 685 if self.bucket != None: |
| 686 if not replace: |
| 687 k = self.bucket.lookup(self.name) |
| 688 if k: |
| 689 return |
| 690 self.send_file(fp, headers, cb, num_cb, query_args, |
| 691 chunked_transfer=True) |
| 692 |
545 def set_contents_from_file(self, fp, headers=None, replace=True, | 693 def set_contents_from_file(self, fp, headers=None, replace=True, |
546 cb=None, num_cb=10, policy=None, md5=None, | 694 cb=None, num_cb=10, policy=None, md5=None, |
547 reduced_redundancy=False, query_args=None): | 695 reduced_redundancy=False, query_args=None, |
| 696 encrypt_key=False): |
548 """ | 697 """ |
549 Store an object in S3 using the name of the Key object as the | 698 Store an object in S3 using the name of the Key object as the |
550 key in S3 and the contents of the file pointed to by 'fp' as the | 699 key in S3 and the contents of the file pointed to by 'fp' as the |
551 contents. | 700 contents. |
552 | 701 |
553 :type fp: file | 702 :type fp: file |
554 :param fp: the file whose contents to upload | 703 :param fp: the file whose contents to upload |
555 | 704 |
556 :type headers: dict | 705 :type headers: dict |
557 :param headers: additional HTTP headers that will be sent with the PUT r
equest. | 706 :param headers: Additional HTTP headers that will be sent with |
| 707 the PUT request. |
558 | 708 |
559 :type replace: bool | 709 :type replace: bool |
560 :param replace: If this parameter is False, the method | 710 :param replace: If this parameter is False, the method |
561 will first check to see if an object exists in the | 711 will first check to see if an object exists in the |
562 bucket with the same key. If it does, it won't | 712 bucket with the same key. If it does, it won't |
563 overwrite it. The default value is True which will | 713 overwrite it. The default value is True which will |
564 overwrite the object. | 714 overwrite the object. |
565 | 715 |
566 :type cb: function | 716 :type cb: function |
567 :param cb: a callback function that will be called to report | 717 :param cb: a callback function that will be called to report |
568 progress on the upload. The callback should accept two inte
ger | 718 progress on the upload. The callback should accept |
569 parameters, the first representing the number of bytes that
have | 719 two integer parameters, the first representing the |
570 been successfully transmitted to S3 and the second represent
ing | 720 number of bytes that have been successfully |
571 the total number of bytes that need to be transmitted. | 721 transmitted to S3 and the second representing the |
572 | 722 size of the to be transmitted object. |
| 723 |
 573 :type cb: int | 724 :type num_cb: int |
574 :param num_cb: (optional) If a callback is specified with the cb paramet
er | 725 :param num_cb: (optional) If a callback is specified with the cb |
575 this parameter determines the granularity of the callback by defini
ng | 726 parameter this parameter determines the granularity |
576 the maximum number of times the callback will be called during the
file transfer. | 727 of the callback by defining the maximum number of |
| 728 times the callback will be called during the |
| 729 file transfer. |
577 | 730 |
578 :type policy: :class:`boto.s3.acl.CannedACLStrings` | 731 :type policy: :class:`boto.s3.acl.CannedACLStrings` |
579 :param policy: A canned ACL policy that will be applied to the new key i
n S3. | 732 :param policy: A canned ACL policy that will be applied to the |
580 | 733 new key in S3. |
581 :type md5: A tuple containing the hexdigest version of the MD5 checksum
of the | 734 |
582 file as the first element and the Base64-encoded version of t
he plain | 735 :type md5: A tuple containing the hexdigest version of the MD5 |
583 checksum as the second element. This is the same format retu
rned by | 736 checksum of the file as the first element and the |
| 737 Base64-encoded version of the plain checksum as the |
| 738 second element. This is the same format returned by |
584 the compute_md5 method. | 739 the compute_md5 method. |
585 :param md5: If you need to compute the MD5 for any reason prior to uploa
d, | 740 :param md5: If you need to compute the MD5 for any reason prior |
586 it's silly to have to do it twice so this param, if present,
will be | 741 to upload, it's silly to have to do it twice so this |
587 used as the MD5 values of the file. Otherwise, the checksum
will be computed. | 742 param, if present, will be used as the MD5 values of |
| 743 the file. Otherwise, the checksum will be computed. |
| 744 |
588 :type reduced_redundancy: bool | 745 :type reduced_redundancy: bool |
589 :param reduced_redundancy: If True, this will set the storage | 746 :param reduced_redundancy: If True, this will set the storage |
590 class of the new Key to be | 747 class of the new Key to be |
591 REDUCED_REDUNDANCY. The Reduced Redundancy | 748 REDUCED_REDUNDANCY. The Reduced Redundancy |
592 Storage (RRS) feature of S3, provides lower | 749 Storage (RRS) feature of S3, provides lower |
593 redundancy at lower storage cost. | 750 redundancy at lower storage cost. |
594 | 751 |
| 752 :type encrypt_key: bool |
| 753 :param encrypt_key: If True, the new copy of the object will |
| 754 be encrypted on the server-side by S3 and |
| 755 will be stored in an encrypted form while |
| 756 at rest in S3. |
595 """ | 757 """ |
596 provider = self.bucket.connection.provider | 758 provider = self.bucket.connection.provider |
597 if headers is None: | 759 if headers is None: |
598 headers = {} | 760 headers = {} |
599 if policy: | 761 if policy: |
600 headers[provider.acl_header] = policy | 762 headers[provider.acl_header] = policy |
| 763 if encrypt_key: |
| 764 headers[provider.server_side_encryption_header] = 'AES256' |
| 765 |
601 if reduced_redundancy: | 766 if reduced_redundancy: |
602 self.storage_class = 'REDUCED_REDUNDANCY' | 767 self.storage_class = 'REDUCED_REDUNDANCY' |
603 if provider.storage_class_header: | 768 if provider.storage_class_header: |
604 headers[provider.storage_class_header] = self.storage_class | 769 headers[provider.storage_class_header] = self.storage_class |
605 # TODO - What if the provider doesn't support reduced reduncancy
? | 770 # TODO - What if provider doesn't support reduced redundancy? |
606 # What if different providers provide different classes? | 771 # What if different providers provide different classes? |
607 if hasattr(fp, 'name'): | 772 if hasattr(fp, 'name'): |
608 self.path = fp.name | 773 self.path = fp.name |
609 if self.bucket != None: | 774 if self.bucket != None: |
610 if not md5: | 775 if not md5: |
611 md5 = self.compute_md5(fp) | 776 md5 = self.compute_md5(fp) |
612 else: | 777 else: |
613 # even if md5 is provided, still need to set size of content | 778 # even if md5 is provided, still need to set size of content |
614 fp.seek(0, 2) | 779 fp.seek(0, 2) |
615 self.size = fp.tell() | 780 self.size = fp.tell() |
616 fp.seek(0) | 781 fp.seek(0) |
617 self.md5 = md5[0] | 782 self.md5 = md5[0] |
618 self.base64md5 = md5[1] | 783 self.base64md5 = md5[1] |
619 if self.name == None: | 784 if self.name == None: |
620 self.name = self.md5 | 785 self.name = self.md5 |
621 if not replace: | 786 if not replace: |
622 k = self.bucket.lookup(self.name) | 787 k = self.bucket.lookup(self.name) |
623 if k: | 788 if k: |
624 return | 789 return |
625 self.send_file(fp, headers, cb, num_cb, query_args) | 790 self.send_file(fp, headers, cb, num_cb, query_args) |
626 | 791 |
627 def set_contents_from_filename(self, filename, headers=None, replace=True, | 792 def set_contents_from_filename(self, filename, headers=None, replace=True, |
628 cb=None, num_cb=10, policy=None, md5=None, | 793 cb=None, num_cb=10, policy=None, md5=None, |
629 reduced_redundancy=False): | 794 reduced_redundancy=False, |
| 795 encrypt_key=False): |
630 """ | 796 """ |
631 Store an object in S3 using the name of the Key object as the | 797 Store an object in S3 using the name of the Key object as the |
632 key in S3 and the contents of the file named by 'filename'. | 798 key in S3 and the contents of the file named by 'filename'. |
633 See set_contents_from_file method for details about the | 799 See set_contents_from_file method for details about the |
634 parameters. | 800 parameters. |
635 | 801 |
636 :type filename: string | 802 :type filename: string |
637 :param filename: The name of the file that you want to put onto S3 | 803 :param filename: The name of the file that you want to put onto S3 |
638 | 804 |
639 :type headers: dict | 805 :type headers: dict |
640 :param headers: Additional headers to pass along with the request to AWS
. | 806 :param headers: Additional headers to pass along with the |
641 | 807 request to AWS. |
| 808 |
642 :type replace: bool | 809 :type replace: bool |
643 :param replace: If True, replaces the contents of the file if it already
exists. | 810 :param replace: If True, replaces the contents of the file |
644 | 811 if it already exists. |
| 812 |
645 :type cb: function | 813 :type cb: function |
646 :param cb: (optional) a callback function that will be called to report | 814 :param cb: a callback function that will be called to report |
647 progress on the download. The callback should accept two integer | 815 progress on the upload. The callback should accept |
648 parameters, the first representing the number of bytes that have | 816 two integer parameters, the first representing the |
649 been successfully transmitted from S3 and the second representing | 817 number of bytes that have been successfully |
650 the total number of bytes that need to be transmitted. | 818 transmitted to S3 and the second representing the |
651 | 819 size of the to be transmitted object. |
| 820 |
 652 :type cb: int | 821 :type num_cb: int |
653 :param num_cb: (optional) If a callback is specified with the cb paramet
er | 822 :param num_cb: (optional) If a callback is specified with |
654 this parameter determines the granularity of the callback by defini
ng | 823 the cb parameter this parameter determines the |
655 the maximum number of times the callback will be called during the
file transfer. | 824 granularity of the callback by defining |
656 | 825 the maximum number of times the callback will |
| 826 be called during the file transfer. |
| 827 |
657 :type policy: :class:`boto.s3.acl.CannedACLStrings` | 828 :type policy: :class:`boto.s3.acl.CannedACLStrings` |
658 :param policy: A canned ACL policy that will be applied to the new key i
n S3. | 829 :param policy: A canned ACL policy that will be applied to the |
659 | 830 new key in S3. |
660 :type md5: A tuple containing the hexdigest version of the MD5 checksum
of the | 831 |
661 file as the first element and the Base64-encoded version of t
he plain | 832 :type md5: A tuple containing the hexdigest version of the MD5 |
662 checksum as the second element. This is the same format retu
rned by | 833 checksum of the file as the first element and the |
| 834 Base64-encoded version of the plain checksum as the |
| 835 second element. This is the same format returned by |
663 the compute_md5 method. | 836 the compute_md5 method. |
664 :param md5: If you need to compute the MD5 for any reason prior to uploa
d, | 837 :param md5: If you need to compute the MD5 for any reason prior |
665 it's silly to have to do it twice so this param, if present,
will be | 838 to upload, it's silly to have to do it twice so this |
666 used as the MD5 values of the file. Otherwise, the checksum
will be computed. | 839 param, if present, will be used as the MD5 values |
667 | 840 of the file. Otherwise, the checksum will be computed. |
| 841 |
668 :type reduced_redundancy: bool | 842 :type reduced_redundancy: bool |
669 :param reduced_redundancy: If True, this will set the storage | 843 :param reduced_redundancy: If True, this will set the storage |
670 class of the new Key to be | 844 class of the new Key to be |
671 REDUCED_REDUNDANCY. The Reduced Redundancy | 845 REDUCED_REDUNDANCY. The Reduced Redundancy |
672 Storage (RRS) feature of S3, provides lower | 846 Storage (RRS) feature of S3, provides lower |
673 redundancy at lower storage cost. | 847 redundancy at lower storage cost. |
| 848 :type encrypt_key: bool |
| 849 :param encrypt_key: If True, the new copy of the object will |
| 850 be encrypted on the server-side by S3 and |
| 851 will be stored in an encrypted form while |
| 852 at rest in S3. |
674 """ | 853 """ |
675 fp = open(filename, 'rb') | 854 fp = open(filename, 'rb') |
676 self.set_contents_from_file(fp, headers, replace, cb, num_cb, | 855 self.set_contents_from_file(fp, headers, replace, cb, num_cb, |
677 policy, md5, reduced_redundancy) | 856 policy, md5, reduced_redundancy, |
| 857 encrypt_key=encrypt_key) |
678 fp.close() | 858 fp.close() |
679 | 859 |
680 def set_contents_from_string(self, s, headers=None, replace=True, | 860 def set_contents_from_string(self, s, headers=None, replace=True, |
681 cb=None, num_cb=10, policy=None, md5=None, | 861 cb=None, num_cb=10, policy=None, md5=None, |
682 reduced_redundancy=False): | 862 reduced_redundancy=False, |
| 863 encrypt_key=False): |
683 """ | 864 """ |
684 Store an object in S3 using the name of the Key object as the | 865 Store an object in S3 using the name of the Key object as the |
685 key in S3 and the string 's' as the contents. | 866 key in S3 and the string 's' as the contents. |
686 See set_contents_from_file method for details about the | 867 See set_contents_from_file method for details about the |
687 parameters. | 868 parameters. |
688 | 869 |
689 :type headers: dict | 870 :type headers: dict |
690 :param headers: Additional headers to pass along with the request to AWS
. | 871 :param headers: Additional headers to pass along with the |
691 | 872 request to AWS. |
| 873 |
692 :type replace: bool | 874 :type replace: bool |
693 :param replace: If True, replaces the contents of the file if it already
exists. | 875 :param replace: If True, replaces the contents of the file if |
694 | 876 it already exists. |
| 877 |
695 :type cb: function | 878 :type cb: function |
696 :param cb: (optional) a callback function that will be called to report | 879 :param cb: a callback function that will be called to report |
697 progress on the download. The callback should accept two integer | 880 progress on the upload. The callback should accept |
698 parameters, the first representing the number of bytes that have | 881 two integer parameters, the first representing the |
699 been successfully transmitted from S3 and the second representing | 882 number of bytes that have been successfully |
700 the total number of bytes that need to be transmitted. | 883 transmitted to S3 and the second representing the |
701 | 884 size of the to be transmitted object. |
| 885 |
 702 :type cb: int | 886 :type num_cb: int |
703 :param num_cb: (optional) If a callback is specified with the cb paramet
er | 887 :param num_cb: (optional) If a callback is specified with |
704 this parameter determines the granularity of the callback by defini
ng | 888 the cb parameter this parameter determines the |
705 the maximum number of times the callback will be called during the
file transfer. | 889 granularity of the callback by defining |
706 | 890 the maximum number of times the callback will |
| 891 be called during the file transfer. |
| 892 |
707 :type policy: :class:`boto.s3.acl.CannedACLStrings` | 893 :type policy: :class:`boto.s3.acl.CannedACLStrings` |
708 :param policy: A canned ACL policy that will be applied to the new key i
n S3. | 894 :param policy: A canned ACL policy that will be applied to the |
709 | 895 new key in S3. |
710 :type md5: A tuple containing the hexdigest version of the MD5 checksum
of the | 896 |
711 file as the first element and the Base64-encoded version of t
he plain | 897 :type md5: A tuple containing the hexdigest version of the MD5 |
712 checksum as the second element. This is the same format retu
rned by | 898 checksum of the file as the first element and the |
| 899 Base64-encoded version of the plain checksum as the |
| 900 second element. This is the same format returned by |
713 the compute_md5 method. | 901 the compute_md5 method. |
714 :param md5: If you need to compute the MD5 for any reason prior to uploa
d, | 902 :param md5: If you need to compute the MD5 for any reason prior |
715 it's silly to have to do it twice so this param, if present,
will be | 903 to upload, it's silly to have to do it twice so this |
716 used as the MD5 values of the file. Otherwise, the checksum
will be computed. | 904 param, if present, will be used as the MD5 values |
717 | 905 of the file. Otherwise, the checksum will be computed. |
| 906 |
718 :type reduced_redundancy: bool | 907 :type reduced_redundancy: bool |
719 :param reduced_redundancy: If True, this will set the storage | 908 :param reduced_redundancy: If True, this will set the storage |
720 class of the new Key to be | 909 class of the new Key to be |
721 REDUCED_REDUNDANCY. The Reduced Redundancy | 910 REDUCED_REDUNDANCY. The Reduced Redundancy |
722 Storage (RRS) feature of S3, provides lower | 911 Storage (RRS) feature of S3, provides lower |
723 redundancy at lower storage cost. | 912 redundancy at lower storage cost. |
| 913 :type encrypt_key: bool |
| 914 :param encrypt_key: If True, the new copy of the object will |
| 915 be encrypted on the server-side by S3 and |
| 916 will be stored in an encrypted form while |
| 917 at rest in S3. |
724 """ | 918 """ |
| 919 if isinstance(s, unicode): |
| 920 s = s.encode("utf-8") |
725 fp = StringIO.StringIO(s) | 921 fp = StringIO.StringIO(s) |
726 r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, | 922 r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, |
727 policy, md5, reduced_redundancy) | 923 policy, md5, reduced_redundancy, |
| 924 encrypt_key=encrypt_key) |
728 fp.close() | 925 fp.close() |
729 return r | 926 return r |
730 | 927 |
731 def get_file(self, fp, headers=None, cb=None, num_cb=10, | 928 def get_file(self, fp, headers=None, cb=None, num_cb=10, |
732 torrent=False, version_id=None, override_num_retries=None, | 929 torrent=False, version_id=None, override_num_retries=None, |
733 response_headers=None): | 930 response_headers=None): |
734 """ | 931 """ |
735 Retrieves a file from an S3 Key | 932 Retrieves a file from an S3 Key |
736 | 933 |
737 :type fp: file | 934 :type fp: file |
738 :param fp: File pointer to put the data into | 935 :param fp: File pointer to put the data into |
739 | 936 |
740 :type headers: string | 937 :type headers: string |
 741 :param: headers to send when retrieving the files | 938 :param headers: headers to send when retrieving the file |
742 | 939 |
743 :type cb: function | 940 :type cb: function |
744 :param cb: (optional) a callback function that will be called to report | 941 :param cb: a callback function that will be called to report |
 745 progress on the download. The callback should accept two integer | 942 progress on the download. The callback should accept |
 746 parameters, the first representing the number of bytes that have | 943 two integer parameters, the first representing the |
 747 been successfully transmitted from S3 and the second representing | 944 number of bytes that have been successfully |
 748 the total number of bytes that need to be transmitted. | 945 transmitted from S3 and the second representing the |
 749 | 946 size of the to be retrieved object. |
750 | 947 |
 751 :type cb: int | 948 :type num_cb: int |
752 :param num_cb: (optional) If a callback is specified with the cb paramet
er | 949 :param num_cb: (optional) If a callback is specified with |
753 this parameter determines the granularity of the callback by defini
ng | 950 the cb parameter this parameter determines the |
754 the maximum number of times the callback will be called during the
file transfer. | 951 granularity of the callback by defining |
755 | 952 the maximum number of times the callback will |
| 953 be called during the file transfer. |
| 954 |
756 :type torrent: bool | 955 :type torrent: bool |
757 :param torrent: Flag for whether to get a torrent for the file | 956 :param torrent: Flag for whether to get a torrent for the file |
758 | 957 |
759 :type override_num_retries: int | 958 :type override_num_retries: int |
760 :param override_num_retries: If not None will override configured | 959 :param override_num_retries: If not None will override configured |
761 num_retries parameter for underlying GET. | 960 num_retries parameter for underlying GET. |
762 | 961 |
763 :type response_headers: dict | 962 :type response_headers: dict |
764 :param response_headers: A dictionary containing HTTP headers/values | 963 :param response_headers: A dictionary containing HTTP headers/values |
765 that will override any headers associated with | 964 that will override any headers associated with |
766 the stored object in the response. | 965 the stored object in the response. |
767 See http://goo.gl/EWOPb for details. | 966 See http://goo.gl/EWOPb for details. |
768 """ | 967 """ |
769 if cb: | 968 if cb: |
770 if num_cb > 2: | 969 if num_cb > 2: |
771 cb_count = self.size / self.BufferSize / (num_cb-2) | 970 cb_count = self.size / self.BufferSize / (num_cb-2) |
772 elif num_cb < 0: | 971 elif num_cb < 0: |
773 cb_count = -1 | 972 cb_count = -1 |
774 else: | 973 else: |
775 cb_count = 0 | 974 cb_count = 0 |
776 i = total_bytes = 0 | 975 i = total_bytes = 0 |
777 cb(total_bytes, self.size) | 976 cb(total_bytes, self.size) |
778 save_debug = self.bucket.connection.debug | 977 save_debug = self.bucket.connection.debug |
779 if self.bucket.connection.debug == 1: | 978 if self.bucket.connection.debug == 1: |
780 self.bucket.connection.debug = 0 | 979 self.bucket.connection.debug = 0 |
781 | 980 |
782 query_args = [] | 981 query_args = [] |
783 if torrent: | 982 if torrent: |
784 query_args.append('torrent') | 983 query_args.append('torrent') |
785 # If a version_id is passed in, use that. If not, check to see | 984 # If a version_id is passed in, use that. If not, check to see |
786 # if the Key object has an explicit version_id and, if so, use that. | 985 # if the Key object has an explicit version_id and, if so, use that. |
787 # Otherwise, don't pass a version_id query param. | 986 # Otherwise, don't pass a version_id query param. |
788 if version_id is None: | 987 if version_id is None: |
789 version_id = self.version_id | 988 version_id = self.version_id |
790 if version_id: | 989 if version_id: |
791 query_args.append('versionId=%s' % version_id) | 990 query_args.append('versionId=%s' % version_id) |
(...skipping 12 matching lines...) Expand all Loading... |
804 cb(total_bytes, self.size) | 1003 cb(total_bytes, self.size) |
805 i = 0 | 1004 i = 0 |
806 if cb: | 1005 if cb: |
807 cb(total_bytes, self.size) | 1006 cb(total_bytes, self.size) |
808 self.close() | 1007 self.close() |
809 self.bucket.connection.debug = save_debug | 1008 self.bucket.connection.debug = save_debug |
810 | 1009 |
811 def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10): | 1010 def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10): |
812 """ | 1011 """ |
813 Get a torrent file (see to get_file) | 1012 Get a torrent file (see to get_file) |
814 | 1013 |
815 :type fp: file | 1014 :type fp: file |
816 :param fp: The file pointer of where to put the torrent | 1015 :param fp: The file pointer of where to put the torrent |
817 | 1016 |
818 :type headers: dict | 1017 :type headers: dict |
819 :param headers: Headers to be passed | 1018 :param headers: Headers to be passed |
820 | 1019 |
821 :type cb: function | 1020 :type cb: function |
822 :param cb: (optional) a callback function that will be called to | 1021 :param cb: a callback function that will be called to report |
823 report progress on the download. The callback should | 1022 progress on the download. The callback should accept |
824 accept two integer parameters, the first representing | 1023 two integer parameters, the first representing the |
825 the number of bytes that have been successfully | 1024 number of bytes that have been successfully |
826 transmitted from S3 and the second representing the | 1025 transmitted from S3 and the second representing the |
827 total number of bytes that need to be transmitted. | 1026 total size of the object to be transmitted. |
828 | 1027 |
829 :type num_cb: int | 1028 :type num_cb: int |
830 :param num_cb: (optional) If a callback is specified with the | 1029 :param num_cb: (optional) If a callback is specified with |
831 cb parameter this parameter determines the | 1030 the cb parameter this parameter determines the |
832 granularity of the callback by defining the | 1031 granularity of the callback by defining |
833 maximum number of times the callback will be | 1032 the maximum number of times the callback will |
834 called during the file transfer. | 1033 be called during the file transfer. |
835 | 1034 |
836 """ | 1035 """ |
837 return self.get_file(fp, headers, cb, num_cb, torrent=True) | 1036 return self.get_file(fp, headers, cb, num_cb, torrent=True) |
838 | 1037 |
839 def get_contents_to_file(self, fp, headers=None, | 1038 def get_contents_to_file(self, fp, headers=None, |
840 cb=None, num_cb=10, | 1039 cb=None, num_cb=10, |
841 torrent=False, | 1040 torrent=False, |
842 version_id=None, | 1041 version_id=None, |
843 res_download_handler=None, | 1042 res_download_handler=None, |
844 response_headers=None): | 1043 response_headers=None): |
845 """ | 1044 """ |
846 Retrieve an object from S3 using the name of the Key object as the | 1045 Retrieve an object from S3 using the name of the Key object as the |
847 key in S3. Write the contents of the object to the file pointed | 1046 key in S3. Write the contents of the object to the file pointed |
848 to by 'fp'. | 1047 to by 'fp'. |
849 | 1048 |
850 :type fp: File -like object | 1049 :type fp: File -like object |
851 :param fp: | 1050 :param fp: |
852 | 1051 |
853 :type headers: dict | 1052 :type headers: dict |
854 :param headers: additional HTTP headers that will be sent with | 1053 :param headers: additional HTTP headers that will be sent with |
855 the GET request. | 1054 the GET request. |
856 | 1055 |
857 :type cb: function | 1056 :type cb: function |
858 :param cb: (optional) a callback function that will be called to | 1057 :param cb: a callback function that will be called to report |
859 report progress on the download. The callback should | 1058 progress on the download. The callback should accept |
860 accept two integer parameters, the first representing | 1059 two integer parameters, the first representing the |
861 the number of bytes that have been successfully | 1060 number of bytes that have been successfully |
862 transmitted from S3 and the second representing the | 1061 transmitted from S3 and the second representing the |
863 total number of bytes that need to be transmitted. | 1062 total size of the object to be transmitted. |
864 | 1063 |
865 :type num_cb: int | 1064 :type num_cb: int |
866 :param num_cb: (optional) If a callback is specified with the | 1065 :param num_cb: (optional) If a callback is specified with |
867 cb parameter this parameter determines the | 1066 the cb parameter this parameter determines the |
868 granularity of the callback by defining the | 1067 granularity of the callback by defining |
869 maximum number of times the callback will be | 1068 the maximum number of times the callback will |
870 called during the file transfer. | 1069 be called during the file transfer. |
871 | 1070 |
872 :type torrent: bool | 1071 :type torrent: bool |
873 :param torrent: If True, returns the contents of a torrent | 1072 :param torrent: If True, returns the contents of a torrent |
874 file as a string. | 1073 file as a string. |
875 | 1074 |
876 :type res_upload_handler: ResumableDownloadHandler | 1075 :type res_download_handler: ResumableDownloadHandler |
877 :param res_download_handler: If provided, this handler will | 1076 :param res_download_handler: If provided, this handler will |
878 perform the download. | 1077 perform the download. |
879 | 1078 |
880 :type response_headers: dict | 1079 :type response_headers: dict |
881 :param response_headers: A dictionary containing HTTP headers/values | 1080 :param response_headers: A dictionary containing HTTP headers/values |
(...skipping 15 matching lines...) Expand all Loading... |
897 cb=None, num_cb=10, | 1096 cb=None, num_cb=10, |
898 torrent=False, | 1097 torrent=False, |
899 version_id=None, | 1098 version_id=None, |
900 res_download_handler=None, | 1099 res_download_handler=None, |
901 response_headers=None): | 1100 response_headers=None): |
902 """ | 1101 """ |
903 Retrieve an object from S3 using the name of the Key object as the | 1102 Retrieve an object from S3 using the name of the Key object as the |
904 key in S3. Store contents of the object to a file named by 'filename'. | 1103 key in S3. Store contents of the object to a file named by 'filename'. |
905 See get_contents_to_file method for details about the | 1104 See get_contents_to_file method for details about the |
906 parameters. | 1105 parameters. |
907 | 1106 |
908 :type filename: string | 1107 :type filename: string |
909 :param filename: The filename of where to put the file contents | 1108 :param filename: The filename of where to put the file contents |
910 | 1109 |
911 :type headers: dict | 1110 :type headers: dict |
912 :param headers: Any additional headers to send in the request | 1111 :param headers: Any additional headers to send in the request |
913 | 1112 |
914 :type cb: function | 1113 :type cb: function |
915 :param cb: (optional) a callback function that will be called to | 1114 :param cb: a callback function that will be called to report |
916 report progress on the download. The callback should | 1115 progress on the download. The callback should accept |
917 accept two integer parameters, the first representing | 1116 two integer parameters, the first representing the |
918 the number of bytes that have been successfully | 1117 number of bytes that have been successfully |
919 transmitted from S3 and the second representing the | 1118 transmitted from S3 and the second representing the |
920 total number of bytes that need to be transmitted. | 1119 total size of the object to be transmitted. |
921 | 1120 |
922 :type num_cb: int | 1121 :type num_cb: int |
923 :param num_cb: (optional) If a callback is specified with the | 1122 :param num_cb: (optional) If a callback is specified with |
924 cb parameter this parameter determines the | 1123 the cb parameter this parameter determines the |
925 granularity of the callback by defining the | 1124 granularity of the callback by defining |
926 maximum number of times the callback will be | 1125 the maximum number of times the callback will |
927 called during the file transfer. | 1126 be called during the file transfer. |
928 | 1127 |
929 :type torrent: bool | 1128 :type torrent: bool |
930 :param torrent: If True, returns the contents of a torrent file | 1129 :param torrent: If True, returns the contents of a torrent file |
931 as a string. | 1130 as a string. |
932 | 1131 |
933 :type res_upload_handler: ResumableDownloadHandler | 1132 :type res_download_handler: ResumableDownloadHandler |
934 :param res_download_handler: If provided, this handler will | 1133 :param res_download_handler: If provided, this handler will |
935 perform the download. | 1134 perform the download. |
936 | 1135 |
937 :type response_headers: dict | 1136 :type response_headers: dict |
938 :param response_headers: A dictionary containing HTTP headers/values | 1137 :param response_headers: A dictionary containing HTTP headers/values |
(...skipping 18 matching lines...) Expand all Loading... |
957 def get_contents_as_string(self, headers=None, | 1156 def get_contents_as_string(self, headers=None, |
958 cb=None, num_cb=10, | 1157 cb=None, num_cb=10, |
959 torrent=False, | 1158 torrent=False, |
960 version_id=None, | 1159 version_id=None, |
961 response_headers=None): | 1160 response_headers=None): |
962 """ | 1161 """ |
963 Retrieve an object from S3 using the name of the Key object as the | 1162 Retrieve an object from S3 using the name of the Key object as the |
964 key in S3. Return the contents of the object as a string. | 1163 key in S3. Return the contents of the object as a string. |
965 See get_contents_to_file method for details about the | 1164 See get_contents_to_file method for details about the |
966 parameters. | 1165 parameters. |
967 | 1166 |
968 :type headers: dict | 1167 :type headers: dict |
969 :param headers: Any additional headers to send in the request | 1168 :param headers: Any additional headers to send in the request |
970 | 1169 |
971 :type cb: function | 1170 :type cb: function |
972 :param cb: (optional) a callback function that will be called to | 1171 :param cb: a callback function that will be called to report |
973 report progress on the download. The callback should | 1172 progress on the download. The callback should accept |
974 accept two integer parameters, the first representing | 1173 two integer parameters, the first representing the |
975 the number of bytes that have been successfully | 1174 number of bytes that have been successfully |
976 transmitted from S3 and the second representing the | 1175 transmitted from S3 and the second representing the |
977 total number of bytes that need to be transmitted. | 1176 total size of the object to be transmitted. |
978 | 1177 |
979 :type num_cb: int | 1178 :type num_cb: int |
980 :param num_cb: (optional) If a callback is specified with the | 1179 :param num_cb: (optional) If a callback is specified with |
981 cb parameter this parameter determines the | 1180 the cb parameter this parameter determines the |
982 granularity of the callback by defining the | 1181 granularity of the callback by defining |
983 maximum number of times the callback will be | 1182 the maximum number of times the callback will |
984 called during the file transfer. | 1183 be called during the file transfer. |
985 | 1184 |
986 :type torrent: bool | 1185 :type torrent: bool |
987 :param torrent: If True, returns the contents of a torrent file | 1186 :param torrent: If True, returns the contents of a torrent file |
988 as a string. | 1187 as a string. |
989 | 1188 |
990 :type response_headers: dict | 1189 :type response_headers: dict |
991 :param response_headers: A dictionary containing HTTP headers/values | 1190 :param response_headers: A dictionary containing HTTP headers/values |
992 that will override any headers associated with | 1191 that will override any headers associated with |
993 the stored object in the response. | 1192 the stored object in the response. |
994 See http://goo.gl/EWOPb for details. | 1193 See http://goo.gl/EWOPb for details. |
995 | 1194 |
996 :rtype: string | 1195 :rtype: string |
997 :returns: The contents of the file as a string | 1196 :returns: The contents of the file as a string |
998 """ | 1197 """ |
999 fp = StringIO.StringIO() | 1198 fp = StringIO.StringIO() |
1000 self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, | 1199 self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, |
1001 version_id=version_id, | 1200 version_id=version_id, |
1002 response_headers=response_headers) | 1201 response_headers=response_headers) |
1003 return fp.getvalue() | 1202 return fp.getvalue() |
1004 | 1203 |
1005 def add_email_grant(self, permission, email_address, headers=None): | 1204 def add_email_grant(self, permission, email_address, headers=None): |
1006 """ | 1205 """ |
1007 Convenience method that provides a quick way to add an email grant | 1206 Convenience method that provides a quick way to add an email grant |
1008 to a key. This method retrieves the current ACL, creates a new | 1207 to a key. This method retrieves the current ACL, creates a new |
1009 grant based on the parameters passed in, adds that grant to the ACL | 1208 grant based on the parameters passed in, adds that grant to the ACL |
1010 and then PUT's the new ACL back to S3. | 1209 and then PUT's the new ACL back to S3. |
1011 | 1210 |
1012 :type permission: string | 1211 :type permission: string |
1013 :param permission: The permission being granted. Should be one of: | 1212 :param permission: The permission being granted. Should be one of: |
1014 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). | 1213 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). |
1015 | 1214 |
1016 :type email_address: string | 1215 :type email_address: string |
1017 :param email_address: The email address associated with the AWS | 1216 :param email_address: The email address associated with the AWS |
1018 account your are granting the permission to. | 1217 account you are granting the permission to. |
1019 | 1218 |
1020 :type recursive: boolean | 1219 :type recursive: boolean |
1021 :param recursive: A boolean value to controls whether the command | 1220 :param recursive: A boolean value that controls whether the command |
1022 will apply the grant to all keys within the bucket | 1221 will apply the grant to all keys within the bucket |
1023 or not. The default value is False. By passing a | 1222 or not. The default value is False. By passing a |
1024 True value, the call will iterate through all keys | 1223 True value, the call will iterate through all keys |
1025 in the bucket and apply the same grant to each key. | 1224 in the bucket and apply the same grant to each key. |
1026 CAUTION: If you have a lot of keys, this could take | 1225 CAUTION: If you have a lot of keys, this could take |
1027 a long time! | 1226 a long time! |
1028 """ | 1227 """ |
1029 policy = self.get_acl(headers=headers) | 1228 policy = self.get_acl(headers=headers) |
1030 policy.acl.add_email_grant(permission, email_address) | 1229 policy.acl.add_email_grant(permission, email_address) |
1031 self.set_acl(policy, headers=headers) | 1230 self.set_acl(policy, headers=headers) |
1032 | 1231 |
1033 def add_user_grant(self, permission, user_id, headers=None): | 1232 def add_user_grant(self, permission, user_id, headers=None, |
| 1233 display_name=None): |
1034 """ | 1234 """ |
1035 Convenience method that provides a quick way to add a canonical | 1235 Convenience method that provides a quick way to add a canonical |
1036 user grant to a key. This method retrieves the current ACL, | 1236 user grant to a key. This method retrieves the current ACL, |
1037 creates a new grant based on the parameters passed in, adds that | 1237 creates a new grant based on the parameters passed in, adds that |
1038 grant to the ACL and then PUT's the new ACL back to S3. | 1238 grant to the ACL and then PUT's the new ACL back to S3. |
1039 | 1239 |
1040 :type permission: string | 1240 :type permission: string |
1041 :param permission: The permission being granted. Should be one of: | 1241 :param permission: The permission being granted. Should be one of: |
1042 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). | 1242 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). |
1043 | 1243 |
1044 :type user_id: string | 1244 :type user_id: string |
1045 :param user_id: The canonical user id associated with the AWS | 1245 :param user_id: The canonical user id associated with the AWS |
1046 account your are granting the permission to. | 1246 account you are granting the permission to. |
1047 | 1247 |
1048 :type recursive: boolean | 1248 :type display_name: string |
1049 :param recursive: A boolean value to controls whether the command | 1249 :param display_name: An optional string containing the user's |
1050 will apply the grant to all keys within the bucket | 1250 Display Name. Only required on Walrus. |
1051 or not. The default value is False. By passing a | |
1052 True value, the call will iterate through all keys | |
1053 in the bucket and apply the same grant to each key. | |
1054 CAUTION: If you have a lot of keys, this could take | |
1055 a long time! | |
1056 """ | 1251 """ |
1057 policy = self.get_acl() | 1252 policy = self.get_acl() |
1058 policy.acl.add_user_grant(permission, user_id) | 1253 policy.acl.add_user_grant(permission, user_id, |
| 1254 display_name=display_name) |
1059 self.set_acl(policy, headers=headers) | 1255 self.set_acl(policy, headers=headers) |
OLD | NEW |