diff options
| author | Craig Jackson | 2011-12-13 14:46:59 -0700 |
|---|---|---|
| committer | Craig Jackson | 2011-12-13 14:46:59 -0700 |
| commit | 647c078f0c801eb9fa63dfc7ba3c017b30e4d467 (patch) | |
| tree | a13b16dbd41bb39269c7649fcc2f2ad77137500a /src/amazons3 | |
| parent | db95070a37f70a00f80a1a2a9ba732c55f6bee1b (diff) | |
| download | amazons3-py-647c078f0c801eb9fa63dfc7ba3c017b30e4d467.tar.gz amazons3-py-647c078f0c801eb9fa63dfc7ba3c017b30e4d467.zip | |
Made the project available on PyPI. This involved moving files and
initializing a setup.py.
Diffstat (limited to 'src/amazons3')
| -rw-r--r-- | src/amazons3/S3.py | 633 | ||||
| -rw-r--r-- | src/amazons3/__init__.py | 0 | ||||
| -rw-r--r-- | src/amazons3/django/__init__.py | 181 | ||||
| -rw-r--r-- | src/amazons3/s3-driver.py | 118 | ||||
| -rw-r--r-- | src/amazons3/s3-test.py | 267 |
5 files changed, 1199 insertions, 0 deletions
diff --git a/src/amazons3/S3.py b/src/amazons3/S3.py new file mode 100644 index 0000000..0999f27 --- /dev/null +++ b/src/amazons3/S3.py | |||
| @@ -0,0 +1,633 @@ | |||
| 1 | #!/usr/bin/env python | ||
| 2 | |||
| 3 | # This software code is made available "AS IS" without warranties of any | ||
| 4 | # kind. You may copy, display, modify and redistribute the software | ||
| 5 | # code either by itself or as incorporated into your code; provided that | ||
| 6 | # you do not remove any proprietary notices. Your use of this software | ||
| 7 | # code is at your own risk and you waive any claim against Amazon | ||
| 8 | # Digital Services, Inc. or its affiliates with respect to your use of | ||
| 9 | # this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its | ||
| 10 | # affiliates. | ||
| 11 | |||
| 12 | import base64 | ||
| 13 | import hmac | ||
| 14 | import httplib | ||
| 15 | import re | ||
| 16 | from hashlib import sha1 | ||
| 17 | import sys | ||
| 18 | import time | ||
| 19 | import urllib | ||
| 20 | import urlparse | ||
| 21 | import xml.sax | ||
| 22 | |||
# Default S3 REST endpoint.
DEFAULT_HOST = 's3.amazonaws.com'
# Conventional port for each transport: HTTPS when secure, HTTP otherwise.
PORTS_BY_SECURITY = { True: 443, False: 80 }
# Header prefix under which user metadata is stored on objects.
METADATA_PREFIX = 'x-amz-meta-'
# Prefix identifying headers that participate in AWS request signing.
AMAZON_HEADER_PREFIX = 'x-amz-'
| 27 | |||
# generates the aws canonical string for the given parameters
def canonical_string(method, bucket="", key="", query_args={}, headers={}, expires=None):
    """Build the canonical string that AWS signs for request authentication.

    method      -- HTTP verb ('GET', 'PUT', ...)
    bucket      -- bucket name, or '' for service-level requests
    key         -- object key, or '' for bucket-level requests
    query_args  -- dict of query-string arguments; only the special
                   sub-resources (acl/torrent/logging/location) are signed
    headers     -- request headers; only content-md5, content-type, date and
                   x-amz-* headers participate in the signature
    expires     -- epoch seconds; used instead of Date for query-string auth
    """
    interesting_headers = {}
    for header_key in headers:
        lk = header_key.lower()
        if lk in ('content-md5', 'content-type', 'date') or lk.startswith(AMAZON_HEADER_PREFIX):
            interesting_headers[lk] = headers[header_key].strip()

    # these keys get empty strings if they don't exist
    if 'content-type' not in interesting_headers:
        interesting_headers['content-type'] = ''
    if 'content-md5' not in interesting_headers:
        interesting_headers['content-md5'] = ''

    # just in case someone used this. it's not necessary in this lib.
    if 'x-amz-date' in interesting_headers:
        interesting_headers['date'] = ''

    # if you're using expires for query string auth, then it trumps date
    # (and x-amz-date)
    if expires:
        interesting_headers['date'] = str(expires)

    buf = "%s\n" % method
    # Headers are signed in sorted (lowercased) key order.
    for header_key in sorted(interesting_headers):
        if header_key.startswith(AMAZON_HEADER_PREFIX):
            buf += "%s:%s\n" % (header_key, interesting_headers[header_key])
        else:
            buf += "%s\n" % interesting_headers[header_key]

    # append the bucket if it exists
    if bucket != "":
        buf += "/%s" % bucket

    # add the key. even if it doesn't exist, add the slash
    buf += "/%s" % urllib.quote_plus(key.encode('utf-8'))

    # handle special query string arguments; only the first matching
    # sub-resource is signed, in this fixed priority order
    for subresource in ('acl', 'torrent', 'logging', 'location'):
        if subresource in query_args:
            buf += "?" + subresource
            break

    return buf
| 80 | |||
# computes the base64'ed hmac-sha hash of the canonical string and the secret
# access key, optionally urlencoding the result
def encode(aws_secret_access_key, str, urlencode=False):
    """Sign *str* with HMAC-SHA1 using the secret key and base64 the digest.

    The parameter name 'str' shadows the builtin; it is kept unchanged for
    backward compatibility with keyword callers.
    """
    # b64encode() replaces the deprecated encodestring(): a SHA-1 digest is
    # 20 bytes, so its base64 form fits on one line and the only difference
    # was encodestring's trailing newline (which was stripped here anyway).
    b64_hmac = base64.b64encode(hmac.new(aws_secret_access_key, str, sha1).digest()).strip()
    if urlencode:
        return urllib.quote_plus(b64_hmac)
    return b64_hmac
| 89 | |||
def merge_meta(headers, metadata):
    """Return a copy of *headers* with every metadata entry added under the
    'x-amz-meta-' prefix. Neither input dict is modified."""
    merged = headers.copy()
    for meta_key, meta_value in metadata.items():
        merged[METADATA_PREFIX + meta_key] = meta_value
    return merged
| 96 | |||
# builds the query arg string
def query_args_hash_to_string(query_args):
    """Serialize *query_args* into an '&'-joined query string.

    A value of None yields a bare key with no '=value' part (used for
    sub-resources like 'acl'); other values are utf-8 encoded and
    url-quoted.
    """
    pairs = []
    for k, v in query_args.items():
        piece = k
        # 'is not None' (not '!= None'): identity test is the correct idiom
        if v is not None:
            piece += "=%s" % urllib.quote_plus(str(v).encode('utf-8'))
        pairs.append(piece)

    return '&'.join(pairs)
| 108 | |||
| 109 | |||
class CallingFormat:
    """Enumeration of URL styles used to address a bucket."""

    PATH = 1        # http://server:port/bucket/key
    SUBDOMAIN = 2   # http://bucket.server:port/key
    VANITY = 3      # http://bucket:port/key (bucket is a CNAME to S3)

    # Modernized from the pre-decorator 'build_url_base = staticmethod(...)'
    # rebinding idiom; behavior is identical.
    @staticmethod
    def build_url_base(protocol, server, port, bucket, calling_format):
        """Return the 'scheme://host:port[/bucket]' prefix for a request URL."""
        url_base = '%s://' % protocol

        if bucket == '':
            url_base += server
        elif calling_format == CallingFormat.SUBDOMAIN:
            url_base += "%s.%s" % (bucket, server)
        elif calling_format == CallingFormat.VANITY:
            url_base += bucket
        else:
            url_base += server

        url_base += ":%s" % port

        # Only the PATH style puts the bucket in the path component.
        if (bucket != '') and (calling_format == CallingFormat.PATH):
            url_base += "/%s" % bucket

        return url_base
| 135 | |||
| 136 | |||
| 137 | |||
class Location:
    # Bucket location constraints: DEFAULT (None) selects the US Standard
    # region; 'EU' requests a European bucket.
    DEFAULT = None
    EU = 'EU'
| 141 | |||
| 142 | |||
| 143 | |||
class AWSAuthConnection:
    """Authenticated S3 connection using Authorization-header signing.

    Each public method performs one REST call and wraps the HTTP response in
    the appropriate Response subclass. Built on Python 2 httplib/urlparse.
    """

    def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
            server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):

        if not port:
            # Pick the conventional port for the chosen transport.
            port = PORTS_BY_SECURITY[is_secure]

        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.is_secure = is_secure
        self.server = server
        self.port = port
        self.calling_format = calling_format

    def create_bucket(self, bucket, headers={}):
        """Create *bucket* in the default region."""
        return Response(self._make_request('PUT', bucket, '', {}, headers))

    def create_located_bucket(self, bucket, location=Location.DEFAULT, headers={}):
        """Create *bucket* with an explicit location constraint (e.g. 'EU')."""
        if location == Location.DEFAULT:
            body = ""
        else:
            body = "<CreateBucketConstraint><LocationConstraint>" + \
                   location + \
                   "</LocationConstraint></CreateBucketConstraint>"
        return Response(self._make_request('PUT', bucket, '', {}, headers, body))

    def check_bucket_exists(self, bucket):
        """HEAD the bucket; returns the raw httplib response (check .status)."""
        return self._make_request('HEAD', bucket, '', {}, {})

    def list_bucket(self, bucket, options={}, headers={}):
        """List keys in *bucket*; *options* may carry prefix/marker/etc."""
        return ListBucketResponse(self._make_request('GET', bucket, '', options, headers))

    def delete_bucket(self, bucket, headers={}):
        """Delete an (empty) bucket."""
        return Response(self._make_request('DELETE', bucket, '', {}, headers))

    def put(self, bucket, key, object, headers={}):
        """Upload *object* (an S3Object, or raw data to be wrapped) as *key*."""
        if not isinstance(object, S3Object):
            object = S3Object(object)

        return Response(
            self._make_request(
                'PUT',
                bucket,
                key,
                {},
                headers,
                object.data,
                object.metadata))

    def get(self, bucket, key, headers={}):
        """Download *key*; the GetResponse carries an S3Object with metadata."""
        return GetResponse(
            self._make_request('GET', bucket, key, {}, headers))

    def delete(self, bucket, key, headers={}):
        """Delete one object."""
        return Response(
            self._make_request('DELETE', bucket, key, {}, headers))

    def get_bucket_logging(self, bucket, headers={}):
        return GetResponse(self._make_request('GET', bucket, '', { 'logging': None }, headers))

    def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
        return Response(self._make_request('PUT', bucket, '', { 'logging': None }, headers, logging_xml_doc))

    def get_bucket_acl(self, bucket, headers={}):
        return self.get_acl(bucket, '', headers)

    def get_acl(self, bucket, key, headers={}):
        return GetResponse(
            self._make_request('GET', bucket, key, { 'acl': None }, headers))

    def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
        return self.put_acl(bucket, '', acl_xml_document, headers)

    def put_acl(self, bucket, key, acl_xml_document, headers={}):
        return Response(
            self._make_request(
                'PUT',
                bucket,
                key,
                { 'acl': None },
                headers,
                acl_xml_document))

    def list_all_my_buckets(self, headers={}):
        return ListAllMyBucketsResponse(self._make_request('GET', '', '', {}, headers))

    def get_bucket_location(self, bucket):
        return LocationResponse(self._make_request('GET', bucket, '', {'location' : None}))

    # end public methods

    def _make_request(self, method, bucket='', key='', query_args={}, headers={}, data='', metadata={}):
        """Issue one signed HTTP request, retrying on connection errors and
        following redirects until a non-3xx response arrives."""

        # Resolve the host according to the calling format.
        if bucket == '':
            server = self.server
        elif self.calling_format == CallingFormat.SUBDOMAIN:
            server = "%s.%s" % (bucket, self.server)
        elif self.calling_format == CallingFormat.VANITY:
            server = bucket
        else:
            server = self.server

        path = ''

        if (bucket != '') and (self.calling_format == CallingFormat.PATH):
            path += "/%s" % bucket

        # add the slash after the bucket regardless
        # the key will be appended if it is non-empty
        path += "/%s" % urllib.quote_plus(key.encode('utf-8'))

        # build the path_argument string; signature and credentials follow
        # the path args
        if len(query_args):
            path += "?" + query_args_hash_to_string(query_args)

        is_secure = self.is_secure
        host = "%s:%d" % (server, self.port)
        while True:
            if is_secure:
                connection = httplib.HTTPSConnection(host)
            else:
                connection = httplib.HTTPConnection(host)

            final_headers = merge_meta(headers, metadata)
            # add auth header
            self._add_aws_auth_header(final_headers, method, bucket, key, query_args)

            try:
                connection.request(method, path, data, final_headers)
            except Exception:
                # Connection-level failure: retry from scratch.
                # NOTE(review): unbounded retry, as in the original code; a
                # persistent network error loops forever.
                connection.close()
                continue

            try:
                resp = connection.getresponse()
            except Exception:
                # Sometimes the connection is reset by peer. If that happens
                # just try it again and we'll see what happens.
                connection.close()
                continue

            if resp.status < 300 or resp.status >= 400:
                return resp
            # handle redirect
            location = resp.getheader('location')
            if not location:
                return resp
            # drain the body so the connection shuts down cleanly
            resp.read()
            scheme, host, path, params, query, fragment \
                = urlparse.urlparse(location)
            # BUG FIX: the scheme -> is_secure mapping was inverted (http
            # selected HTTPSConnection and vice versa).
            if scheme == "http":
                is_secure = False
            elif scheme == "https":
                is_secure = True
            else:
                # BUG FIX: previously raised the undefined name 'invalidURL',
                # which produced a NameError instead of a useful error.
                raise ValueError("Not http/https: " + location)
            if query:
                path += "?" + query
            # retry with redirect
            connection.close()

    def _add_aws_auth_header(self, headers, method, bucket, key, query_args):
        """Add a Date header (if absent) and the AWS Authorization header."""
        if 'Date' not in headers:
            headers['Date'] = time.strftime("%a, %d %b %Y %X GMT", time.gmtime())

        c_string = canonical_string(method, bucket, key, query_args, headers)
        headers['Authorization'] = \
            "AWS %s:%s" % (self.aws_access_key_id, encode(self.aws_secret_access_key, c_string))
| 316 | |||
| 317 | |||
class QueryStringAuthGenerator:
    """Builds pre-signed (query-string authenticated) S3 URLs.

    Methods mirror AWSAuthConnection but return URLs instead of performing
    requests.
    """

    # by default, expire in 1 minute
    DEFAULT_EXPIRES_IN = 60

    def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
            server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):

        if not port:
            port = PORTS_BY_SECURITY[is_secure]

        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        if is_secure:
            self.protocol = 'https'
        else:
            self.protocol = 'http'

        self.is_secure = is_secure
        self.server = server
        self.port = port
        self.calling_format = calling_format
        # Exactly one of __expires_in (relative) / __expires (absolute) is
        # set at any time; see set_expires_in()/set_expires().
        self.__expires_in = QueryStringAuthGenerator.DEFAULT_EXPIRES_IN
        self.__expires = None

        # for backwards compatibility with older versions
        self.server_name = "%s:%s" % (self.server, self.port)

    def set_expires_in(self, expires_in):
        """Expire generated URLs *expires_in* seconds after generation."""
        self.__expires_in = expires_in
        self.__expires = None

    def set_expires(self, expires):
        """Expire generated URLs at the absolute epoch time *expires*."""
        self.__expires = expires
        self.__expires_in = None

    def create_bucket(self, bucket, headers={}):
        return self.generate_url('PUT', bucket, '', {}, headers)

    def list_bucket(self, bucket, options={}, headers={}):
        return self.generate_url('GET', bucket, '', options, headers)

    def delete_bucket(self, bucket, headers={}):
        return self.generate_url('DELETE', bucket, '', {}, headers)

    def put(self, bucket, key, object, headers={}):
        if not isinstance(object, S3Object):
            object = S3Object(object)

        return self.generate_url(
            'PUT',
            bucket,
            key,
            {},
            merge_meta(headers, object.metadata))

    def get(self, bucket, key, headers={}):
        return self.generate_url('GET', bucket, key, {}, headers)

    def delete(self, bucket, key, headers={}):
        return self.generate_url('DELETE', bucket, key, {}, headers)

    def get_bucket_logging(self, bucket, headers={}):
        return self.generate_url('GET', bucket, '', { 'logging': None }, headers)

    def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
        return self.generate_url('PUT', bucket, '', { 'logging': None }, headers)

    def get_bucket_acl(self, bucket, headers={}):
        return self.get_acl(bucket, '', headers)

    def get_acl(self, bucket, key='', headers={}):
        return self.generate_url('GET', bucket, key, { 'acl': None }, headers)

    def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
        return self.put_acl(bucket, '', acl_xml_document, headers)

    # don't really care what the doc is here.
    def put_acl(self, bucket, key, acl_xml_document, headers={}):
        return self.generate_url('PUT', bucket, key, { 'acl': None }, headers)

    def list_all_my_buckets(self, headers={}):
        return self.generate_url('GET', '', '', {}, headers)

    def make_bare_url(self, bucket, key=''):
        """Return the object URL with the auth query string stripped.

        BUG FIX: this previously called self.generate_url(self, bucket, key),
        passing the instance itself as the HTTP method.
        """
        full_url = self.generate_url('GET', bucket, key)
        return full_url[:full_url.index('?')]

    def generate_url(self, method, bucket='', key='', query_args={}, headers={}):
        """Build a signed URL for *method* on bucket/key."""
        if self.__expires_in is not None:
            expires = int(time.time() + self.__expires_in)
        elif self.__expires is not None:
            expires = int(self.__expires)
        else:
            # BUG FIX: was 'raise "Invalid expires state"' -- raising a plain
            # string is itself a TypeError on modern Python.
            raise ValueError("Invalid expires state")

        canonical_str = canonical_string(method, bucket, key, query_args, headers, expires)
        encoded_canonical = encode(self.aws_secret_access_key, canonical_str)

        url = CallingFormat.build_url_base(self.protocol, self.server, self.port, bucket, self.calling_format)

        url += "/%s" % urllib.quote_plus(key.encode('utf-8'))

        # BUG FIX: copy before adding credentials so the caller's dict (or
        # the shared mutable default) is not modified.
        query_args = query_args.copy()
        query_args['Signature'] = encoded_canonical
        query_args['Expires'] = expires
        query_args['AWSAccessKeyId'] = self.aws_access_key_id

        url += "?%s" % query_args_hash_to_string(query_args)

        return url
| 428 | |||
| 429 | |||
class S3Object:
    """An object's data plus its user metadata dict."""

    def __init__(self, data, metadata=None):
        self.data = data
        # BUG FIX: 'metadata={}' was a shared mutable default -- mutating one
        # instance's metadata leaked into every later default-constructed one.
        self.metadata = metadata if metadata is not None else {}
| 434 | |||
class Owner:
    """Identity of an object's owner as reported in bucket listings."""

    def __init__(self, id='', display_name=''):
        self.display_name = display_name
        self.id = id
| 439 | |||
class ListEntry:
    """One object entry from a ListBucket response."""

    def __init__(self, key='', last_modified=None, etag='', size=0, storage_class='', owner=None):
        self.owner = owner
        self.storage_class = storage_class
        self.size = size
        self.etag = etag
        self.last_modified = last_modified
        self.key = key
| 448 | |||
class CommonPrefixEntry:
    """A CommonPrefixes entry from a delimiter-based bucket listing."""

    def __init__(self, prefix=''):
        # BUG FIX: was misspelled '__init' so the initializer never ran and
        # instances lacked the 'prefix' attribute until a caller assigned it.
        self.prefix = prefix
| 452 | |||
class Bucket:
    """A bucket's name plus its creation timestamp string."""

    def __init__(self, name='', creation_date=''):
        self.creation_date = creation_date
        self.name = name
| 457 | |||
class Response:
    """Base wrapper for an httplib response.

    Always consumes the body (required before the connection can be reused)
    and exposes a human-readable 'message': the error body on failures, or
    'NNN reason' otherwise.
    """

    def __init__(self, http_response):
        self.http_response = http_response
        # you have to do this read, even if you don't expect a body.
        # otherwise, the next request fails.
        self.body = http_response.read()
        if http_response.status < 300 or not self.body:
            self.message = "%03d %s" % (http_response.status, http_response.reason)
        else:
            self.message = self.body
| 468 | |||
| 469 | |||
| 470 | |||
class ListBucketResponse(Response):
    """Parsed response for a bucket listing (GET bucket)."""

    def __init__(self, http_response):
        Response.__init__(self, http_response)
        # BUG FIX: on error responses only 'entries' used to be set, so
        # reading any other attribute raised AttributeError. Initialize all
        # fields to defaults, then overwrite from the parsed XML on success.
        self.entries = []
        self.common_prefixes = []
        self.name = ''
        self.marker = ''
        self.prefix = ''
        self.is_truncated = False
        self.delimiter = ''
        self.max_keys = 0
        self.next_marker = ''
        if http_response.status < 300:
            handler = ListBucketHandler()
            xml.sax.parseString(self.body, handler)
            self.entries = handler.entries
            self.common_prefixes = handler.common_prefixes
            self.name = handler.name
            self.marker = handler.marker
            self.prefix = handler.prefix
            self.is_truncated = handler.is_truncated
            self.delimiter = handler.delimiter
            self.max_keys = handler.max_keys
            self.next_marker = handler.next_marker
| 488 | |||
class ListAllMyBucketsResponse(Response):
    """Parsed response for the GET service (list-all-buckets) call."""

    def __init__(self, http_response):
        Response.__init__(self, http_response)
        self.entries = []
        if http_response.status < 300:
            handler = ListAllMyBucketsHandler()
            xml.sax.parseString(self.body, handler)
            self.entries = handler.entries
| 498 | |||
class GetResponse(Response):
    # Response wrapper for object GETs: exposes the downloaded body and its
    # 'x-amz-meta-*' headers together as a single S3Object.
    def __init__(self, http_response):
        Response.__init__(self, http_response)
        response_headers = http_response.msg   # older pythons don't have getheaders
        metadata = self.get_aws_metadata(response_headers)
        self.object = S3Object(self.body, metadata)

    def get_aws_metadata(self, headers):
        # Extract user metadata ('x-amz-meta-*' headers, prefix stripped)
        # into a dict, removing those headers from *headers* in place.
        # NOTE(review): deleting while looping over keys() is safe only
        # because Python 2's keys() returns a list snapshot; under Python 3
        # this would need list(headers.keys()).
        metadata = {}
        for hkey in headers.keys():
            if hkey.lower().startswith(METADATA_PREFIX):
                metadata[hkey[len(METADATA_PREFIX):]] = headers[hkey]
                del headers[hkey]

        return metadata
| 514 | |||
class LocationResponse(Response):
    """Parsed response for the bucket '?location' sub-resource."""

    def __init__(self, http_response):
        Response.__init__(self, http_response)
        if http_response.status >= 300:
            # Error: leave 'location' unset, as before.
            return
        handler = LocationHandler()
        xml.sax.parseString(self.body, handler)
        self.location = handler.location
| 522 | |||
class ListBucketHandler(xml.sax.ContentHandler):
    """SAX handler that parses a ListBucket (GET bucket) XML response into
    ListEntry/CommonPrefixEntry objects plus the listing's scalar fields."""

    def __init__(self):
        # BUG FIX: initialize the base ContentHandler (it sets up internal
        # state such as the document locator).
        xml.sax.ContentHandler.__init__(self)
        self.entries = []
        self.curr_entry = None
        self.curr_text = ''
        self.common_prefixes = []
        self.curr_common_prefix = None
        self.name = ''
        self.marker = ''
        self.prefix = ''
        self.is_truncated = False
        self.delimiter = ''
        self.max_keys = 0
        self.next_marker = ''
        # The first <Prefix> echoes the request; later ones belong to
        # <CommonPrefixes> elements.
        self.is_echoed_prefix_set = False

    def startElement(self, name, attrs):
        if name == 'Contents':
            self.curr_entry = ListEntry()
        elif name == 'Owner':
            self.curr_entry.owner = Owner()
        elif name == 'CommonPrefixes':
            self.curr_common_prefix = CommonPrefixEntry()


    def endElement(self, name):
        if name == 'Contents':
            self.entries.append(self.curr_entry)
        elif name == 'CommonPrefixes':
            self.common_prefixes.append(self.curr_common_prefix)
        elif name == 'Key':
            self.curr_entry.key = self.curr_text
        elif name == 'LastModified':
            self.curr_entry.last_modified = self.curr_text
        elif name == 'ETag':
            self.curr_entry.etag = self.curr_text
        elif name == 'Size':
            self.curr_entry.size = int(self.curr_text)
        elif name == 'ID':
            self.curr_entry.owner.id = self.curr_text
        elif name == 'DisplayName':
            self.curr_entry.owner.display_name = self.curr_text
        elif name == 'StorageClass':
            self.curr_entry.storage_class = self.curr_text
        elif name == 'Name':
            self.name = self.curr_text
        elif name == 'Prefix' and self.is_echoed_prefix_set:
            self.curr_common_prefix.prefix = self.curr_text
        elif name == 'Prefix':
            self.prefix = self.curr_text
            self.is_echoed_prefix_set = True
        elif name == 'Marker':
            self.marker = self.curr_text
        elif name == 'IsTruncated':
            self.is_truncated = self.curr_text == 'true'
        elif name == 'Delimiter':
            self.delimiter = self.curr_text
        elif name == 'MaxKeys':
            self.max_keys = int(self.curr_text)
        elif name == 'NextMarker':
            self.next_marker = self.curr_text

        # Reset the text accumulator for the next element.
        self.curr_text = ''

    def characters(self, content):
        # Accumulate: SAX may deliver an element's text in several chunks.
        self.curr_text += content
| 589 | |||
| 590 | |||
class ListAllMyBucketsHandler(xml.sax.ContentHandler):
    """SAX handler that parses a list-all-buckets response into Buckets."""

    def __init__(self):
        # Initialize the base ContentHandler as well.
        xml.sax.ContentHandler.__init__(self)
        self.entries = []
        self.curr_entry = None
        self.curr_text = ''

    def startElement(self, name, attrs):
        if name == 'Bucket':
            self.curr_entry = Bucket()

    def endElement(self, name):
        if name == 'Name':
            self.curr_entry.name = self.curr_text
        elif name == 'CreationDate':
            self.curr_entry.creation_date = self.curr_text
        elif name == 'Bucket':
            self.entries.append(self.curr_entry)
        # Reset the accumulator, mirroring ListBucketHandler.
        self.curr_text = ''

    def characters(self, content):
        # BUG FIX: was 'self.curr_text = content'. SAX may deliver element
        # text in several chunks, so plain assignment kept only the last
        # chunk; accumulate instead (consistent with ListBucketHandler).
        self.curr_text += content
| 611 | |||
| 612 | |||
class LocationHandler(xml.sax.ContentHandler):
    """Tiny state machine that extracts the text of a single
    <LocationConstraint> element; any other document shape ends in the
    'bad' state with location left as-is."""

    def __init__(self):
        self.location = None
        self.state = 'init'

    def startElement(self, name, attrs):
        if self.state == 'init' and name == 'LocationConstraint':
            self.state = 'tag_location'
            self.location = ''
        else:
            self.state = 'bad'

    def endElement(self, name):
        if self.state == 'tag_location' and name == 'LocationConstraint':
            self.state = 'done'
        else:
            self.state = 'bad'

    def characters(self, content):
        if self.state == 'tag_location':
            self.location += content
diff --git a/src/amazons3/__init__.py b/src/amazons3/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/src/amazons3/__init__.py | |||
diff --git a/src/amazons3/django/__init__.py b/src/amazons3/django/__init__.py new file mode 100644 index 0000000..3e57e15 --- /dev/null +++ b/src/amazons3/django/__init__.py | |||
| @@ -0,0 +1,181 @@ | |||
| 1 | import os | ||
| 2 | from StringIO import StringIO | ||
| 3 | from django.conf import settings | ||
| 4 | from amazons3 import S3 | ||
| 5 | |||
| 6 | from django.core.files.storage import Storage | ||
| 7 | |||
class S3OpenFile(StringIO):
    """
    In-memory file wrapper that tolerates open() being called on it.

    FileField form fields expect to call open() on a file object and then
    read its data back, so open() simply rewinds to the start.
    ** NOTE: The behavior of calling open() and then writing to the file is
    currently unknown. **
    """
    def open(self, *args, **kwargs):
        # Arguments are accepted for signature compatibility and ignored.
        self.seek(0)
| 19 | |||
class S3Error(Exception):
    """Raised for miscellaneous S3 service failures."""
    pass
| 23 | |||
| 24 | class S3Storage(Storage): | ||
| 25 | options = None | ||
| 26 | |||
| 27 | def __init__(self, options=None): | ||
| 28 | if not options: | ||
| 29 | options = settings.S3_SETTINGS | ||
| 30 | self.options = options | ||
| 31 | self.perm_tuple = ( | ||
| 32 | 'private', | ||
| 33 | 'public-read', | ||
| 34 | 'public-read-write', | ||
| 35 | 'authenticated-read' | ||
| 36 | ) | ||
| 37 | if self.options['default_perm'] not in self.perm_tuple: | ||
| 38 | self.options['default_perm'] = 'private' | ||
| 39 | |||
| 40 | self.connect() | ||
| 41 | |||
| 42 | def connect(self): | ||
| 43 | self.conn = S3.AWSAuthConnection(self.options['aws_key'], self.options['aws_secret_key']) | ||
| 44 | |||
| 45 | res = self.conn.check_bucket_exists(self.options['bucket']) | ||
| 46 | |||
| 47 | if res.status != 200: | ||
| 48 | res = self.conn.create_bucket(self.options['bucket']) | ||
| 49 | if res.http_response.status != 200: | ||
| 50 | raise S3Error, 'Unable to create bucket %s' % (self.options['bucket']) | ||
| 51 | |||
| 52 | return True | ||
| 53 | |||
| 54 | def exists(self, filename): | ||
| 55 | contents = self.conn.list_bucket(self.options['bucket'], {'prefix': os.path.dirname(filename)}) | ||
| 56 | if filename in [f.key for f in contents.entries]: | ||
| 57 | return True | ||
| 58 | else: | ||
| 59 | return False | ||
| 60 | |||
| 61 | def size(self, filename): | ||
| 62 | contents = self.conn.list_bucket(self.options['bucket'], {'prefix': os.path.dirname(filename)} ) | ||
| 63 | for f in contents.entries: | ||
| 64 | if f.key == filename: | ||
| 65 | return f.size | ||
| 66 | |||
| 67 | return False | ||
| 68 | |||
| 69 | def url(self, filename): | ||
| 70 | server = self.options['bucket'] | ||
| 71 | if not self.options['vanity_url']: | ||
| 72 | server += '.s3.amazonaws.com' | ||
| 73 | else: | ||
| 74 | server = self.options['vanity_url'] | ||
| 75 | return 'http://' + server + '/' + filename | ||
| 76 | |||
| 77 | |||
| 78 | def _save(self, filename, content): | ||
| 79 | # a stupid hack | ||
| 80 | try: | ||
| 81 | content.url = self.url | ||
| 82 | except AttributeError, e: | ||
| 83 | content = content.file | ||
| 84 | |||
| 85 | try: | ||
| 86 | data = content.read() | ||
| 87 | except IOError, err: | ||
| 88 | raise S3Error, 'Unable to read %s: %s' % (filename, err.strerror) | ||
| 89 | |||
| 90 | guess_type = False | ||
| 91 | try: | ||
| 92 | content.content_type | ||
| 93 | except AttributeError, e: | ||
| 94 | guess_type = True | ||
| 95 | |||
| 96 | if guess_type or not content.content_type: | ||
| 97 | import mimetypes | ||
| 98 | content_type = mimetypes.guess_type(filename)[0] | ||
| 99 | if content_type is None: | ||
| 100 | content_type = 'text/plain' | ||
| 101 | else: | ||
| 102 | content_type = content.content_type | ||
| 103 | |||
| 104 | perm = self.options['default_perm'] | ||
| 105 | |||
| 106 | res = self.conn.put( | ||
| 107 | self.options['bucket'], | ||
| 108 | filename, | ||
| 109 | S3.S3Object(data), | ||
| 110 | { | ||
| 111 | 'x-amz-acl': perm, | ||
| 112 | 'Content-Type': content_type | ||
| 113 | } | ||
| 114 | ) | ||
| 115 | |||
| 116 | if res.http_response.status != 200: | ||
| 117 | raise S3Error, 'Unable to upload file %s: Error code %s: %s' % (filename, self.options['bucket'], res.body) | ||
| 118 | |||
| 119 | |||
| 120 | content.filename = filename | ||
| 121 | content.url = self.url(filename) | ||
| 122 | |||
| 123 | return filename | ||
| 124 | |||
| 125 | def delete(self, filename): | ||
| 126 | res = self.conn.delete(self.options['bucket'], filename) | ||
| 127 | if res.http_response.status != 204: | ||
| 128 | pass | ||
| 129 | #raise S3Error, 'Unable to delete file %s' % (filename) | ||
| 130 | |||
| 131 | return (res.http_response.status == 204) | ||
| 132 | |||
| 133 | def path(self, filename): | ||
| 134 | raise NotImplementedError | ||
| 135 | |||
def open(self, filename, mode):
    """Fetch *filename* from S3 and return it as an S3OpenFile.

    The object is downloaded over HTTP from its public URL and buffered
    entirely in memory, which allows the result to be used like a
    normal file in Django FileFields.  *mode* is accepted for API
    compatibility but ignored: the data always comes back as an
    in-memory file wrapper.

    Note: this holds the whole object in memory.
    """
    from urllib import urlopen
    remote = urlopen(self.url(filename))
    try:
        payload = remote.read()
    finally:
        # urlopen handles are not context managers in Python 2; close
        # explicitly so the HTTP socket is not leaked on read errors.
        remote.close()
    openfile = S3OpenFile()
    openfile.write(payload)
    return openfile
| 147 | |||
def get_available_name(self, filename):
    """Return *filename*, or a 'stem-N.ext' variant that is free in S3.

    Probes S3 via self.exists(); the first alternative tried is
    'stem-2.ext', then 'stem-3.ext', and so on.
    """
    import os
    stem, ext = os.path.splitext(filename)
    candidate = filename
    suffix = 1
    while self.exists(candidate):
        suffix += 1
        candidate = '%s-%d%s' % (stem, suffix, ext)
    return candidate
| 157 | |||
class CxStorage(S3Storage):
    """
    Storage engine providing the phonese3 naming scheme: file names are
    replaced by an md5 hash (of the current time plus the name) before
    storage, keeping only the original extension.
    To use, set DEFAULT_STORAGE_ENGINE="CxStorage"

    Author: Jason Braegger
    License: AGPLv3
    Source: http://code.twi.gs/phonese3/
    """
    def get_valid_name(self, name):
        """
        Return the hashed name used on the filesystem: a 32-char md5
        hex digest followed by the (lower-cased) original extension.
        """
        import os.path
        import time
        from hashlib import md5

        extension = os.path.splitext(name)[1].lower()
        # .hexdigest() later requires ascii input; drop anything else.
        ascii_name = name.encode('ascii', 'ignore')
        digest = md5(str(time.time()) + ascii_name).hexdigest()
        return str(digest) + str(extension)
diff --git a/src/amazons3/s3-driver.py b/src/amazons3/s3-driver.py new file mode 100644 index 0000000..29f700b --- /dev/null +++ b/src/amazons3/s3-driver.py | |||
| @@ -0,0 +1,118 @@ | |||
#!/usr/bin/env python

# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
# affiliates.

# Interactive walk-through of the S3 library: creates a bucket, puts,
# signs, gets and deletes objects, pausing for the user between the
# browser-based steps.  Fill in real AWS credentials below before use.

import S3
import time
import sys

AWS_ACCESS_KEY_ID = '<INSERT YOUR AWS ACCESS KEY ID HERE>'
AWS_SECRET_ACCESS_KEY = '<INSERT YOUR AWS SECRET ACCESS KEY HERE>'
# remove these next two lines when you've updated your credentials.
print "update s3-driver.py with your AWS credentials"
sys.exit();

# convert the bucket to lowercase for vanity domains
# the bucket name must be lowercase since DNS is case-insensitive
BUCKET_NAME = AWS_ACCESS_KEY_ID.lower() + '-test-bucket'
KEY_NAME = 'test-key'

# Authenticated REST connection, plus a generator for pre-signed
# query-string URLs (used for the browser examples further down).
conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
generator = S3.QueryStringAuthGenerator(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)


# Check if the bucket exists. The high availability engineering of
# Amazon S3 is focused on get, put, list, and delete operations.
# Because bucket operations work against a centralized, global
# resource space, it is not appropriate to make bucket create or
# delete calls on the high availability code path of your application.
# It is better to create or delete buckets in a separate initialization
# or setup routine that you run less often.
if (conn.check_bucket_exists(BUCKET_NAME).status == 200):
    print '----- bucket already exists! -----'
else:
    print '----- creating bucket -----'
    print conn.create_located_bucket(BUCKET_NAME, S3.Location.DEFAULT).message
    # to create an EU bucket
    #print conn.create_located_bucket(BUCKET_NAME, S3.Location.EU).message

print '----- bucket location -----'
print conn.get_bucket_location(BUCKET_NAME).location

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- putting object (with content type) -----'
print conn.put(
        BUCKET_NAME,
        KEY_NAME,
        S3.S3Object('this is a test'),
        { 'Content-Type': 'text/plain' }).message

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- getting object -----'
print conn.get(BUCKET_NAME, KEY_NAME).object.data

# A pre-signed URL grants temporary access without AWS credentials;
# after the expiry window the same URL is rejected by S3.
print '----- query string auth example -----'
print "\nTry this url out in your browser (it will only be valid for 60 seconds).\n"
generator.set_expires_in(60);
url = generator.get(BUCKET_NAME, KEY_NAME)
print url
print '\npress enter> ',
sys.stdin.readline()

print "\nNow try just the url without the query string arguments. it should fail.\n"
print generator.make_bare_url(BUCKET_NAME, KEY_NAME)
print '\npress enter> ',
sys.stdin.readline()


print '----- putting object with metadata and public read acl -----'
print conn.put(
        BUCKET_NAME,
        KEY_NAME + '-public',
        S3.S3Object('this is a publicly readable test'),
        { 'x-amz-acl': 'public-read' , 'Content-Type': 'text/plain' }
    ).message

# The public-read ACL above makes this key fetchable with no signature.
print '----- anonymous read test ----'
print "\nYou should be able to try this in your browser\n"
public_key = KEY_NAME + '-public'
print generator.make_bare_url(BUCKET_NAME, public_key)
print "\npress enter> ",
sys.stdin.readline()

print "----- getting object's acl -----"
print conn.get_acl(BUCKET_NAME, KEY_NAME).object.data

print "\n----- path style url example -----";
print "Non-location-constrained buckets can also be specified as part of the url path. (This was the original url style supported by S3.)\n";
print "Try this url out in your browser (it will only be valid for 60 seconds).\n"
generator.calling_format = S3.CallingFormat.PATH
url = generator.get(BUCKET_NAME, KEY_NAME)
print url
print "\npress enter> ",
sys.stdin.readline()

# Clean up: remove both objects, then the bucket itself (a bucket must
# be empty before delete_bucket succeeds).
print '----- deleting objects -----'
print conn.delete(BUCKET_NAME, KEY_NAME).message
print conn.delete(BUCKET_NAME, KEY_NAME + '-public').message

print '----- listing bucket -----'
print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)

print '----- listing all my buckets -----'
print map(lambda x: x.name, conn.list_all_my_buckets().entries)

print '----- deleting bucket ------'
print conn.delete_bucket(BUCKET_NAME).message
diff --git a/src/amazons3/s3-test.py b/src/amazons3/s3-test.py new file mode 100644 index 0000000..fbd8d9c --- /dev/null +++ b/src/amazons3/s3-test.py | |||
| @@ -0,0 +1,267 @@ | |||
#!/usr/bin/env python

# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
# affiliates.

# Unit tests for the S3 library.  They run against the live S3 service,
# so valid AWS credentials must be filled in below before they can run.

import unittest
import S3
import httplib
import sys

AWS_ACCESS_KEY_ID = '<INSERT YOUR AWS ACCESS KEY ID HERE>'
AWS_SECRET_ACCESS_KEY = '<INSERT YOUR AWS SECRET ACCESS KEY HERE>'
# remove these next two lines when you've updated your credentials.
print "update s3-test.py with your AWS credentials"
sys.exit();

# for subdomains (bucket.s3.amazonaws.com),
# the bucket name must be lowercase since DNS is case-insensitive
BUCKET_NAME = "%s-test-bucket" % AWS_ACCESS_KEY_ID.lower();
| 26 | |||
| 27 | |||
class TestAWSAuthConnection(unittest.TestCase):
    """Round-trip tests for S3.AWSAuthConnection against live S3.

    Each test creates BUCKET_NAME, exercises put/get/list/ACL/logging
    calls for one calling-format + location combination, then deletes
    the bucket again.  Requires valid AWS credentials and network
    access.
    """
    def setUp(self):
        self.conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)

    # test all operations for both regular and vanity domains
    # regular: http://s3.amazonaws.com/bucket/key
    # subdomain: http://bucket.s3.amazonaws.com/key
    # testing pure vanity domains (http://<vanity domain>/key) is not covered here
    # but is possible with some additional setup (set the server in @conn to your vanity domain)

    def test_subdomain_default(self):
        self.run_tests(S3.CallingFormat.SUBDOMAIN, S3.Location.DEFAULT)

    def test_subdomain_eu(self):
        self.run_tests(S3.CallingFormat.SUBDOMAIN, S3.Location.EU)

    def test_path_default(self):
        self.run_tests(S3.CallingFormat.PATH, S3.Location.DEFAULT)


    def run_tests(self, calling_format, location):
        """Create a bucket, exercise object/ACL/logging operations, tear it down."""
        self.conn.calling_format = calling_format

        response = self.conn.create_located_bucket(BUCKET_NAME, location)
        self.assertEqual(response.http_response.status, 200, 'create bucket')

        response = self.conn.list_bucket(BUCKET_NAME)
        self.assertEqual(response.http_response.status, 200, 'list bucket')
        self.assertEqual(len(response.entries), 0, 'bucket is empty')

        text = 'this is a test'
        key = 'example.txt'

        response = self.conn.put(BUCKET_NAME, key, text)
        self.assertEqual(response.http_response.status, 200, 'put with a string argument')

        response = \
            self.conn.put(
                BUCKET_NAME,
                key,
                S3.S3Object(text, {'title': 'title'}),
                {'Content-Type': 'text/plain'})

        self.assertEqual(response.http_response.status, 200, 'put with complex argument and headers')

        response = self.conn.get(BUCKET_NAME, key)
        self.assertEqual(response.http_response.status, 200, 'get object')
        self.assertEqual(response.object.data, text, 'got right data')
        self.assertEqual(response.object.metadata, { 'title': 'title' }, 'metadata is correct')
        self.assertEqual(int(response.http_response.getheader('Content-Length')), len(text), 'got content-length header')

        # S3 strips surrounding whitespace from metadata header values.
        title_with_spaces = " \t title with leading and trailing spaces "
        response = \
            self.conn.put(
                BUCKET_NAME,
                key,
                S3.S3Object(text, {'title': title_with_spaces}),
                {'Content-Type': 'text/plain'})

        self.assertEqual(response.http_response.status, 200, 'put with headers with spaces')

        response = self.conn.get(BUCKET_NAME, key)
        self.assertEqual(response.http_response.status, 200, 'get object')
        self.assertEqual(
            response.object.metadata,
            { 'title': title_with_spaces.strip() },
            'metadata with spaces is correct')

        # delimited list tests
        inner_key = 'test/inner.txt'
        last_key = 'z-last-key.txt'
        response = self.conn.put(BUCKET_NAME, inner_key, text)
        self.assertEqual(response.http_response.status, 200, 'put inner key')

        response = self.conn.put(BUCKET_NAME, last_key, text)
        self.assertEqual(response.http_response.status, 200, 'put last key')

        response = self.do_delimited_list(BUCKET_NAME, False, {'delimiter': '/'}, 2, 1, 'root list')

        response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 1, 'delimiter': '/'}, 1, 0, 'root list with max keys of 1', 'example.txt')

        response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 2, 'delimiter': '/'}, 1, 1, 'root list with max keys of 2, page 1', 'test/')

        # Continue the truncated listing from where page 1 stopped.
        marker = response.next_marker

        response = self.do_delimited_list(BUCKET_NAME, False, {'marker': marker, 'max-keys': 2, 'delimiter': '/'}, 1, 0, 'root list with max keys of 2, page 2')

        response = self.do_delimited_list(BUCKET_NAME, False, {'prefix': 'test/', 'delimiter': '/'}, 1, 0, 'test/ list')

        response = self.conn.delete(BUCKET_NAME, inner_key)
        self.assertEqual(response.http_response.status, 204, 'delete %s' % inner_key)

        response = self.conn.delete(BUCKET_NAME, last_key)
        self.assertEqual(response.http_response.status, 204, 'delete %s' % last_key)

        # Keys with characters that need url-encoding must round-trip.
        weird_key = '&=//%# ++++'

        response = self.conn.put(BUCKET_NAME, weird_key, text)
        self.assertEqual(response.http_response.status, 200, 'put weird key')

        response = self.conn.get(BUCKET_NAME, weird_key)
        self.assertEqual(response.http_response.status, 200, 'get weird key')

        response = self.conn.get_acl(BUCKET_NAME, key)
        self.assertEqual(response.http_response.status, 200, 'get acl')

        acl = response.object.data

        response = self.conn.put_acl(BUCKET_NAME, key, acl)
        self.assertEqual(response.http_response.status, 200, 'put acl')

        response = self.conn.get_bucket_acl(BUCKET_NAME)
        self.assertEqual(response.http_response.status, 200, 'get bucket acl')

        bucket_acl = response.object.data

        response = self.conn.put_bucket_acl(BUCKET_NAME, bucket_acl)
        self.assertEqual(response.http_response.status, 200, 'put bucket acl')

        # Fixed: this section previously repeated the get/put_bucket_acl
        # calls although its messages (and variable name) refer to bucket
        # logging, leaving the logging API untested.
        response = self.conn.get_bucket_logging(BUCKET_NAME)
        self.assertEqual(response.http_response.status, 200, 'get bucket logging')

        bucket_logging = response.object.data

        response = self.conn.put_bucket_logging(BUCKET_NAME, bucket_logging)
        self.assertEqual(response.http_response.status, 200, 'put bucket logging')

        response = self.conn.list_bucket(BUCKET_NAME)
        self.assertEqual(response.http_response.status, 200, 'list bucket')
        entries = response.entries
        self.assertEqual(len(entries), 2, 'got back right number of keys')
        # depends on weird_key < key
        self.assertEqual(entries[0].key, weird_key, 'first key is right')
        self.assertEqual(entries[1].key, key, 'second key is right')

        response = self.conn.list_bucket(BUCKET_NAME, {'max-keys': 1})
        self.assertEqual(response.http_response.status, 200, 'list bucket with args')
        self.assertEqual(len(response.entries), 1, 'got back right number of keys')

        # Empty the bucket (iterating the full listing captured above),
        # since S3 refuses to delete a non-empty bucket.
        for entry in entries:
            response = self.conn.delete(BUCKET_NAME, entry.key)
            self.assertEqual(response.http_response.status, 204, 'delete %s' % entry.key)

        response = self.conn.list_all_my_buckets()
        self.assertEqual(response.http_response.status, 200, 'list all my buckets')
        buckets = response.entries

        response = self.conn.delete_bucket(BUCKET_NAME)
        self.assertEqual(response.http_response.status, 204, 'delete bucket')

        response = self.conn.list_all_my_buckets()
        self.assertEqual(response.http_response.status, 200, 'list all my buckets again')

        self.assertEqual(len(response.entries), len(buckets) - 1, 'bucket count is correct')

    def verify_list_bucket_response(self, response, bucket, is_truncated, parameters, next_marker=''):
        """Check that list_bucket echoed back the request parameters correctly."""
        prefix = ''
        marker = ''

        if 'prefix' in parameters:
            prefix = parameters['prefix']
        if 'marker' in parameters:
            marker = parameters['marker']

        self.assertEqual(bucket, response.name, 'bucket name should match')
        self.assertEqual(prefix, response.prefix, 'prefix should match')
        self.assertEqual(marker, response.marker, 'marker should match')
        if 'max-keys' in parameters:
            self.assertEqual(parameters['max-keys'], response.max_keys, 'max-keys should match')
        self.assertEqual(parameters['delimiter'], response.delimiter, 'delimiter should match')
        self.assertEqual(is_truncated, response.is_truncated, 'is_truncated should match')
        self.assertEqual(next_marker, response.next_marker, 'next_marker should match')

    def do_delimited_list(self, bucket_name, is_truncated, parameters, regular_expected, common_expected, test_name, next_marker=''):
        """List *bucket_name* with *parameters* and assert entry/prefix counts."""
        response = self.conn.list_bucket(bucket_name, parameters)
        self.assertEqual(response.http_response.status, 200, test_name)
        self.assertEqual(regular_expected, len(response.entries), 'right number of regular entries')
        self.assertEqual(common_expected, len(response.common_prefixes), 'right number of common prefixes')

        self.verify_list_bucket_response(response, bucket_name, is_truncated, parameters, next_marker)

        return response
| 211 | |||
class TestQueryStringAuthGenerator(unittest.TestCase):
    """Exercise pre-signed query-string URLs over a raw httplib connection.

    Every S3 operation is performed by requesting a URL produced by
    S3.QueryStringAuthGenerator, proving the signatures are valid
    without using the authenticated REST connection.
    """
    def setUp(self):
        self.generator = S3.QueryStringAuthGenerator(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        # Match the raw connection's scheme to the generator's setting.
        if self.generator.is_secure:
            self.connection = httplib.HTTPSConnection(self.generator.server_name)
        else:
            self.connection = httplib.HTTPConnection(self.generator.server_name)

    def check_url(self, url, method, status, message, data=''):
        """Issue *method* on *url*, assert the HTTP *status*, return the body."""
        if method == 'PUT':
            # PUTs carry a body, so Content-Length must be set explicitly.
            headers = { 'Content-Length': len(data) }
            self.connection.request(method, url, data, headers)
        else:
            self.connection.request(method, url)

        response = self.connection.getresponse()
        self.assertEqual(response.status, status, message)

        return response.read()

    # test all operations for both regular and vanity domains
    # regular: http://s3.amazonaws.com/bucket/key
    # subdomain: http://bucket.s3.amazonaws.com/key
    # testing pure vanity domains (http://<vanity domain>/key) is not covered here
    # but is possible with some additional setup (set the server in @conn to your vanity domain)

    def test_subdomain(self):
        self.run_tests(S3.CallingFormat.SUBDOMAIN)

    def test_path(self):
        self.run_tests(S3.CallingFormat.PATH)

    def run_tests(self, calling_format):
        """Create, read, update ACL/logging, and delete via signed URLs only."""
        self.generator.calling_format = calling_format

        key = 'test'

        self.check_url(self.generator.create_bucket(BUCKET_NAME), 'PUT', 200, 'create_bucket')
        self.check_url(self.generator.put(BUCKET_NAME, key, ''), 'PUT', 200, 'put object', 'test data')
        self.check_url(self.generator.get(BUCKET_NAME, key), 'GET', 200, 'get object')
        self.check_url(self.generator.list_bucket(BUCKET_NAME), 'GET', 200, 'list bucket')
        self.check_url(self.generator.list_all_my_buckets(), 'GET', 200, 'list all my buckets')
        acl = self.check_url(self.generator.get_acl(BUCKET_NAME, key), 'GET', 200, 'get acl')
        self.check_url(self.generator.put_acl(BUCKET_NAME, key, acl), 'PUT', 200, 'put acl', acl)
        bucket_acl = self.check_url(self.generator.get_bucket_acl(BUCKET_NAME), 'GET', 200, 'get bucket acl')
        self.check_url(self.generator.put_bucket_acl(BUCKET_NAME, bucket_acl), 'PUT', 200, 'put bucket acl', bucket_acl)
        bucket_logging = self.check_url(self.generator.get_bucket_logging(BUCKET_NAME), 'GET', 200, 'get bucket logging')
        self.check_url(self.generator.put_bucket_logging(BUCKET_NAME, bucket_logging), 'PUT', 200, 'put bucket logging', bucket_logging)
        self.check_url(self.generator.delete(BUCKET_NAME, key), 'DELETE', 204, 'delete object')
        self.check_url(self.generator.delete_bucket(BUCKET_NAME), 'DELETE', 204, 'delete bucket')
| 262 | |||
| 263 | |||
if __name__ == '__main__':
    # Run both the connection and the query-string-auth test suites.
    unittest.main()
| 266 | |||
| 267 | |||