From 647c078f0c801eb9fa63dfc7ba3c017b30e4d467 Mon Sep 17 00:00:00 2001
From: Craig Jackson
Date: Tue, 13 Dec 2011 14:46:59 -0700
Subject: Made the project available on PyPI. Involved moving files and
initializing a setup.py.
---
src/amazons3/S3.py | 633 ++++++++++++++++++++++++++++++++++++++++
src/amazons3/__init__.py | 0
src/amazons3/django/__init__.py | 181 ++++++++++++
src/amazons3/s3-driver.py | 118 ++++++++
src/amazons3/s3-test.py | 267 +++++++++++++++++
5 files changed, 1199 insertions(+)
create mode 100644 src/amazons3/S3.py
create mode 100644 src/amazons3/__init__.py
create mode 100644 src/amazons3/django/__init__.py
create mode 100644 src/amazons3/s3-driver.py
create mode 100644 src/amazons3/s3-test.py
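
The setup.py mentioned in the subject is not included in this diff. For the
src/ layout it introduces, a minimal distutils-based sketch (the name,
version, and description below are assumptions, not taken from the commit)
would look like:

    # sketch only -- the real setup.py is not part of this diff
    from distutils.core import setup

    setup(
        name='amazons3',                       # assumed package name
        version='0.1',                         # assumed version
        description='Amazon S3 library with a Django storage backend',
        package_dir={'': 'src'},               # packages live under src/
        packages=['amazons3', 'amazons3.django'],
    )
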
diff --git a/src/amazons3/S3.py b/src/amazons3/S3.py
new file mode 100644
index 0000000..0999f27
--- /dev/null
+++ b/src/amazons3/S3.py
@@ -0,0 +1,633 @@
+#!/usr/bin/env python
+
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
+# affiliates.
+
+import base64
+import hmac
+import httplib
+import re
+from hashlib import sha1
+import sys
+import time
+import urllib
+import urlparse
+import xml.sax
+
+DEFAULT_HOST = 's3.amazonaws.com'
+PORTS_BY_SECURITY = { True: 443, False: 80 }
+METADATA_PREFIX = 'x-amz-meta-'
+AMAZON_HEADER_PREFIX = 'x-amz-'
+
+# generates the aws canonical string for the given parameters
+def canonical_string(method, bucket="", key="", query_args={}, headers={}, expires=None):
+ interesting_headers = {}
+ for header_key in headers:
+ lk = header_key.lower()
+ if lk in ['content-md5', 'content-type', 'date'] or lk.startswith(AMAZON_HEADER_PREFIX):
+ interesting_headers[lk] = headers[header_key].strip()
+
+ # these keys get empty strings if they don't exist
+ if not interesting_headers.has_key('content-type'):
+ interesting_headers['content-type'] = ''
+ if not interesting_headers.has_key('content-md5'):
+ interesting_headers['content-md5'] = ''
+
+ # just in case someone used this. it's not necessary in this lib.
+ if interesting_headers.has_key('x-amz-date'):
+ interesting_headers['date'] = ''
+
+ # if you're using expires for query string auth, then it trumps date
+ # (and x-amz-date)
+ if expires:
+ interesting_headers['date'] = str(expires)
+
+ sorted_header_keys = interesting_headers.keys()
+ sorted_header_keys.sort()
+
+ buf = "%s\n" % method
+ for header_key in sorted_header_keys:
+ if header_key.startswith(AMAZON_HEADER_PREFIX):
+ buf += "%s:%s\n" % (header_key, interesting_headers[header_key])
+ else:
+ buf += "%s\n" % interesting_headers[header_key]
+
+ # append the bucket if it exists
+ if bucket != "":
+ buf += "/%s" % bucket
+
+ # add the key. even if it doesn't exist, add the slash
+ buf += "/%s" % urllib.quote_plus(key.encode('utf-8'))
+
+ # handle special query string arguments
+
+ if query_args.has_key("acl"):
+ buf += "?acl"
+ elif query_args.has_key("torrent"):
+ buf += "?torrent"
+ elif query_args.has_key("logging"):
+ buf += "?logging"
+ elif query_args.has_key("location"):
+ buf += "?location"
+
+ return buf
+
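+# For illustration: a GET of key "photo.jpg" in bucket "quotes" with only a
+# Date header of "Tue, 27 Mar 2007 19:36:42 +0000" produces the string
+# "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/quotes/photo.jpg"
+# (the empty lines stand in for the absent Content-MD5 and Content-Type).
+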
+# computes the base64'ed hmac-sha hash of the canonical string and the secret
+# access key, optionally urlencoding the result
+def encode(aws_secret_access_key, string_to_sign, urlencode=False):
+    b64_hmac = base64.encodestring(hmac.new(aws_secret_access_key, string_to_sign, sha1).digest()).strip()
+ if urlencode:
+ return urllib.quote_plus(b64_hmac)
+ else:
+ return b64_hmac
+
+def merge_meta(headers, metadata):
+ final_headers = headers.copy()
+ for k in metadata.keys():
+ final_headers[METADATA_PREFIX + k] = metadata[k]
+
+ return final_headers
+
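+# e.g. merge_meta({'Content-Type': 'text/plain'}, {'title': 'hi'}) returns
+# {'Content-Type': 'text/plain', 'x-amz-meta-title': 'hi'}
+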
+# builds the query arg string
+def query_args_hash_to_string(query_args):
+ query_string = ""
+ pairs = []
+ for k, v in query_args.items():
+ piece = k
+        if v is not None:
+ piece += "=%s" % urllib.quote_plus(str(v).encode('utf-8'))
+ pairs.append(piece)
+
+ return '&'.join(pairs)
+
+
+class CallingFormat:
+ PATH = 1
+ SUBDOMAIN = 2
+ VANITY = 3
+
+ def build_url_base(protocol, server, port, bucket, calling_format):
+ url_base = '%s://' % protocol
+
+ if bucket == '':
+ url_base += server
+ elif calling_format == CallingFormat.SUBDOMAIN:
+ url_base += "%s.%s" % (bucket, server)
+ elif calling_format == CallingFormat.VANITY:
+ url_base += bucket
+ else:
+ url_base += server
+
+ url_base += ":%s" % port
+
+ if (bucket != '') and (calling_format == CallingFormat.PATH):
+ url_base += "/%s" % bucket
+
+ return url_base
+
+ build_url_base = staticmethod(build_url_base)
+
+
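+# For bucket "quotes" on the default secure port, build_url_base yields:
+#   PATH:      https://s3.amazonaws.com:443/quotes
+#   SUBDOMAIN: https://quotes.s3.amazonaws.com:443
+#   VANITY:    https://quotes:443  (the bucket name is used as the hostname)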
+
+class Location:
+ DEFAULT = None
+ EU = 'EU'
+
+
+
+class AWSAuthConnection:
+ def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
+ server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):
+
+ if not port:
+ port = PORTS_BY_SECURITY[is_secure]
+
+ self.aws_access_key_id = aws_access_key_id
+ self.aws_secret_access_key = aws_secret_access_key
+ self.is_secure = is_secure
+ self.server = server
+ self.port = port
+ self.calling_format = calling_format
+
+ def create_bucket(self, bucket, headers={}):
+ return Response(self._make_request('PUT', bucket, '', {}, headers))
+
+ def create_located_bucket(self, bucket, location=Location.DEFAULT, headers={}):
+ if location == Location.DEFAULT:
+ body = ""
+ else:
+            body = "<CreateBucketConfiguration><LocationConstraint>" + \
+                   location + \
+                   "</LocationConstraint></CreateBucketConfiguration>"
+ return Response(self._make_request('PUT', bucket, '', {}, headers, body))
+
+ def check_bucket_exists(self, bucket):
+ return self._make_request('HEAD', bucket, '', {}, {})
+
+ def list_bucket(self, bucket, options={}, headers={}):
+ return ListBucketResponse(self._make_request('GET', bucket, '', options, headers))
+
+ def delete_bucket(self, bucket, headers={}):
+ return Response(self._make_request('DELETE', bucket, '', {}, headers))
+
+ def put(self, bucket, key, object, headers={}):
+ if not isinstance(object, S3Object):
+ object = S3Object(object)
+
+ return Response(
+ self._make_request(
+ 'PUT',
+ bucket,
+ key,
+ {},
+ headers,
+ object.data,
+ object.metadata))
+
+ def get(self, bucket, key, headers={}):
+ return GetResponse(
+ self._make_request('GET', bucket, key, {}, headers))
+
+ def delete(self, bucket, key, headers={}):
+ return Response(
+ self._make_request('DELETE', bucket, key, {}, headers))
+
+ def get_bucket_logging(self, bucket, headers={}):
+ return GetResponse(self._make_request('GET', bucket, '', { 'logging': None }, headers))
+
+ def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
+ return Response(self._make_request('PUT', bucket, '', { 'logging': None }, headers, logging_xml_doc))
+
+ def get_bucket_acl(self, bucket, headers={}):
+ return self.get_acl(bucket, '', headers)
+
+ def get_acl(self, bucket, key, headers={}):
+ return GetResponse(
+ self._make_request('GET', bucket, key, { 'acl': None }, headers))
+
+ def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
+ return self.put_acl(bucket, '', acl_xml_document, headers)
+
+ def put_acl(self, bucket, key, acl_xml_document, headers={}):
+ return Response(
+ self._make_request(
+ 'PUT',
+ bucket,
+ key,
+ { 'acl': None },
+ headers,
+ acl_xml_document))
+
+ def list_all_my_buckets(self, headers={}):
+ return ListAllMyBucketsResponse(self._make_request('GET', '', '', {}, headers))
+
+ def get_bucket_location(self, bucket):
+ return LocationResponse(self._make_request('GET', bucket, '', {'location' : None}))
+
+ # end public methods
+
+ def _make_request(self, method, bucket='', key='', query_args={}, headers={}, data='', metadata={}):
+
+ server = ''
+ if bucket == '':
+ server = self.server
+ elif self.calling_format == CallingFormat.SUBDOMAIN:
+ server = "%s.%s" % (bucket, self.server)
+ elif self.calling_format == CallingFormat.VANITY:
+ server = bucket
+ else:
+ server = self.server
+
+ path = ''
+
+ if (bucket != '') and (self.calling_format == CallingFormat.PATH):
+ path += "/%s" % bucket
+
+ # add the slash after the bucket regardless
+ # the key will be appended if it is non-empty
+ path += "/%s" % urllib.quote_plus(key.encode('utf-8'))
+
+
+ # build the path_argument string
+ # add the ? in all cases since
+ # signature and credentials follow path args
+ if len(query_args):
+ path += "?" + query_args_hash_to_string(query_args)
+
+ is_secure = self.is_secure
+ host = "%s:%d" % (server, self.port)
+ while True:
+ if (is_secure):
+ connection = httplib.HTTPSConnection(host)
+ else:
+ connection = httplib.HTTPConnection(host)
+
+ final_headers = merge_meta(headers, metadata)
+ # add auth header
+ self._add_aws_auth_header(final_headers, method, bucket, key, query_args)
+
+ try:
+ connection.request(method, path, data, final_headers)
+            except Exception:
+ # Try try again
+ connection.close()
+ continue
+
+ try:
+ resp = connection.getresponse()
+            except Exception:
+ # Sometimes the connection is reset by peer. If that happens
+ # just try it again and we'll see what happens.
+ connection.close()
+ continue
+
+ if resp.status < 300 or resp.status >= 400:
+ return resp
+ # handle redirect
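+            # (S3 issues 307 Temporary Redirect, e.g. for a newly created
+            # EU bucket whose DNS entry has not yet propagated)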
+ location = resp.getheader('location')
+ if not location:
+ return resp
+ # (close connection)
+ resp.read()
+ scheme, host, path, params, query, fragment \
+ = urlparse.urlparse(location)
+            if scheme == "http":
+                is_secure = False
+            elif scheme == "https":
+                is_secure = True
+            else:
+                raise ValueError("Not http/https: " + location)
+ if query:
+ path += "?" + query
+ # retry with redirect
+ connection.close()
+
+ def _add_aws_auth_header(self, headers, method, bucket, key, query_args):
+ if not headers.has_key('Date'):
+            headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
+
+ c_string = canonical_string(method, bucket, key, query_args, headers)
+ headers['Authorization'] = \
+ "AWS %s:%s" % (self.aws_access_key_id, encode(self.aws_secret_access_key, c_string))
+
+
+class QueryStringAuthGenerator:
+ # by default, expire in 1 minute
+ DEFAULT_EXPIRES_IN = 60
+
+ def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
+ server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):
+
+ if not port:
+ port = PORTS_BY_SECURITY[is_secure]
+
+ self.aws_access_key_id = aws_access_key_id
+ self.aws_secret_access_key = aws_secret_access_key
+ if (is_secure):
+ self.protocol = 'https'
+ else:
+ self.protocol = 'http'
+
+ self.is_secure = is_secure
+ self.server = server
+ self.port = port
+ self.calling_format = calling_format
+ self.__expires_in = QueryStringAuthGenerator.DEFAULT_EXPIRES_IN
+ self.__expires = None
+
+ # for backwards compatibility with older versions
+ self.server_name = "%s:%s" % (self.server, self.port)
+
+ def set_expires_in(self, expires_in):
+ self.__expires_in = expires_in
+ self.__expires = None
+
+ def set_expires(self, expires):
+ self.__expires = expires
+ self.__expires_in = None
+
+ def create_bucket(self, bucket, headers={}):
+ return self.generate_url('PUT', bucket, '', {}, headers)
+
+ def list_bucket(self, bucket, options={}, headers={}):
+ return self.generate_url('GET', bucket, '', options, headers)
+
+ def delete_bucket(self, bucket, headers={}):
+ return self.generate_url('DELETE', bucket, '', {}, headers)
+
+ def put(self, bucket, key, object, headers={}):
+ if not isinstance(object, S3Object):
+ object = S3Object(object)
+
+ return self.generate_url(
+ 'PUT',
+ bucket,
+ key,
+ {},
+ merge_meta(headers, object.metadata))
+
+ def get(self, bucket, key, headers={}):
+ return self.generate_url('GET', bucket, key, {}, headers)
+
+ def delete(self, bucket, key, headers={}):
+ return self.generate_url('DELETE', bucket, key, {}, headers)
+
+ def get_bucket_logging(self, bucket, headers={}):
+ return self.generate_url('GET', bucket, '', { 'logging': None }, headers)
+
+ def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
+ return self.generate_url('PUT', bucket, '', { 'logging': None }, headers)
+
+ def get_bucket_acl(self, bucket, headers={}):
+ return self.get_acl(bucket, '', headers)
+
+ def get_acl(self, bucket, key='', headers={}):
+ return self.generate_url('GET', bucket, key, { 'acl': None }, headers)
+
+ def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
+ return self.put_acl(bucket, '', acl_xml_document, headers)
+
+ # don't really care what the doc is here.
+ def put_acl(self, bucket, key, acl_xml_document, headers={}):
+ return self.generate_url('PUT', bucket, key, { 'acl': None }, headers)
+
+ def list_all_my_buckets(self, headers={}):
+ return self.generate_url('GET', '', '', {}, headers)
+
+ def make_bare_url(self, bucket, key=''):
+        full_url = self.generate_url('GET', bucket, key)
+ return full_url[:full_url.index('?')]
+
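+    # The generated URL has the form (values illustrative only):
+    #   https://bucket.s3.amazonaws.com:443/key?Signature=...&Expires=1175139642&AWSAccessKeyId=...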
+ def generate_url(self, method, bucket='', key='', query_args={}, headers={}):
+        query_args = query_args.copy()  # avoid mutating the caller's dict (or the shared default)
+        expires = 0
+        if self.__expires_in is not None:
+            expires = int(time.time() + self.__expires_in)
+        elif self.__expires is not None:
+            expires = int(self.__expires)
+        else:
+            raise ValueError("Invalid expires state")
+
+ canonical_str = canonical_string(method, bucket, key, query_args, headers, expires)
+ encoded_canonical = encode(self.aws_secret_access_key, canonical_str)
+
+ url = CallingFormat.build_url_base(self.protocol, self.server, self.port, bucket, self.calling_format)
+
+ url += "/%s" % urllib.quote_plus(key.encode('utf-8'))
+
+ query_args['Signature'] = encoded_canonical
+ query_args['Expires'] = expires
+ query_args['AWSAccessKeyId'] = self.aws_access_key_id
+
+ url += "?%s" % query_args_hash_to_string(query_args)
+
+ return url
+
+
+class S3Object:
+ def __init__(self, data, metadata={}):
+ self.data = data
+ self.metadata = metadata
+
+class Owner:
+ def __init__(self, id='', display_name=''):
+ self.id = id
+ self.display_name = display_name
+
+class ListEntry:
+ def __init__(self, key='', last_modified=None, etag='', size=0, storage_class='', owner=None):
+ self.key = key
+ self.last_modified = last_modified
+ self.etag = etag
+ self.size = size
+ self.storage_class = storage_class
+ self.owner = owner
+
+class CommonPrefixEntry:
+    def __init__(self, prefix=''):
+ self.prefix = prefix
+
+class Bucket:
+ def __init__(self, name='', creation_date=''):
+ self.name = name
+ self.creation_date = creation_date
+
+class Response:
+ def __init__(self, http_response):
+ self.http_response = http_response
+ # you have to do this read, even if you don't expect a body.
+ # otherwise, the next request fails.
+ self.body = http_response.read()
+ if http_response.status >= 300 and self.body:
+ self.message = self.body
+ else:
+ self.message = "%03d %s" % (http_response.status, http_response.reason)
+
+
+
+class ListBucketResponse(Response):
+ def __init__(self, http_response):
+ Response.__init__(self, http_response)
+ if http_response.status < 300:
+ handler = ListBucketHandler()
+ xml.sax.parseString(self.body, handler)
+ self.entries = handler.entries
+ self.common_prefixes = handler.common_prefixes
+ self.name = handler.name
+ self.marker = handler.marker
+ self.prefix = handler.prefix
+ self.is_truncated = handler.is_truncated
+ self.delimiter = handler.delimiter
+ self.max_keys = handler.max_keys
+ self.next_marker = handler.next_marker
+ else:
+ self.entries = []
+
+class ListAllMyBucketsResponse(Response):
+ def __init__(self, http_response):
+ Response.__init__(self, http_response)
+ if http_response.status < 300:
+ handler = ListAllMyBucketsHandler()
+ xml.sax.parseString(self.body, handler)
+ self.entries = handler.entries
+ else:
+ self.entries = []
+
+class GetResponse(Response):
+ def __init__(self, http_response):
+ Response.__init__(self, http_response)
+ response_headers = http_response.msg # older pythons don't have getheaders
+ metadata = self.get_aws_metadata(response_headers)
+ self.object = S3Object(self.body, metadata)
+
+ def get_aws_metadata(self, headers):
+ metadata = {}
+ for hkey in headers.keys():
+ if hkey.lower().startswith(METADATA_PREFIX):
+ metadata[hkey[len(METADATA_PREFIX):]] = headers[hkey]
+ del headers[hkey]
+
+ return metadata
+
+class LocationResponse(Response):
+ def __init__(self, http_response):
+ Response.__init__(self, http_response)
+ if http_response.status < 300:
+ handler = LocationHandler()
+ xml.sax.parseString(self.body, handler)
+ self.location = handler.location
+
+class ListBucketHandler(xml.sax.ContentHandler):
+ def __init__(self):
+ self.entries = []
+ self.curr_entry = None
+ self.curr_text = ''
+ self.common_prefixes = []
+ self.curr_common_prefix = None
+ self.name = ''
+ self.marker = ''
+ self.prefix = ''
+ self.is_truncated = False
+ self.delimiter = ''
+ self.max_keys = 0
+ self.next_marker = ''
+ self.is_echoed_prefix_set = False
+
+ def startElement(self, name, attrs):
+ if name == 'Contents':
+ self.curr_entry = ListEntry()
+ elif name == 'Owner':
+ self.curr_entry.owner = Owner()
+ elif name == 'CommonPrefixes':
+ self.curr_common_prefix = CommonPrefixEntry()
+
+
+ def endElement(self, name):
+ if name == 'Contents':
+ self.entries.append(self.curr_entry)
+ elif name == 'CommonPrefixes':
+ self.common_prefixes.append(self.curr_common_prefix)
+ elif name == 'Key':
+ self.curr_entry.key = self.curr_text
+ elif name == 'LastModified':
+ self.curr_entry.last_modified = self.curr_text
+ elif name == 'ETag':
+ self.curr_entry.etag = self.curr_text
+ elif name == 'Size':
+ self.curr_entry.size = int(self.curr_text)
+ elif name == 'ID':
+ self.curr_entry.owner.id = self.curr_text
+ elif name == 'DisplayName':
+ self.curr_entry.owner.display_name = self.curr_text
+ elif name == 'StorageClass':
+ self.curr_entry.storage_class = self.curr_text
+ elif name == 'Name':
+ self.name = self.curr_text
+ elif name == 'Prefix' and self.is_echoed_prefix_set:
+ self.curr_common_prefix.prefix = self.curr_text
+ elif name == 'Prefix':
+ self.prefix = self.curr_text
+ self.is_echoed_prefix_set = True
+ elif name == 'Marker':
+ self.marker = self.curr_text
+ elif name == 'IsTruncated':
+ self.is_truncated = self.curr_text == 'true'
+ elif name == 'Delimiter':
+ self.delimiter = self.curr_text
+ elif name == 'MaxKeys':
+ self.max_keys = int(self.curr_text)
+ elif name == 'NextMarker':
+ self.next_marker = self.curr_text
+
+ self.curr_text = ''
+
+ def characters(self, content):
+ self.curr_text += content
+
+
+class ListAllMyBucketsHandler(xml.sax.ContentHandler):
+ def __init__(self):
+ self.entries = []
+ self.curr_entry = None
+ self.curr_text = ''
+
+ def startElement(self, name, attrs):
+ if name == 'Bucket':
+ self.curr_entry = Bucket()
+
+    def endElement(self, name):
+        if name == 'Name':
+            self.curr_entry.name = self.curr_text
+        elif name == 'CreationDate':
+            self.curr_entry.creation_date = self.curr_text
+        elif name == 'Bucket':
+            self.entries.append(self.curr_entry)
+        self.curr_text = ''
+
+    def characters(self, content):
+        self.curr_text += content
+
+
+class LocationHandler(xml.sax.ContentHandler):
+ def __init__(self):
+ self.location = None
+ self.state = 'init'
+
+ def startElement(self, name, attrs):
+ if self.state == 'init':
+ if name == 'LocationConstraint':
+ self.state = 'tag_location'
+ self.location = ''
+ else: self.state = 'bad'
+ else: self.state = 'bad'
+
+ def endElement(self, name):
+ if self.state == 'tag_location' and name == 'LocationConstraint':
+ self.state = 'done'
+ else: self.state = 'bad'
+
+ def characters(self, content):
+ if self.state == 'tag_location':
+ self.location += content
diff --git a/src/amazons3/__init__.py b/src/amazons3/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/amazons3/django/__init__.py b/src/amazons3/django/__init__.py
new file mode 100644
index 0000000..3e57e15
--- /dev/null
+++ b/src/amazons3/django/__init__.py
@@ -0,0 +1,181 @@
+import os
+from StringIO import StringIO
+from django.conf import settings
+from amazons3 import S3
+
+from django.core.files.storage import Storage
+
+class S3OpenFile(StringIO):
+ """
+ Wrapper for StringIO which allows open() to be called on it.
+
+ This is for FileField form fields, which expect to be able to call open()
+ and then retrieve data from the file.
+ ** NOTE: The behavior of calling open() and then writing to the file is
+ currently unknown. **
+ """
+ def open(self, *args, **kwargs):
+ self.seek(0)
+
+class S3Error(Exception):
+ "Misc. S3 Service Error"
+ pass
+
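+# Based on the option keys used below, the settings entry this class expects
+# looks roughly like the following (values are illustrative, not part of
+# this commit):
+#
+# S3_SETTINGS = {
+#     'aws_key': 'AKIA...',
+#     'aws_secret_key': '...',
+#     'bucket': 'my-bucket',
+#     'default_perm': 'public-read',  # one of the ACLs in perm_tuple
+#     'vanity_url': '',               # or e.g. 'media.example.com'
+# }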
+class S3Storage(Storage):
+ options = None
+
+ def __init__(self, options=None):
+ if not options:
+ options = settings.S3_SETTINGS
+ self.options = options
+ self.perm_tuple = (
+ 'private',
+ 'public-read',
+ 'public-read-write',
+ 'authenticated-read'
+ )
+ if self.options['default_perm'] not in self.perm_tuple:
+ self.options['default_perm'] = 'private'
+
+ self.connect()
+
+ def connect(self):
+ self.conn = S3.AWSAuthConnection(self.options['aws_key'], self.options['aws_secret_key'])
+
+ res = self.conn.check_bucket_exists(self.options['bucket'])
+
+ if res.status != 200:
+ res = self.conn.create_bucket(self.options['bucket'])
+ if res.http_response.status != 200:
+                raise S3Error('Unable to create bucket %s' % self.options['bucket'])
+
+ return True
+
+    def exists(self, filename):
+        contents = self.conn.list_bucket(self.options['bucket'], {'prefix': os.path.dirname(filename)})
+        return filename in [f.key for f in contents.entries]
+
+ def size(self, filename):
+ contents = self.conn.list_bucket(self.options['bucket'], {'prefix': os.path.dirname(filename)} )
+ for f in contents.entries:
+ if f.key == filename:
+ return f.size
+
+ return False
+
+    def url(self, filename):
+        if self.options['vanity_url']:
+            server = self.options['vanity_url']
+        else:
+            server = self.options['bucket'] + '.s3.amazonaws.com'
+        return 'http://' + server + '/' + filename
+
+
+ def _save(self, filename, content):
+        # hack: probe whether we can set attributes on `content`; if we
+        # can't, assume it's a Django File wrapper and use its underlying
+        # file object instead
+        try:
+            content.url = self.url
+        except AttributeError:
+            content = content.file
+
+        try:
+            data = content.read()
+        except IOError as err:
+            raise S3Error('Unable to read %s: %s' % (filename, err.strerror))
+
+        # fall back to guessing a content type when the upload doesn't carry one
+        guess_type = not hasattr(content, 'content_type')
+
+ if guess_type or not content.content_type:
+ import mimetypes
+ content_type = mimetypes.guess_type(filename)[0]
+ if content_type is None:
+ content_type = 'text/plain'
+ else:
+ content_type = content.content_type
+
+ perm = self.options['default_perm']
+
+ res = self.conn.put(
+ self.options['bucket'],
+ filename,
+ S3.S3Object(data),
+ {
+ 'x-amz-acl': perm,
+ 'Content-Type': content_type
+ }
+ )
+
+ if res.http_response.status != 200:
+            raise S3Error('Unable to upload file %s to bucket %s: %s' % (filename, self.options['bucket'], res.body))
+
+
+ content.filename = filename
+ content.url = self.url(filename)
+
+ return filename
+
+ def delete(self, filename):
+ res = self.conn.delete(self.options['bucket'], filename)
+        # S3 answers a successful DELETE with 204 No Content
+        return res.http_response.status == 204
+
+ def path(self, filename):
+ raise NotImplementedError
+
+ def open(self, filename, mode):
+ from urllib import urlopen
+ # Download data from S3 and save
+ # into a file wrapper, which allows its
+ # use as normal in FileFields.
+ #
+ # Note: This saves the file data into memory.
+ data = urlopen(self.url(filename))
+ openfile = S3OpenFile()
+ openfile.write(data.read())
+ return openfile
+
+ def get_available_name(self, filename):
+        basefilename = os.path.splitext(filename)
+ i = 1
+ while self.exists(filename):
+ i += 1
+ filename = '%s-%d%s' % (basefilename[0], i, basefilename[1])
+
+ return filename
+
+class CxStorage(S3Storage):
+ """
+ This storage engine provides the naming scheme for phonese3. It hashes
+ the file names before storage.
+ To use, set DEFAULT_STORAGE_ENGINE="CxStorage"
+
+ Author: Jason Braegger
+ License: AGPLv3
+ Source: http://code.twi.gs/phonese3/
+ """
+ def get_valid_name(self, name):
+ """
+ This returns a hashed name to use for storage on the filesystem
+ """
+        from hashlib import md5
+        import time
+
+ extension = os.path.splitext(name)[1].lower()
+ # Ensure an ascii string for .hexdigest() later.
+ name = name.encode('ascii', 'ignore')
+
+        return md5(str(time.time()) + name).hexdigest() + extension
diff --git a/src/amazons3/s3-driver.py b/src/amazons3/s3-driver.py
new file mode 100644
index 0000000..29f700b
--- /dev/null
+++ b/src/amazons3/s3-driver.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
+# affiliates.
+
+import S3
+import time
+import sys
+
+AWS_ACCESS_KEY_ID = ''
+AWS_SECRET_ACCESS_KEY = ''
+# remove these next two lines when you've updated your credentials.
+print "update s3-driver.py with your AWS credentials"
+sys.exit()
+
+# convert the bucket to lowercase for vanity domains
+# the bucket name must be lowercase since DNS is case-insensitive
+BUCKET_NAME = AWS_ACCESS_KEY_ID.lower() + '-test-bucket'
+KEY_NAME = 'test-key'
+
+conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+generator = S3.QueryStringAuthGenerator(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+
+
+# Check if the bucket exists. The high availability engineering of
+# Amazon S3 is focused on get, put, list, and delete operations.
+# Because bucket operations work against a centralized, global
+# resource space, it is not appropriate to make bucket create or
+# delete calls on the high availability code path of your application.
+# It is better to create or delete buckets in a separate initialization
+# or setup routine that you run less often.
+if (conn.check_bucket_exists(BUCKET_NAME).status == 200):
+ print '----- bucket already exists! -----'
+else:
+ print '----- creating bucket -----'
+ print conn.create_located_bucket(BUCKET_NAME, S3.Location.DEFAULT).message
+ # to create an EU bucket
+ #print conn.create_located_bucket(BUCKET_NAME, S3.Location.EU).message
+
+print '----- bucket location -----'
+print conn.get_bucket_location(BUCKET_NAME).location
+
+print '----- listing bucket -----'
+print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)
+
+print '----- putting object (with content type) -----'
+print conn.put(
+ BUCKET_NAME,
+ KEY_NAME,
+ S3.S3Object('this is a test'),
+ { 'Content-Type': 'text/plain' }).message
+
+print '----- listing bucket -----'
+print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)
+
+print '----- getting object -----'
+print conn.get(BUCKET_NAME, KEY_NAME).object.data
+
+print '----- query string auth example -----'
+print "\nTry this url out in your browser (it will only be valid for 60 seconds).\n"
+generator.set_expires_in(60)
+url = generator.get(BUCKET_NAME, KEY_NAME)
+print url
+print '\npress enter> ',
+sys.stdin.readline()
+
+print "\nNow try just the url without the query string arguments. It should fail.\n"
+print generator.make_bare_url(BUCKET_NAME, KEY_NAME)
+print '\npress enter> ',
+sys.stdin.readline()
+
+
+print '----- putting object with metadata and public read acl -----'
+print conn.put(
+ BUCKET_NAME,
+ KEY_NAME + '-public',
+ S3.S3Object('this is a publicly readable test'),
+ { 'x-amz-acl': 'public-read' , 'Content-Type': 'text/plain' }
+).message
+
+print '----- anonymous read test ----'
+print "\nYou should be able to try this in your browser\n"
+public_key = KEY_NAME + '-public'
+print generator.make_bare_url(BUCKET_NAME, public_key)
+print "\npress enter> ",
+sys.stdin.readline()
+
+print "----- getting object's acl -----"
+print conn.get_acl(BUCKET_NAME, KEY_NAME).object.data
+
+print "\n----- path style url example -----"
+print "Non-location-constrained buckets can also be specified as part of the url path. (This was the original url style supported by S3.)\n"
+print "Try this url out in your browser (it will only be valid for 60 seconds).\n"
+generator.calling_format = S3.CallingFormat.PATH
+url = generator.get(BUCKET_NAME, KEY_NAME)
+print url
+print "\npress enter> ",
+sys.stdin.readline()
+
+print '----- deleting objects -----'
+print conn.delete(BUCKET_NAME, KEY_NAME).message
+print conn.delete(BUCKET_NAME, KEY_NAME + '-public').message
+
+print '----- listing bucket -----'
+print map(lambda x: x.key, conn.list_bucket(BUCKET_NAME).entries)
+
+print '----- listing all my buckets -----'
+print map(lambda x: x.name, conn.list_all_my_buckets().entries)
+
+print '----- deleting bucket ------'
+print conn.delete_bucket(BUCKET_NAME).message
+
diff --git a/src/amazons3/s3-test.py b/src/amazons3/s3-test.py
new file mode 100644
index 0000000..fbd8d9c
--- /dev/null
+++ b/src/amazons3/s3-test.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
+# affiliates.
+
+import unittest
+import S3
+import httplib
+import sys
+
+AWS_ACCESS_KEY_ID = ''
+AWS_SECRET_ACCESS_KEY = ''
+# remove these next two lines when you've updated your credentials.
+print "update s3-test.py with your AWS credentials"
+sys.exit()
+
+# for subdomains (bucket.s3.amazonaws.com),
+# the bucket name must be lowercase since DNS is case-insensitive
+BUCKET_NAME = "%s-test-bucket" % AWS_ACCESS_KEY_ID.lower()
+
+
+class TestAWSAuthConnection(unittest.TestCase):
+ def setUp(self):
+ self.conn = S3.AWSAuthConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+
+ # test all operations for both regular and vanity domains
+ # regular: http://s3.amazonaws.com/bucket/key
+ # subdomain: http://bucket.s3.amazonaws.com/key
+    # testing pure vanity domains (http://<vanity domain>/key) is not covered here
+    # but is possible with some additional setup (point self.conn's server at your vanity domain)
+
+ def test_subdomain_default(self):
+ self.run_tests(S3.CallingFormat.SUBDOMAIN, S3.Location.DEFAULT)
+
+ def test_subdomain_eu(self):
+ self.run_tests(S3.CallingFormat.SUBDOMAIN, S3.Location.EU)
+
+ def test_path_default(self):
+ self.run_tests(S3.CallingFormat.PATH, S3.Location.DEFAULT)
+
+
+ def run_tests(self, calling_format, location):
+ self.conn.calling_format = calling_format
+
+ response = self.conn.create_located_bucket(BUCKET_NAME, location)
+ self.assertEquals(response.http_response.status, 200, 'create bucket')
+
+ response = self.conn.list_bucket(BUCKET_NAME)
+ self.assertEquals(response.http_response.status, 200, 'list bucket')
+ self.assertEquals(len(response.entries), 0, 'bucket is empty')
+
+ text = 'this is a test'
+ key = 'example.txt'
+
+ response = self.conn.put(BUCKET_NAME, key, text)
+ self.assertEquals(response.http_response.status, 200, 'put with a string argument')
+
+ response = \
+ self.conn.put(
+ BUCKET_NAME,
+ key,
+ S3.S3Object(text, {'title': 'title'}),
+ {'Content-Type': 'text/plain'})
+
+ self.assertEquals(response.http_response.status, 200, 'put with complex argument and headers')
+
+ response = self.conn.get(BUCKET_NAME, key)
+ self.assertEquals(response.http_response.status, 200, 'get object')
+ self.assertEquals(response.object.data, text, 'got right data')
+ self.assertEquals(response.object.metadata, { 'title': 'title' }, 'metadata is correct')
+ self.assertEquals(int(response.http_response.getheader('Content-Length')), len(text), 'got content-length header')
+
+ title_with_spaces = " \t title with leading and trailing spaces "
+ response = \
+ self.conn.put(
+ BUCKET_NAME,
+ key,
+ S3.S3Object(text, {'title': title_with_spaces}),
+ {'Content-Type': 'text/plain'})
+
+ self.assertEquals(response.http_response.status, 200, 'put with headers with spaces')
+
+ response = self.conn.get(BUCKET_NAME, key)
+ self.assertEquals(response.http_response.status, 200, 'get object')
+ self.assertEquals(
+ response.object.metadata,
+ { 'title': title_with_spaces.strip() },
+ 'metadata with spaces is correct')
+
+ # delimited list tests
+ inner_key = 'test/inner.txt'
+ last_key = 'z-last-key.txt'
+ response = self.conn.put(BUCKET_NAME, inner_key, text)
+ self.assertEquals(response.http_response.status, 200, 'put inner key')
+
+ response = self.conn.put(BUCKET_NAME, last_key, text)
+ self.assertEquals(response.http_response.status, 200, 'put last key')
+
+ response = self.do_delimited_list(BUCKET_NAME, False, {'delimiter': '/'}, 2, 1, 'root list')
+
+ response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 1, 'delimiter': '/'}, 1, 0, 'root list with max keys of 1', 'example.txt')
+
+ response = self.do_delimited_list(BUCKET_NAME, True, {'max-keys': 2, 'delimiter': '/'}, 1, 1, 'root list with max keys of 2, page 1', 'test/')
+
+ marker = response.next_marker
+
+ response = self.do_delimited_list(BUCKET_NAME, False, {'marker': marker, 'max-keys': 2, 'delimiter': '/'}, 1, 0, 'root list with max keys of 2, page 2')
+
+ response = self.do_delimited_list(BUCKET_NAME, False, {'prefix': 'test/', 'delimiter': '/'}, 1, 0, 'test/ list')
+
+ response = self.conn.delete(BUCKET_NAME, inner_key)
+ self.assertEquals(response.http_response.status, 204, 'delete %s' % inner_key)
+
+ response = self.conn.delete(BUCKET_NAME, last_key)
+ self.assertEquals(response.http_response.status, 204, 'delete %s' % last_key)
+
+
+ weird_key = '&=//%# ++++'
+
+ response = self.conn.put(BUCKET_NAME, weird_key, text)
+ self.assertEquals(response.http_response.status, 200, 'put weird key')
+
+ response = self.conn.get(BUCKET_NAME, weird_key)
+ self.assertEquals(response.http_response.status, 200, 'get weird key')
+
+ response = self.conn.get_acl(BUCKET_NAME, key)
+ self.assertEquals(response.http_response.status, 200, 'get acl')
+
+ acl = response.object.data
+
+ response = self.conn.put_acl(BUCKET_NAME, key, acl)
+ self.assertEquals(response.http_response.status, 200, 'put acl')
+
+ response = self.conn.get_bucket_acl(BUCKET_NAME)
+ self.assertEquals(response.http_response.status, 200, 'get bucket acl')
+
+ bucket_acl = response.object.data
+
+ response = self.conn.put_bucket_acl(BUCKET_NAME, bucket_acl)
+ self.assertEquals(response.http_response.status, 200, 'put bucket acl')
+
+        response = self.conn.get_bucket_logging(BUCKET_NAME)
+        self.assertEquals(response.http_response.status, 200, 'get bucket logging')
+
+        bucket_logging = response.object.data
+
+        response = self.conn.put_bucket_logging(BUCKET_NAME, bucket_logging)
+        self.assertEquals(response.http_response.status, 200, 'put bucket logging')
+
+ response = self.conn.list_bucket(BUCKET_NAME)
+ self.assertEquals(response.http_response.status, 200, 'list bucket')
+ entries = response.entries
+ self.assertEquals(len(entries), 2, 'got back right number of keys')
+ # depends on weird_key < key
+ self.assertEquals(entries[0].key, weird_key, 'first key is right')
+ self.assertEquals(entries[1].key, key, 'second key is right')
+
+ response = self.conn.list_bucket(BUCKET_NAME, {'max-keys': 1})
+ self.assertEquals(response.http_response.status, 200, 'list bucket with args')
+ self.assertEquals(len(response.entries), 1, 'got back right number of keys')
+
+ for entry in entries:
+ response = self.conn.delete(BUCKET_NAME, entry.key)
+ self.assertEquals(response.http_response.status, 204, 'delete %s' % entry.key)
+
+ response = self.conn.list_all_my_buckets()
+ self.assertEquals(response.http_response.status, 200, 'list all my buckets')
+ buckets = response.entries
+
+ response = self.conn.delete_bucket(BUCKET_NAME)
+ self.assertEquals(response.http_response.status, 204, 'delete bucket')
+
+ response = self.conn.list_all_my_buckets()
+ self.assertEquals(response.http_response.status, 200, 'list all my buckets again')
+
+ self.assertEquals(len(response.entries), len(buckets) - 1, 'bucket count is correct')
+
+ def verify_list_bucket_response(self, response, bucket, is_truncated, parameters, next_marker=''):
+ prefix = ''
+ marker = ''
+
+ if parameters.has_key('prefix'):
+ prefix = parameters['prefix']
+ if parameters.has_key('marker'):
+ marker = parameters['marker']
+
+ self.assertEquals(bucket, response.name, 'bucket name should match')
+ self.assertEquals(prefix, response.prefix, 'prefix should match')
+ self.assertEquals(marker, response.marker, 'marker should match')
+ if parameters.has_key('max-keys'):
+ self.assertEquals(parameters['max-keys'], response.max_keys, 'max-keys should match')
+ self.assertEquals(parameters['delimiter'], response.delimiter, 'delimiter should match')
+ self.assertEquals(is_truncated, response.is_truncated, 'is_truncated should match')
+ self.assertEquals(next_marker, response.next_marker, 'next_marker should match')
+
+ def do_delimited_list(self, bucket_name, is_truncated, parameters, regular_expected, common_expected, test_name, next_marker=''):
+ response = self.conn.list_bucket(bucket_name, parameters)
+ self.assertEquals(response.http_response.status, 200, test_name)
+ self.assertEquals(regular_expected, len(response.entries), 'right number of regular entries')
+ self.assertEquals(common_expected, len(response.common_prefixes), 'right number of common prefixes')
+
+ self.verify_list_bucket_response(response, bucket_name, is_truncated, parameters, next_marker)
+
+ return response
+
+class TestQueryStringAuthGenerator(unittest.TestCase):
+ def setUp(self):
+ self.generator = S3.QueryStringAuthGenerator(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+        if self.generator.is_secure:
+ self.connection = httplib.HTTPSConnection(self.generator.server_name)
+ else:
+ self.connection = httplib.HTTPConnection(self.generator.server_name)
+
+ def check_url(self, url, method, status, message, data=''):
+ if (method == 'PUT'):
+ headers = { 'Content-Length': len(data) }
+ self.connection.request(method, url, data, headers)
+ else:
+ self.connection.request(method, url)
+
+ response = self.connection.getresponse()
+ self.assertEquals(response.status, status, message)
+
+ return response.read()
+
+ # test all operations for both regular and vanity domains
+ # regular: http://s3.amazonaws.com/bucket/key
+ # subdomain: http://bucket.s3.amazonaws.com/key
+    # testing pure vanity domains (http://<vanity domain>/key) is not covered here
+    # but is possible with some additional setup (point the generator's server at your vanity domain)
+
+ def test_subdomain(self):
+ self.run_tests(S3.CallingFormat.SUBDOMAIN)
+
+ def test_path(self):
+ self.run_tests(S3.CallingFormat.PATH)
+
+ def run_tests(self, calling_format):
+ self.generator.calling_format = calling_format
+
+ key = 'test'
+
+ self.check_url(self.generator.create_bucket(BUCKET_NAME), 'PUT', 200, 'create_bucket')
+ self.check_url(self.generator.put(BUCKET_NAME, key, ''), 'PUT', 200, 'put object', 'test data')
+ self.check_url(self.generator.get(BUCKET_NAME, key), 'GET', 200, 'get object')
+ self.check_url(self.generator.list_bucket(BUCKET_NAME), 'GET', 200, 'list bucket')
+ self.check_url(self.generator.list_all_my_buckets(), 'GET', 200, 'list all my buckets')
+ acl = self.check_url(self.generator.get_acl(BUCKET_NAME, key), 'GET', 200, 'get acl')
+ self.check_url(self.generator.put_acl(BUCKET_NAME, key, acl), 'PUT', 200, 'put acl', acl)
+ bucket_acl = self.check_url(self.generator.get_bucket_acl(BUCKET_NAME), 'GET', 200, 'get bucket acl')
+ self.check_url(self.generator.put_bucket_acl(BUCKET_NAME, bucket_acl), 'PUT', 200, 'put bucket acl', bucket_acl)
+ bucket_logging = self.check_url(self.generator.get_bucket_logging(BUCKET_NAME), 'GET', 200, 'get bucket logging')
+ self.check_url(self.generator.put_bucket_logging(BUCKET_NAME, bucket_logging), 'PUT', 200, 'put bucket logging', bucket_logging)
+ self.check_url(self.generator.delete(BUCKET_NAME, key), 'DELETE', 204, 'delete object')
+ self.check_url(self.generator.delete_bucket(BUCKET_NAME), 'DELETE', 204, 'delete bucket')
+
+
+if __name__ == '__main__':
+ unittest.main()
+
+