File s3-aws4.diff of Package libs3
Squashed 15 commits from
https://github.com/bji/libs3/pull/50/ (adds AWS Signature Version 4 request signing)
diff --git a/GNUmakefile b/GNUmakefile
index 4d3e2e5..ab64908 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -38,7 +38,7 @@
# --------------------------------------------------------------------------
# Set libs3 version number, unless it is already set.
-LIBS3_VER_MAJOR ?= 2
+LIBS3_VER_MAJOR ?= 3
LIBS3_VER_MINOR ?= 0
LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
@@ -119,6 +119,9 @@ ifndef LIBXML2_CFLAGS
LIBXML2_CFLAGS := $(shell xml2-config --cflags)
endif
+ifndef OPENSSL_LIBS
+ OPENSSL_LIBS := -lssl -lcrypto
+endif
# --------------------------------------------------------------------------
# These CFLAGS assume a GNU compiler. For other compilers, write a script
@@ -142,7 +145,7 @@ CFLAGS += -Wall -Werror -Wshadow -Wextra -Iinc \
-D_ISOC99_SOURCE \
-D_POSIX_C_SOURCE=200112L
-LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) -lpthread
+LDFLAGS = $(CURL_LIBS) $(LIBXML2_LIBS) $(OPENSSL_LIBS) -lpthread
# --------------------------------------------------------------------------
diff --git a/GNUmakefile.mingw b/GNUmakefile.mingw
index f9ff2ba..b2712f4 100644
--- a/GNUmakefile.mingw
+++ b/GNUmakefile.mingw
@@ -38,7 +38,7 @@
# --------------------------------------------------------------------------
# Set libs3 version number, unless it is already set.
-LIBS3_VER_MAJOR ?= 2
+LIBS3_VER_MAJOR ?= 3
LIBS3_VER_MINOR ?= 0
LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
diff --git a/GNUmakefile.osx b/GNUmakefile.osx
index 76a45e9..103615a 100644
--- a/GNUmakefile.osx
+++ b/GNUmakefile.osx
@@ -38,7 +38,7 @@
# --------------------------------------------------------------------------
# Set libs3 version number, unless it is already set.
-LIBS3_VER_MAJOR ?= 2
+LIBS3_VER_MAJOR ?= 3
LIBS3_VER_MINOR ?= 0
LIBS3_VER := $(LIBS3_VER_MAJOR).$(LIBS3_VER_MINOR)
@@ -102,6 +102,7 @@ endif
ifndef LIBXML2_LIBS
LIBXML2_LIBS := $(shell xml2-config --libs)
+ LIBXML2_LIBS := $(filter-out -L$(shell xcrun --show-sdk-path)/usr/lib, $(LIBXML2_LIBS))
endif
ifndef LIBXML2_CFLAGS
@@ -274,7 +275,6 @@ $(BUILD)/bin/testsimplexml: $(BUILD)/obj/testsimplexml.o $(LIBS3_STATIC)
@ mkdir -p $(dir $@)
$(VERBOSE_SHOW) gcc -o $@ $^ $(LIBXML2_LIBS)
-
# --------------------------------------------------------------------------
# Clean target
diff --git a/inc/libs3.h b/inc/libs3.h
index 4e0aaad..4e678cd 100644
--- a/inc/libs3.h
+++ b/inc/libs3.h
@@ -228,6 +228,12 @@ extern "C" {
#define S3_INIT_ALL (S3_INIT_WINSOCK)
+/**
+ * The default region identifier used to scope the signing key
+ */
+#define S3_DEFAULT_REGION "us-east-1"
+
+
/** **************************************************************************
* Enumerations
************************************************************************** **/
@@ -711,6 +717,12 @@ typedef struct S3BucketContext
* The Amazon Security Token used to generate Temporary Security Credentials
**/
const char *securityToken;
+
+ /**
+ * The AWS region to which to scope the signing key used for authorization.
+ * If NULL, the default region ("us-east-1") will be used.
+ */
+ const char *authRegion;
} S3BucketContext;
@@ -1709,10 +1721,12 @@ void S3_set_request_context_verify_peer(S3RequestContext *requestContext,
* @param expires gives the number of seconds since Unix epoch for the
* expiration date of the request; after this time, the request will
* no longer be valid. If this value is negative, the largest
- * expiration date possible is used (currently, Jan 19, 2038).
+ * expiration interval possible is used (one week).
* @param resource gives a sub-resource to be fetched for the request, or NULL
- * for none. This should be of the form "?<resource>", i.e.
+ * for none. This should be of the form "?<resource>", i.e.
* "?torrent".
+ * @param httpMethod the HTTP request method that will be used with the
+ * generated query string (e.g. "GET").
* @return One of:
* S3StatusUriTooLong if, due to an internal error, the generated URI
* is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
@@ -1721,7 +1735,8 @@ void S3_set_request_context_verify_peer(S3RequestContext *requestContext,
**/
S3Status S3_generate_authenticated_query_string
(char *buffer, const S3BucketContext *bucketContext,
- const char *key, int64_t expires, const char *resource);
+ const char *key, int expires, const char *resource,
+ const char *httpMethod);
/** **************************************************************************
@@ -1740,6 +1755,7 @@ S3Status S3_generate_authenticated_query_string
* Security Credentials
* @param hostName is the S3 host name to use; if NULL is passed in, the
* default S3 host as provided to S3_initialize() will be used.
+ * @param authRegion is the AWS region to use for the authorization signature
* @param requestContext if non-NULL, gives the S3RequestContext to add this
* request to, and does not perform the request immediately. If NULL,
* performs the request immediately and synchronously.
@@ -1750,10 +1766,10 @@ S3Status S3_generate_authenticated_query_string
**/
void S3_list_service(S3Protocol protocol, const char *accessKeyId,
const char *secretAccessKey, const char *securityToken,
- const char *hostName, S3RequestContext *requestContext,
- const S3ListServiceHandler *handler,
- void *callbackData);
-
+ const char *hostName, const char *authRegion,
+ S3RequestContext *requestContext,
+ const S3ListServiceHandler *handler, void *callbackData);
+
/** **************************************************************************
* Bucket Functions
@@ -1773,6 +1789,7 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId,
* @param hostName is the S3 host name to use; if NULL is passed in, the
* default S3 host as provided to S3_initialize() will be used.
* @param bucketName is the bucket name to test
+ * @param authRegion is the AWS region to use for the authorization signature
* @param locationConstraintReturnSize gives the number of bytes in the
* locationConstraintReturn parameter
* @param locationConstraintReturn provides the location into which to write
@@ -1792,13 +1809,14 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId,
**/
void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
const char *accessKeyId, const char *secretAccessKey,
- const char *securityToken, const char *hostName,
- const char *bucketName, int locationConstraintReturnSize,
+ const char *securityToken, const char *hostName,
+ const char *bucketName, const char *authRegion,
+ int locationConstraintReturnSize,
char *locationConstraintReturn,
S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData);
-
+
/**
* Creates a new bucket.
*
@@ -1812,6 +1830,7 @@ void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
* @param hostName is the S3 host name to use; if NULL is passed in, the
* default S3 host as provided to S3_initialize() will be used.
* @param bucketName is the name of the bucket to be created
+ * @param authRegion is the AWS region to use for the authorization signature
* @param cannedAcl gives the "REST canned ACL" to use for the created bucket
* @param locationConstraint if non-NULL, gives the geographic location for
* the bucket to create.
@@ -1826,7 +1845,8 @@ void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
const char *secretAccessKey, const char *securityToken,
const char *hostName, const char *bucketName,
- S3CannedAcl cannedAcl, const char *locationConstraint,
+ const char *authRegion, S3CannedAcl cannedAcl,
+ const char *locationConstraint,
S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData);
@@ -1846,6 +1866,7 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
* @param hostName is the S3 host name to use; if NULL is passed in, the
* default S3 host as provided to S3_initialize() will be used.
* @param bucketName is the name of the bucket to be deleted
+ * @param authRegion is the AWS region to use for the authorization signature
* @param requestContext if non-NULL, gives the S3RequestContext to add this
* request to, and does not perform the request immediately. If NULL,
* performs the request immediately and synchronously.
@@ -1856,8 +1877,9 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
**/
void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
const char *accessKeyId, const char *secretAccessKey,
- const char *securityToken, const char *hostName,
- const char *bucketName, S3RequestContext *requestContext,
+ const char *securityToken, const char *hostName,
+ const char *bucketName, const char *authRegion,
+ S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData);
@@ -1917,7 +1939,7 @@ void S3_put_object(const S3BucketContext *bucketContext, const char *key,
const S3PutProperties *putProperties,
S3RequestContext *requestContext,
const S3PutObjectHandler *handler, void *callbackData);
-
+
/**
* Copies an object from one location to another. The object may be copied
diff --git a/inc/request.h b/inc/request.h
index b54e945..7d9cb03 100644
--- a/inc/request.h
+++ b/inc/request.h
@@ -40,7 +40,8 @@ typedef enum
HttpRequestTypePUT,
HttpRequestTypeCOPY,
HttpRequestTypeDELETE,
- HttpRequestTypePOST
+ HttpRequestTypePOST,
+ HttpRequestTypeInvalid
} HttpRequestType;
@@ -74,10 +75,10 @@ typedef struct RequestParams
const S3GetConditions *getConditions;
// Start byte
- uint64_t startByte;
+ size_t startByte;
// Byte count
- uint64_t byteCount;
+ size_t byteCount;
// Put properties
const S3PutProperties *putProperties;
diff --git a/inc/util.h b/inc/util.h
index 854b9d9..9a65229 100644
--- a/inc/util.h
+++ b/inc/util.h
@@ -40,7 +40,6 @@
#define ACS_GROUP_LOG_DELIVERY ACS_URL "s3/LogDelivery"
-
// Derived from S3 documentation
// This is the maximum number of bytes needed in a "compacted meta header"
@@ -64,6 +63,12 @@
#define MAX_CANONICALIZED_RESOURCE_SIZE \
(1 + 255 + 1 + MAX_URLENCODED_KEY_SIZE + (sizeof("?torrent") - 1) + 1)
+#define MAX_ACCESS_KEY_ID_LENGTH 32
+
+// Maximum length of a credential string
+// <access key>/<yyyymmdd>/<region>/s3/aws4_request
+#define MAX_CREDENTIAL_SIZE \
+    ((MAX_ACCESS_KEY_ID_LENGTH + 1) + 8 + 1 + 32 + sizeof("/s3/aws4_request"))
// Utilities -----------------------------------------------------------------
@@ -78,19 +83,6 @@ int64_t parseIso8601Time(const char *str);
uint64_t parseUnsignedInt(const char *str);
-// base64 encode bytes. The output buffer must have at least
-// ((4 * (inLen + 1)) / 3) bytes in it. Returns the number of bytes written
-// to [out].
-int base64Encode(const unsigned char *in, int inLen, char *out);
-
-// Compute HMAC-SHA-1 with key [key] and message [message], storing result
-// in [hmac]
-void HMAC_SHA1(unsigned char hmac[20], const unsigned char *key, int key_len,
- const unsigned char *message, int message_len);
-
-// Compute a 64-bit hash values given a set of bytes
-uint64_t hash(const unsigned char *k, int length);
-
// Because Windows seems to be missing isblank(), use our own; it's a very
// easy function to write in any case
int is_blank(char c);
diff --git a/src/acl.c b/src/acl.c
index b27b8db..c2eeb08 100644
--- a/src/acl.c
+++ b/src/acl.c
@@ -128,7 +128,8 @@ void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
"acl", // subResource
@@ -299,7 +300,7 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
(*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
return;
}
-
+
// Convert aclGrants to XML document
S3Status status = generateAclXmlDocument
(ownerId, ownerDisplayName, aclGrantCount, aclGrants,
@@ -327,7 +328,8 @@ void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
"acl", // subResource
diff --git a/src/bucket.c b/src/bucket.c
index dcbcf2f..5f998ab 100644
--- a/src/bucket.c
+++ b/src/bucket.c
@@ -107,7 +107,8 @@ static void testBucketCompleteCallback(S3Status requestStatus,
void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
const char *accessKeyId, const char *secretAccessKey,
const char *securityToken, const char *hostName,
- const char *bucketName, int locationConstraintReturnSize,
+ const char *bucketName, const char *authRegion,
+ int locationConstraintReturnSize,
char *locationConstraintReturn,
S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData)
@@ -140,7 +141,8 @@ void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
uriStyle, // uriStyle
accessKeyId, // accessKeyId
secretAccessKey, // secretAccessKey
- securityToken }, // securityToken
+ securityToken, // securityToken
+ authRegion }, // authRegion
0, // key
0, // queryParams
"location", // subResource
@@ -223,11 +225,11 @@ static void createBucketCompleteCallback(S3Status requestStatus,
free(cbData);
}
-
void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
const char *secretAccessKey, const char *securityToken,
const char *hostName, const char *bucketName,
- S3CannedAcl cannedAcl, const char *locationConstraint,
+ const char *authRegion, S3CannedAcl cannedAcl,
+ const char *locationConstraint,
S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData)
{
@@ -254,7 +256,7 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
else {
cbData->docLen = 0;
}
-
+
// Set up S3PutProperties
S3PutProperties properties =
{
@@ -269,7 +271,7 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
0, // metaData
0 // useServerSideEncryption
};
-
+
// Set up the RequestParams
RequestParams params =
{
@@ -280,7 +282,8 @@ void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
S3UriStylePath, // uriStyle
accessKeyId, // accessKeyId
secretAccessKey, // secretAccessKey
- securityToken }, // securityToken
+ securityToken, // securityToken
+ authRegion }, // authRegion
0, // key
0, // queryParams
0, // subResource
@@ -317,7 +320,7 @@ static S3Status deleteBucketPropertiesCallback
(const S3ResponseProperties *responseProperties, void *callbackData)
{
DeleteBucketData *dbData = (DeleteBucketData *) callbackData;
-
+
return (*(dbData->responsePropertiesCallback))
(responseProperties, dbData->callbackData);
}
@@ -338,12 +341,13 @@ static void deleteBucketCompleteCallback(S3Status requestStatus,
void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
const char *accessKeyId, const char *secretAccessKey,
- const char *securityToken, const char *hostName,
- const char *bucketName, S3RequestContext *requestContext,
+ const char *securityToken, const char *hostName,
+ const char *bucketName, const char *authRegion,
+ S3RequestContext *requestContext,
const S3ResponseHandler *handler, void *callbackData)
{
// Create the callback data
- DeleteBucketData *dbData =
+ DeleteBucketData *dbData =
(DeleteBucketData *) malloc(sizeof(DeleteBucketData));
if (!dbData) {
(*(handler->completeCallback))(S3StatusOutOfMemory, 0, callbackData);
@@ -364,7 +368,8 @@ void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
uriStyle, // uriStyle
accessKeyId, // accessKeyId
secretAccessKey, // secretAccessKey
- securityToken }, // securityToken
+ securityToken, // securityToken
+ authRegion }, // authRegion
0, // key
0, // queryParams
0, // subResource
@@ -640,7 +645,7 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix,
// Compose the query params
string_buffer(queryParams, 4096);
string_buffer_initialize(queryParams);
-
+
#define safe_append(name, value) \
do { \
int fit; \
@@ -702,7 +707,7 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix,
}
simplexml_initialize(&(lbData->simpleXml), &listBucketXmlCallback, lbData);
-
+
lbData->responsePropertiesCallback =
handler->responseHandler.propertiesCallback;
lbData->listBucketCallback = handler->listBucketCallback;
@@ -724,7 +729,8 @@ void S3_list_bucket(const S3BucketContext *bucketContext, const char *prefix,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
0, // key
queryParams[0] ? queryParams : 0, // queryParams
0, // subResource
diff --git a/src/general.c b/src/general.c
index e5f8696..fa55939 100644
--- a/src/general.c
+++ b/src/general.c
@@ -184,7 +184,7 @@ const char *S3_get_status_name(S3Status status)
handlecase(ErrorUnexpectedContent);
handlecase(ErrorUnresolvableGrantByEmailAddress);
handlecase(ErrorUserKeyMustBeSpecified);
- handlecase(ErrorUnknown);
+ handlecase(ErrorUnknown);
handlecase(HttpErrorMovedTemporarily);
handlecase(HttpErrorBadRequest);
handlecase(HttpErrorForbidden);
diff --git a/src/multipart.c b/src/multipart.c
index b21e0f5..3101d0b 100644
--- a/src/multipart.c
+++ b/src/multipart.c
@@ -112,12 +112,13 @@ void S3_initiate_multipart(S3BucketContext *bucketContext, const char *key,
{
HttpRequestTypePOST, // httpRequestType
{ bucketContext->hostName, // hostName
- bucketContext->bucketName, // bucketName
+ bucketContext->bucketName, // bucketName
bucketContext->protocol, // protocol
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey, //secretAccessKey
- bucketContext->securityToken }, // secretToken
+ bucketContext->secretAccessKey, // secretAccessKey
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
"uploads", // subResource
@@ -155,8 +156,9 @@ void S3_abort_multipart_upload(S3BucketContext *bucketContext, const char *key,
bucketContext->protocol, // protocol
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey, //secretAccessKey
- bucketContext->securityToken }, // secretToken
+ bucketContext->secretAccessKey, // secretAccessKey
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
subResource, // subResource
@@ -189,8 +191,8 @@ void S3_upload_part(S3BucketContext *bucketContext, const char *key,
const char *upload_id, int partContentLength,
S3RequestContext *requestContext, void *callbackData)
{
- char subResource[512];
- snprintf(subResource, 512, "partNumber=%d&uploadId=%s", seq, upload_id);
+ char queryParams[512];
+ snprintf(queryParams, 512, "partNumber=%d&uploadId=%s", seq, upload_id);
RequestParams params =
{
@@ -200,11 +202,12 @@ void S3_upload_part(S3BucketContext *bucketContext, const char *key,
bucketContext->protocol, // protocol
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey, //secretAccessKey
- bucketContext->securityToken }, // secretToken
+ bucketContext->secretAccessKey, // secretAccessKey
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
- 0, // queryParams
- subResource, // subResource
+ queryParams, // queryParams
+ 0, // subResource
0, // copySourceBucketName
0, // copySourceKey
0, // getConditions
@@ -317,9 +320,9 @@ void S3_complete_multipart_upload(S3BucketContext *bucketContext,
S3RequestContext *requestContext,
void *callbackData)
{
- char subResource[512];
- snprintf(subResource, 512, "uploadId=%s", upload_id);
- CommitMultiPartData *data =
+ char queryParams[512];
+ snprintf(queryParams, 512, "uploadId=%s", upload_id);
+ CommitMultiPartData *data =
(CommitMultiPartData *) malloc(sizeof(CommitMultiPartData));
data->userdata = callbackData;
data->handler = handler;
@@ -337,11 +340,12 @@ void S3_complete_multipart_upload(S3BucketContext *bucketContext,
bucketContext->protocol, // protocol
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
- bucketContext->secretAccessKey, //secretAccessKey
- bucketContext->securityToken }, // secretToken
+ bucketContext->secretAccessKey, // secretAccessKey
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
- 0, // queryParams
- subResource, // subResource
+ queryParams, // queryParams
+ 0, // subResource
0, // copySourceBucketName
0, // copySourceKey
0, // getConditions
@@ -930,7 +934,8 @@ void S3_list_multipart_uploads(S3BucketContext *bucketContext,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
0, // key
queryParams[0] ? queryParams : 0, // queryParams
"uploads", // subResource
@@ -1051,7 +1056,8 @@ void S3_list_parts(S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
queryParams[0] ? queryParams : 0, // queryParams
subResource, // subResource
diff --git a/src/object.c b/src/object.c
index 445b067..cc2be6e 100644
--- a/src/object.c
+++ b/src/object.c
@@ -48,7 +48,8 @@ void S3_put_object(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
0, // subResource
@@ -187,9 +188,10 @@ void S3_copy_object(const S3BucketContext *bucketContext, const char *key,
void S3_copy_object_range(const S3BucketContext *bucketContext, const char *key,
- const char *destinationBucket, const char *destinationKey,
- const int partNo, const char *uploadId,
- const unsigned long startOffset, const unsigned long count,
+ const char *destinationBucket,
+ const char *destinationKey, const int partNo,
+ const char *uploadId, const unsigned long startOffset,
+ const unsigned long count,
const S3PutProperties *putProperties,
int64_t *lastModifiedReturn, int eTagReturnSize,
char *eTagReturn, S3RequestContext *requestContext,
@@ -219,11 +221,11 @@ void S3_copy_object_range(const S3BucketContext *bucketContext, const char *key,
string_buffer_initialize(data->lastModified);
// If there's a sequence ID > 0 then add a subResource, OTW pass in NULL
- char subResource[512];
- char *subRsrc = NULL;
+ char queryParams[512];
+ char *qp = NULL;
if (partNo > 0) {
- snprintf(subResource, 512, "partNumber=%d&uploadId=%s", partNo, uploadId);
- subRsrc = subResource;
+ snprintf(queryParams, 512, "partNumber=%d&uploadId=%s", partNo, uploadId);
+ qp = queryParams;
}
// Set up the RequestParams
@@ -237,10 +239,11 @@ void S3_copy_object_range(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
destinationKey ? destinationKey : key, // key
- 0, // queryParams
- subRsrc, // subResource
+ qp, // queryParams
+ 0, // subResource
bucketContext->bucketName, // copySourceBucketName
key, // copySourceKey
0, // getConditions
@@ -278,7 +281,8 @@ void S3_get_object(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
0, // subResource
@@ -317,7 +321,8 @@ void S3_head_object(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
0, // subResource
@@ -338,7 +343,7 @@ void S3_head_object(const S3BucketContext *bucketContext, const char *key,
// Perform the request
request_perform(¶ms, requestContext);
}
-
+
// delete object --------------------------------------------------------------
@@ -356,7 +361,8 @@ void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
key, // key
0, // queryParams
0, // subResource
diff --git a/src/request.c b/src/request.c
index efb31db..4056713 100644
--- a/src/request.c
+++ b/src/request.c
@@ -32,11 +32,22 @@
#include "request.h"
#include "request_context.h"
#include "response_headers_handler.h"
-#include "util.h"
+#ifdef __APPLE__
+#include <CommonCrypto/CommonHMAC.h>
+#define S3_SHA256_DIGEST_LENGTH CC_SHA256_DIGEST_LENGTH
+#else
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+#define S3_SHA256_DIGEST_LENGTH SHA256_DIGEST_LENGTH
+#endif
#define USER_AGENT_SIZE 256
#define REQUEST_STACK_SIZE 32
+#define SIGNATURE_SCOPE_SIZE 64
+
+//#define SIGNATURE_DEBUG
+
static int verifyPeer;
static char userAgentG[USER_AGENT_SIZE];
@@ -61,15 +72,24 @@ typedef struct RequestComputedValues
// Storage for amzHeaders (the +256 is for x-amz-acl and x-amz-date)
char amzHeadersRaw[COMPACTED_METADATA_BUFFER_SIZE + 256 + 1];
- // Canonicalized x-amz- headers
- string_multibuffer(canonicalizedAmzHeaders,
+ // Length of populated data in raw buffer
+ int amzHeadersRawLength;
+
+ // Canonicalized headers for signature
+ string_multibuffer(canonicalizedSignatureHeaders,
COMPACTED_METADATA_BUFFER_SIZE + 256 + 1);
+ // Delimited list of header names used for signature
+ char signedHeaders[COMPACTED_METADATA_BUFFER_SIZE];
+
// URL-Encoded key
char urlEncodedKey[MAX_URLENCODED_KEY_SIZE + 1];
// Canonicalized resource
- char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
+ char canonicalURI[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
+
+ // Canonical sub-resource & query string
+ char canonicalQueryString[MAX_CANONICALIZED_RESOURCE_SIZE + 1];
// Cache-Control header (or empty)
char cacheControlHeader[128];
@@ -105,16 +125,28 @@ typedef struct RequestComputedValues
char rangeHeader[128];
// Authorization header
- char authorizationHeader[128];
+ char authorizationHeader[1024];
+
+ // Request date stamp
+ char requestDateISO8601[64];
+
+ // Credential used for authorization signature
+ char authCredential[MAX_CREDENTIAL_SIZE + 1];
+
+ // Computed request signature (hex string)
+ char requestSignatureHex[S3_SHA256_DIGEST_LENGTH * 2 + 1];
// Host header
char hostHeader[128];
+
+ // Hex string of hash of request payload
+ char payloadHash[S3_SHA256_DIGEST_LENGTH * 2 + 1];
} RequestComputedValues;
// Called whenever we detect that the request headers have been completely
// processed; which happens either when we get our first read/write callback,
-// or the request is finished being procesed. Returns nonzero on success,
+// or the request is finished being processed. Returns nonzero on success,
// zero on failure.
static void request_headers_done(Request *request)
{
@@ -186,7 +218,7 @@ static size_t curl_read_func(void *ptr, size_t size, size_t nmemb, void *data)
if (!request->toS3Callback || !request->toS3CallbackBytesRemaining) {
return 0;
}
-
+
// Don't tell the callback that we are willing to accept more data than we
// really are
if (len > request->toS3CallbackBytesRemaining) {
@@ -244,6 +276,50 @@ static size_t curl_write_func(void *ptr, size_t size, size_t nmemb,
}
+static S3Status append_amz_header(RequestComputedValues *values,
+ int addPrefix,
+ const char *headerName,
+ const char *headerValue)
+{
+ int rawPos = values->amzHeadersRawLength + 1;
+ values->amzHeaders[values->amzHeadersCount++] = &(values->amzHeadersRaw[rawPos]);
+
+ const char *headerStr = headerName;
+ if (addPrefix) {
+ char headerNameWithPrefix[S3_MAX_METADATA_SIZE - sizeof(": v")];
+ snprintf(headerNameWithPrefix, sizeof(headerNameWithPrefix),
+ S3_METADATA_HEADER_NAME_PREFIX "%s", headerName);
+ headerStr = headerNameWithPrefix;
+ }
+
+ // Make sure the new header (plus ": " plus string terminator) will fit
+ // in the buffer.
+ if ((values->amzHeadersRawLength + strlen(headerStr) + strlen(headerValue)
+ + 3) >= sizeof(values->amzHeadersRaw)) {
+ return S3StatusMetaDataHeadersTooLong;
+ }
+
+ unsigned long i = 0;
+ for (; i < strlen(headerStr); i++) {
+ values->amzHeadersRaw[rawPos++] = tolower(headerStr[i]);
+ }
+
+ snprintf(&(values->amzHeadersRaw[rawPos]), 3, ": ");
+ rawPos += 2;
+
+ for (i = 0; i < strlen(headerValue); i++) {
+ values->amzHeadersRaw[rawPos++] = headerValue[i];
+ }
+ rawPos--;
+
+ while (isblank(values->amzHeadersRaw[rawPos])) {
+ rawPos--;
+ }
+ values->amzHeadersRaw[++rawPos] = '\0';
+ values->amzHeadersRawLength = rawPos;
+ return S3StatusOK;
+}
+
// This function 'normalizes' all x-amz-meta headers provided in
// params->requestHeaders, which means it removes all whitespace from
// them such that they all look exactly like this:
@@ -255,73 +331,28 @@ static size_t curl_write_func(void *ptr, size_t size, size_t nmemb,
// these headers in params->amzHeaders (and also sets params->amzHeadersCount
// to be the count of the total number of x-amz- headers thus created).
static S3Status compose_amz_headers(const RequestParams *params,
+ int forceUnsignedPayload,
RequestComputedValues *values)
{
const S3PutProperties *properties = params->putProperties;
values->amzHeadersCount = 0;
- values->amzHeadersRaw[0] = 0;
- int len = 0;
-
- // Append a header to amzHeaders, trimming whitespace from the end.
- // Does NOT trim whitespace from the beginning.
-#define headers_append(isNewHeader, format, ...) \
- do { \
- if (isNewHeader) { \
- values->amzHeaders[values->amzHeadersCount++] = \
- &(values->amzHeadersRaw[len]); \
- } \
- len += snprintf(&(values->amzHeadersRaw[len]), \
- sizeof(values->amzHeadersRaw) - len, \
- format, __VA_ARGS__); \
- if (len >= (int) sizeof(values->amzHeadersRaw)) { \
- return S3StatusMetaDataHeadersTooLong; \
- } \
- while ((len > 0) && (values->amzHeadersRaw[len - 1] == ' ')) { \
- len--; \
- } \
- values->amzHeadersRaw[len++] = 0; \
- } while (0)
-
-#define header_name_tolower_copy(str, l) \
- do { \
- values->amzHeaders[values->amzHeadersCount++] = \
- &(values->amzHeadersRaw[len]); \
- if ((len + l) >= (int) sizeof(values->amzHeadersRaw)) { \
- return S3StatusMetaDataHeadersTooLong; \
- } \
- int todo = l; \
- while (todo--) { \
- if ((*(str) >= 'A') && (*(str) <= 'Z')) { \
- values->amzHeadersRaw[len++] = 'a' + (*(str) - 'A'); \
- } \
- else { \
- values->amzHeadersRaw[len++] = *(str); \
- } \
- (str)++; \
- } \
- } while (0)
+ values->amzHeadersRaw[0] = '\0';
+ values->amzHeadersRawLength = 0;
// Check and copy in the x-amz-meta headers
if (properties) {
int i;
for (i = 0; i < properties->metaDataCount; i++) {
const S3NameValue *property = &(properties->metaData[i]);
- char headerName[S3_MAX_METADATA_SIZE - sizeof(": v")];
- int l = snprintf(headerName, sizeof(headerName),
- S3_METADATA_HEADER_NAME_PREFIX "%s",
- property->name);
- char *hn = headerName;
- header_name_tolower_copy(hn, l);
- // Copy in the value
- headers_append(0, ": %s", property->value);
+ append_amz_header(values, 1, property->name, property->value);
}
// Add the x-amz-acl header, if necessary
const char *cannedAclString;
switch (properties->cannedAcl) {
case S3CannedAclPrivate:
- cannedAclString = 0;
+ cannedAclString = NULL;
break;
case S3CannedAclPublicRead:
cannedAclString = "public-read";
@@ -334,47 +365,72 @@ static S3Status compose_amz_headers(const RequestParams *params,
break;
}
if (cannedAclString) {
- headers_append(1, "x-amz-acl: %s", cannedAclString);
+ append_amz_header(values, 0, "x-amz-acl", cannedAclString);
}
// Add the x-amz-server-side-encryption header, if necessary
if (properties->useServerSideEncryption) {
- headers_append(1, "x-amz-server-side-encryption: %s", "AES256");
+ append_amz_header(values, 0, "x-amz-server-side-encryption",
+ "AES256");
}
}
// Add the x-amz-date header
- time_t now = time(NULL);
- char date[64];
- struct tm gmt;
- strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S GMT", gmtime_r(&now, &gmt));
- headers_append(1, "x-amz-date: %s", date);
+ append_amz_header(values, 0, "x-amz-date", values->requestDateISO8601);
if (params->httpRequestType == HttpRequestTypeCOPY) {
// Add the x-amz-copy-source header
- if (params->copySourceBucketName && params->copySourceBucketName[0] &&
- params->copySourceKey && params->copySourceKey[0]) {
- headers_append(1, "x-amz-copy-source: /%s/%s",
- params->copySourceBucketName,
- params->copySourceKey);
+ if (params->copySourceBucketName && params->copySourceBucketName[0]
+ && params->copySourceKey && params->copySourceKey[0]) {
+ char bucketKey[S3_MAX_METADATA_SIZE];
+ snprintf(bucketKey, sizeof(bucketKey), "/%s/%s",
+ params->copySourceBucketName, params->copySourceKey);
+ append_amz_header(values, 0, "x-amz-copy-source", bucketKey);
}
// If byteCount != 0 then we're just copying a range, add header
if (params->byteCount > 0) {
- headers_append(1, "x-amz-copy-source-range: bytes=%llu-%llu",
- (unsigned long long)params->startByte,
- (unsigned long long)params->startByte + params->byteCount);
+ char byteRange[S3_MAX_METADATA_SIZE];
+ snprintf(byteRange, sizeof(byteRange), "bytes=%zd-%zd",
+ params->startByte, params->startByte + params->byteCount);
+ append_amz_header(values, 0, "x-amz-copy-source-range", byteRange);
}
// And the x-amz-metadata-directive header
if (properties) {
- headers_append(1, "%s", "x-amz-metadata-directive: REPLACE");
+ append_amz_header(values, 0, "x-amz-metadata-directive", "REPLACE");
}
}
// Add the x-amz-security-token header if necessary
if (params->bucketContext.securityToken) {
- headers_append(1, "x-amz-security-token: %s",
- params->bucketContext.securityToken);
+ append_amz_header(values, 0, "x-amz-security-token",
+ params->bucketContext.securityToken);
+ }
+
+ if (!forceUnsignedPayload
+ && (params->httpRequestType == HttpRequestTypeGET
+ || params->httpRequestType == HttpRequestTypeCOPY
+ || params->httpRequestType == HttpRequestTypeDELETE
+ || params->httpRequestType == HttpRequestTypeHEAD)) {
+ // empty payload
+ unsigned char md[S3_SHA256_DIGEST_LENGTH];
+#ifdef __APPLE__
+ CC_SHA256("", 0, md);
+#else
+ SHA256((const unsigned char*) "", 0, md);
+#endif
+ values->payloadHash[0] = '\0';
+ int i = 0;
+ for (; i < S3_SHA256_DIGEST_LENGTH; i++) {
+ snprintf(&(values->payloadHash[i * 2]), 3, "%02x", md[i]);
+ }
}
+ else {
+ // TODO: figure out how to manage signed payloads
+ strcpy(values->payloadHash, "UNSIGNED-PAYLOAD");
+ }
+
+ append_amz_header(values, 0, "x-amz-content-sha256",
+ values->payloadHash);
return S3StatusOK;
}
@@ -460,14 +516,27 @@ static S3Status compose_standard_headers(const RequestParams *params,
len--;
}
values->hostHeader[len] = 0;
- } else {
- values->hostHeader[0] = 0;
+ }
+ else {
+ size_t len = snprintf(
+ values->hostHeader,
+ sizeof(values->hostHeader),
+ "Host: %s",
+ params->bucketContext.hostName ?
+ params->bucketContext.hostName : defaultHostNameG);
+ if (len >= sizeof(values->hostHeader)) {
+ return S3StatusUriTooLong;
+ }
+ while (is_blank(values->hostHeader[len])) {
+ len--;
+ }
+ values->hostHeader[len] = 0;
}
// Cache-Control
do_put_header("Cache-Control: %s", cacheControl, cacheControlHeader,
S3StatusBadCacheControl, S3StatusCacheControlTooLong);
-
+
// ContentType
do_put_header("Content-Type: %s", contentType, contentTypeHeader,
S3StatusBadContentType, S3StatusContentTypeTooLong);
@@ -481,12 +550,12 @@ static S3Status compose_standard_headers(const RequestParams *params,
contentDispositionFilename, contentDispositionHeader,
S3StatusBadContentDispositionFilename,
S3StatusContentDispositionFilenameTooLong);
-
+
// ContentEncoding
do_put_header("Content-Encoding: %s", contentEncoding,
contentEncodingHeader, S3StatusBadContentEncoding,
S3StatusContentEncodingTooLong);
-
+
// Expires
if (params->putProperties && (params->putProperties->expires >= 0)) {
time_t t = (time_t) params->putProperties->expires;
@@ -523,16 +592,16 @@ static S3Status compose_standard_headers(const RequestParams *params,
else {
values->ifUnmodifiedSinceHeader[0] = 0;
}
-
+
// If-Match header
do_get_header("If-Match: %s", ifMatchETag, ifMatchHeader,
S3StatusBadIfMatchETag, S3StatusIfMatchETagTooLong);
-
+
// If-None-Match header
do_get_header("If-None-Match: %s", ifNotMatchETag, ifNoneMatchHeader,
S3StatusBadIfNotMatchETag,
S3StatusIfNotMatchETagTooLong);
-
+
// Range header
if (params->startByte || params->byteCount) {
if (params->byteCount) {
@@ -565,26 +634,27 @@ static S3Status encode_key(const RequestParams *params,
}
-// Simple comparison function for comparing two HTTP header names that are
-// embedded within an HTTP header line, returning true if header1 comes
-// before header2 alphabetically, false if not
-static int headerle(const char *header1, const char *header2)
+// Simple comparison function for comparing two "<key><delim><value>"
+// delimited strings, returning true if the key of s1 comes
+// before the key of s2 alphabetically, false if not
+static int headerle(const char *s1, const char *s2, char delim)
{
while (1) {
- if (*header1 == ':') {
- return (*header2 != ':');
+ if (*s1 == delim) {
+ return (*s2 != delim);
}
- else if (*header2 == ':') {
+ else if (*s2 == delim) {
return 0;
}
- else if (*header2 < *header1) {
+ else if (*s2 < *s1) {
return 0;
}
- else if (*header2 > *header1) {
+ else if (*s2 > *s1) {
return 1;
}
- header1++, header2++;
+ s1++, s2++;
}
+ return 0;
}
@@ -596,44 +666,59 @@ static int headerle(const char *header1, const char *header2)
// all the string comparisons that would be done "going forward", and thus
// only does the necessary string comparisons to move values back into their
// sorted position.
-static void header_gnome_sort(const char **headers, int size)
+static void kv_gnome_sort(const char **values, int size, char delim)
{
int i = 0, last_highest = 0;
while (i < size) {
- if ((i == 0) || headerle(headers[i - 1], headers[i])) {
+ if ((i == 0) || headerle(values[i - 1], values[i], delim)) {
i = ++last_highest;
}
else {
- const char *tmp = headers[i];
- headers[i] = headers[i - 1];
- headers[--i] = tmp;
+ const char *tmp = values[i];
+ values[i] = values[i - 1];
+ values[--i] = tmp;
}
}
}
-// Canonicalizes the x-amz- headers into the canonicalizedAmzHeaders buffer
-static void canonicalize_amz_headers(RequestComputedValues *values)
+// Canonicalizes the signature headers into the canonicalizedSignatureHeaders buffer
+static void canonicalize_signature_headers(RequestComputedValues *values)
{
// Make a copy of the headers that will be sorted
- const char *sortedHeaders[S3_MAX_METADATA_COUNT];
+ const char *sortedHeaders[S3_MAX_METADATA_COUNT + 3];
memcpy(sortedHeaders, values->amzHeaders,
(values->amzHeadersCount * sizeof(sortedHeaders[0])));
+ // add the content-type header and host header
+ int headerCount = values->amzHeadersCount;
+ if (values->contentTypeHeader[0]) {
+ sortedHeaders[headerCount++] = values->contentTypeHeader;
+ }
+ if (values->hostHeader[0]) {
+ sortedHeaders[headerCount++] = values->hostHeader;
+ }
+ if (values->rangeHeader[0]) {
+ sortedHeaders[headerCount++] = values->rangeHeader;
+ }
+
// Now sort these
- header_gnome_sort(sortedHeaders, values->amzHeadersCount);
+ kv_gnome_sort(sortedHeaders, headerCount, ':');
// Now copy this sorted list into the buffer, all the while:
// - folding repeated headers into single lines, and
// - folding multiple lines
// - removing the space after the colon
- int lastHeaderLen = 0, i;
- char *buffer = values->canonicalizedAmzHeaders;
- for (i = 0; i < values->amzHeadersCount; i++) {
+ int lastHeaderLen = 0;
+ char *buffer = values->canonicalizedSignatureHeaders;
+ char *hbuf = values->signedHeaders;
+ int i = 0;
+ for (; i < headerCount; i++) {
const char *header = sortedHeaders[i];
const char *c = header;
+ char v;
// If the header names are the same, append the next value
if ((i > 0) &&
!strncmp(header, sortedHeaders[i - 1], lastHeaderLen)) {
@@ -646,8 +731,12 @@ static void canonicalize_amz_headers(RequestComputedValues *values)
else {
// Copy in everything up to the space in the ": "
while (*c != ' ') {
- *buffer++ = *c++;
+ v = tolower(*c++);
+ *buffer++ = v;
+ *hbuf++ = v;
}
+ // replace the ":" with a ";"
+ *(hbuf - 1) = ';';
// Save the header len since it's a new header
lastHeaderLen = c - header;
// Skip the space
@@ -662,7 +751,7 @@ static void canonicalize_amz_headers(RequestComputedValues *values)
while (is_blank(*c)) {
c++;
}
- // Also, what has most recently been copied into buffer amy
+ // Also, what has most recently been copied into buffer may
// have been whitespace, and since we're folding whitespace
// out around this newline sequence, back buffer up over
// any whitespace it contains
@@ -676,6 +765,9 @@ static void canonicalize_amz_headers(RequestComputedValues *values)
// Finally, add the newline
*buffer++ = '\n';
}
+ // Remove the extra trailing semicolon from the header name list
+ // and terminate the string.
+ *(hbuf - 1) = '\0';
// Terminate the buffer
*buffer = 0;
@@ -683,8 +775,7 @@ static void canonicalize_amz_headers(RequestComputedValues *values)
// Canonicalizes the resource into params->canonicalizedResource
-static void canonicalize_resource(const char *bucketName,
- const char *subResource,
+static void canonicalize_resource(const S3BucketContext *context,
const char *urlEncodedKey,
char *buffer)
{
@@ -694,9 +785,11 @@ static void canonicalize_resource(const char *bucketName,
#define append(str) len += sprintf(&(buffer[len]), "%s", str)
- if (bucketName && bucketName[0]) {
- buffer[len++] = '/';
- append(bucketName);
+ if (context->uriStyle == S3UriStylePath) {
+ if (context->bucketName && context->bucketName[0]) {
+ buffer[len++] = '/';
+ append(context->bucketName);
+ }
}
append("/");
@@ -705,10 +798,129 @@ static void canonicalize_resource(const char *bucketName,
append(urlEncodedKey);
}
+#undef append
+}
+
+
+// Sorts the '&'-separated parameters of queryString by key and writes an
+// AWS4-style percent-encoded copy into result.  result must be large enough
+// for worst-case 3x expansion of the input (see canonicalize_query_string).
+static void sort_and_urlencode_query_string(const char *queryString,
+                                            char *result)
+{
+#ifdef SIGNATURE_DEBUG
+    printf("\n--\nsort_and_urlencode\nqueryString: %s\n", queryString);
+#endif
+
+    // Upper bound on the parameter count; empty segments ("a&&b") are
+    // counted here but collapsed by strtok_r below.
+    unsigned int numParams = 1;
+    const char *tmp = queryString;
+    while ((tmp = strchr(tmp, '&')) != NULL) {
+        numParams++;
+        tmp++;
+    }
+
+    const char* params[numParams];
+
+    char tokenized[strlen(queryString) + 1];
+    strcpy(tokenized, queryString);
+
+    char *tok = tokenized;
+    const char *token = NULL;
+    char *save = NULL;
+    unsigned int i = 0;
+
+    while ((token = strtok_r(tok, "&", &save)) != NULL) {
+        tok = NULL;
+        params[i++] = token;
+    }
+    // strtok_r skips empty segments, so the real token count is i; sorting
+    // with the '&'-based estimate would read uninitialized params[] slots.
+    numParams = i;
+
+    kv_gnome_sort(params, numParams, '=');
+
+#ifdef SIGNATURE_DEBUG
+    for (i = 0; i < numParams; i++) {
+        printf("%d: %s\n", i, params[i]);
+    }
+#endif
+
+    unsigned int pi = 0;
+    char appendage[4];
+
+    for (; pi < numParams; pi++) {
+        const char *param = params[pi];
+        int foundEquals = 0;
+        // Test param[i] directly instead of calling strlen() every
+        // iteration (accidental O(n^2) in the original).
+        for (i = 0; param[i] != '\0'; i++) {
+            char c = param[i];
+            // Cast to unsigned char: passing a negative char to isalnum()
+            // is undefined behavior.
+            if (isalnum((unsigned char) c) || (c == '_') || (c == '-')
+                || (c == '~') || (c == '.')) {
+                appendage[0] = c;
+                appendage[1] = '\0';
+            }
+            else if ((c == '=') && !foundEquals) {
+                appendage[0] = c;
+                appendage[1] = '\0';
+                foundEquals = 1;
+            }
+            else {
+                // Cast to unsigned char: a sign-extended negative char
+                // prints as "FFFFFFxx" and truncates to a bogus "%FF".
+                snprintf(appendage, sizeof(appendage), "%%%02X",
+                         (unsigned char) c);
+            }
+            strcat(result, appendage);
+        }
+        strcat(result, "&");
+    }
+    // Drop the trailing '&' -- only if something was actually appended,
+    // otherwise this would index result[-1].
+    if (numParams > 0) {
+        result[strlen(result) - 1] = '\0';
+    }
+}
+
+
+// Canonicalize the query string part of the request into a buffer
+static void canonicalize_query_string(const char *queryParams,
+ const char *subResource, char *buffer)
+{
+ int len = 0;
+
+ *buffer = 0;
+
+#define append(str) len += sprintf(&(buffer[len]), "%s", str)
+
+ if (queryParams && queryParams[0]) {
+ char sorted[strlen(queryParams) * 2];
+ sorted[0] = '\0';
+ sort_and_urlencode_query_string(queryParams, sorted);
+ append(sorted);
+ }
+
if (subResource && subResource[0]) {
- append("?");
+ if (queryParams && queryParams[0]) {
+ append("&");
+ }
append(subResource);
+ append("=");
+ }
+
+#undef append
+}
+
+
+static HttpRequestType http_request_method_to_type(const char *method)
+{
+ if (!method) {
+ return HttpRequestTypeInvalid;
+ }
+ if (strcmp(method, "POST") == 0) {
+ return HttpRequestTypePOST;
+ }
+ else if (strcmp(method, "GET") == 0) {
+ return HttpRequestTypeGET;
+ }
+ else if (strcmp(method, "HEAD") == 0) {
+ return HttpRequestTypeHEAD;
+ }
+ else if (strcmp(method, "PUT") == 0) {
+ return HttpRequestTypePUT;
}
+ else if (strcmp(method, "COPY") == 0) {
+ return HttpRequestTypeCOPY;
+ }
+ else if (strcmp(method, "DELETE") == 0) {
+ return HttpRequestTypeDELETE;
+ }
+ return HttpRequestTypeInvalid;
}
@@ -735,55 +947,138 @@ static const char *http_request_type_to_verb(HttpRequestType requestType)
static S3Status compose_auth_header(const RequestParams *params,
RequestComputedValues *values)
{
- // We allow for:
- // 17 bytes for HTTP-Verb + \n
- // 129 bytes for Content-MD5 + \n
- // 129 bytes for Content-Type + \n
- // 1 byte for empty Date + \n
- // CanonicalizedAmzHeaders & CanonicalizedResource
- char signbuf[17 + 129 + 129 + 1 +
- (sizeof(values->canonicalizedAmzHeaders) - 1) +
- (sizeof(values->canonicalizedResource) - 1) + 1];
+ const char *httpMethod = http_request_type_to_verb(params->httpRequestType);
+ int canonicalRequestLen = strlen(httpMethod) + 1 +
+ strlen(values->canonicalURI) + 1 +
+ strlen(values->canonicalQueryString) + 1 +
+ strlen(values->canonicalizedSignatureHeaders) + 1 +
+ strlen(values->signedHeaders) + 1 +
+ 2 * S3_SHA256_DIGEST_LENGTH + 1; // 2 hex digits for each byte
+
int len = 0;
-#define signbuf_append(format, ...) \
- len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
+ char canonicalRequest[canonicalRequestLen];
+
+#define buf_append(buf, format, ...) \
+ len += snprintf(&(buf[len]), sizeof(buf) - len, \
format, __VA_ARGS__)
- signbuf_append
- ("%s\n", http_request_type_to_verb(params->httpRequestType));
+ canonicalRequest[0] = '\0';
+ buf_append(canonicalRequest, "%s\n", httpMethod);
+ buf_append(canonicalRequest, "%s\n", values->canonicalURI);
+ buf_append(canonicalRequest, "%s\n", values->canonicalQueryString);
+ buf_append(canonicalRequest, "%s\n", values->canonicalizedSignatureHeaders);
+ buf_append(canonicalRequest, "%s\n", values->signedHeaders);
- // For MD5 and Content-Type, use the value in the actual header, because
- // it's already been trimmed
- signbuf_append("%s\n", values->md5Header[0] ?
- &(values->md5Header[sizeof("Content-MD5: ") - 1]) : "");
+ buf_append(canonicalRequest, "%s", values->payloadHash);
- signbuf_append
- ("%s\n", values->contentTypeHeader[0] ?
- &(values->contentTypeHeader[sizeof("Content-Type: ") - 1]) : "");
+#ifdef SIGNATURE_DEBUG
+ printf("--\nCanonical Request:\n%s\n", canonicalRequest);
+#endif
- signbuf_append("%s", "\n"); // Date - we always use x-amz-date
+ len = 0;
+ unsigned char canonicalRequestHash[S3_SHA256_DIGEST_LENGTH];
+#ifdef __APPLE__
+ CC_SHA256(canonicalRequest, strlen(canonicalRequest), canonicalRequestHash);
+#else
+ const unsigned char *rqstData = (const unsigned char*) canonicalRequest;
+ SHA256(rqstData, strlen(canonicalRequest), canonicalRequestHash);
+#endif
+ char canonicalRequestHashHex[2 * S3_SHA256_DIGEST_LENGTH + 1];
+ canonicalRequestHashHex[0] = '\0';
+ int i = 0;
+ for (; i < S3_SHA256_DIGEST_LENGTH; i++) {
+ buf_append(canonicalRequestHashHex, "%02x", canonicalRequestHash[i]);
+ }
- signbuf_append("%s", values->canonicalizedAmzHeaders);
+ const char *awsRegion = S3_DEFAULT_REGION;
+ if (params->bucketContext.authRegion) {
+ awsRegion = params->bucketContext.authRegion;
+ }
+ char scope[SIGNATURE_SCOPE_SIZE + 1];
+ snprintf(scope, sizeof(scope), "%.8s/%s/s3/aws4_request",
+ values->requestDateISO8601, awsRegion);
- signbuf_append("%s", values->canonicalizedResource);
+ char stringToSign[17 + 17 + SIGNATURE_SCOPE_SIZE + 1
+ + strlen(canonicalRequestHashHex)];
+ snprintf(stringToSign, sizeof(stringToSign), "AWS4-HMAC-SHA256\n%s\n%s\n%s",
+ values->requestDateISO8601, scope, canonicalRequestHashHex);
- // Generate an HMAC-SHA-1 of the signbuf
- unsigned char hmac[20];
+#ifdef SIGNATURE_DEBUG
+ printf("--\nString to Sign:\n%s\n", stringToSign);
+#endif
- HMAC_SHA1(hmac, (unsigned char *) params->bucketContext.secretAccessKey,
- strlen(params->bucketContext.secretAccessKey),
- (unsigned char *) signbuf, len);
+ const char *secretAccessKey = params->bucketContext.secretAccessKey;
+ char accessKey[strlen(secretAccessKey) + 5];
+ snprintf(accessKey, sizeof(accessKey), "AWS4%s", secretAccessKey);
+
+#ifdef __APPLE__
+ unsigned char dateKey[S3_SHA256_DIGEST_LENGTH];
+ CCHmac(kCCHmacAlgSHA256, accessKey, strlen(accessKey),
+ values->requestDateISO8601, 8, dateKey);
+ unsigned char dateRegionKey[S3_SHA256_DIGEST_LENGTH];
+ CCHmac(kCCHmacAlgSHA256, dateKey, S3_SHA256_DIGEST_LENGTH, awsRegion,
+ strlen(awsRegion), dateRegionKey);
+ unsigned char dateRegionServiceKey[S3_SHA256_DIGEST_LENGTH];
+ CCHmac(kCCHmacAlgSHA256, dateRegionKey, S3_SHA256_DIGEST_LENGTH, "s3", 2,
+ dateRegionServiceKey);
+ unsigned char signingKey[S3_SHA256_DIGEST_LENGTH];
+ CCHmac(kCCHmacAlgSHA256, dateRegionServiceKey, S3_SHA256_DIGEST_LENGTH,
+ "aws4_request", strlen("aws4_request"), signingKey);
+
+ unsigned char finalSignature[S3_SHA256_DIGEST_LENGTH];
+ CCHmac(kCCHmacAlgSHA256, signingKey, S3_SHA256_DIGEST_LENGTH, stringToSign,
+ strlen(stringToSign), finalSignature);
+#else
+ const EVP_MD *sha256evp = EVP_sha256();
+ unsigned char dateKey[S3_SHA256_DIGEST_LENGTH];
+ HMAC(sha256evp, accessKey, strlen(accessKey),
+ (const unsigned char*) values->requestDateISO8601, 8, dateKey,
+ NULL);
+ unsigned char dateRegionKey[S3_SHA256_DIGEST_LENGTH];
+ HMAC(sha256evp, dateKey, S3_SHA256_DIGEST_LENGTH,
+ (const unsigned char*) awsRegion, strlen(awsRegion), dateRegionKey,
+ NULL);
+ unsigned char dateRegionServiceKey[S3_SHA256_DIGEST_LENGTH];
+ HMAC(sha256evp, dateRegionKey, S3_SHA256_DIGEST_LENGTH,
+ (const unsigned char*) "s3", 2, dateRegionServiceKey, NULL);
+ unsigned char signingKey[S3_SHA256_DIGEST_LENGTH];
+ HMAC(sha256evp, dateRegionServiceKey, S3_SHA256_DIGEST_LENGTH,
+ (const unsigned char*) "aws4_request", strlen("aws4_request"),
+ signingKey,
+ NULL);
+
+ unsigned char finalSignature[S3_SHA256_DIGEST_LENGTH];
+ HMAC(sha256evp, signingKey, S3_SHA256_DIGEST_LENGTH,
+ (const unsigned char*) stringToSign, strlen(stringToSign),
+ finalSignature, NULL);
+#endif
- // Now base-64 encode the results
- char b64[((20 + 1) * 4) / 3];
- int b64Len = base64Encode(hmac, 20, b64);
-
- snprintf(values->authorizationHeader, sizeof(values->authorizationHeader),
- "Authorization: AWS %s:%.*s", params->bucketContext.accessKeyId,
- b64Len, b64);
+ len = 0;
+ values->requestSignatureHex[0] = '\0';
+ for (i = 0; i < S3_SHA256_DIGEST_LENGTH; i++) {
+ buf_append(values->requestSignatureHex, "%02x", finalSignature[i]);
+ }
+
+ snprintf(values->authCredential, sizeof(values->authCredential),
+ "%s/%.8s/%s/s3/aws4_request", params->bucketContext.accessKeyId,
+ values->requestDateISO8601, awsRegion);
+
+ snprintf(
+ values->authorizationHeader,
+ sizeof(values->authorizationHeader),
+ "Authorization: AWS4-HMAC-SHA256 Credential=%s,SignedHeaders=%s,Signature=%s",
+ values->authCredential, values->signedHeaders,
+ values->requestSignatureHex);
+
+#ifdef SIGNATURE_DEBUG
+ printf("--\nAuthorization Header:\n%s\n", values->authorizationHeader);
+#endif
return S3StatusOK;
+
+#undef buf_append
+
}
@@ -794,7 +1089,7 @@ static S3Status compose_uri(char *buffer, int bufferSize,
const char *subResource, const char *queryParams)
{
int len = 0;
-
+
#define uri_append(fmt, ...) \
do { \
len += snprintf(&(buffer[len]), bufferSize - len, fmt, __VA_ARGS__); \
@@ -803,13 +1098,13 @@ static S3Status compose_uri(char *buffer, int bufferSize,
} \
} while (0)
- uri_append("http%s://",
+ uri_append("http%s://",
(bucketContext->protocol == S3ProtocolHTTP) ? "" : "s");
- const char *hostName =
+ const char *hostName =
bucketContext->hostName ? bucketContext->hostName : defaultHostNameG;
- if (bucketContext->bucketName &&
+ if (bucketContext->bucketName &&
bucketContext->bucketName[0]) {
if (bucketContext->uriStyle == S3UriStyleVirtualHost) {
if (strchr(bucketContext->bucketName, '.') == NULL) {
@@ -833,16 +1128,16 @@ static S3Status compose_uri(char *buffer, int bufferSize,
uri_append("%s", "/");
uri_append("%s", urlEncodedKey);
-
+
if (subResource && subResource[0]) {
uri_append("?%s", subResource);
}
-
+
if (queryParams) {
uri_append("%s%s", (subResource && subResource[0]) ? "&" : "?",
queryParams);
}
-
+
return S3StatusOK;
}
@@ -1006,12 +1301,12 @@ static void request_deinitialize(Request *request)
}
-static S3Status request_get(const RequestParams *params,
+static S3Status request_get(const RequestParams *params,
const RequestComputedValues *values,
Request **reqReturn)
{
Request *request = 0;
-
+
// Try to get one from the request stack. We hold the lock for the
// shortest time possible here.
pthread_mutex_lock(&requestStackMutexG);
@@ -1019,7 +1314,7 @@ static S3Status request_get(const RequestParams *params,
if (requestStackCountG) {
request = requestStackG[--requestStackCountG];
}
-
+
pthread_mutex_unlock(&requestStackMutexG);
// If we got one, deinitialize it for re-use
@@ -1046,7 +1341,7 @@ static S3Status request_get(const RequestParams *params,
request->status = S3StatusOK;
S3Status status;
-
+
// Start out with no headers
request->headers = 0;
@@ -1082,11 +1377,11 @@ static S3Status request_get(const RequestParams *params,
response_headers_handler_initialize(&(request->responseHeadersHandler));
request->propertiesCallbackMade = 0;
-
+
error_parser_initialize(&(request->errorParser));
*reqReturn = request;
-
+
return S3StatusOK;
}
@@ -1159,7 +1454,7 @@ S3Status request_api_initialize(const char *userAgentInfo, int flags,
snprintf(userAgentG, sizeof(userAgentG),
"Mozilla/4.0 (Compatible; %s; libs3 %s.%s; %s)",
userAgentInfo, LIBS3_VER_MAJOR, LIBS3_VER_MINOR, platform);
-
+
return S3StatusOK;
}
@@ -1173,57 +1468,85 @@ void request_api_deinitialize()
}
}
-void request_perform(const RequestParams *params, S3RequestContext *context)
+static S3Status setup_request(const RequestParams *params,
+ RequestComputedValues *computed,
+ int forceUnsignedPayload)
{
- Request *request;
S3Status status;
- int verifyPeerRequest = verifyPeer;
- CURLcode curlstatus;
-
-#define return_status(status) \
- (*(params->completeCallback))(status, 0, params->callbackData); \
- return
-
- // These will hold the computed values
- RequestComputedValues computed;
// Validate the bucket name
- if (params->bucketContext.bucketName &&
- ((status = S3_validate_bucket_name
- (params->bucketContext.bucketName,
- params->bucketContext.uriStyle)) != S3StatusOK)) {
- return_status(status);
+ if (params->bucketContext.bucketName
+ && ((status = S3_validate_bucket_name(params->bucketContext.bucketName,
+ params->bucketContext.uriStyle))
+ != S3StatusOK)) {
+ return status;
}
+ time_t now = time(NULL);
+ struct tm gmt;
+ gmtime_r(&now, &gmt);
+ strftime(computed->requestDateISO8601, sizeof(computed->requestDateISO8601),
+ "%Y%m%dT%H%M%SZ", &gmt);
+
// Compose the amz headers
- if ((status = compose_amz_headers(params, &computed)) != S3StatusOK) {
- return_status(status);
+ if ((status = compose_amz_headers(params, forceUnsignedPayload, computed))
+ != S3StatusOK) {
+ return status;
}
// Compose standard headers
- if ((status = compose_standard_headers
- (params, &computed)) != S3StatusOK) {
- return_status(status);
+ if ((status = compose_standard_headers(params, computed)) != S3StatusOK) {
+ return status;
}
// URL encode the key
- if ((status = encode_key(params, &computed)) != S3StatusOK) {
- return_status(status);
+ if ((status = encode_key(params, computed)) != S3StatusOK) {
+ return status;
}
// Compute the canonicalized amz headers
- canonicalize_amz_headers(&computed);
+ canonicalize_signature_headers(computed);
// Compute the canonicalized resource
- canonicalize_resource(params->bucketContext.bucketName,
- params->subResource, computed.urlEncodedKey,
- computed.canonicalizedResource);
+ canonicalize_resource(¶ms->bucketContext, computed->urlEncodedKey,
+ computed->canonicalURI);
+ canonicalize_query_string(params->queryParams, params->subResource,
+ computed->canonicalQueryString);
// Compose Authorization header
- if ((status = compose_auth_header(params, &computed)) != S3StatusOK) {
+ if ((status = compose_auth_header(params, computed)) != S3StatusOK) {
+ return status;
+ }
+
+#ifdef SIGNATURE_DEBUG
+ int i = 0;
+ printf("\n--\nAMZ Headers:\n");
+ for (; i < computed->amzHeadersCount; i++) {
+ printf("%s\n", computed->amzHeaders[i]);
+ }
+#endif
+
+ return status;
+}
+
+void request_perform(const RequestParams *params, S3RequestContext *context)
+{
+ Request *request;
+ S3Status status;
+ int verifyPeerRequest = verifyPeer;
+ CURLcode curlstatus;
+
+#define return_status(status) \
+ (*(params->completeCallback))(status, 0, params->callbackData); \
+ return
+
+ // These will hold the computed values
+ RequestComputedValues computed;
+
+ if ((status = setup_request(params, &computed, 0)) != S3StatusOK) {
return_status(status);
}
-
+
// Get an initialized Request structure now
if ((status = request_get(params, &computed, &request)) != S3StatusOK) {
return_status(status);
@@ -1233,12 +1556,12 @@ void request_perform(const RequestParams *params, S3RequestContext *context)
}
// Allow per-context override of verifyPeer
if (verifyPeerRequest != verifyPeer) {
- if ((curlstatus = curl_easy_setopt(request->curl,
- CURLOPT_SSL_VERIFYPEER,
- context->verifyPeer))
- != CURLE_OK) {
- return_status(S3StatusFailedToInitializeRequest);
- }
+ if ((curlstatus = curl_easy_setopt(request->curl,
+ CURLOPT_SSL_VERIFYPEER,
+ context->verifyPeer))
+ != CURLE_OK) {
+ return_status(S3StatusFailedToInitializeRequest);
+ }
}
// If a RequestContext was provided, add the request to the curl multi
@@ -1387,11 +1710,12 @@ S3Status request_curl_code_to_status(CURLcode code)
S3Status S3_generate_authenticated_query_string
(char *buffer, const S3BucketContext *bucketContext,
- const char *key, int64_t expires, const char *resource)
+ const char *key, int expires, const char *resource,
+ const char *httpMethod)
{
-#define MAX_EXPIRES (((int64_t) 1 << 31) - 1)
- // S3 seems to only accept expiration dates up to the number of seconds
- // representably by a signed 32-bit integer
+ // maximum expiration period is seven days (in seconds)
+#define MAX_EXPIRES 604800
+
if (expires < 0) {
expires = MAX_EXPIRES;
}
@@ -1399,68 +1723,32 @@ S3Status S3_generate_authenticated_query_string
expires = MAX_EXPIRES;
}
- // xxx todo: rework this so that it can be incorporated into shared code
- // with request_perform(). It's really unfortunate that this code is not
- // shared with request_perform().
+ RequestParams params =
+ { http_request_method_to_type(httpMethod), *bucketContext, key, NULL,
+ resource,
+ NULL, NULL, NULL, 0, 0, NULL, NULL, NULL, 0, NULL, NULL, NULL };
- // URL encode the key
- char urlEncodedKey[S3_MAX_KEY_SIZE * 3];
- if (key) {
- urlEncode(urlEncodedKey, key, strlen(key));
+ RequestComputedValues computed;
+ S3Status status = setup_request(¶ms, &computed, 1);
+ if (status != S3StatusOK) {
+ return status;
}
- else {
- urlEncodedKey[0] = 0;
- }
-
- // Compute canonicalized resource
- char canonicalizedResource[MAX_CANONICALIZED_RESOURCE_SIZE];
- canonicalize_resource(bucketContext->bucketName, resource, urlEncodedKey,
- canonicalizedResource);
-
- // We allow for:
- // 17 bytes for HTTP-Verb + \n
- // 1 byte for empty Content-MD5 + \n
- // 1 byte for empty Content-Type + \n
- // 20 bytes for Expires + \n
- // 0 bytes for CanonicalizedAmzHeaders
- // CanonicalizedResource
- char signbuf[17 + 1 + 1 + 1 + 20 + sizeof(canonicalizedResource) + 1];
- int len = 0;
-
-#define signbuf_append(format, ...) \
- len += snprintf(&(signbuf[len]), sizeof(signbuf) - len, \
- format, __VA_ARGS__)
-
- signbuf_append("%s\n", "GET"); // HTTP-Verb
- signbuf_append("%s\n", ""); // Content-MD5
- signbuf_append("%s\n", ""); // Content-Type
- signbuf_append("%llu\n", (unsigned long long) expires);
- signbuf_append("%s", canonicalizedResource);
-
- // Generate an HMAC-SHA-1 of the signbuf
- unsigned char hmac[20];
-
- HMAC_SHA1(hmac, (unsigned char *) bucketContext->secretAccessKey,
- strlen(bucketContext->secretAccessKey),
- (unsigned char *) signbuf, len);
-
- // Now base-64 encode the results
- char b64[((20 + 1) * 4) / 3];
- int b64Len = base64Encode(hmac, 20, b64);
-
- // Now urlEncode that
- char signature[sizeof(b64) * 3];
- urlEncode(signature, b64, b64Len);
- // Finally, compose the uri, with params:
- // ?AWSAccessKeyId=xxx[&Expires=]&Signature=xxx
- char queryParams[sizeof("AWSAccessKeyId=") + 20 +
- sizeof("&Expires=") + 20 +
- sizeof("&Signature=") + sizeof(signature) + 1];
+ // Finally, compose the URI, with params
+ char queryParams[sizeof("X-Amz-Algorithm=AWS4-HMAC-SHA256")
+ + sizeof("&X-Amz-Credential=") + MAX_CREDENTIAL_SIZE
+ + sizeof("&X-Amz-Date=") + 16 + sizeof("&X-Amz-Expires=") + 6
+ + sizeof("&X-Amz-SignedHeaders=") + 128 + sizeof("&X-Amz-Signature=")
+ + sizeof(computed.requestSignatureHex) + 1];
- sprintf(queryParams, "AWSAccessKeyId=%s&Expires=%ld&Signature=%s",
- bucketContext->accessKeyId, (long) expires, signature);
+ snprintf(queryParams, sizeof(queryParams),
+ "X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=%s"
+ "&X-Amz-Date=%s&X-Amz-Expires=%d"
+ "&X-Amz-SignedHeaders=%s&X-Amz-Signature=%s",
+ computed.authCredential, computed.requestDateISO8601, expires,
+ computed.signedHeaders, computed.requestSignatureHex);
return compose_uri(buffer, S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE,
- bucketContext, urlEncodedKey, resource, queryParams);
+ bucketContext, computed.urlEncodedKey, resource,
+ queryParams);
}
diff --git a/src/s3.c b/src/s3.c
index 0493b31..797a124 100644
--- a/src/s3.c
+++ b/src/s3.c
@@ -64,6 +64,7 @@ static S3Protocol protocolG = S3ProtocolHTTPS;
static S3UriStyle uriStyleG = S3UriStylePath;
static int retriesG = 5;
static int verifyPeerG = 0;
+static const char *awsRegionG = NULL;
// Environment variables, saved as globals ----------------------------------
@@ -156,6 +157,8 @@ static char putenvBufG[256];
#define TARGET_BUCKET_PREFIX_LEN (sizeof(TARGET_BUCKET_PREFIX) - 1)
#define TARGET_PREFIX_PREFIX "targetPrefix="
#define TARGET_PREFIX_PREFIX_LEN (sizeof(TARGET_PREFIX_PREFIX) - 1)
+#define HTTP_METHOD_PREFIX "method="
+#define HTTP_METHOD_PREFIX_LEN (sizeof(HTTP_METHOD_PREFIX) - 1)
// util ----------------------------------------------------------------------
@@ -164,7 +167,7 @@ static void S3_init()
{
S3Status status;
const char *hostname = getenv("S3_HOSTNAME");
-
+
if ((status = S3_initialize("s3", verifyPeerG|S3_INIT_ALL, hostname))
!= S3StatusOK) {
fprintf(stderr, "Failed to initialize libs3: %s\n",
@@ -201,13 +204,14 @@ static void usageExit(FILE *out)
" -r/--retries : retry retryable failures this number of times\n"
" (default is 5)\n"
" -v/--verify-peer : verify peer SSL certificate (default is no)\n"
+" -g/--region <REGION> : use <REGION> for request authorization\n"
"\n"
" Environment:\n"
"\n"
" S3_ACCESS_KEY_ID : S3 access key ID (required)\n"
" S3_SECRET_ACCESS_KEY : S3 secret access key (required)\n"
" S3_HOSTNAME : specify alternative S3 host (optional)\n"
-"\n"
+"\n"
" Commands (with <required parameters> and [optional parameters]) :\n"
"\n"
" (NOTE: all command parameters take a value and are specified using the\n"
@@ -322,6 +326,8 @@ static void usageExit(FILE *out)
" [expires] : Expiration date for query string\n"
" [resource] : Sub-resource of key for query string, without a\n"
" leading '?', for example, \"torrent\"\n"
+" [method] : HTTP method for use with the query string\n"
+"                     :  (default is \"GET\")\n"
"\n"
" listmultiparts : Show multipart uploads\n"
" <bucket> : Bucket multipart uploads belongs to\n"
@@ -438,7 +444,7 @@ static int growbuffer_append(growbuffer **gb, const char *data, int dataLen)
}
memcpy(&(buf->data[buf->size]), data, toCopy);
-
+
buf->size += toCopy, data += toCopy, dataLen -= toCopy;
}
@@ -742,6 +748,7 @@ static struct option longOptionsG[] =
{ "show-properties", no_argument, 0, 's' },
{ "retries", required_argument, 0, 'r' },
{ "verify-peer", no_argument, 0, 'v' },
+ { "region", required_argument, 0, 'g' },
{ 0, 0, 0, 0 }
};
@@ -800,7 +807,7 @@ static S3Status responsePropertiesCallback
// This callback does the same thing for every request type: saves the status
// and error stuff in global variables
static void responseCompleteCallback(S3Status status,
- const S3ErrorDetails *error,
+ const S3ErrorDetails *error,
void *callbackData)
{
(void) callbackData;
@@ -913,8 +920,8 @@ static void list_service(int allDetails)
};
do {
- S3_list_service(protocolG, accessKeyIdG, secretAccessKeyG, 0, 0, 0,
- &listServiceHandler, &data);
+ S3_list_service(protocolG, accessKeyIdG, secretAccessKeyG, 0, 0,
+ awsRegionG, 0, &listServiceHandler, &data);
} while (S3_status_is_retryable(statusG) && should_retry());
if (statusG == S3StatusOK) {
@@ -957,7 +964,7 @@ static void test_bucket(int argc, char **argv, int optindex)
char locationConstraint[64];
do {
S3_test_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG, 0,
- 0, bucketName, sizeof(locationConstraint),
+ 0, bucketName, awsRegionG, sizeof(locationConstraint),
locationConstraint, 0, &responseHandler, 0);
} while (S3_status_is_retryable(statusG) && should_retry());
@@ -1055,9 +1062,9 @@ static void create_bucket(int argc, char **argv, int optindex)
};
do {
- S3_create_bucket(protocolG, accessKeyIdG, secretAccessKeyG, 0,
- 0, bucketName, cannedAcl, locationConstraint, 0,
- &responseHandler, 0);
+ S3_create_bucket(protocolG, accessKeyIdG, secretAccessKeyG, 0, 0,
+ bucketName, awsRegionG, cannedAcl, locationConstraint,
+ 0, &responseHandler, 0);
} while (S3_status_is_retryable(statusG) && should_retry());
if (statusG == S3StatusOK) {
@@ -1066,7 +1073,7 @@ static void create_bucket(int argc, char **argv, int optindex)
else {
printError();
}
-
+
S3_deinitialize();
}
@@ -1096,7 +1103,7 @@ static void delete_bucket(int argc, char **argv, int optindex)
do {
S3_delete_bucket(protocolG, uriStyleG, accessKeyIdG, secretAccessKeyG,
- 0, 0, bucketName, 0, &responseHandler, 0);
+ 0, 0, bucketName, awsRegionG, 0, &responseHandler, 0);
} while (S3_status_is_retryable(statusG) && should_retry());
if (statusG != S3StatusOK) {
@@ -1244,7 +1251,7 @@ static void list_bucket(const char *bucketName, const char *prefix,
int maxkeys, int allDetails)
{
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -1253,7 +1260,8 @@ static void list_bucket(const char *bucketName, const char *prefix,
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ListBucketHandler listBucketHandler =
@@ -1309,6 +1317,7 @@ static void list(int argc, char **argv, int optindex)
int maxkeys = 0, allDetails = 0;
while (optindex < argc) {
char *param = argv[optindex++];
+
if (!strncmp(param, PREFIX_PREFIX, PREFIX_PREFIX_LEN)) {
prefix = &(param[PREFIX_PREFIX_LEN]);
}
@@ -1392,7 +1401,7 @@ typedef struct list_parts_callback_data
typedef struct list_upload_callback_data
{
- char uploadId[1024];
+ char uploadId[1024];
} abort_upload_callback_data;
static void printListMultipartHeader(int allDetails)
@@ -1407,12 +1416,12 @@ static void printListPartsHeader()
printf("%-25s %-30s %-30s %-15s",
"LastModified",
"PartNumber", "ETag", "SIZE");
-
+
printf("\n");
printf("--------------------- "
- " ------------- "
+ " ------------- "
"------------------------------- "
- " -----");
+ " -----");
printf("\n");
}
@@ -1452,7 +1461,7 @@ static S3Status listMultipartCallback(int isTruncated, const char *nextKeyMarker
else {
data->nextUploadIdMarker[0] = 0;
}
-
+
if (uploadsCount && !data->uploadCount) {
printListMultipartHeader(data->allDetails);
}
@@ -1484,8 +1493,8 @@ static S3Status listMultipartCallback(int isTruncated, const char *nextKeyMarker
}
else {
time_t t = (time_t) upload->initiated;
- strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
- gmtime(&t));
+ strftime(timebuf, sizeof(timebuf), "%Y-%m-%dT%H:%M:%SZ",
+ gmtime(&t));
printf("%-50s %s %-50s", upload->key, timebuf, upload->uploadId);
if (data->allDetails) {
printf(" %-34s %-64s %-12s %-64s %-12s",
@@ -1512,16 +1521,16 @@ static S3Status listMultipartCallback(int isTruncated, const char *nextKeyMarker
static S3Status listPartsCallback(int isTruncated,
- const char *nextPartNumberMarker,
- const char *initiatorId,
- const char *initiatorDisplayName,
- const char *ownerId,
- const char *ownerDisplayName,
- const char *storageClass,
- int partsCount,
- int handlePartsStart,
- const S3ListPart *parts,
- void *callbackData)
+ const char *nextPartNumberMarker,
+ const char *initiatorId,
+ const char *initiatorDisplayName,
+ const char *ownerId,
+ const char *ownerDisplayName,
+ const char *storageClass,
+ int partsCount,
+ int handlePartsStart,
+ const S3ListPart *parts,
+ void *callbackData)
{
list_parts_callback_data *data =
(list_parts_callback_data *) callbackData;
@@ -1610,7 +1619,7 @@ static S3Status listPartsCallback(int isTruncated,
}
}
- data->partsCount += partsCount;
+ data->partsCount += partsCount;
return S3StatusOK;
}
@@ -1661,16 +1670,16 @@ static void list_multipart_uploads(int argc, char **argv, int optindex)
else if (!bucketName) {
bucketName = param;
}
-
+
else {
fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
usageExit(stderr);
}
}
if (bucketName) {
-
+
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -1679,7 +1688,8 @@ static void list_multipart_uploads(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ListMultipartUploadsHandler listMultipartUploadsHandler =
@@ -1691,7 +1701,7 @@ static void list_multipart_uploads(int argc, char **argv, int optindex)
list_multiparts_callback_data data;
memset(&data, 0, sizeof(list_multiparts_callback_data));
- if (keymarker != 0) {
+ if (keymarker != 0) {
snprintf(data.nextKeyMarker, sizeof(data.nextKeyMarker), "%s",
keymarker);
}
@@ -1783,16 +1793,16 @@ static void list_parts(int argc, char **argv, int optindex)
!strcmp(ad, "1")) {
allDetails = 1;
}
- }
+ }
else {
fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
usageExit(stderr);
}
}
if (bucketName) {
-
+
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -1801,7 +1811,8 @@ static void list_parts(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ListPartsHandler listPartsHandler =
@@ -1813,11 +1824,11 @@ static void list_parts(int argc, char **argv, int optindex)
list_parts_callback_data data;
memset(&data, 0, sizeof(list_parts_callback_data));
- if (partnumbermarker != 0) {
+ if (partnumbermarker != 0) {
snprintf(data.nextPartNumberMarker,
sizeof(data.nextPartNumberMarker), "%s", partnumbermarker);
}
-
+
data.partsCount = 0;
data.allDetails = allDetails;
data.noPrint = 0;
@@ -1846,9 +1857,7 @@ static void list_parts(int argc, char **argv, int optindex)
}
S3_deinitialize();
-
}
-
}
@@ -1859,7 +1868,7 @@ static void abort_multipart_upload(int argc, char **argv, int optindex)
"<upload-id>\n");
return;
}
-
+
// Split bucket/key
char *slash = argv[optindex];
while (*slash && (*slash != '/')) {
@@ -1876,22 +1885,22 @@ static void abort_multipart_upload(int argc, char **argv, int optindex)
const char *key = slash;
const char *uploadid = 0;
while (optindex < argc) {
- char *param = argv[optindex++];
+ char *param = argv[optindex++];
if (!strncmp(param, UPLOAD_ID_PREFIX, UPLOAD_ID_PREFIX_LEN)) {
uploadid = &(param[UPLOAD_ID_PREFIX_LEN]);
}
else if (!strncmp(param, FILENAME_PREFIX, FILENAME_PREFIX_LEN)) {
key = &(param[FILENAME_PREFIX_LEN]);
- }
+ }
else {
fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
usageExit(stderr);
}
}
if (bucketName) {
-
+
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -1900,7 +1909,8 @@ static void abort_multipart_upload(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3AbortMultipartUploadHandler abortMultipartUploadHandler =
@@ -1920,7 +1930,7 @@ static void abort_multipart_upload(int argc, char **argv, int optindex)
snprintf(data.nextUploadIdMarker, sizeof(data.nextUploadIdMarker),
"%s", uploadidmarker);
}
-
+
data.uploadCount = 0;
data.allDetails = allDetails;
*/
@@ -1929,7 +1939,7 @@ static void abort_multipart_upload(int argc, char **argv, int optindex)
S3_abort_multipart_upload(&bucketContext, key, uploadid,
&abortMultipartUploadHandler);
} while (S3_status_is_retryable(statusG) && should_retry());
-
+
S3_deinitialize();
}
}
@@ -1954,7 +1964,7 @@ static void delete_object(int argc, char **argv, int optindex)
const char *key = slash;
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -1963,11 +1973,12 @@ static void delete_object(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
- {
+ {
0,
&responseCompleteCallback
};
@@ -2002,7 +2013,7 @@ static int putObjectDataCallback(int bufferSize, char *buffer,
{
put_object_callback_data *data =
(put_object_callback_data *) callbackData;
-
+
int ret = 0;
if (data->contentLength) {
@@ -2025,7 +2036,7 @@ static int putObjectDataCallback(int bufferSize, char *buffer,
printf("%llu bytes remaining ",
(unsigned long long) data->totalContentLength);
printf("(%d%% complete) ...\n",
- (int) (((data->totalOriginalContentLength -
+ (int) (((data->totalOriginalContentLength -
data->totalContentLength) * 100) /
data->totalOriginalContentLength));
}
@@ -2063,6 +2074,7 @@ S3Status MultipartResponseProperiesCallback
return S3StatusOK;
}
+
static int multipartPutXmlCallback(int bufferSize, char *buffer,
void *callbackData)
{
@@ -2089,7 +2101,8 @@ static int try_get_parts_info(const char *bucketName, const char *key,
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ListPartsHandler listPartsHandler =
@@ -2101,7 +2114,7 @@ static int try_get_parts_info(const char *bucketName, const char *key,
list_parts_callback_data data;
memset(&data, 0, sizeof(list_parts_callback_data));
-
+
data.partsCount = 0;
data.allDetails = 0;
data.manager = manager;
@@ -2127,10 +2140,11 @@ static int try_get_parts_info(const char *bucketName, const char *key,
printError();
return -1;
}
-
+
return 0;
}
+
static void put_object(int argc, char **argv, int optindex,
const char *srcBucketName, const char *srcKey, unsigned long long srcSize)
{
@@ -2342,7 +2356,7 @@ static void put_object(int argc, char **argv, int optindex,
contentLength;
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -2351,7 +2365,8 @@ static void put_object(int argc, char **argv, int optindex,
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3PutProperties putProperties =
@@ -2367,7 +2382,7 @@ static void put_object(int argc, char **argv, int optindex,
metaProperties,
useServerSideEncryption
};
-
+
if (contentLength <= MULTIPART_CHUNK_SIZE) {
S3PutObjectHandler putObjectHandler =
{
@@ -2415,7 +2430,7 @@ static void put_object(int argc, char **argv, int optindex,
&responsePropertiesCallback,
&responseCompleteCallback
},
- &initial_multipart_callback
+ &initial_multipart_callback
};
S3PutObjectHandler putObjectHandler = {
@@ -2425,29 +2440,29 @@ static void put_object(int argc, char **argv, int optindex,
S3MultipartCommitHandler commit_handler = {
{
- &responsePropertiesCallback,&responseCompleteCallback
+ &responsePropertiesCallback,&responseCompleteCallback
},
&multipartPutXmlCallback,
0
};
-
+
manager.etags = (char **) malloc(sizeof(char *) * totalSeq);
manager.next_etags_pos = 0;
-
+
if (uploadId) {
manager.upload_id = strdup(uploadId);
manager.remaining = contentLength;
- if(!try_get_parts_info(bucketName, key, &manager)) {
+ if (!try_get_parts_info(bucketName, key, &manager)) {
fseek(data.infile, -(manager.remaining), 2);
contentLength = manager.remaining;
goto upload;
- }else {
+ } else {
goto clean;
}
}
-
+
do {
- S3_initiate_multipart(&bucketContext,key,0, &handler,0, &manager);
+            S3_initiate_multipart(&bucketContext, key, 0, &handler, 0, &manager);
} while (S3_status_is_retryable(statusG) && should_retry());
if (manager.upload_id == 0 || statusG != S3StatusOK) {
@@ -2455,7 +2470,7 @@ static void put_object(int argc, char **argv, int optindex,
goto clean;
}
-upload:
+upload:
todoContentLength -= MULTIPART_CHUNK_SIZE * manager.next_etags_pos;
for (seq = manager.next_etags_pos + 1; seq <= totalSeq; seq++) {
memset(&partData, 0, sizeof(MultipartPartData));
@@ -2480,7 +2495,8 @@ upload:
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler copyResponseHandler = { &responsePropertiesCallback, &responseCompleteCallback };
@@ -2490,12 +2506,14 @@ upload:
unsigned long long count = partContentLength - 1; // Inclusive for copies
// The default copy callback tries to set this for us, need to allocate here
manager.etags[seq-1] = malloc(512); // TBD - magic #! Isa there a max etag defined?
- S3_copy_object_range(&srcBucketContext, srcKey, bucketName, key,
- seq, manager.upload_id,
- startOffset, count,
- &putProperties,
- &lastModified, 512 /*TBD - magic # */, manager.etags[seq-1], 0,
- ©ResponseHandler, 0);
+ S3_copy_object_range(&srcBucketContext, srcKey,
+ bucketName, key,
+ seq, manager.upload_id,
+ startOffset, count,
+ &putProperties,
+ &lastModified, 512 /*TBD - magic # */,
+ manager.etags[seq-1], 0,
+ ©ResponseHandler, 0);
} else {
S3_upload_part(&bucketContext, key, &putProperties,
&putObjectHandler, seq, manager.upload_id,
@@ -2509,7 +2527,7 @@ upload:
contentLength -= MULTIPART_CHUNK_SIZE;
todoContentLength -= MULTIPART_CHUNK_SIZE;
}
-
+
int i;
int size = 0;
size += growbuffer_append(&(manager.gb), "<CompleteMultipartUpload>",
@@ -2545,7 +2563,7 @@ upload:
growbuffer_destroy(manager.gb);
free(manager.etags);
}
-
+
S3_deinitialize();
}
@@ -2614,7 +2632,8 @@ static void copy_object(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ListBucketHandler listBucketHandler =
{
@@ -2627,7 +2646,9 @@ static void copy_object(int argc, char **argv, int optindex)
".", 1, 0, &listBucketHandler, &sourceSize);
} while (S3_status_is_retryable(statusG) && should_retry());
if (statusG != S3StatusOK) {
- fprintf(stderr, "\nERROR: Unable to get source object size\n");
+ fprintf(stderr, "\nERROR: Unable to get source object size (%s)\n",
+ S3_get_status_name(statusG));
+ fprintf(stderr, "%s\n", errorDetailsG);
exit(1);
}
if (sourceSize > MULTIPART_CHUNK_SIZE) {
@@ -2751,7 +2772,6 @@ static void copy_object(int argc, char **argv, int optindex)
}
}
-
S3BucketContext bucketContext =
{
0,
@@ -2760,7 +2780,8 @@ static void copy_object(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3PutProperties putProperties =
@@ -2778,7 +2799,7 @@ static void copy_object(int argc, char **argv, int optindex)
};
S3ResponseHandler responseHandler =
- {
+ {
&responsePropertiesCallback,
&responseCompleteCallback
};
@@ -2821,7 +2842,7 @@ static S3Status getObjectDataCallback(int bufferSize, const char *buffer,
FILE *outfile = (FILE *) callbackData;
size_t wrote = fwrite(buffer, 1, bufferSize, outfile);
-
+
return ((wrote < (size_t) bufferSize) ?
S3StatusAbortedByCallback : S3StatusOK);
}
@@ -2916,7 +2937,7 @@ static void get_object(int argc, char **argv, int optindex)
// unmodified
outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
}
-
+
if (!outfile) {
fprintf(stderr, "\nERROR: Failed to open output file %s: ",
filename);
@@ -2933,7 +2954,7 @@ static void get_object(int argc, char **argv, int optindex)
}
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -2942,7 +2963,8 @@ static void get_object(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3GetConditions getConditions =
@@ -2982,7 +3004,7 @@ static void head_object(int argc, char **argv, int optindex)
fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
usageExit(stderr);
}
-
+
// Head implies showing response properties
showResponsePropertiesG = 1;
@@ -3008,7 +3030,7 @@ static void head_object(int argc, char **argv, int optindex)
}
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -3017,7 +3039,8 @@ static void head_object(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
@@ -3064,9 +3087,10 @@ static void generate_query_string(int argc, char **argv, int optindex)
key = 0;
}
- int64_t expires = -1;
+ int expires = -1;
const char *resource = 0;
+ const char *httpMethod = "GET";
while (optindex < argc) {
char *param = argv[optindex++];
@@ -3081,6 +3105,9 @@ static void generate_query_string(int argc, char **argv, int optindex)
else if (!strncmp(param, RESOURCE_PREFIX, RESOURCE_PREFIX_LEN)) {
resource = &(param[RESOURCE_PREFIX_LEN]);
}
+ else if (!strncmp(param, HTTP_METHOD_PREFIX, HTTP_METHOD_PREFIX_LEN)) {
+ httpMethod = &(param[HTTP_METHOD_PREFIX_LEN]);
+ }
else {
fprintf(stderr, "\nERROR: Unknown param: %s\n", param);
usageExit(stderr);
@@ -3088,7 +3115,7 @@ static void generate_query_string(int argc, char **argv, int optindex)
}
S3_init();
-
+
S3BucketContext bucketContext =
{
0,
@@ -3097,14 +3124,15 @@ static void generate_query_string(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
char buffer[S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE];
S3Status status = S3_generate_authenticated_query_string
- (buffer, &bucketContext, key, expires, resource);
-
+ (buffer, &bucketContext, key, expires, resource, httpMethod);
+
if (status != S3StatusOK) {
printf("Failed to generate authenticated query string: %s\n",
S3_get_status_name(status));
@@ -3169,7 +3197,7 @@ void get_acl(int argc, char **argv, int optindex)
// unmodified
outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
}
-
+
if (!outfile) {
fprintf(stderr, "\nERROR: Failed to open output file %s: ",
filename);
@@ -3200,7 +3228,8 @@ void get_acl(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
@@ -3343,7 +3372,7 @@ void set_acl(int argc, char **argv, int optindex)
aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-
+
// Parse it
int aclGrantCount;
S3AclGrant aclGrants[S3_MAX_ACL_GRANT_COUNT];
@@ -3364,7 +3393,8 @@ void set_acl(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
@@ -3377,7 +3407,7 @@ void set_acl(int argc, char **argv, int optindex)
S3_set_acl(&bucketContext, key, ownerId, ownerDisplayName,
aclGrantCount, aclGrants, 0, &responseHandler, 0);
} while (S3_status_is_retryable(statusG) && should_retry());
-
+
if (statusG != S3StatusOK) {
printError();
}
@@ -3425,7 +3455,7 @@ void get_logging(int argc, char **argv, int optindex)
// unmodified
outfile = fopen(filename, "r+" FOPEN_EXTRA_FLAGS);
}
-
+
if (!outfile) {
fprintf(stderr, "\nERROR: Failed to open output file %s: ",
filename);
@@ -3457,7 +3487,8 @@ void get_logging(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
@@ -3491,7 +3522,7 @@ void get_logging(int argc, char **argv, int optindex)
char composedId[S3_MAX_GRANTEE_USER_ID_SIZE +
S3_MAX_GRANTEE_DISPLAY_NAME_SIZE + 16];
const char *id;
-
+
switch (grant->granteeType) {
case S3GranteeTypeAmazonCustomerByEmail:
type = "Email";
@@ -3584,7 +3615,7 @@ void set_logging(int argc, char **argv, int optindex)
if (targetBucket) {
FILE *infile;
-
+
if (filename) {
if (!(infile = fopen(filename, "r" FOPEN_EXTRA_FLAGS))) {
fprintf(stderr, "\nERROR: Failed to open input file %s: ",
@@ -3602,7 +3633,7 @@ void set_logging(int argc, char **argv, int optindex)
aclBuf[fread(aclBuf, 1, sizeof(aclBuf), infile)] = 0;
char ownerId[S3_MAX_GRANTEE_USER_ID_SIZE];
char ownerDisplayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-
+
// Parse it
if (!convert_simple_acl(aclBuf, ownerId, ownerDisplayName,
&aclGrantCount, aclGrants)) {
@@ -3624,7 +3655,8 @@ void set_logging(int argc, char **argv, int optindex)
uriStyleG,
accessKeyIdG,
secretAccessKeyG,
- 0
+ 0,
+ awsRegionG
};
S3ResponseHandler responseHandler =
@@ -3638,7 +3670,7 @@ void set_logging(int argc, char **argv, int optindex)
targetPrefix, aclGrantCount, aclGrants,
0, &responseHandler, 0);
} while (S3_status_is_retryable(statusG) && should_retry());
-
+
if (statusG != S3StatusOK) {
printError();
}
@@ -3654,7 +3686,7 @@ int main(int argc, char **argv)
// Parse args
while (1) {
int idx = 0;
- int c = getopt_long(argc, argv, "vfhusr:", longOptionsG, &idx);
+ int c = getopt_long(argc, argv, "vfhusr:g:", longOptionsG, &idx);
if (c == -1) {
// End of options
@@ -3682,11 +3714,14 @@ int main(int argc, char **argv)
retriesG += *v - '0';
v++;
}
+ }
break;
case 'v':
verifyPeerG = S3_INIT_VERIFY_PEER;
break;
- }
+ case 'g':
+ awsRegionG = strdup(optarg);
+ break;
default:
fprintf(stderr, "\nERROR: Unknown option: -%c\n", c);
// Usage exit
@@ -3701,7 +3736,7 @@ int main(int argc, char **argv)
}
const char *command = argv[optind++];
-
+
if (!strcmp(command, "help")) {
fprintf(stdout, "\ns3 is a program for performing single requests "
"to Amazon S3.\n");
diff --git a/src/service.c b/src/service.c
index cc505d8..b4b3123 100644
--- a/src/service.c
+++ b/src/service.c
@@ -100,7 +100,7 @@ static S3Status propertiesCallback
(const S3ResponseProperties *responseProperties, void *callbackData)
{
XmlCallbackData *cbData = (XmlCallbackData *) callbackData;
-
+
return (*(cbData->responsePropertiesCallback))
(responseProperties, cbData->callbackData);
}
@@ -132,7 +132,8 @@ static void completeCallback(S3Status requestStatus,
void S3_list_service(S3Protocol protocol, const char *accessKeyId,
const char *secretAccessKey, const char *securityToken,
- const char *hostName, S3RequestContext *requestContext,
+ const char *hostName, const char *authRegion,
+ S3RequestContext *requestContext,
const S3ListServiceHandler *handler, void *callbackData)
{
// Create and set up the callback data
@@ -167,7 +168,8 @@ void S3_list_service(S3Protocol protocol, const char *accessKeyId,
S3UriStylePath, // uriStyle
accessKeyId, // accessKeyId
secretAccessKey, // secretAccessKey
- securityToken }, // securityToken
+ securityToken, // securityToken
+ authRegion }, // authRegion
0, // key
0, // queryParams
0, // subResource
diff --git a/src/service_access_logging.c b/src/service_access_logging.c
index 28d0079..dc90a03 100644
--- a/src/service_access_logging.c
+++ b/src/service_access_logging.c
@@ -329,7 +329,8 @@ void S3_get_server_access_logging(const S3BucketContext *bucketContext,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
0, // key
0, // queryParams
"logging", // subResource
@@ -534,7 +535,8 @@ void S3_set_server_access_logging(const S3BucketContext *bucketContext,
bucketContext->uriStyle, // uriStyle
bucketContext->accessKeyId, // accessKeyId
bucketContext->secretAccessKey, // secretAccessKey
- bucketContext->securityToken }, // securityToken
+ bucketContext->securityToken, // securityToken
+ bucketContext->authRegion }, // authRegion
0, // key
0, // queryParams
"logging", // subResource
diff --git a/src/util.c b/src/util.c
index 590d037..bb2ca97 100644
--- a/src/util.c
+++ b/src/util.c
@@ -120,7 +120,7 @@ int64_t parseIso8601Time(const char *str)
str += 2;
stm.tm_isdst = -1;
-
+
int64_t ret = mktime(&stm);
// Skip the millis
@@ -131,7 +131,7 @@ int64_t parseIso8601Time(const char *str)
str++;
}
}
-
+
if (checkString(str, "-dd:dd") || checkString(str, "+dd:dd")) {
int sign = (*str++ == '-') ? -1 : 1;
int hours = nextnum();
@@ -164,393 +164,6 @@ uint64_t parseUnsignedInt(const char *str)
}
-int base64Encode(const unsigned char *in, int inLen, char *out)
-{
- static const char *ENC =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
- char *original_out = out;
-
- while (inLen) {
- // first 6 bits of char 1
- *out++ = ENC[*in >> 2];
- if (!--inLen) {
- // last 2 bits of char 1, 4 bits of 0
- *out++ = ENC[(*in & 0x3) << 4];
- *out++ = '=';
- *out++ = '=';
- break;
- }
- // last 2 bits of char 1, first 4 bits of char 2
- *out++ = ENC[((*in & 0x3) << 4) | (*(in + 1) >> 4)];
- in++;
- if (!--inLen) {
- // last 4 bits of char 2, 2 bits of 0
- *out++ = ENC[(*in & 0xF) << 2];
- *out++ = '=';
- break;
- }
- // last 4 bits of char 2, first 2 bits of char 3
- *out++ = ENC[((*in & 0xF) << 2) | (*(in + 1) >> 6)];
- in++;
- // last 6 bits of char 3
- *out++ = ENC[*in & 0x3F];
- in++, inLen--;
- }
-
- return (out - original_out);
-}
-
-
-#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
-
-#define blk0L(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) \
- | (rol(block->l[i], 8) & 0x00FF00FF))
-
-#define blk0B(i) (block->l[i])
-
-#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
- block->l[(i + 8) & 15] ^ \
- block->l[(i + 2) & 15] ^ \
- block->l[i & 15], 1))
-
-#define R0_L(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk0L(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R0_B(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk0B(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R1(v, w, x, y, z, i) \
- z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
- w = rol(w, 30);
-#define R2(v, w, x, y, z, i) \
- z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
- w = rol(w, 30);
-#define R3(v, w, x, y, z, i) \
- z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
- w = rol(w, 30);
-#define R4(v, w, x, y, z, i) \
- z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
- w = rol(w, 30);
-
-#define R0A_L(i) R0_L(a, b, c, d, e, i)
-#define R0B_L(i) R0_L(b, c, d, e, a, i)
-#define R0C_L(i) R0_L(c, d, e, a, b, i)
-#define R0D_L(i) R0_L(d, e, a, b, c, i)
-#define R0E_L(i) R0_L(e, a, b, c, d, i)
-
-#define R0A_B(i) R0_B(a, b, c, d, e, i)
-#define R0B_B(i) R0_B(b, c, d, e, a, i)
-#define R0C_B(i) R0_B(c, d, e, a, b, i)
-#define R0D_B(i) R0_B(d, e, a, b, c, i)
-#define R0E_B(i) R0_B(e, a, b, c, d, i)
-
-#define R1A(i) R1(a, b, c, d, e, i)
-#define R1B(i) R1(b, c, d, e, a, i)
-#define R1C(i) R1(c, d, e, a, b, i)
-#define R1D(i) R1(d, e, a, b, c, i)
-#define R1E(i) R1(e, a, b, c, d, i)
-
-#define R2A(i) R2(a, b, c, d, e, i)
-#define R2B(i) R2(b, c, d, e, a, i)
-#define R2C(i) R2(c, d, e, a, b, i)
-#define R2D(i) R2(d, e, a, b, c, i)
-#define R2E(i) R2(e, a, b, c, d, i)
-
-#define R3A(i) R3(a, b, c, d, e, i)
-#define R3B(i) R3(b, c, d, e, a, i)
-#define R3C(i) R3(c, d, e, a, b, i)
-#define R3D(i) R3(d, e, a, b, c, i)
-#define R3E(i) R3(e, a, b, c, d, i)
-
-#define R4A(i) R4(a, b, c, d, e, i)
-#define R4B(i) R4(b, c, d, e, a, i)
-#define R4C(i) R4(c, d, e, a, b, i)
-#define R4D(i) R4(d, e, a, b, c, i)
-#define R4E(i) R4(e, a, b, c, d, i)
-
-
-static void SHA1_transform(uint32_t state[5], const unsigned char buffer[64])
-{
- uint32_t a, b, c, d, e;
-
- typedef union {
- unsigned char c[64];
- uint32_t l[16];
- } u;
-
- unsigned char w[64];
- u *block = (u *) w;
-
- memcpy(block, buffer, 64);
-
- a = state[0];
- b = state[1];
- c = state[2];
- d = state[3];
- e = state[4];
-
- static uint32_t endianness_indicator = 0x1;
- if (((unsigned char *) &endianness_indicator)[0]) {
- R0A_L( 0);
- R0E_L( 1); R0D_L( 2); R0C_L( 3); R0B_L( 4); R0A_L( 5);
- R0E_L( 6); R0D_L( 7); R0C_L( 8); R0B_L( 9); R0A_L(10);
- R0E_L(11); R0D_L(12); R0C_L(13); R0B_L(14); R0A_L(15);
- }
- else {
- R0A_B( 0);
- R0E_B( 1); R0D_B( 2); R0C_B( 3); R0B_B( 4); R0A_B( 5);
- R0E_B( 6); R0D_B( 7); R0C_B( 8); R0B_B( 9); R0A_B(10);
- R0E_B(11); R0D_B(12); R0C_B(13); R0B_B(14); R0A_B(15);
- }
- R1E(16); R1D(17); R1C(18); R1B(19); R2A(20);
- R2E(21); R2D(22); R2C(23); R2B(24); R2A(25);
- R2E(26); R2D(27); R2C(28); R2B(29); R2A(30);
- R2E(31); R2D(32); R2C(33); R2B(34); R2A(35);
- R2E(36); R2D(37); R2C(38); R2B(39); R3A(40);
- R3E(41); R3D(42); R3C(43); R3B(44); R3A(45);
- R3E(46); R3D(47); R3C(48); R3B(49); R3A(50);
- R3E(51); R3D(52); R3C(53); R3B(54); R3A(55);
- R3E(56); R3D(57); R3C(58); R3B(59); R4A(60);
- R4E(61); R4D(62); R4C(63); R4B(64); R4A(65);
- R4E(66); R4D(67); R4C(68); R4B(69); R4A(70);
- R4E(71); R4D(72); R4C(73); R4B(74); R4A(75);
- R4E(76); R4D(77); R4C(78); R4B(79);
-
- state[0] += a;
- state[1] += b;
- state[2] += c;
- state[3] += d;
- state[4] += e;
-}
-
-
-typedef struct
-{
- uint32_t state[5];
- uint32_t count[2];
- unsigned char buffer[64];
-} SHA1Context;
-
-
-static void SHA1_init(SHA1Context *context)
-{
- context->state[0] = 0x67452301;
- context->state[1] = 0xEFCDAB89;
- context->state[2] = 0x98BADCFE;
- context->state[3] = 0x10325476;
- context->state[4] = 0xC3D2E1F0;
- context->count[0] = context->count[1] = 0;
-}
-
-
-static void SHA1_update(SHA1Context *context, const unsigned char *data,
- unsigned int len)
-{
- uint32_t i, j;
-
- j = (context->count[0] >> 3) & 63;
-
- if ((context->count[0] += len << 3) < (len << 3)) {
- context->count[1]++;
- }
-
- context->count[1] += (len >> 29);
-
- if ((j + len) > 63) {
- memcpy(&(context->buffer[j]), data, (i = 64 - j));
- SHA1_transform(context->state, context->buffer);
- for ( ; (i + 63) < len; i += 64) {
- SHA1_transform(context->state, &(data[i]));
- }
- j = 0;
- }
- else {
- i = 0;
- }
-
- memcpy(&(context->buffer[j]), &(data[i]), len - i);
-}
-
-
-static void SHA1_final(unsigned char digest[20], SHA1Context *context)
-{
- uint32_t i;
- unsigned char finalcount[8];
-
- for (i = 0; i < 8; i++) {
- finalcount[i] = (unsigned char)
- ((context->count[(i >= 4 ? 0 : 1)] >>
- ((3 - (i & 3)) * 8)) & 255);
- }
-
- SHA1_update(context, (unsigned char *) "\200", 1);
-
- while ((context->count[0] & 504) != 448) {
- SHA1_update(context, (unsigned char *) "\0", 1);
- }
-
- SHA1_update(context, finalcount, 8);
-
- for (i = 0; i < 20; i++) {
- digest[i] = (unsigned char)
- ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
- }
-
- memset(context->buffer, 0, 64);
- memset(context->state, 0, 20);
- memset(context->count, 0, 8);
- memset(&finalcount, 0, 8);
-
- SHA1_transform(context->state, context->buffer);
-}
-
-
-// HMAC-SHA-1:
-//
-// K - is key padded with zeros to 512 bits
-// m - is message
-// OPAD - 0x5c5c5c...
-// IPAD - 0x363636...
-//
-// HMAC(K,m) = SHA1((K ^ OPAD) . SHA1((K ^ IPAD) . m))
-void HMAC_SHA1(unsigned char hmac[20], const unsigned char *key, int key_len,
- const unsigned char *message, int message_len)
-{
- unsigned char kopad[64], kipad[64];
- int i;
-
- if (key_len > 64) {
- key_len = 64;
- }
-
- for (i = 0; i < key_len; i++) {
- kopad[i] = key[i] ^ 0x5c;
- kipad[i] = key[i] ^ 0x36;
- }
-
- for ( ; i < 64; i++) {
- kopad[i] = 0 ^ 0x5c;
- kipad[i] = 0 ^ 0x36;
- }
-
- unsigned char digest[20];
-
- SHA1Context context;
-
- SHA1_init(&context);
- SHA1_update(&context, kipad, 64);
- SHA1_update(&context, message, message_len);
- SHA1_final(digest, &context);
-
- SHA1_init(&context);
- SHA1_update(&context, kopad, 64);
- SHA1_update(&context, digest, 20);
- SHA1_final(hmac, &context);
-}
-
-#define rot(x,k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-uint64_t hash(const unsigned char *k, int length)
-{
- uint32_t a, b, c;
-
- a = b = c = 0xdeadbeef + ((uint32_t) length);
-
- static uint32_t endianness_indicator = 0x1;
- if (((unsigned char *) &endianness_indicator)[0]) {
- while (length > 12) {
- a += k[0];
- a += ((uint32_t) k[1]) << 8;
- a += ((uint32_t) k[2]) << 16;
- a += ((uint32_t) k[3]) << 24;
- b += k[4];
- b += ((uint32_t) k[5]) << 8;
- b += ((uint32_t) k[6]) << 16;
- b += ((uint32_t) k[7]) << 24;
- c += k[8];
- c += ((uint32_t) k[9]) << 8;
- c += ((uint32_t) k[10]) << 16;
- c += ((uint32_t) k[11]) << 24;
- a -= c; a ^= rot(c, 4); c += b;
- b -= a; b ^= rot(a, 6); a += c;
- c -= b; c ^= rot(b, 8); b += a;
- a -= c; a ^= rot(c, 16); c += b;
- b -= a; b ^= rot(a, 19); a += c;
- c -= b; c ^= rot(b, 4); b += a;
- length -= 12;
- k += 12;
- }
-
- switch(length) {
- case 12: c += ((uint32_t) k[11]) << 24;
- case 11: c += ((uint32_t) k[10]) << 16;
- case 10: c += ((uint32_t) k[9]) << 8;
- case 9 : c += k[8];
- case 8 : b += ((uint32_t) k[7]) << 24;
- case 7 : b += ((uint32_t) k[6]) << 16;
- case 6 : b += ((uint32_t) k[5]) << 8;
- case 5 : b += k[4];
- case 4 : a += ((uint32_t) k[3]) << 24;
- case 3 : a += ((uint32_t) k[2]) << 16;
- case 2 : a += ((uint32_t) k[1]) << 8;
- case 1 : a += k[0]; break;
- case 0 : goto end;
- }
- }
- else {
- while (length > 12) {
- a += ((uint32_t) k[0]) << 24;
- a += ((uint32_t) k[1]) << 16;
- a += ((uint32_t) k[2]) << 8;
- a += ((uint32_t) k[3]);
- b += ((uint32_t) k[4]) << 24;
- b += ((uint32_t) k[5]) << 16;
- b += ((uint32_t) k[6]) << 8;
- b += ((uint32_t) k[7]);
- c += ((uint32_t) k[8]) << 24;
- c += ((uint32_t) k[9]) << 16;
- c += ((uint32_t) k[10]) << 8;
- c += ((uint32_t) k[11]);
- a -= c; a ^= rot(c, 4); c += b;
- b -= a; b ^= rot(a, 6); a += c;
- c -= b; c ^= rot(b, 8); b += a;
- a -= c; a ^= rot(c, 16); c += b;
- b -= a; b ^= rot(a, 19); a += c;
- c -= b; c ^= rot(b, 4); b += a;
- length -= 12;
- k += 12;
- }
-
- switch(length) {
- case 12: c += k[11];
- case 11: c += ((uint32_t) k[10]) << 8;
- case 10: c += ((uint32_t) k[9]) << 16;
- case 9 : c += ((uint32_t) k[8]) << 24;
- case 8 : b += k[7];
- case 7 : b += ((uint32_t) k[6]) << 8;
- case 6 : b += ((uint32_t) k[5]) << 16;
- case 5 : b += ((uint32_t) k[4]) << 24;
- case 4 : a += k[3];
- case 3 : a += ((uint32_t) k[2]) << 8;
- case 2 : a += ((uint32_t) k[1]) << 16;
- case 1 : a += ((uint32_t) k[0]) << 24; break;
- case 0 : goto end;
- }
- }
-
- c ^= b; c -= rot(b, 14);
- a ^= c; a -= rot(c, 11);
- b ^= a; b -= rot(a, 25);
- c ^= b; c -= rot(b, 16);
- a ^= c; a -= rot(c, 4);
- b ^= a; b -= rot(a, 14);
- c ^= b; c -= rot(b, 24);
-
- end:
- return ((((uint64_t) c) << 32) | b);
-}
-
int is_blank(char c)
{
return ((c == ' ') || (c == '\t'));
diff --git a/test/test.sh b/test/test.sh
index 4373941..413554b 100755
--- a/test/test.sh
+++ b/test/test.sh
@@ -106,7 +106,7 @@ COPY_BUCKET=${TEST_BUCKET_PREFIX}.copybucket
echo "$S3_COMMAND create $COPY_BUCKET"
$S3_COMMAND create $COPY_BUCKET
failures=$(($failures + (($? == 0) ? 0 : 1)))
-echo <<EOF
+cat <<EOF
$S3_COMMAND copy $TEST_BUCKET/key_5 $COPY_BUCKET/copykey
EOF
$S3_COMMAND copy $TEST_BUCKET/key_5 $COPY_BUCKET/copykey
@@ -156,11 +156,11 @@ $S3_COMMAND getacl $TEST_BUCKET filename=acl
failures=$(($failures + (($? == 0) ? 0 : 1)))
# Add READ for all AWS users, and READ_ACP for everyone
-echo <<EOF >> acl
-Group Authenticated AWS Users READ
+cat <<EOF >> acl
+Group Authenticated AWS Users READ
EOF
-echo <<EOF >> acl
-Group All Users READ_ACP
+cat <<EOF >> acl
+Group All Users READ_ACP
EOF
echo "$S3_COMMAND setacl $TEST_BUCKET filename=acl"
$S3_COMMAND setacl $TEST_BUCKET filename=acl
@@ -182,11 +182,11 @@ $S3_COMMAND getacl $TEST_BUCKET/aclkey filename=acl
failures=$(($failures + (($? == 0) ? 0 : 1)))
# Add READ for all AWS users, and READ_ACP for everyone
-echo <<EOF >> acl
-Group Authenticated AWS Users READ
+cat <<EOF >> acl
+Group Authenticated AWS Users READ
EOF
-echo <<EOF >> acl
-Group All Users READ_ACP
+cat <<EOF >> acl
+Group All Users READ_ACP
EOF
echo "$S3_COMMAND setacl $TEST_BUCKET/aclkey filename=acl"
$S3_COMMAND setacl $TEST_BUCKET/aclkey filename=acl