A lightweight version of AmazonS3Client (AWS SDK for Java v1) that can be used in tests against S3Ninja.
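Package and import statements are omitted. The class is written against the AWS SDK for Java v1 (com.amazonaws.*) plus a commons-logging style Log/LogFactory, and it calls a few SDK helpers unqualified (publishProgress, failure, cleanupDataSource, newResettableInputStream, populateRequestMetadata), so it assumes the matching static imports from the SDK version the original AmazonS3Client#putObject was adapted from.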
public class AmazonS3TestClient extends AmazonS3Client {

    private static Log log = LogFactory.getLog(AmazonS3TestClient.class);

    String testBucketName = "yourBucketName";

    public AmazonS3TestClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
        super(new StaticCredentialsProvider(awsCredentials), clientConfiguration);
    }

    @Override
    public synchronized void setEndpoint(String endpoint) {
        super.setEndpoint(endpoint);
    }
    @Override
    public synchronized void setRegion(com.amazonaws.regions.Region region) {
        // no-op: the endpoint stays pointed at the local test server
    }
    @Override
    public synchronized Region getRegion() {
        return Region.US_Standard;
    }

    @Override
    public boolean doesBucketExist(String bucketName) {
        return true;
    }
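    /**
     * Mirrors the flow of the SDK's AmazonS3Client#putObject (metadata defaults,
     * content-length check, progress events) but executes the request through the
     * local invoke(...) below, which redirects it to the test endpoint.
     */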
    @Override
    public PutObjectResult putObject(PutObjectRequest putObjectRequest)
            throws SdkClientException, AmazonServiceException {
        final File file = putObjectRequest.getFile();
        final InputStream isOrig = putObjectRequest.getInputStream();
        final String bucketName = putObjectRequest.getBucketName();
        final String key = putObjectRequest.getKey();
        ObjectMetadata metadata = putObjectRequest.getMetadata();
        InputStream input = isOrig;
        if (metadata == null)
            metadata = new ObjectMetadata();

        // If a file is specified for upload, we need to pull some additional
        // information from it to auto-configure a few options
        if (file == null) {
            // When input is a FileInputStream, this wrapping enables
            // unlimited mark-and-reset
            if (input != null)
                input = ReleasableInputStream.wrap(input);
        } else {
            // Always set the content length, even if it's already set
            metadata.setContentLength(file.length());
            final boolean calculateMD5 = metadata.getContentMD5() == null;
            // Only set the content type if it hasn't already been set
            if (metadata.getContentType() == null) {
                metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
            }
            input = newResettableInputStream(file, "Unable to find file to upload");
        }
        final ProgressListener listener;
        final ObjectMetadata returnedMetadata;
        MD5DigestCalculatingInputStream md5DigestStream = null;
        try {
            Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

            // Make backward compatible with buffer size via system property
            final Integer bufsize = Constants.getS3StreamBufferSize();
            if (bufsize != null) {
                AmazonWebServiceRequest awsreq = request.getOriginalRequest();
                // Note awsreq is never null at this point even if the original
                // request was
                awsreq.getRequestClientOptions()
                        .setReadLimit(bufsize.intValue());
            }

            // Use internal interface to differentiate 0 from unset.
            final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
            if (contentLength == null) {
                /*
                 * There's nothing we can do except let the HTTP client buffer
                 * the input stream contents if the caller doesn't tell us how much
                 * data to expect in a stream, since we have to explicitly tell
                 * Amazon S3 how much we're sending before we start sending any of
                 * it.
                 */
                log.warn("No content length specified for stream data. " +
                        "Stream contents will be buffered in memory and could result in " +
                        "out of memory errors.");
            } else {
                final long expectedLength = contentLength.longValue();
                if (expectedLength >= 0) {
                    // Performs length check on the underlying data stream.
                    // For S3 encryption client, the underlying data stream here
                    // refers to the cipher-text data stream (ie not the underlying
                    // plain-text data stream which in turn may have been wrapped
                    // with its own length check input stream.)
                    LengthCheckInputStream lcis = new LengthCheckInputStream(
                            input,
                            expectedLength, // expected data length to be uploaded
                            EXCLUDE_SKIPPED_BYTES);
                    input = lcis;
                }
            }
            if (metadata.getContentType() == null) {
                /*
                 * Default to "application/octet-stream" if the user hasn't
                 * specified a content type.
                 */
                metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
            }

            populateRequestMetadata(request, metadata);
            request.setContent(input);

            listener = putObjectRequest.getGeneralProgressListener();
            publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
            try {
                returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
            } catch (Throwable t) {
                publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
                throw failure(t);
            }
        } finally {
            cleanupDataSource(putObjectRequest, file, isOrig, input, log);
        }

        publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
        final PutObjectResult result = createPutObjectResult(returnedMetadata);
        return result;
    }
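    // Builds the PutObjectResult from the response metadata returned by the test server.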
    private static PutObjectResult createPutObjectResult(ObjectMetadata metadata) {
        final PutObjectResult result = new PutObjectResult();
        result.setVersionId(metadata.getVersionId());
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setExpirationTime(metadata.getExpirationTime());
        result.setExpirationTimeRuleId(metadata.getExpirationTimeRuleId());
        result.setETag(metadata.getETag());
        result.setMetadata(metadata);
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    }
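    // Request factory methods, re-declared here so the test client can build requests
    // against its own endpoint without relying on SDK-internal helpers.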
    protected <X extends AmazonWebServiceRequest> Request<X> createRequest(String bucketName, String key,
            X originalRequest, HttpMethodName httpMethod) {
        return createRequest(bucketName, key, originalRequest, httpMethod, endpoint);
    }

    protected <X extends AmazonWebServiceRequest> Request<X> createRequest(String bucketName, String key,
            X originalRequest, HttpMethodName httpMethod, URI endpoint) {
        Request<X> request = new DefaultRequest<X>(originalRequest, Constants.S3_SERVICE_DISPLAY_NAME);
        request.setHttpMethod(httpMethod);
        request.setEndpoint(endpoint);
        return request;
    }
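    // Executes the request with the inherited AmazonHttpClient. Before sending, the
    // endpoint is rewritten to <endpoint>/<testBucketName>/<fileName>, so the object
    // ends up in the configured test bucket regardless of the bucket on the request.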
    private <X, Y extends AmazonWebServiceRequest> X invoke(Request<Y> request,
            Unmarshaller<X, InputStream> unmarshaller,
            String bucketName,
            String key) throws URISyntaxException {
        return invoke(request, new S3XmlResponseHandler<X>(unmarshaller), bucketName, key);
    }
    private <X, Y extends AmazonWebServiceRequest> X invoke(Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            String bucket, String key) throws URISyntaxException {
        AmazonWebServiceRequest originalRequest = request.getOriginalRequest();
        S3SignerProvider signerProvider = new S3SignerProvider(this, getSigner());
        ExecutionContext executionContext = createExecutionContext(originalRequest, signerProvider);

        String[] keyTokens = key.split("/");
        String fileName = "";
        if (keyTokens.length > 0)
            fileName = keyTokens[keyTokens.length - 1];
        else
            fileName = "test" + new Random().nextInt() + ".jpg";
        URI testEndpoint = new URI(endpoint.toString() + "/" + testBucketName + "/" + fileName);
        request.setEndpoint(testEndpoint);

        Response<X> response = null;
        try {
            request.setTimeOffset(timeOffset);
            if (!request.getHeaders().containsKey(Headers.CONTENT_TYPE)) {
                request.addHeader(Headers.CONTENT_TYPE,
                        "application/octet-stream");
            }
            executionContext.setCredentialsProvider(
                    CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider));
            response = client.execute(request, responseHandler,
                    errorResponseHandler, executionContext);
            return response.getAwsResponse();
        } catch (ResetException ex) {
            ex.setExtraInfo("If the request involves an input stream, the maximum stream buffer size can be configured via request.getRequestClientOptions().setReadLimit(int)");
            throw ex;
        } catch (AmazonS3Exception ase) {
            /*
             * Edge case from the SDK: when a bucket is deleted and recreated in a different
             * region, the SDK's region cache can hold stale region info and the first request
             * without explicit region info fails with a 301. The full client refreshes its
             * cache at this point; this trimmed test client simply flags the failure as a
             * region error. The recommended practice is to always provide region info with
             * the request.
             */
            if (ase.getStatusCode() == 301) {
                if (ase.getAdditionalDetails() != null) {
                    ase.setErrorMessage("Region error");
                }
            }
            throw ase;
        }
    }
}
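A minimal usage sketch, assuming S3Ninja is running locally on its default port 9444 and a bucket matching testBucketName has been created in its UI; the credentials, endpoint URL, and file path below are placeholders:

// Placeholder credentials; S3Ninja shows the keys it accepts in its web UI.
AWSCredentials credentials = new BasicAWSCredentials("accessKey", "secretKey");
AmazonS3TestClient s3 = new AmazonS3TestClient(credentials, new ClientConfiguration());
s3.setEndpoint("http://localhost:9444"); // local S3Ninja endpoint (assumed)

File sample = new File("src/test/resources/sample.jpg"); // any local test file
PutObjectResult result = s3.putObject(
        new PutObjectRequest("yourBucketName", "images/sample.jpg", sample));
System.out.println("Stored object with ETag " + result.getETag());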