Last active
February 16, 2020 16:59
-
-
Save harshavardhana/8d6eb9a9bef7272df799439889e1b0e1 to your computer and use it in GitHub Desktop.
Nuxeo patches to support MinIO - AssumeRole
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/addons/amazon-s3-online-storage/index.js b/addons/amazon-s3-online-storage/index.js | |
index ce3b044e..6105b299 100644 | |
--- a/addons/amazon-s3-online-storage/index.js | |
+++ b/addons/amazon-s3-online-storage/index.js | |
@@ -98,6 +98,8 @@ class S3Provider { | |
AWS.config.update({ | |
credentials: new AWS.Credentials(options.awsSecretKeyId, options.awsSecretAccessKey, options.awsSessionToken), | |
region: options.region, | |
+ endpoint: options.endpoint, | |
+ s3ForcePathStyle: options.pathstyleAccessEnabled || false, | |
useAccelerateEndpoint: options.useS3Accelerate || false, | |
}); | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
nuxeo.s3storage.awsid=harsha | |
nuxeo.s3storage.awssecret=harsha123 | |
nuxeo.s3storage.bucket=nuxeo-bucket | |
nuxeo.s3storage.bucket_prefix=nuxeo-binary/ | |
nuxeo.s3storage.endpoint=https://play.min.io | |
nuxeo.s3storage.pathstyleaccess=true | |
nuxeo.s3storage.digest=SHA-256 | |
nuxeo.s3storage.useDirectUpload=true | |
nuxeo.s3storage.transient.awsid=harsha | |
nuxeo.s3storage.transient.awssecret=harsha123 | |
nuxeo.s3storage.transient.bucket=nuxeo-bucket | |
# custom endpoint support for the transient bucket | |
nuxeo.s3storage.transient.endpoint=https://play.min.io | |
# path-style access for the transient bucket's custom endpoint | |
nuxeo.s3storage.transient.pathstyleaccess=true | |
nuxeo.s3storage.transient.bucket_prefix=nuxeo-transient/ | |
# set a dummy ARN; MinIO does not use roleArn | |
nuxeo.s3storage.transient.roleArn="arn:xxx:xxx:xxx:xxxx" | |
nuxeo.s3storage.transient.targetMaxSizeMB=100 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/addons/nuxeo-core-binarymanager-cloud/nuxeo-core-binarymanager-s3/src/main/java/org/nuxeo/ecm/core/storage/sql/S3DirectBatchHandler.java b/addons/nuxeo-core-binarymanager-cloud/nuxeo-core-binarymanager-s3/src/main/java/org/nuxeo/ecm/core/storage/sql/S3DirectBatchHandler.java | |
index 68966576a7b..70fd406c2a2 100644 | |
--- a/addons/nuxeo-core-binarymanager-cloud/nuxeo-core-binarymanager-s3/src/main/java/org/nuxeo/ecm/core/storage/sql/S3DirectBatchHandler.java | |
+++ b/addons/nuxeo-core-binarymanager-cloud/nuxeo-core-binarymanager-s3/src/main/java/org/nuxeo/ecm/core/storage/sql/S3DirectBatchHandler.java | |
@@ -23,9 +23,13 @@ import static org.apache.commons.lang3.StringUtils.defaultIfEmpty; | |
import static org.apache.commons.lang3.StringUtils.defaultString; | |
import static org.apache.commons.lang3.StringUtils.isBlank; | |
import static org.apache.commons.lang3.StringUtils.isEmpty; | |
+import static org.apache.commons.lang3.StringUtils.isNotBlank; | |
+ | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.AWS_ID_PROPERTY; | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.AWS_SECRET_PROPERTY; | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.AWS_SESSION_TOKEN_PROPERTY; | |
+import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.ENDPOINT_PROPERTY; | |
+import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.PATHSTYLEACCESS_PROPERTY; | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.BUCKET_NAME_PROPERTY; | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.BUCKET_PREFIX_PROPERTY; | |
import static org.nuxeo.ecm.core.storage.sql.S3BinaryManager.BUCKET_REGION_PROPERTY; | |
@@ -48,6 +52,7 @@ import org.nuxeo.ecm.core.blob.binary.LazyBinary; | |
import org.nuxeo.runtime.aws.NuxeoAWSRegionProvider; | |
import com.amazonaws.auth.AWSCredentialsProvider; | |
+import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; | |
import com.amazonaws.services.s3.AmazonS3; | |
import com.amazonaws.services.s3.AmazonS3ClientBuilder; | |
import com.amazonaws.services.s3.model.ObjectMetadata; | |
@@ -105,10 +110,14 @@ public class S3DirectBatchHandler extends AbstractBatchHandler { | |
protected AWSSecurityTokenService stsClient; | |
- protected AmazonS3 amazonS3; | |
+ protected String endpoint; | |
protected String region; | |
+ protected boolean pathstyleAccessEnabled; | |
+ | |
+ protected AmazonS3 amazonS3; | |
+ | |
protected String bucket; | |
protected String bucketPrefix; | |
@@ -127,146 +136,168 @@ public class S3DirectBatchHandler extends AbstractBatchHandler { | |
@Override | |
protected void initialize(Map<String, String> properties) { | |
- super.initialize(properties); | |
- region = properties.get(BUCKET_REGION_PROPERTY); | |
- if (isBlank(region)) { | |
- region = NuxeoAWSRegionProvider.getInstance().getRegion(); | |
- } | |
- bucket = properties.get(BUCKET_NAME_PROPERTY); | |
- if (isBlank(bucket)) { | |
- throw new NuxeoException("Missing configuration property: " + BUCKET_NAME_PROPERTY); | |
- } | |
- roleArn = properties.get(ROLE_ARN_PROPERTY); | |
- if (isBlank(roleArn)) { | |
- throw new NuxeoException("Missing configuration property: " + ROLE_ARN_PROPERTY); | |
- } | |
- bucketPrefix = defaultString(properties.get(BUCKET_PREFIX_PROPERTY)); | |
- accelerateModeEnabled = Boolean.parseBoolean(properties.get(ACCELERATE_MODE_ENABLED_PROPERTY)); | |
- String awsSecretKeyId = properties.get(AWS_ID_PROPERTY); | |
- String awsSecretAccessKey = properties.get(AWS_SECRET_PROPERTY); | |
- String awsSessionToken = properties.get(AWS_SESSION_TOKEN_PROPERTY); | |
- expiration = Integer.parseInt(defaultIfEmpty(properties.get(INFO_EXPIRATION), "0")); | |
- policy = properties.get(POLICY_TEMPLATE_PROPERTY); | |
- | |
- useServerSideEncryption = Boolean.parseBoolean(properties.get(S3BinaryManager.SERVERSIDE_ENCRYPTION_PROPERTY)); | |
- | |
- AWSCredentialsProvider credentials = S3Utils.getAWSCredentialsProvider(awsSecretKeyId, awsSecretAccessKey, | |
- awsSessionToken); | |
- stsClient = initializeSTSClient(credentials); | |
- amazonS3 = initializeS3Client(credentials); | |
- | |
- if (!isBlank(bucketPrefix) && !bucketPrefix.endsWith("/")) { | |
- log.warn(String.format("%s %s S3 bucket prefix should end with '/': added automatically.", | |
- BUCKET_PREFIX_PROPERTY, bucketPrefix)); | |
- bucketPrefix += "/"; | |
- } | |
- | |
- blobProviderId = defaultString(properties.get(BLOB_PROVIDER_ID_PROPERTY), transientStoreName); | |
+ super.initialize(properties); | |
+ region = properties.get(BUCKET_REGION_PROPERTY); | |
+ if (isBlank(region)) { | |
+ region = NuxeoAWSRegionProvider.getInstance().getRegion(); | |
+ } | |
+ bucket = properties.get(BUCKET_NAME_PROPERTY); | |
+ if (isBlank(bucket)) { | |
+ throw new NuxeoException("Missing configuration property: " + BUCKET_NAME_PROPERTY); | |
+ } | |
+ roleArn = properties.get(ROLE_ARN_PROPERTY); | |
+ if (isBlank(roleArn)) { | |
+ throw new NuxeoException("Missing configuration property: " + ROLE_ARN_PROPERTY); | |
+ } | |
+ bucketPrefix = defaultString(properties.get(BUCKET_PREFIX_PROPERTY)); | |
+ accelerateModeEnabled = Boolean.parseBoolean(properties.get(ACCELERATE_MODE_ENABLED_PROPERTY)); | |
+ String awsSecretKeyId = properties.get(AWS_ID_PROPERTY); | |
+ String awsSecretAccessKey = properties.get(AWS_SECRET_PROPERTY); | |
+ String awsSessionToken = properties.get(AWS_SESSION_TOKEN_PROPERTY); | |
+ expiration = Integer.parseInt(defaultIfEmpty(properties.get(INFO_EXPIRATION), "0")); | |
+ policy = properties.get(POLICY_TEMPLATE_PROPERTY); | |
+ | |
+ useServerSideEncryption = Boolean.parseBoolean(properties.get(S3BinaryManager.SERVERSIDE_ENCRYPTION_PROPERTY)); | |
+ | |
+ endpoint = properties.get(ENDPOINT_PROPERTY); | |
+ pathstyleAccessEnabled = Boolean.parseBoolean(properties.get(PATHSTYLEACCESS_PROPERTY)); | |
+ | |
+ AWSCredentialsProvider credentials = S3Utils.getAWSCredentialsProvider(awsSecretKeyId, awsSecretAccessKey, | |
+ awsSessionToken); | |
+ | |
+ stsClient = initializeSTSClient(credentials); | |
+ amazonS3 = initializeS3Client(credentials); | |
+ | |
+ log.warn(String.format("%s %s using credentials for stsRequest at %s", awsSecretKeyId, awsSecretAccessKey, | |
+ endpoint)); | |
+ if (!isBlank(bucketPrefix) && !bucketPrefix.endsWith("/")) { | |
+ log.warn(String.format("%s %s S3 bucket prefix should end with '/': added automatically.", | |
+ BUCKET_PREFIX_PROPERTY, bucketPrefix)); | |
+ bucketPrefix += "/"; | |
+ } | |
+ | |
+ blobProviderId = defaultString(properties.get(BLOB_PROVIDER_ID_PROPERTY), transientStoreName); | |
} | |
protected AWSSecurityTokenService initializeSTSClient(AWSCredentialsProvider credentials) { | |
- return AWSSecurityTokenServiceClientBuilder.standard() | |
- .withRegion(region) | |
- .withCredentials(credentials) | |
- .build(); | |
+ AWSSecurityTokenServiceClientBuilder stsBuilder = AWSSecurityTokenServiceClientBuilder.standard(); | |
+ | |
+ if (isNotBlank(endpoint)) { | |
+ stsBuilder = stsBuilder.withEndpointConfiguration(new EndpointConfiguration(endpoint, region)); | |
+ } else { | |
+ stsBuilder = stsBuilder.withRegion(region); | |
+ } | |
+ | |
+ return stsBuilder.withCredentials(credentials).build(); | |
} | |
protected AmazonS3 initializeS3Client(AWSCredentialsProvider credentials) { | |
- return AmazonS3ClientBuilder.standard() | |
- .withRegion(region) | |
- .withCredentials(credentials) | |
- .withAccelerateModeEnabled(accelerateModeEnabled) | |
- .build(); | |
+ AmazonS3ClientBuilder s3Builder = AmazonS3ClientBuilder.standard(); | |
+ | |
+ if (isNotBlank(endpoint)) { | |
+ s3Builder = s3Builder.withEndpointConfiguration(new EndpointConfiguration(endpoint, region)); | |
+ } else { | |
+ s3Builder = s3Builder.withRegion(region); | |
+ } | |
+ | |
+ if (pathstyleAccessEnabled) { | |
+ s3Builder.enablePathStyleAccess(); | |
+ } | |
+ return s3Builder.withCredentials(credentials) | |
+ .withAccelerateModeEnabled(accelerateModeEnabled) | |
+ .build(); | |
} | |
@Override | |
public Batch getBatch(String batchId) { | |
- Map<String, Serializable> parameters = getBatchParameters(batchId); | |
- if (parameters == null) { | |
- return null; | |
- } | |
- | |
- // create the batch | |
- Batch batch = new Batch(batchId, parameters, getName(), getTransientStore()); | |
- | |
- AssumeRoleRequest request = new AssumeRoleRequest().withRoleArn(roleArn) | |
- .withPolicy(policy) | |
- .withRoleSessionName(batchId); | |
- if (expiration > 0) { | |
- request.setDurationSeconds(expiration); | |
- } | |
- | |
- Credentials credentials = assumeRole(request); | |
- | |
- Map<String, Object> properties = batch.getProperties(); | |
- properties.put(INFO_AWS_SECRET_KEY_ID, credentials.getAccessKeyId()); | |
- properties.put(INFO_AWS_SECRET_ACCESS_KEY, credentials.getSecretAccessKey()); | |
- properties.put(INFO_AWS_SESSION_TOKEN, credentials.getSessionToken()); | |
- properties.put(INFO_BUCKET, bucket); | |
- properties.put(INFO_BASE_KEY, bucketPrefix); | |
- properties.put(INFO_EXPIRATION, credentials.getExpiration().toInstant().toEpochMilli()); | |
- properties.put(INFO_AWS_REGION, region); | |
- properties.put(INFO_USE_S3_ACCELERATE, accelerateModeEnabled); | |
- | |
- return batch; | |
+ Map<String, Serializable> parameters = getBatchParameters(batchId); | |
+ if (parameters == null) { | |
+ return null; | |
+ } | |
+ | |
+ // create the batch | |
+ Batch batch = new Batch(batchId, parameters, getName(), getTransientStore()); | |
+ | |
+ AssumeRoleRequest request = new AssumeRoleRequest().withRoleArn(roleArn) | |
+ .withPolicy(policy) | |
+ .withRoleSessionName(batchId); | |
+ if (expiration > 0) { | |
+ request.setDurationSeconds(expiration); | |
+ } | |
+ | |
+ Credentials credentials = assumeRole(request); | |
+ | |
+ Map<String, Object> properties = batch.getProperties(); | |
+ properties.put(INFO_AWS_SECRET_KEY_ID, credentials.getAccessKeyId()); | |
+ properties.put(INFO_AWS_SECRET_ACCESS_KEY, credentials.getSecretAccessKey()); | |
+ properties.put(INFO_AWS_SESSION_TOKEN, credentials.getSessionToken()); | |
+ properties.put(INFO_BUCKET, bucket); | |
+ properties.put(INFO_BASE_KEY, bucketPrefix); | |
+ properties.put(INFO_EXPIRATION, credentials.getExpiration().toInstant().toEpochMilli()); | |
+ properties.put(INFO_AWS_REGION, region); | |
+ properties.put("endpoint", endpoint); | |
+ properties.put("pathstyleAccessEnabled", pathstyleAccessEnabled); | |
+ properties.put(INFO_USE_S3_ACCELERATE, accelerateModeEnabled); | |
+ | |
+ return batch; | |
} | |
protected Credentials assumeRole(AssumeRoleRequest request) { | |
- return stsClient.assumeRole(request).getCredentials(); | |
+ return stsClient.assumeRole(request).getCredentials(); | |
} | |
@Override | |
public boolean completeUpload(String batchId, String fileIndex, BatchFileInfo fileInfo) { | |
- String fileKey = fileInfo.getKey(); | |
- ObjectMetadata metadata = amazonS3.getObjectMetadata(bucket, fileKey); | |
- String etag = metadata.getETag(); | |
- if (isEmpty(etag)) { | |
- return false; | |
- } | |
- String newFileKey = bucketPrefix + etag; | |
- String mimeType = metadata.getContentType(); | |
- String encoding = metadata.getContentEncoding(); | |
- | |
- // server-side encryption | |
- String targetSSEAlgorithm; | |
- if (useServerSideEncryption) { // TODO KMS | |
- targetSSEAlgorithm = ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION; | |
- } else { | |
- targetSSEAlgorithm = null; | |
- } | |
- | |
- ObjectMetadata newMetadata; | |
- if (metadata.getContentLength() > lowerThresholdToUseMultipartCopy()) { | |
- newMetadata = S3Utils.copyFileMultipart(amazonS3, metadata, bucket, fileKey, bucket, newFileKey, targetSSEAlgorithm, true); | |
- } else { | |
- newMetadata = S3Utils.copyFile(amazonS3, metadata, bucket, fileKey, bucket, newFileKey, targetSSEAlgorithm, true); | |
- boolean isMultipartUpload = REGEX_MULTIPART_ETAG.matcher(etag).find(); | |
- if (isMultipartUpload) { | |
- etag = newMetadata.getETag(); | |
- String previousFileKey = newFileKey; | |
- newFileKey = bucketPrefix + etag; | |
- newMetadata = S3Utils.copyFile(amazonS3, metadata, bucket, previousFileKey, bucket, newFileKey, true); | |
- } | |
- } | |
- | |
- String filename = fileInfo.getFilename(); | |
- long length = newMetadata.getContentLength(); | |
- String digest = newMetadata.getContentMD5() != null ? newMetadata.getContentMD5() : etag; | |
- Binary binary = new LazyBinary(digest, blobProviderId, null); | |
- Blob blob = new BinaryBlob(binary, digest, filename, mimeType, encoding, digest, length); | |
- Batch batch = getBatch(batchId); | |
- try { | |
- batch.addFile(fileIndex, blob, filename, mimeType); | |
- } catch (NuxeoException e) { | |
- amazonS3.deleteObject(bucket, newMetadata.getETag()); | |
- throw e; | |
- } | |
- | |
- return true; | |
+ String fileKey = fileInfo.getKey(); | |
+ ObjectMetadata metadata = amazonS3.getObjectMetadata(bucket, fileKey); | |
+ String etag = metadata.getETag(); | |
+ if (isEmpty(etag)) { | |
+ return false; | |
+ } | |
+ String newFileKey = bucketPrefix + etag; | |
+ String mimeType = metadata.getContentType(); | |
+ String encoding = metadata.getContentEncoding(); | |
+ | |
+ // server-side encryption | |
+ String targetSSEAlgorithm; | |
+ if (useServerSideEncryption) { // TODO KMS | |
+ targetSSEAlgorithm = ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION; | |
+ } else { | |
+ targetSSEAlgorithm = null; | |
+ } | |
+ | |
+ ObjectMetadata newMetadata; | |
+ if (metadata.getContentLength() > lowerThresholdToUseMultipartCopy()) { | |
+ newMetadata = S3Utils.copyFileMultipart(amazonS3, metadata, bucket, fileKey, bucket, newFileKey, targetSSEAlgorithm, true); | |
+ } else { | |
+ newMetadata = S3Utils.copyFile(amazonS3, metadata, bucket, fileKey, bucket, newFileKey, targetSSEAlgorithm, true); | |
+ boolean isMultipartUpload = REGEX_MULTIPART_ETAG.matcher(etag).find(); | |
+ if (isMultipartUpload) { | |
+ etag = newMetadata.getETag(); | |
+ String previousFileKey = newFileKey; | |
+ newFileKey = bucketPrefix + etag; | |
+ newMetadata = S3Utils.copyFile(amazonS3, metadata, bucket, previousFileKey, bucket, newFileKey, true); | |
+ } | |
+ } | |
+ | |
+ String filename = fileInfo.getFilename(); | |
+ long length = newMetadata.getContentLength(); | |
+ String digest = newMetadata.getContentMD5() != null ? newMetadata.getContentMD5() : etag; | |
+ Binary binary = new LazyBinary(digest, blobProviderId, null); | |
+ Blob blob = new BinaryBlob(binary, digest, filename, mimeType, encoding, digest, length); | |
+ Batch batch = getBatch(batchId); | |
+ try { | |
+ batch.addFile(fileIndex, blob, filename, mimeType); | |
+ } catch (NuxeoException e) { | |
+ amazonS3.deleteObject(bucket, newMetadata.getETag()); | |
+ throw e; | |
+ } | |
+ | |
+ return true; | |
} | |
protected long lowerThresholdToUseMultipartCopy() { | |
- return NON_MULTIPART_COPY_MAX_SIZE; | |
+ return NON_MULTIPART_COPY_MAX_SIZE; | |
} | |
} | |
diff --git a/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3-config.xml.nxftl b/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3-config.xml.nxftl | |
index dd849832e24..3684c68c48c 100644 | |
--- a/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3-config.xml.nxftl | |
+++ b/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3-config.xml.nxftl | |
@@ -12,6 +12,8 @@ | |
<property name="awstoken">${nuxeo.s3storage.awstoken}</property> | |
<property name="bucket">${nuxeo.s3storage.bucket}</property> | |
<property name="region">${nuxeo.s3storage.region}</property> | |
+ <property name="endpoint">${nuxeo.s3storage.endpoint}</property> | |
+ <property name="pathstyleaccess">${nuxeo.s3storage.pathstyleaccess}</property> | |
<property name="bucket_prefix">${nuxeo.s3storage.bucket_prefix}</property> | |
<!-- min file age, in second, to be removed from cache if the size max size is reached, default is 3600 --> | |
<property name="cacheminage">${nuxeo.s3storage.cacheminage}</property> | |
diff --git a/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3directupload-config.xml.nxftl b/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3directupload-config.xml.nxftl | |
index 9f7e31a60e0..2a3d1761e99 100644 | |
--- a/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3directupload-config.xml.nxftl | |
+++ b/packages/nuxeo-amazon-s3-package/src/main/resources/install/templates/s3binaries/nxserver/config/s3directupload-config.xml.nxftl | |
@@ -27,6 +27,8 @@ | |
<property name="awstoken">${nuxeo.s3storage.transient.awstoken}</property> | |
<property name="bucket">${nuxeo.s3storage.transient.bucket}</property> | |
<property name="region">${nuxeo.s3storage.transient.region}</property> | |
+ <property name="endpoint">${nuxeo.s3storage.transient.endpoint}</property> | |
+ <property name="pathstyleaccess">${nuxeo.s3storage.transient.pathstyleaccess}</property> | |
<property name="bucket_prefix">${nuxeo.s3storage.transient.bucket_prefix}</property> | |
<property name="crypt.serverside">${nuxeo.s3storage.transient.crypt.serverside}</property> | |
<property name="cachesize">100MB</property> | |
@@ -51,6 +53,8 @@ | |
<property name="awstoken">${nuxeo.s3storage.awstoken}</property> | |
<property name="bucket">${nuxeo.s3storage.bucket}</property> | |
<property name="region">${nuxeo.s3storage.region}</property> | |
+ <property name="endpoint">${nuxeo.s3storage.endpoint}</property> | |
+ <property name="pathstyleaccess">${nuxeo.s3storage.pathstyleaccess}</property> | |
<property name="bucket_prefix">s3DirectUpload</property> | |
<property name="crypt.serverside">${nuxeo.s3storage.crypt.serverside}</property> | |
<property name="cachesize">100MB</property> | |
@@ -75,6 +79,8 @@ | |
<property name="awstoken">${nuxeo.s3storage.transient.awstoken}</property> | |
<property name="bucket">${nuxeo.s3storage.transient.bucket}</property> | |
<property name="region">${nuxeo.s3storage.transient.region}</property> | |
+ <property name="endpoint">${nuxeo.s3storage.transient.endpoint}</property> | |
+ <property name="pathstyleaccess">${nuxeo.s3storage.transient.pathstyleaccess}</property> | |
<property name="bucket_prefix">${nuxeo.s3storage.transient.bucket_prefix}</property> | |
<property name="crypt.serverside">${nuxeo.s3storage.transient.crypt.serverside}</property> | |
<property name="policyTemplate">{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:PutObject","Resource":["arn:aws:s3:::${nuxeo.s3storage.transient.bucket}/*"]}]}</property> | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment