I am having a problem with Swisscom S3 Dynamic Storage. When I issue concurrent test CRUD requests from 5 or more parallel threads, the storage service randomly returns 403 Forbidden instead of the correct response. When I run the same requests one by one, everything works fine (see the single-threaded variant sketched after the code).
Here is the code I am using:
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.*;
import com.amazonaws.util.StringInputStream;
import org.apache.commons.io.IOUtils;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* Tutorial https://javatutorial.net/java-s3-example
*/
public class AmazonS3ManualTest {

    public static final String BUCKET_NAME = "??";
    private static String accessKey = "??";
    private static String secretKey = "??";

    @Test
    public void testOperations() throws IOException, InterruptedException {
        final int maxCount = 5;
        final AmazonS3Client amazonS3Client = getS3Client();
        final CountDownLatch latch = new CountDownLatch(maxCount);
        final ExecutorService executor = Executors.newFixedThreadPool(maxCount);

        for (int i = 0; i < maxCount; i++) {
            final int index = i;
            executor.submit(() -> {
                try {
                    final String FolderOne = "testFolderOne" + index;
                    final String FolderTwo = "testFolderTwo" + index;
                    final String FolderCopy = "copyFolder" + index;
                    try {
                        createFile(amazonS3Client, "/" + FolderOne + "/file.txt");
                        createFolder(amazonS3Client, FolderTwo + "/");
                        exists(amazonS3Client, FolderOne + "/file.txt");
                        exists(amazonS3Client, FolderTwo + "/");
                        copy(amazonS3Client, FolderOne + "/file.txt", FolderCopy + "/filecopy.txt");
                        delete(amazonS3Client, "/" + FolderOne);
                        delete(amazonS3Client, "/" + FolderTwo);
                        get(amazonS3Client, FolderCopy + "/filecopy.txt");
                        delete(amazonS3Client, "/" + FolderCopy + "/filecopy.txt");
                        isEmptyFolder(amazonS3Client, "/" + FolderCopy);
delete(amazonS3Client, "/ + FolderCopy");
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                    latch.countDown();
                } catch (final Exception ignored) {
                }
            });
        }

        if (!latch.await(300, TimeUnit.SECONDS)) {
            throw new RuntimeException("Waiting too long for the result");
        }
    }

    private void isEmptyFolder(AmazonS3Client amazonS3Client, String folder) {
        final ObjectListing objectListing = amazonS3Client.listObjects(BUCKET_NAME, folder);
        assert(objectListing.getObjectSummaries().isEmpty());
    }

    private void get(AmazonS3Client amazonS3Client, String file) throws IOException {
        GetObjectRequest request = new GetObjectRequest(BUCKET_NAME, file);
        final S3Object object = amazonS3Client.getObject(request);
        final S3ObjectInputStream objectContent = object.getObjectContent();
        final String s = IOUtils.toString(objectContent);
        assert(s.length() > 0);
    }

    private void copy(AmazonS3Client amazonS3Client, String source, String target) {
        CopyObjectRequest request = new CopyObjectRequest(BUCKET_NAME, source, BUCKET_NAME, target);
        amazonS3Client.copyObject(request);
    }

    private void delete(AmazonS3Client amazonS3Client, String path) {
        deleteRecursive(amazonS3Client, path);
    }

    private void deleteRecursive(AmazonS3Client amazonS3Client, String path) {
        ObjectListing objects = amazonS3Client.listObjects(BUCKET_NAME, path);
        for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
            if (objectSummary.getKey().equals(path)) {
                continue;
            }
            if (objectSummary.getKey().endsWith("/")) {
                deleteRecursive(amazonS3Client, objectSummary.getKey());
            } else {
                amazonS3Client.deleteObject(BUCKET_NAME, objectSummary.getKey());
            }
        }
        amazonS3Client.deleteObject(BUCKET_NAME, path);
    }

    private void exists(AmazonS3Client amazonS3Client, String folder) {
        GetObjectMetadataRequest request = new GetObjectMetadataRequest(BUCKET_NAME, folder);
        try {
            final ObjectMetadata objectMetadata = amazonS3Client.getObjectMetadata(request);
            assert(objectMetadata != null);
        } catch (AmazonS3Exception e) {
            if (e.getMessage().contains("404")) {
                assert(false);
                return;
            }
        }
        assert(true);
    }
    private void createFolder(AmazonS3Client amazonS3Client, String folder) {
        final InputStream input = new ByteArrayInputStream(new byte[0]);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);
        amazonS3Client.putObject(new PutObjectRequest(BUCKET_NAME, folder, input, metadata));
    }

    private void createFile(AmazonS3Client amazonS3Client, String fileName) throws IOException {
        ObjectMetadata omd = new ObjectMetadata();
        //omd.setContentType("html/text");
        omd.setHeader("filename", fileName);
        omd.setHeader("x-amz-server-side-encryption", "AES256");
        // upload the file and keep it private
        final StringInputStream testFile = new StringInputStream("Test");
        final PutObjectRequest putObjectRequest = new PutObjectRequest(BUCKET_NAME, fileName, testFile, omd);
        amazonS3Client.putObject(putObjectRequest.withCannedAcl(CannedAccessControlList.Private));
        testFile.close();
    }
    private AmazonS3Client getS3Client() {
        ClientConfiguration opts = new ClientConfiguration();
        opts.setSignerOverride("S3SignerType"); // NOT "AWS3SignerType"
        opts.setMaxConnections(100);
        final AmazonS3Client s3 = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey), opts);
        s3.setEndpoint("ds31s3.swisscom.com");
        return s3;
    }
}
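For comparison, the sequential case mentioned above can be reproduced by adding a variant of the test to the same class that runs the identical CRUD helpers on a single-threaded executor. This is only a minimal sketch (the method name testOperationsSequential is just illustrative, and it reuses the helper methods already shown); the only real difference from the failing test is the pool size. With this setup every request succeeds against the same bucket and credentials.

    // Illustrative sequential variant: same CRUD helpers as above, but a
    // single-threaded executor, so no two requests are ever in flight at once.
    @Test
    public void testOperationsSequential() throws InterruptedException {
        final int maxCount = 5;
        final AmazonS3Client amazonS3Client = getS3Client();
        final CountDownLatch latch = new CountDownLatch(maxCount);
        final ExecutorService executor = Executors.newSingleThreadExecutor(); // only change vs. testOperations

        for (int i = 0; i < maxCount; i++) {
            final int index = i;
            executor.submit(() -> {
                final String FolderOne = "testFolderOne" + index;
                final String FolderCopy = "copyFolder" + index;
                try {
                    createFile(amazonS3Client, "/" + FolderOne + "/file.txt");
                    exists(amazonS3Client, FolderOne + "/file.txt");
                    copy(amazonS3Client, FolderOne + "/file.txt", FolderCopy + "/filecopy.txt");
                    get(amazonS3Client, FolderCopy + "/filecopy.txt");
                    delete(amazonS3Client, "/" + FolderOne);
                    delete(amazonS3Client, "/" + FolderCopy);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                latch.countDown();
            });
        }

        if (!latch.await(300, TimeUnit.SECONDS)) {
            throw new RuntimeException("Waiting too long for the result");
        }
    }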
The exception I get in the parallel case is:
com.amazonaws.services.s3.model.AmazonS3Exception: The AWS Access Key Id you provided does not exist in our records. (Service: Amazon S3; Status Code: 403; Error Code: InvalidAccessKeyId; Request ID: null), S3 Extended Request ID: null
This behaviour is abnormal and does not scale, so could you recommend what I can do in this situation?