
Commit b6ae653

AnthonyRoss, eding-backblaze, and ameya9 authored
fix: Update Backblaze Client to Request Account Info (#1462)

Currently the Backblaze client is hard-coded to use only a few clusters. This change updates the init function to request the account info, which includes the S3 base URI that is then used for all other requests.

Co-authored-by: Eric Ding <[email protected]>
Co-authored-by: Ameya Shendre <[email protected]>

1 parent 5296b9e commit b6ae653
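The region for the S3 client is derived from the s3ApiUrl field returned by Backblaze's b2_authorize_account endpoint (see the getAccountRegion method added in BackblazeDataTransferClient.java below). As a rough, standalone illustration of that derivation, here is a minimal sketch; it is not part of the commit, and the sample URL is an assumed example of the endpoint's shape:

// Minimal sketch (not part of this commit): extracting a region such as "us-west-004"
// from an s3ApiUrl of the kind b2_authorize_account returns. The sample URL below is
// illustrative; the real value comes from the "s3ApiUrl" field of the JSON response.
public class RegionFromS3ApiUrlSketch {
  public static void main(String[] args) {
    String s3ApiUrl = "https://s3.us-west-004.backblazeb2.com";
    // split("s3.") is a regex split ('.' matches any character); for this URL shape it
    // yields ["https://", "us-west-004.backblazeb2.com"], and the region is the first
    // dot-separated segment of the remainder.
    String region = s3ApiUrl.split("s3.")[1].split("\\.")[0];
    System.out.println(region); // prints us-west-004
  }
}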

File tree

6 files changed (+315, -101 lines changed)


.gitignore

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@
 *.log
 
 secrets.csv
+env.secrets
 #!distributions/demo-server/src/main/resources/
 #!distributions/demo-server/src/main/resources/demo-selfsigned-keystore.jks
 

extensions/data-transfer/portability-data-transfer-backblaze/build.gradle

Lines changed: 8 additions & 6 deletions

@@ -20,13 +20,15 @@ plugins {
 }
 
 dependencies {
-    compile project(':portability-spi-cloud')
-    compile project(':portability-spi-transfer')
-    compile project(':portability-transfer')
+    implementation project(':portability-spi-cloud')
+    implementation project(':portability-spi-transfer')
+    implementation project(':portability-transfer')
 
-    compile('software.amazon.awssdk:s3:2.15.24')
-    compile('org.apache.commons:commons-lang3:3.11')
-    compile("commons-io:commons-io:2.6")
+    implementation('software.amazon.awssdk:s3:2.15.24')
+    implementation('org.apache.commons:commons-lang3:3.11')
+    implementation("commons-io:commons-io:2.6")
+    implementation("org.apache.httpcomponents:httpclient:${apacheHttpVersion}")
+    implementation("com.googlecode.json-simple:json-simple:${jsonSimpleVersion}")
 }
 
 configurePublication(project)

extensions/data-transfer/portability-data-transfer-backblaze/src/main/java/org/datatransferproject/datatransfer/backblaze/common/BackblazeDataTransferClient.java

Lines changed: 80 additions & 47 deletions

@@ -20,21 +20,23 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.net.URI;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Base64;
 import java.util.List;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.util.EntityUtils;
 import org.datatransferproject.api.launcher.Monitor;
 import org.datatransferproject.datatransfer.backblaze.exception.BackblazeCredentialsException;
-import org.datatransferproject.transfer.JobMetadata;
-import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
-import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
 import software.amazon.awssdk.awscore.exception.AwsServiceException;
-import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
 import software.amazon.awssdk.core.exception.SdkClientException;
 import software.amazon.awssdk.core.sync.RequestBody;
-import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.s3.S3Client;
 import software.amazon.awssdk.services.s3.model.Bucket;
 import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException;
@@ -54,11 +56,25 @@
 import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 
+/**
+ * Represents a client for handling data transfer operations with Backblaze B2's S3 compatible API.
+ * This class is responsible for managing the initialization of connections, uploading files, and
+ * handling multipart uploads for large files.
+ *
+ * <p>The client requires valid Backblaze credentials (keyId and applicationKey) and an instance of
+ * a pre-configured S3 client factory for communication with the Backblaze API. Additionally, a
+ * Monitor is used to log diagnostic messages during execution.
+ *
+ * <p>The class provides methods to: - Initialize the BackblazeDataTransferClient using user
+ * credentials. - Upload files either as single uploads or using multipart uploads for larger files.
+ * - Create or select appropriate buckets for data transfer.
+ *
+ * <p>The client implements retry mechanisms and proper error handling for common scenarios like
+ * network failures or authentication issues.
+ */
 public class BackblazeDataTransferClient {
   private static final String DATA_TRANSFER_BUCKET_PREFIX_FORMAT_STRING = "%s-data-transfer";
   private static final int MAX_BUCKET_CREATION_ATTEMPTS = 10;
-  private final List<String> BACKBLAZE_REGIONS =
-      Arrays.asList("us-west-000", "us-west-001", "us-west-002", "eu-central-003");
 
   private final long sizeThresholdForMultipartUpload;
   private final long partSizeForMultiPartUpload;
@@ -68,10 +84,10 @@ public class BackblazeDataTransferClient {
   private String bucketName;
 
   public BackblazeDataTransferClient(
-      Monitor monitor,
-      BackblazeS3ClientFactory backblazeS3ClientFactory,
-      long sizeThresholdForMultipartUpload,
-      long partSizeForMultiPartUpload) {
+          Monitor monitor,
+          BackblazeS3ClientFactory backblazeS3ClientFactory,
+          long sizeThresholdForMultipartUpload,
+          long partSizeForMultiPartUpload) {
     this.monitor = monitor;
     this.backblazeS3ClientFactory = backblazeS3ClientFactory;
     // Avoid infinite loops
@@ -81,43 +97,25 @@ public BackblazeDataTransferClient(
     this.partSizeForMultiPartUpload = partSizeForMultiPartUpload;
   }
 
-  public void init(String keyId, String applicationKey, String exportService)
+  public void init(
+      String keyId, String applicationKey, String exportService, CloseableHttpClient httpClient)
       throws BackblazeCredentialsException, IOException {
     // Fetch all the available buckets and use that to find which region the user is in
     ListBucketsResponse listBucketsResponse = null;
-    String userRegion = null;
-
-    // The Key ID starts with the region identifier number, so reorder the regions such that
-    // the first region is most likely the user's region
-    String regionId = keyId.substring(0, 3);
-    BACKBLAZE_REGIONS.sort(
-        (String region1, String region2) -> {
-          if (region1.endsWith(regionId)) {
-            return -1;
-          }
-          return 0;
-        });
 
     Throwable s3Exception = null;
-    for (String region : BACKBLAZE_REGIONS) {
-      try {
-        s3Client = backblazeS3ClientFactory.createS3Client(keyId, applicationKey, region);
-
-        listBucketsResponse = s3Client.listBuckets();
-        userRegion = region;
-        break;
-      } catch (S3Exception e) {
-        s3Exception = e;
-        if (s3Client != null) {
-          s3Client.close();
-        }
-        if (e.statusCode() == 403) {
-          monitor.debug(() -> String.format("User is not in region %s", region));
-        }
+    String userRegion = getAccountRegion(httpClient, keyId, applicationKey);
+    s3Client = backblazeS3ClientFactory.createS3Client(keyId, applicationKey, userRegion);
+    try {
+      listBucketsResponse = s3Client.listBuckets();
+    } catch (S3Exception e) {
+      s3Exception = e;
+      if (s3Client != null) {
+        s3Client.close();
       }
     }
 
-    if (listBucketsResponse == null || userRegion == null) {
+    if (listBucketsResponse == null) {
       throw new BackblazeCredentialsException(
           "User's credentials or permissions are not valid for any regions available", s3Exception);
     }
@@ -140,7 +138,7 @@ public String uploadFile(String fileKey, File file) throws IOException {
           () ->
               String.format(
                   "File size is larger than %d bytes, so using multipart upload",
-                  sizeThresholdForMultipartUpload));
+                      sizeThresholdForMultipartUpload));
       return uploadFileUsingMultipartUpload(fileKey, file, contentLength);
     }
 
@@ -156,6 +154,43 @@ public String uploadFile(String fileKey, File file) throws IOException {
     }
   }
 
+  private String getAccountRegion(
+      CloseableHttpClient httpClient, String keyId, String applicationKey)
+      throws BackblazeCredentialsException {
+
+    String auth = keyId + ":" + applicationKey;
+    byte[] encodedAuth = Base64.getEncoder().encode(auth.getBytes());
+    String authHeaderValue = "Basic " + new String(encodedAuth);
+
+    HttpGet request = new HttpGet("https://api.backblazeb2.com/b2api/v2/b2_authorize_account");
+    request.addHeader("Authorization", authHeaderValue);
+
+    try {
+      CloseableHttpResponse response = httpClient.execute(request);
+      try (response) {
+        int statusCode = response.getStatusLine().getStatusCode();
+
+        if (statusCode == 200) {
+          String responseBody = EntityUtils.toString(response.getEntity());
+          JSONParser parser = new JSONParser();
+          JSONObject jsonResponse = (JSONObject) parser.parse(responseBody);
+          String s3ApiUrl = (String) jsonResponse.get("s3ApiUrl");
+          String region = s3ApiUrl.split("s3.")[1].split("\\.")[0];
+          monitor.info(() -> "Region extracted from s3ApiUrl: " + region);
+          return region;
+        } else if (statusCode >= 400 && statusCode < 500) {
+          // Don't retry on client errors (4xx)
+          throw new BackblazeCredentialsException(
+              "Failed to retrieve account's region. Status code: " + statusCode, null);
+        } else {
+          throw new IOException("Server returned status code: " + statusCode);
+        }
+      }
+    } catch (IOException | ParseException e) {
+      throw new BackblazeCredentialsException("Failed to retrieve account's region", e);
+    }
+  }
+
   private String uploadFileUsingMultipartUpload(String fileKey, File file, long contentLength)
       throws IOException, AwsServiceException, SdkClientException {
     List<CompletedPart> completedParts = new ArrayList<>();
@@ -210,9 +245,7 @@ private String getOrCreateBucket(
       throws IOException {
 
     String fullPrefix =
-        String.format(
-            DATA_TRANSFER_BUCKET_PREFIX_FORMAT_STRING,
-            exportService.toLowerCase());
+        String.format(DATA_TRANSFER_BUCKET_PREFIX_FORMAT_STRING, exportService.toLowerCase());
     try {
       for (Bucket bucket : listBucketsResponse.buckets()) {
         if (bucket.name().startsWith(fullPrefix)) {
@@ -233,7 +266,7 @@ private String getOrCreateBucket(
               .build();
       s3Client.createBucket(createBucketRequest);
       return bucketName;
-    } catch (BucketAlreadyExistsException | BucketAlreadyOwnedByYouException e) {
+    } catch (Exception e) {
       monitor.info(() -> "Bucket name already exists");
     }
   }

extensions/data-transfer/portability-data-transfer-backblaze/src/main/java/org/datatransferproject/datatransfer/backblaze/common/BackblazeDataTransferClientFactory.java

Lines changed: 2 additions & 1 deletion

@@ -20,6 +20,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
+import org.apache.http.impl.client.HttpClientBuilder;
 import org.datatransferproject.api.launcher.Monitor;
 import org.datatransferproject.datatransfer.backblaze.exception.BackblazeCredentialsException;
 import org.datatransferproject.transfer.JobMetadata;
@@ -48,7 +49,7 @@ public BackblazeDataTransferClient getOrCreateB2Client(UUID jobId,
              SIZE_THRESHOLD_FOR_MULTIPART_UPLOAD,
              PART_SIZE_FOR_MULTIPART_UPLOAD);
      String exportService = JobMetadata.getExportService();
-      backblazeDataTransferClient.init(authData.getToken(), authData.getSecret(), exportService);
+      backblazeDataTransferClient.init(authData.getToken(), authData.getSecret(), exportService, HttpClientBuilder.create().build());
      backblazeDataTransferClientMap.put(jobId, backblazeDataTransferClient);
    }
    return backblazeDataTransferClientMap.get(jobId);
BackblazeIntegrationTest.java (new file)

Lines changed: 49 additions & 0 deletions

@@ -0,0 +1,49 @@
+package org.datatransferproject.datatransfer.backblaze.common;
+
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.datatransferproject.api.launcher.Monitor;
+import org.datatransferproject.launcher.monitor.ConsoleMonitor;
+
+import java.io.File;
+
+public class BackblazeIntegrationTest {
+  public static void main(String[] args) throws Exception {
+    // Create dependencies
+    Monitor monitor = new ConsoleMonitor(ConsoleMonitor.Level.DEBUG);
+    BaseBackblazeS3ClientFactory factory = new BaseBackblazeS3ClientFactory();
+
+    // Create client with appropriate thresholds
+    // 20MB threshold for multipart upload, 5MB part size
+    BackblazeDataTransferClient client = new BackblazeDataTransferClient(
+        monitor,
+        factory,
+        20 * 1024 * 1024,
+        5 * 1024 * 1024
+    );
+
+    // Get credentials from environment
+    String keyId = System.getenv("BACKBLAZE_KEY");
+    String appKey = System.getenv("BACKBLAZE_SECRET");
+
+    if (keyId == null || appKey == null) {
+      System.err.println("Please set BACKBLAZE_KEY and BACKBLAZE_SECRET environment variables");
+      return;
+    }
+
+    System.out.println("Initializing client with credentials...");
+
+    // Initialize client with your credentials
+    // The "test-service" string is used as a prefix for bucket naming
+    client.init(keyId, appKey, "test-service", HttpClientBuilder.create().build());
+
+    System.out.println("Client initialized successfully!");
+
+    // Test file upload
+    File testFile = new File("/Users/anthony.ross/Desktop/test.txt"); // Replace with an actual file path
+
+    System.out.println("Uploading file: " + testFile.getAbsolutePath());
+    String versionId = client.uploadFile("test-upload-" + System.currentTimeMillis(), testFile);
+
+    System.out.println("Upload successful! Version ID: " + versionId);
+  }
+}
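A note on the HttpClient wiring shown in the factory and the test above: from the diff, init only uses the supplied client for the b2_authorize_account request and does not retain it, so a caller could scope it with try-with-resources instead of passing HttpClientBuilder.create().build() inline. A minimal sketch of that alternative (not part of the commit; client, keyId, and appKey are the variables from the test above, and it additionally requires importing org.apache.http.impl.client.CloseableHttpClient):

    // Sketch (not part of this commit): scope the CloseableHttpClient to init(), since the
    // diff above only uses it for the b2_authorize_account lookup and does not keep it.
    try (CloseableHttpClient httpClient = HttpClientBuilder.create().build()) {
      client.init(keyId, appKey, "test-service", httpClient);
    }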
