
Commit 768945d

Add WALG_UPLOAD_DISK_CONCURRENCY env (#504)
* Add WALG_UPLOAD_DISK_CONCURRENCY env
* Add additional params
* Try configuring the CLONE as well
* Dynamic WALG_UPLOAD_DISK_CONCURRENCY depending on CPU limits
* check for errors
1 parent aef20ed commit 768945d

File tree

1 file changed: +34 −13 lines


controllers/postgres_controller.go

Lines changed: 34 additions & 13 deletions
@@ -30,6 +30,7 @@ import (
 	coreosv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -441,6 +442,20 @@ func (r *PostgresReconciler) updatePodEnvironmentConfigMap(ctx context.Context,
 	// region
 	region := backupConfig.S3Region

+	// set the WALG_UPLOAD_DISK_CONCURRENCY based on the configured cpu limits
+	q, err := resource.ParseQuantity(p.Spec.Size.CPU)
+	if err != nil {
+		return fmt.Errorf("error while parsing the postgres cpu size: %w", err)
+	}
+	uploadDiskConcurrency := "1"
+	if q.Value() > 32 {
+		uploadDiskConcurrency = "32"
+	} else if q.Value() > 1 {
+		uploadDiskConcurrency = fmt.Sprint(q.Value())
+	}
+	uploadConcurrency := "32"
+	downloadConcurrency := "32"
+
 	// use the rest as provided in the secret
 	bucketName := backupConfig.S3BucketName
 	backupSchedule := backupConfig.Schedule
@@ -452,19 +467,25 @@ func (r *PostgresReconciler) updatePodEnvironmentConfigMap(ctx context.Context,

 	// create updated content for pod environment configmap
 	data := map[string]string{
-		"USE_WALG_BACKUP":                   "true",
-		"USE_WALG_RESTORE":                  "true",
-		"WALE_S3_PREFIX":                    "s3://" + bucketName + "/$(SCOPE)",
-		"WALG_S3_PREFIX":                    "s3://" + bucketName + "/$(SCOPE)",
-		"CLONE_WALG_S3_PREFIX":              "s3://" + bucketName + "/$(CLONE_SCOPE)",
-		"WALE_BACKUP_THRESHOLD_PERCENTAGE":  "100",
-		"AWS_ENDPOINT":                      awsEndpoint,
-		"WALE_S3_ENDPOINT":                  walES3Endpoint, // same as above, but slightly modified
-		"AWS_S3_FORCE_PATH_STYLE":           "true",
-		"AWS_REGION":                        region,         // now we can use AWS S3
-		"WALG_DISABLE_S3_SSE":               walgDisableSSE, // server side encryption
-		"BACKUP_SCHEDULE":                   backupSchedule,
-		"BACKUP_NUM_TO_RETAIN":              backupNumToRetain,
+		"USE_WALG_BACKUP":                     "true",
+		"USE_WALG_RESTORE":                    "true",
+		"WALE_S3_PREFIX":                      "s3://" + bucketName + "/$(SCOPE)",
+		"WALG_S3_PREFIX":                      "s3://" + bucketName + "/$(SCOPE)",
+		"CLONE_WALG_S3_PREFIX":                "s3://" + bucketName + "/$(CLONE_SCOPE)",
+		"WALE_BACKUP_THRESHOLD_PERCENTAGE":    "100",
+		"AWS_ENDPOINT":                        awsEndpoint,
+		"WALE_S3_ENDPOINT":                    walES3Endpoint, // same as above, but slightly modified
+		"AWS_S3_FORCE_PATH_STYLE":             "true",
+		"AWS_REGION":                          region,         // now we can use AWS S3
+		"WALG_DISABLE_S3_SSE":                 walgDisableSSE, // server side encryption
+		"BACKUP_SCHEDULE":                     backupSchedule,
+		"BACKUP_NUM_TO_RETAIN":                backupNumToRetain,
+		"WALG_UPLOAD_DISK_CONCURRENCY":        uploadDiskConcurrency,
+		"CLONE_WALG_UPLOAD_DISK_CONCURRENCY":  uploadDiskConcurrency,
+		"WALG_UPLOAD_CONCURRENCY":             uploadConcurrency,
+		"CLONE_WALG_UPLOAD_CONCURRENCY":       uploadConcurrency,
+		"WALG_DOWNLOAD_CONCURRENCY":           downloadConcurrency,
+		"CLONE_WALG_DOWNLOAD_CONCURRENCY":     downloadConcurrency,
 	}

 	cm := &corev1.ConfigMap{}
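
For context on the new values: WALG_UPLOAD_DISK_CONCURRENCY (and its CLONE_ counterpart) is derived from the instance's CPU limit and clamped to the range 1–32, while the plain upload/download concurrency stays fixed at "32". The standalone sketch below is not part of the commit; it merely mirrors the logic added in the hunk above with a hypothetical helper and illustrative sample limits, to show what values typical CPU settings would produce. Note that Quantity.Value() rounds fractional CPUs such as "500m" up to the nearest whole number.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// uploadDiskConcurrencyFor mirrors the logic added in this commit (hypothetical
// helper for illustration only): parse the Kubernetes CPU limit and clamp the
// resulting WALG_UPLOAD_DISK_CONCURRENCY value to the range [1, 32].
func uploadDiskConcurrencyFor(cpu string) (string, error) {
	q, err := resource.ParseQuantity(cpu)
	if err != nil {
		return "", fmt.Errorf("error while parsing the postgres cpu size: %w", err)
	}
	uploadDiskConcurrency := "1"
	if q.Value() > 32 {
		uploadDiskConcurrency = "32"
	} else if q.Value() > 1 {
		uploadDiskConcurrency = fmt.Sprint(q.Value())
	}
	return uploadDiskConcurrency, nil
}

func main() {
	// Illustrative sample CPU limits, not taken from the commit itself.
	for _, cpu := range []string{"500m", "1", "2", "8", "64"} {
		c, err := uploadDiskConcurrencyFor(cpu)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("CPU limit %-4s -> WALG_UPLOAD_DISK_CONCURRENCY=%s\n", cpu, c)
	}
}

With these samples the output is 1, 1, 2, 8, 32 respectively: fractional and single-core limits fall back to the default of 1, limits above 32 are capped at 32.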
