@@ -652,9 +652,11 @@ static int cb_s3_init(struct flb_output_instance *ins,
             flb_plg_error(ctx->ins, "unknown compression: %s", tmp);
             return -1;
         }
-        if (ctx->use_put_object == FLB_FALSE && ctx->compression == FLB_AWS_COMPRESS_ARROW) {
+        if (ctx->use_put_object == FLB_FALSE &&
+            (ctx->compression == FLB_AWS_COMPRESS_ARROW ||
+             ctx->compression == FLB_AWS_COMPRESS_PARQUET)) {
             flb_plg_error(ctx->ins,
-                          "use_put_object must be enabled when Apache Arrow is enabled");
+                          "use_put_object must be enabled when Apache Arrow or Parquet is enabled");
             return -1;
         }
         ctx->compression = ret;
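Note: the check above exists since Arrow and Parquet emit whole-file columnar output that is not valid when split across independently uploaded multipart parts, so these codecs are only usable with PutObject uploads. A minimal sketch of the same rule as a standalone predicate (the helper name is hypothetical, not part of the plugin):

    /* Hypothetical helper: columnar formats produce whole-file output
     * that cannot be split across multipart parts, so they require
     * use_put_object to be enabled. */
    static int compression_requires_put_object(int compression)
    {
        return compression == FLB_AWS_COMPRESS_ARROW ||
               compression == FLB_AWS_COMPRESS_PARQUET;
    }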
@@ -679,7 +681,7 @@ static int cb_s3_init(struct flb_output_instance *ins,
             flb_plg_error(ctx->ins, "upload_chunk_size must be at least 5,242,880 bytes");
             return -1;
         }
-        if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+        if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
             if (ctx->upload_chunk_size > MAX_CHUNKED_UPLOAD_COMPRESS_SIZE) {
                 flb_plg_error(ctx->ins, "upload_chunk_size in compressed multipart upload cannot exceed 5GB");
                 return -1;
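The limits quoted in the error messages match S3's multipart constraints (5 MiB minimum part size, 5 GiB maximum part size). Assuming the plugin's macros carry exactly those values, the bounds being enforced are:

    /* Assumed values, inferred from the error messages above; the real
     * definitions live in the plugin's headers. */
    #define MIN_CHUNKED_UPLOAD_SIZE          5242880         /* 5 MiB */
    #define MAX_CHUNKED_UPLOAD_COMPRESS_SIZE 5368709120ULL   /* 5 GiB */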
@@ -1003,7 +1005,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
         file_first_log_time = chunk->first_log_time;
     }
 
-    if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+    if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
         /* Map payload */
         ret = flb_aws_compression_compress(ctx->compression, body, body_size, &payload_buf, &payload_size);
         if (ret == -1) {
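On success, flb_aws_compression_compress() allocates a fresh output buffer, which is why every exit path below pairs the new FLB_AWS_COMPRESS_NONE check with flb_free(payload_buf). A minimal sketch of the calling contract, assuming the signature used in the hunk above (the error handling is illustrative only):

    void *payload_buf = NULL;
    size_t payload_size = 0;

    /* Compress `body` with the configured codec (gzip, arrow, parquet).
     * On success the function allocates payload_buf; the caller owns it
     * and must release it with flb_free(). */
    ret = flb_aws_compression_compress(ctx->compression, body, body_size,
                                       &payload_buf, &payload_size);
    if (ret == -1) {
        return FLB_RETRY;          /* illustrative error handling */
    }
    body = payload_buf;            /* upload the compressed bytes */
    body_size = payload_size;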
@@ -1046,7 +1048,9 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
         goto multipart;
     }
     else {
-        if (ctx->use_put_object == FLB_FALSE && ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+        if (ctx->use_put_object == FLB_FALSE &&
+            (ctx->compression == FLB_AWS_COMPRESS_ARROW ||
+             ctx->compression == FLB_AWS_COMPRESS_PARQUET)) {
             flb_plg_info(ctx->ins, "Pre-compression upload_chunk_size= %zu, After compression, chunk is only %zu bytes, "
                          "the chunk was too small, using PutObject to upload", preCompress_size, body_size);
         }
@@ -1068,7 +1072,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
      * remove chunk from buffer list
      */
     ret = s3_put_object(ctx, tag, file_first_log_time, body, body_size);
-    if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+    if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
         flb_free(payload_buf);
     }
     if (ret < 0) {
@@ -1095,7 +1099,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
         if (chunk) {
             s3_store_file_unlock(chunk);
         }
-        if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+        if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
             flb_free(payload_buf);
         }
         return FLB_RETRY;
@@ -1109,7 +1113,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
         if (chunk) {
             s3_store_file_unlock(chunk);
         }
-        if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+        if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
             flb_free(payload_buf);
         }
         return FLB_RETRY;
@@ -1119,7 +1123,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
 
     ret = upload_part(ctx, m_upload, body, body_size);
     if (ret < 0) {
-        if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+        if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
             flb_free(payload_buf);
         }
         m_upload->upload_errors += 1;
@@ -1136,7 +1140,7 @@ static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
         s3_store_file_delete(ctx, chunk);
         chunk = NULL;
     }
-    if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+    if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
        flb_free(payload_buf);
    }
    if (m_upload->bytes >= ctx->file_size) {
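Design note: after this change the same conditional free is repeated at several exit points in upload_data. A hedged alternative, not what the patch does, would route every early return through one hypothetical cleanup helper:

    /* Hypothetical helper: releases the compressed payload, if any,
     * and passes the return code through unchanged. */
    static int upload_data_cleanup(struct flb_s3 *ctx, void *payload_buf,
                                   int ret_code)
    {
        if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
            flb_free(payload_buf);
        }
        return ret_code;
    }

    /* usage at an error site:
     *     return upload_data_cleanup(ctx, payload_buf, FLB_RETRY);
     */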
@@ -2371,8 +2375,8 @@ static struct flb_config_map config_map[] = {
     {
      FLB_CONFIG_MAP_STR, "compression", NULL,
      0, FLB_FALSE, 0,
-     "Compression type for S3 objects. 'gzip' and 'arrow' are the supported values. "
-     "'arrow' is only an available if Apache Arrow was enabled at compile time. "
+     "Compression type for S3 objects. 'gzip', 'arrow' and 'parquet' are the supported values. "
+     "'arrow' and 'parquet' are only available if Apache Arrow was enabled at compile time. "
      "Defaults to no compression. "
      "If 'gzip' is selected, the Content-Encoding HTTP Header will be set to 'gzip'."
     },
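For reference, a minimal output section exercising the new value (bucket name and region are placeholders); per the init check earlier in this commit, Parquet requires use_put_object:

    [OUTPUT]
        Name            s3
        Match           *
        bucket          my-example-bucket
        region          us-east-1
        use_put_object  On
        compression     parquet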