presto-docs/src/main/sphinx/connector/iceberg.rst (1 addition, 1 deletion)
@@ -345,7 +345,7 @@ Property Name Description
``iceberg.file-format`` The storage file format for Iceberg tables. The available ``PARQUET`` Yes No, write is not supported yet
values are ``PARQUET`` and ``ORC``.

-``iceberg.compression-codec`` The compression codec to use when writing files. The ``GZIP`` Yes No, write is not supported yet
+``iceberg.compression-codec`` The compression codec to use when writing files. The ``ZSTD`` Yes No, write is not supported yet
available values are ``NONE``, ``SNAPPY``, ``GZIP``,
``LZ4``, and ``ZSTD``.

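With this change, a catalog that does not set the codec explicitly now writes ZSTD-compressed files. Deployments that need the old behavior can still pin the previous default in the catalog properties file; a minimal sketch, assuming the standard ``etc/catalog/iceberg.properties`` layout (only the last line relates to this change):

```properties
connector.name=iceberg
iceberg.file-format=PARQUET
# Pin the pre-change default explicitly if downstream readers cannot handle ZSTD.
iceberg.compression-codec=GZIP
```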
@@ -33,7 +33,7 @@

import static com.facebook.airlift.units.DataSize.Unit.MEGABYTE;
import static com.facebook.airlift.units.DataSize.succinctDataSize;
-import static com.facebook.presto.hive.HiveCompressionCodec.GZIP;
+import static com.facebook.presto.hive.HiveCompressionCodec.ZSTD;
import static com.facebook.presto.iceberg.CatalogType.HIVE;
import static com.facebook.presto.iceberg.IcebergFileFormat.PARQUET;
import static com.facebook.presto.iceberg.util.StatisticsUtil.decodeMergeFlags;
@@ -47,7 +47,7 @@
public class IcebergConfig
{
private IcebergFileFormat fileFormat = PARQUET;
-private HiveCompressionCodec compressionCodec = GZIP;
+private HiveCompressionCodec compressionCodec = ZSTD;
private CatalogType catalogType = HIVE;
private String catalogWarehouse;
private String catalogWarehouseDataDir;
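The field above only changes the recorded default; the property binding itself is outside this hunk. A minimal sketch of how the ``iceberg.compression-codec`` binding presumably looks in IcebergConfig, following the usual Airlift convention (not part of this diff, shown for context):

```java
import com.facebook.airlift.configuration.Config;
import com.facebook.presto.hive.HiveCompressionCodec;

// Presumed shape of the existing accessor pair in IcebergConfig;
// only the field's default value changes in this PR.
public HiveCompressionCodec getCompressionCodec()
{
    return compressionCodec;
}

@Config("iceberg.compression-codec")
public IcebergConfig setCompressionCodec(HiveCompressionCodec compressionCodec)
{
    this.compressionCodec = compressionCodec;
    return this;
}
```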
@@ -24,8 +24,8 @@
import static com.facebook.airlift.configuration.testing.ConfigAssertions.recordDefaults;
import static com.facebook.airlift.units.DataSize.Unit.MEGABYTE;
import static com.facebook.airlift.units.DataSize.succinctDataSize;
-import static com.facebook.presto.hive.HiveCompressionCodec.GZIP;
import static com.facebook.presto.hive.HiveCompressionCodec.NONE;
+import static com.facebook.presto.hive.HiveCompressionCodec.ZSTD;
import static com.facebook.presto.iceberg.CatalogType.HADOOP;
import static com.facebook.presto.iceberg.CatalogType.HIVE;
import static com.facebook.presto.iceberg.IcebergFileFormat.ORC;
@@ -46,7 +46,7 @@ public void testDefaults()
{
assertRecordedDefaults(recordDefaults(IcebergConfig.class)
.setFileFormat(PARQUET)
-.setCompressionCodec(GZIP)
+.setCompressionCodec(ZSTD)
.setCatalogType(HIVE)
.setCatalogWarehouse(null)
.setCatalogWarehouseDataDir(null)
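The assertion above only pins the new recorded default; an explicitly configured codec still takes precedence. A minimal sketch of such a check, reusing the existing setter and getter (the test class and method names below are illustrative, not part of this change):

```java
import static com.facebook.presto.hive.HiveCompressionCodec.GZIP;
import static org.testng.Assert.assertEquals;

import com.facebook.presto.iceberg.IcebergConfig;
import org.testng.annotations.Test;

public class TestIcebergCompressionCodecDefault
{
    @Test
    public void testExplicitCodecStillOverridesDefault()
    {
        // An explicitly configured codec wins over the new ZSTD default.
        IcebergConfig config = new IcebergConfig().setCompressionCodec(GZIP);
        assertEquals(config.getCompressionCodec(), GZIP);
    }
}
```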
@@ -286,7 +286,7 @@ protected void checkTableProperties(String schemaName, String tableName, String
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.format.default", "PARQUET")))
.anySatisfy(row -> assertThat(row)
-.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.parquet.compression-codec", "GZIP")))
+.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.parquet.compression-codec", "ZSTD")))
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "commit.retry.num-retries", "4")))
.anySatisfy(row -> assertThat(row)
@@ -321,7 +321,7 @@ protected void checkORCFormatTableProperties(String tableName, String deleteMode
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.format.default", "ORC")))
.anySatisfy(row -> assertThat(row)
-.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.orc.compression-codec", "ZLIB")))
+.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.orc.compression-codec", "ZSTD")))
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.parquet.compression-codec", "zstd")))
.anySatisfy(row -> assertThat(row)
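These rows come from the connector's table-properties metadata, so the new default can also be checked directly against a freshly created table. A minimal sketch in the same AssertJ style, assuming the surrounding test framework's query runner; the table name is illustrative:

```java
// "test_nation$properties" is a hypothetical table reference; getQueryRunner(),
// MaterializedResult, MaterializedRow, and the static assertThat import come from
// the existing test infrastructure.
MaterializedResult properties = getQueryRunner()
        .execute("SELECT * FROM \"test_nation$properties\"");
assertThat(properties.getMaterializedRows())
        .anySatisfy(row -> assertThat(row)
                .isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION,
                        "write.parquet.compression-codec", "ZSTD")));
```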
@@ -109,7 +109,7 @@ protected void checkTableProperties(String tableName, String deleteMode)
.anySatisfy(row -> assertThat(row.getField(0)).isEqualTo("nessie.commit.id"))
.anySatisfy(row -> assertThat(row).isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "gc.enabled", "false")))
.anySatisfy(row -> assertThat(row)
-.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.parquet.compression-codec", "GZIP")))
+.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.parquet.compression-codec", "ZSTD")))
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.metadata.delete-after-commit.enabled", "false")))
.anySatisfy(row -> assertThat(row)
@@ -141,7 +141,7 @@ protected void checkORCFormatTableProperties(String tableName, String deleteMode
.anySatisfy(row -> assertThat(row)
.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.format.default", "ORC")))
.anySatisfy(row -> assertThat(row)
-.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.orc.compression-codec", "ZLIB")))
+.isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "write.orc.compression-codec", "ZSTD")))
.anySatisfy(row -> assertThat(row.getField(0)).isEqualTo("nessie.commit.id"))
.anySatisfy(row -> assertThat(row).isEqualTo(new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, "gc.enabled", "false")))
.anySatisfy(row -> assertThat(row)