Skip to content

[Variant] Define shredding schema for VariantArrayBuilder #7921

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 4 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions parquet-variant-compute/benches/variant_kernels.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

use arrow::array::{Array, ArrayRef, StringArray};
use arrow::util::test_util::seedable_rng;
use arrow_schema::{DataType, Field, Fields};
use criterion::{criterion_group, criterion_main, Criterion};
use parquet_variant::{Variant, VariantBuilder};
use parquet_variant_compute::variant_get::{variant_get, GetOptions};
Expand All @@ -27,6 +28,14 @@ use rand::Rng;
use rand::SeedableRng;
use std::fmt::Write;
use std::sync::Arc;

/// Builds the two-column (metadata, value) schema of an unshredded variant,
/// used by the benchmarks below to construct `VariantArrayBuilder`s.
fn unshredded_schema_fields() -> Fields {
    // Both fields are non-nullable BinaryView per the variant spec.
    let metadata_field = Field::new("metadata", DataType::BinaryView, false);
    let value_field = Field::new("value", DataType::BinaryView, false);

    Fields::from(vec![metadata_field, value_field])
}

fn benchmark_batch_json_string_to_variant(c: &mut Criterion) {
let input_array = StringArray::from_iter_values(json_repeated_struct(8000));
let array_ref: ArrayRef = Arc::new(input_array);
Expand Down Expand Up @@ -93,7 +102,7 @@ pub fn variant_get_bench(c: &mut Criterion) {
};

c.bench_function("variant_get_primitive", |b| {
b.iter(|| variant_get(&input.clone(), options.clone()))
b.iter(|| variant_get(&input.clone(), options.clone(), unshredded_schema_fields()))
});
}

Expand All @@ -108,7 +117,7 @@ criterion_main!(benches);
fn create_primitive_variant_array(size: usize) -> VariantArray {
let mut rng = StdRng::seed_from_u64(42);

let mut variant_builder = VariantArrayBuilder::new(1);
let mut variant_builder = VariantArrayBuilder::try_new(1, unshredded_schema_fields()).unwrap();

for _ in 0..size {
let mut builder = VariantBuilder::new();
Expand Down
13 changes: 10 additions & 3 deletions parquet-variant-compute/src/from_json.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

use crate::{VariantArray, VariantArrayBuilder};
use arrow::array::{Array, ArrayRef, StringArray};
use arrow_schema::ArrowError;
use arrow_schema::{ArrowError, DataType, Field, Fields};
use parquet_variant_json::json_to_variant;

/// Parse a batch of JSON strings into a batch of Variants represented as
Expand All @@ -34,7 +34,14 @@ pub fn batch_json_string_to_variant(input: &ArrayRef) -> Result<VariantArray, Ar
)),
}?;

let mut variant_array_builder = VariantArrayBuilder::new(input_string_array.len());
let metadata_field = Field::new("metadata", DataType::BinaryView, false);
let value_field = Field::new("value", DataType::BinaryView, false);

let schema = Fields::from(vec![metadata_field, value_field]);

let mut variant_array_builder =
VariantArrayBuilder::try_new(input_string_array.len(), schema).unwrap();

for i in 0..input.len() {
if input.is_null(i) {
// The subfields are expected to be non-nullable according to the parquet variant spec.
Expand Down Expand Up @@ -70,7 +77,7 @@ mod test {
let variant_array = batch_json_string_to_variant(&array_ref).unwrap();

let metadata_array = variant_array.metadata_field().as_binary_view();
let value_array = variant_array.value_field().as_binary_view();
let value_array = variant_array.value_field().unwrap().as_binary_view();

// Compare row 0
assert!(!variant_array.is_null(0));
Expand Down
1 change: 1 addition & 0 deletions parquet-variant-compute/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
// under the License.

mod from_json;
pub mod shredding;
mod to_json;
mod variant_array;
mod variant_array_builder;
Expand Down
310 changes: 310 additions & 0 deletions parquet-variant-compute/src/shredding.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,310 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use arrow_schema::{ArrowError, DataType, FieldRef, Fields};

// Keywords defined by the shredding spec
/// Name of the required, non-nullable binary metadata column.
pub const METADATA: &str = "metadata";
/// Name of the (optional) unshredded, variant-encoded binary value column.
pub const VALUE: &str = "value";
/// Name of the (optional) shredded, strongly-typed value column.
pub const TYPED_VALUE: &str = "typed_value";

/// Describes how the variant value is encoded at the top level of a schema.
///
/// Each `usize` is an index into the owning [`VariantSchema`]'s fields.
#[derive(Debug, PartialEq, Clone)]
pub enum ValueSchema {
    /// Neither `value` nor `typed_value` is present; only valid for fields of
    /// a shredded object.
    MissingValue,
    /// Only a binary `value` column is present (unshredded).
    Value(usize),
    /// Only a `typed_value` column is present (fully shredded).
    ShreddedValue(usize),
    /// Both `value` and `typed_value` are present (partially shredded object).
    PartiallyShredded {
        value_idx: usize,
        shredded_value_idx: usize,
    },
}

/// A validated Arrow schema for a variant column, following the Parquet
/// variant shredding spec. Construct with [`VariantSchema::try_new`].
#[derive(Debug, Clone)]
pub struct VariantSchema {
    // The underlying, already-validated fields.
    inner: Fields,

    // Index of the required `metadata` column within `inner`.
    metadata_idx: usize,

    // these indices are for the top-level most `value` and `typed_value` columns
    value_schema: ValueSchema,
}

impl VariantSchema {
/// Locates and validates the required `metadata` column.
///
/// Returns the index of the `metadata` field within `fields`.
///
/// # Requirements
/// - `fields` must contain a field named "metadata"
/// - that field must be non-nullable
/// - that field must have type `BinaryView`
fn validate_metadata(fields: &Fields) -> Result<usize, ArrowError> {
    // Search by name so the schema is order-agnostic.
    let Some((metadata_idx, metadata_field)) = fields
        .iter()
        .enumerate()
        .find(|(_, field)| field.name() == METADATA)
    else {
        return Err(ArrowError::InvalidArgumentError(
            "Invalid VariantArray: StructArray must contain a 'metadata' field".to_string(),
        ));
    };

    // The spec requires metadata to always be present (non-nullable).
    if metadata_field.is_nullable() {
        return Err(ArrowError::InvalidArgumentError(
            "Invalid VariantArray: metadata field can not be nullable".to_string(),
        ));
    }

    // Only BinaryView-encoded metadata is supported for now.
    match metadata_field.data_type() {
        DataType::BinaryView => Ok(metadata_idx),
        other => Err(ArrowError::NotYetImplemented(format!(
            "VariantArray 'metadata' field must be BinaryView, got {other}"
        ))),
    }
}

/// Both `value` and `typed_value` are optional fields used together to encode a single value.
///
/// Values in the two fields must be interpreted according to the following table:
///
/// | `value` | `typed_value` | Meaning |
/// |----------|---------------|-------------------------------------------------------------|
/// | null | null | The value is missing; only valid for shredded object fields |
/// | non-null | null | The value is present and may be any type, including null |
/// | null | non-null | The value is present and is the shredded type |
/// | non-null | non-null | The value is present and is a partially shredded object |
fn validate_value_and_typed_value(
    fields: &Fields,
    inside_shredded_object: bool,
) -> Result<ValueSchema, ArrowError> {
    // Locate the optional `value` / `typed_value` columns by name
    // (order-agnostic, same as `validate_metadata`).
    let value_field_res = fields.iter().enumerate().find(|(_, f)| f.name() == VALUE);
    let typed_value_field_res = fields
        .iter()
        .enumerate()
        .find(|(_, f)| f.name() == TYPED_VALUE);

    // validate types
    if let Some((_, value_field)) = value_field_res {
        // Unshredded values are stored as variant-encoded binary.
        if value_field.data_type() != &DataType::BinaryView {
            return Err(ArrowError::NotYetImplemented(format!(
                "VariantArray 'value' field must be BinaryView, got {}",
                value_field.data_type()
            )));
        }
    }

    if let Some((_, typed_value_field)) = typed_value_field_res {
        // `typed_value` may only carry one of the supported shredded types.
        match typed_value_field.data_type() {
            DataType::Boolean
            | DataType::Int8
            | DataType::Int16
            | DataType::Int32
            | DataType::Int64
            | DataType::Float32
            | DataType::Float64
            | DataType::Decimal32(_, _)
            | DataType::Decimal64(_, _)
            | DataType::Decimal128(_, _)
            | DataType::Date32
            | DataType::Date64
            | DataType::Time32(_)
            | DataType::Time64(_)
            | DataType::Timestamp(_, _)
            | DataType::Utf8View
            | DataType::BinaryView
            | DataType::ListView(_)
            | DataType::Struct(_)
            | DataType::Dictionary(_, _) => {}
            foreign => {
                return Err(ArrowError::NotYetImplemented(format!(
                    "Unsupported VariantArray 'typed_value' field, got {foreign}"
                )))
            }
        }
    }

    // Classify per the table in the doc comment above.
    match (value_field_res, typed_value_field_res) {
        (None, None) => {
            // A totally absent value is only legal for fields of a shredded object.
            if inside_shredded_object {
                return Ok(ValueSchema::MissingValue);
            }

            Err(ArrowError::InvalidArgumentError("Invalid VariantArray: StructArray must contain either `value` or `typed_value` fields or both.".to_string()))
        }
        (Some(value_field), None) => Ok(ValueSchema::Value(value_field.0)),
        (None, Some(shredded_field)) => Ok(ValueSchema::ShreddedValue(shredded_field.0)),
        (Some((value_idx, _)), Some((shredded_value_idx, shredded_field))) => {
            // Partially shredded: `typed_value` must be a (possibly
            // dictionary-encoded) struct whose children are validated
            // recursively.
            match shredded_field.data_type() {
                DataType::Struct(fields) => {
                    let _ = Self::validate_value_and_typed_value(fields, false)?;

                    Ok(ValueSchema::PartiallyShredded {
                        value_idx,
                        shredded_value_idx,
                    })
                }
                DataType::Dictionary(_key, shredded_schema) => {
                    if let DataType::Struct(fields) = shredded_schema.as_ref() {
                        // NOTE(review): this recursion passes `true`, while the
                        // plain-Struct branch above passes `false` — confirm
                        // which flag is intended for nested object fields.
                        let _ = Self::validate_value_and_typed_value(fields, true)?;

                        Ok(ValueSchema::PartiallyShredded {
                            value_idx,
                            shredded_value_idx,
                        })
                    } else {
                        // NOTE(review): message mentions "list types" but only
                        // Struct/Dictionary-of-Struct are accepted here.
                        Err(ArrowError::InvalidArgumentError(
                            "Invalid VariantArray: shredded fields must be of struct or list types".to_string(),
                        ))
                    }
                }
                _ => Err(ArrowError::InvalidArgumentError(
                    "Invalid VariantArray: shredded fields must be of struct or list types"
                        .to_string(),
                )),
            }
        }
    }
}

/// Attempts to build a [`VariantSchema`] from raw Arrow [`Fields`].
///
/// Validates that `fields` contains a non-nullable `BinaryView` "metadata"
/// column and at least one of "value" / "typed_value".
///
/// # Errors
/// Returns an [`ArrowError`] when the fields do not form a valid (possibly
/// shredded) variant schema.
pub fn try_new(fields: Fields) -> Result<Self, ArrowError> {
    let metadata_idx = Self::validate_metadata(&fields)?;
    let value_schema = Self::validate_value_and_typed_value(&fields, false)?;

    // `fields` is owned, so move it in directly — the previous
    // `fields.clone()` here was redundant.
    Ok(Self {
        inner: fields,
        metadata_idx,
        value_schema,
    })
}

/// Returns a reference to the validated underlying fields.
pub fn inner(&self) -> &Fields {
    &self.inner
}

/// Consumes the schema, returning the underlying fields.
pub fn into_inner(self) -> Fields {
    self.inner
}

/// Returns the index of the `metadata` column within the fields.
pub fn metadata_idx(&self) -> usize {
    self.metadata_idx
}

/// Returns the `metadata` field.
pub fn metadata(&self) -> &FieldRef {
    // `metadata_idx` was validated by `try_new`, so this lookup cannot fail;
    // state the invariant instead of a bare `unwrap`.
    self.inner
        .get(self.metadata_idx)
        .expect("metadata_idx validated at construction")
}

pub fn value_idx(&self) -> Option<usize> {
match self.value_schema {
ValueSchema::MissingValue => None,
ValueSchema::ShreddedValue(_) => None,
ValueSchema::Value(value_idx) => Some(value_idx),
ValueSchema::PartiallyShredded { value_idx, .. } => Some(value_idx),
Comment on lines +211 to +215
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could also consider:

Suggested change
match self.value_schema {
ValueSchema::MissingValue => None,
ValueSchema::ShreddedValue(_) => None,
ValueSchema::Value(value_idx) => Some(value_idx),
ValueSchema::PartiallyShredded { value_idx, .. } => Some(value_idx),
use ValueSchema::*;
match self.value_schema {
MissingValue | ShreddedValue(_) => None,
Value(value_idx) | PartiallyShredded { value_idx, .. } => Some(value_idx),

Less redundancy... but I'm not sure it actually improves readability very much?

}
}

/// Returns the top-level `value` field, if one is present.
pub fn value(&self) -> Option<&FieldRef> {
    // `and_then` + `get` avoids the panic path of `map(...).unwrap()`;
    // the index is validated at construction, but a stale index now yields
    // `None` instead of panicking.
    self.value_idx().and_then(|i| self.inner.get(i))
}

pub fn shredded_value_idx(&self) -> Option<usize> {
match self.value_schema {
ValueSchema::MissingValue => None,
ValueSchema::Value(_) => None,
ValueSchema::ShreddedValue(shredded_idx) => Some(shredded_idx),
ValueSchema::PartiallyShredded {
shredded_value_idx, ..
} => Some(shredded_value_idx),
Comment on lines +224 to +230
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Similar to above, but I think this one is actually a readability improvement:

Suggested change
match self.value_schema {
ValueSchema::MissingValue => None,
ValueSchema::Value(_) => None,
ValueSchema::ShreddedValue(shredded_idx) => Some(shredded_idx),
ValueSchema::PartiallyShredded {
shredded_value_idx, ..
} => Some(shredded_value_idx),
use ValueSchema::*;
match self.value_schema {
MissingValue | Value(_) => None,
ShreddedValue(shredded_idx) | PartiallyShredded { shredded_value_idx, .. } => {
Some(shredded_value_idx)
}

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Or, maybe it just needs the use ValueSchema::* part, to avoid the fmt line breaks on ShreddedValue?

Suggested change
match self.value_schema {
ValueSchema::MissingValue => None,
ValueSchema::Value(_) => None,
ValueSchema::ShreddedValue(shredded_idx) => Some(shredded_idx),
ValueSchema::PartiallyShredded {
shredded_value_idx, ..
} => Some(shredded_value_idx),
use ValueSchema::*;
match self.value_schema {
MissingValue => None,
Value(_) => None,
ShreddedValue(shredded_idx) => Some(shredded_idx),
PartiallyShredded { shredded_value_idx, .. } => Some(shredded_value_idx),

}
}

/// Returns the top-level `typed_value` field, if one is present.
pub fn shredded_value(&self) -> Option<&FieldRef> {
    // Same rationale as `value`: prefer `and_then` + checked `get` over a
    // panicking `unwrap` on an index that should always be valid.
    self.shredded_value_idx().and_then(|i| self.inner.get(i))
}
}

#[cfg(test)]
mod tests {
    use super::*;

    use arrow_schema::Field;

    // Happy path: metadata + value, both BinaryView and non-nullable.
    #[test]
    fn test_unshredded_variant_schema() {
        // a regular variant schema
        let metadata_field = Field::new("metadata", DataType::BinaryView, false);
        let value_field = Field::new("value", DataType::BinaryView, false);

        let fields = Fields::from(vec![metadata_field, value_field]);
        let variant_schema = VariantSchema::try_new(fields).unwrap();

        assert_eq!(variant_schema.metadata_idx, 0);
        assert_eq!(variant_schema.value_schema, ValueSchema::Value(1));
    }

    // Field lookup is by name, so column order must not matter.
    #[test]
    fn test_unshredded_variant_schema_order_agnostic() {
        let metadata_field = Field::new("metadata", DataType::BinaryView, false);
        let value_field = Field::new("value", DataType::BinaryView, false);

        let fields = Fields::from(vec![value_field, metadata_field]); // note the order switch
        let variant_schema = VariantSchema::try_new(fields).unwrap();

        assert_eq!(variant_schema.value_schema, ValueSchema::Value(0));
        assert_eq!(variant_schema.metadata_idx, 1);
    }

    // Fully shredded: only `typed_value`, no binary `value` column.
    #[test]
    fn test_shredded_variant_schema() {
        let metadata_field = Field::new("metadata", DataType::BinaryView, false);
        let shredded_field = Field::new("typed_value", DataType::Int8, true);

        let fields = Fields::from(vec![metadata_field, shredded_field]);
        let variant_schema = VariantSchema::try_new(fields).unwrap();

        assert_eq!(variant_schema.metadata_idx, 0);
        assert_eq!(variant_schema.value_schema, ValueSchema::ShreddedValue(1));
    }

    // Missing `metadata` must be rejected with a descriptive error.
    #[test]
    fn test_regular_variant_schema_missing_metadata() {
        let value_field = Field::new("value", DataType::BinaryView, false);
        let schema = Fields::from(vec![value_field]);

        let err = VariantSchema::try_new(schema).unwrap_err();

        assert_eq!(
            err.to_string(),
            "Invalid argument error: Invalid VariantArray: StructArray must contain a 'metadata' field"
        );
    }

    // Nullable `metadata` must be rejected (spec requires it non-nullable).
    #[test]
    fn test_regular_variant_schema_nullable_metadata() {
        let metadata_field = Field::new("metadata", DataType::BinaryView, true);
        let value_field = Field::new("value", DataType::BinaryView, false);

        let schema = Fields::from(vec![metadata_field, value_field]);

        let err = VariantSchema::try_new(schema).unwrap_err();

        assert_eq!(
            err.to_string(),
            "Invalid argument error: Invalid VariantArray: metadata field can not be nullable"
        );
    }
}
Loading
Loading