
Commit 3e089d2

Perf: optimize actual_buffer_size to use only data buffer capacity for coalesce (#7967)

# Which issue does this PR close?

The idea is to calculate only the data buffer size when deciding whether to gc, because we almost only care about gc for the data buffers, not for the views or null buffers. Since gc compacts only the data buffers, the 2x comparison should also be against the data buffer size.

# Rationale for this change

Optimize actual_buffer_size to use only the data buffer capacity.

# What changes are included in this PR?

`actual_buffer_size` now sums the capacities of the data buffers instead of calling `get_buffer_memory_size`, which also counts the view and null buffers.

# Are these changes tested?

Yes, by the updated tests in coalesce.rs and by benchmarks. The improvement on high-selectivity benchmarks with a low null ratio is large, up to about 2x faster:

```text
cargo bench --bench coalesce_kernels "single_utf8view"
   Compiling arrow-select v55.2.0 (/Users/zhuqi/arrow-rs/arrow-select)
   Compiling arrow-cast v55.2.0 (/Users/zhuqi/arrow-rs/arrow-cast)
   Compiling arrow-string v55.2.0 (/Users/zhuqi/arrow-rs/arrow-string)
   Compiling arrow-ord v55.2.0 (/Users/zhuqi/arrow-rs/arrow-ord)
   Compiling arrow-csv v55.2.0 (/Users/zhuqi/arrow-rs/arrow-csv)
   Compiling arrow-json v55.2.0 (/Users/zhuqi/arrow-rs/arrow-json)
   Compiling arrow v55.2.0 (/Users/zhuqi/arrow-rs/arrow)
    Finished `bench` profile [optimized] target(s) in 13.26s
     Running benches/coalesce_kernels.rs (target/release/deps/coalesce_kernels-bb9750abedb10ad6)

filter: single_utf8view, 8192, nulls: 0, selectivity: 0.001
                        time:   [30.946 ms 31.071 ms 31.193 ms]
                        change: [−1.7086% −1.1581% −0.6036%] (p = 0.00 < 0.05)
                        Change within noise threshold.
Found 5 outliers among 100 measurements (5.00%)
  4 (4.00%) low mild
  1 (1.00%) high mild

filter: single_utf8view, 8192, nulls: 0, selectivity: 0.01
                        time:   [3.8178 ms 3.8311 ms 3.8444 ms]
                        change: [−4.0521% −3.5467% −3.0345%] (p = 0.00 < 0.05)
                        Performance has improved.
Found 1 outliers among 100 measurements (1.00%)
  1 (1.00%) low mild

Benchmarking filter: single_utf8view, 8192, nulls: 0, selectivity: 0.1: Warming up for 3.0000 s
Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 9.9s, enable flat sampling, or reduce sample count to 40.
filter: single_utf8view, 8192, nulls: 0, selectivity: 0.1
                        time:   [1.9337 ms 1.9406 ms 1.9478 ms]
                        change: [+0.3699% +0.9557% +1.5666%] (p = 0.00 < 0.05)
                        Change within noise threshold.
Found 5 outliers among 100 measurements (5.00%)
  2 (2.00%) low mild
  3 (3.00%) high severe

filter: single_utf8view, 8192, nulls: 0, selectivity: 0.8
                        time:   [797.60 µs 805.31 µs 813.85 µs]
                        change: [−59.177% −58.412% −57.639%] (p = 0.00 < 0.05)
                        Performance has improved.
Found 2 outliers among 100 measurements (2.00%)
  1 (1.00%) high mild
  1 (1.00%) high severe

filter: single_utf8view, 8192, nulls: 0.1, selectivity: 0.001
                        time:   [43.742 ms 43.924 ms 44.108 ms]
                        change: [−1.2146% −0.5778% +0.0828%] (p = 0.08 > 0.05)
                        No change in performance detected.

filter: single_utf8view, 8192, nulls: 0.1, selectivity: 0.01
                        time:   [5.5736 ms 5.5987 ms 5.6247 ms]
                        change: [−0.2381% +0.4740% +1.1711%] (p = 0.18 > 0.05)
                        No change in performance detected.

filter: single_utf8view, 8192, nulls: 0.1, selectivity: 0.1
                        time:   [2.2963 ms 2.3035 ms 2.3109 ms]
                        change: [−0.9314% −0.5125% −0.0931%] (p = 0.02 < 0.05)
                        Change within noise threshold.

Benchmarking filter: single_utf8view, 8192, nulls: 0.1, selectivity: 0.8: Warming up for 3.0000 s
Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 8.1s, enable flat sampling, or reduce sample count to 50.
filter: single_utf8view, 8192, nulls: 0.1, selectivity: 0.8
                        time:   [1.5482 ms 1.5697 ms 1.5903 ms]
                        change: [−45.794% −44.386% −43.000%] (p = 0.00 < 0.05)
                        Performance has improved.
```

# Are there any user-facing changes?

No breaking changes to public APIs; the user-visible effect is the performance improvement shown above.

---------

Co-authored-by: Andrew Lamb <[email protected]>
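For context, the sketch below (not part of this commit) shows the two quantities being compared, using public `StringViewArray` APIs; the array contents are illustrative only:

```rust
use arrow_array::{Array, StringViewArray};

fn main() {
    // Strings of 12 bytes or fewer are inlined directly in the views;
    // only longer strings occupy the variadic data buffers that gc compacts.
    let array = StringViewArray::from(vec![
        "small string",                 // 12 bytes: inlined, no data-buffer bytes
        "This string is 28 bytes long", // stored in a data buffer
    ]);

    // What this PR now compares against 2x the ideal size:
    // the capacity of the data buffers alone.
    let data_capacity: usize = array.data_buffers().iter().map(|b| b.capacity()).sum();

    // What was used before: all buffers, including the views and nulls,
    // which gc never shrinks.
    let total = array.get_buffer_memory_size();

    assert!(total >= data_capacity);
    println!("data buffers: {data_capacity} bytes; all buffers: {total} bytes");
}
```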
1 parent a7f3ba8 commit 3e089d2

File tree

2 files changed (+26, -21 lines)

arrow-select/src/coalesce.rs

Lines changed: 22 additions & 20 deletions
```diff
@@ -785,21 +785,27 @@ mod tests {
 
     #[test]
     fn test_string_view_many_small_compact() {
-        // The strings are 28 long, so each batch has 400 * 28 = 5600 bytes
+        // 200 rows alternating long (28) and short (≤12) strings.
+        // Only the 100 long strings go into data buffers: 100 × 28 = 2800.
         let batch = stringview_batch_repeated(
-            400,
+            200,
             [Some("This string is 28 bytes long"), Some("small string")],
         );
         let output_batches = Test::new()
             // First allocated buffer is 8kb.
-            // Appending five batches of 5600 bytes will use 5600 * 5 = 28kb (8kb, an 16kb and 32kbkb)
+            // Appending 10 batches of 2800 bytes will use 2800 * 10 = 28kb (an 8kb, a 16kb and a 32kb buffer)
+            .with_batch(batch.clone())
+            .with_batch(batch.clone())
+            .with_batch(batch.clone())
+            .with_batch(batch.clone())
+            .with_batch(batch.clone())
             .with_batch(batch.clone())
             .with_batch(batch.clone())
             .with_batch(batch.clone())
             .with_batch(batch.clone())
             .with_batch(batch.clone())
             .with_batch_size(8000)
-            .with_expected_output_sizes(vec![2000]) // only 2000 rows total
+            .with_expected_output_sizes(vec![2000]) // 10 batches of 200 rows = 2000 rows total
             .run();
 
         // expect a nice even distribution of buffers
@@ -854,22 +860,27 @@
 
     #[test]
     fn test_string_view_large_small() {
-        // The strings are 37 bytes long, so each batch has 200 * 28 = 5600 bytes
+        // The long strings are 28 bytes, so each batch has 100 * 28 = 2800 bytes
         let mixed_batch = stringview_batch_repeated(
-            400,
+            200,
             [Some("This string is 28 bytes long"), Some("small string")],
         );
         // These strings aren't copied, this array has an 8k buffer
         let all_large = stringview_batch_repeated(
-            100,
+            50,
             [Some(
                 "This buffer has only large strings in it so there are no buffer copies",
             )],
         );
 
         let output_batches = Test::new()
             // First allocated buffer is 8kb.
-            // Appending five batches of 5600 bytes will use 5600 * 5 = 28kb (8kb, an 16kb and 32kbkb)
+            // Appending ten batches of 2800 bytes will use 2800 * 10 = 28kb (an 8kb, a 16kb and a 32kb buffer)
+            .with_batch(mixed_batch.clone())
+            .with_batch(mixed_batch.clone())
+            .with_batch(all_large.clone())
+            .with_batch(mixed_batch.clone())
+            .with_batch(all_large.clone())
             .with_batch(mixed_batch.clone())
             .with_batch(mixed_batch.clone())
             .with_batch(all_large.clone())
@@ -883,26 +894,17 @@
             col_as_string_view("c0", output_batches.first().unwrap()),
             vec![
                 ExpectedLayout {
-                    len: 8176,
+                    len: 8190,
                     capacity: 8192,
                 },
-                // this buffer was allocated but not used when the all_large batch was pushed
                 ExpectedLayout {
-                    len: 3024,
+                    len: 16366,
                     capacity: 16384,
                 },
                 ExpectedLayout {
-                    len: 7000,
-                    capacity: 8192,
-                },
-                ExpectedLayout {
-                    len: 5600,
+                    len: 6244,
                     capacity: 32768,
                 },
-                ExpectedLayout {
-                    len: 7000,
-                    capacity: 8192,
-                },
             ],
         );
     }
```
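Not part of the commit, but as background for these expected layouts: a minimal sketch of what gc does to a sparse `StringViewArray`, using arrow's public `gc`, `slice`, and `data_buffers` APIs (the array contents are illustrative):

```rust
use arrow_array::StringViewArray;

fn main() {
    // 100 long strings -> 100 * 28 = 2800 bytes in the data buffers.
    let array = StringViewArray::from(vec!["This string is 28 bytes long"; 100]);

    // A small slice keeps the full data buffers alive, so its data-buffer
    // capacity far exceeds the 5 * 28 = 140 bytes its views reference.
    let sliced = array.slice(0, 5);
    let before: usize = sliced.data_buffers().iter().map(|b| b.capacity()).sum();

    // gc copies just the referenced bytes into fresh, tightly sized buffers.
    let compacted = sliced.gc();
    let after: usize = compacted.data_buffers().iter().map(|b| b.capacity()).sum();

    assert!(after < before);
    println!("data-buffer capacity before gc: {before}, after gc: {after}");
}
```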

arrow-select/src/coalesce/byte_view.rs

Lines changed: 4 additions & 1 deletion
```diff
@@ -284,7 +284,10 @@ impl<B: ByteViewType> InProgressArray for InProgressByteViewArray<B> {
             (false, 0)
         } else {
             let ideal_buffer_size = s.total_buffer_bytes_used();
-            let actual_buffer_size = s.get_buffer_memory_size();
+            // We don't use get_buffer_memory_size here, because gc is for the contents of the
+            // data buffers, not views and nulls.
+            let actual_buffer_size =
+                s.data_buffers().iter().map(|b| b.capacity()).sum::<usize>();
             // copying strings is expensive, so only do it if the array is
             // sparse (uses at least 2x the memory it needs)
             let need_gc =
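```

Not part of the diff: a hedged sketch of the sparseness test the truncated `need_gc` line feeds into. The names mirror the diff, but the exact condition here approximates the real code in `InProgressByteViewArray`:

```rust
/// Approximation of the gc decision: copying strings is expensive, so only
/// gc when the data buffers hold at least 2x the bytes the views reference.
fn should_gc(ideal_buffer_size: usize, actual_buffer_size: usize) -> bool {
    ideal_buffer_size != 0 && actual_buffer_size > ideal_buffer_size * 2
}

fn main() {
    // Views reference 2800 bytes but an 8kb data buffer backs them: gc.
    assert!(should_gc(2800, 8192));
    // Dense case: capacity is close to what is referenced, so skip the copy.
    assert!(!should_gc(2800, 4096));
}
```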
