 5 |  5 |     "fmt"
 6 |  6 |     "math/rand"
 7 |  7 |     "path/filepath"
   |  8 | +   "sync"
 8 |  9 |     "testing"
 9 | 10 |     "time"
10 | 11 |
@@ -75,49 +76,6 @@ func TestParquetQueryableFallbackLogic(t *testing.T) {
 75 |  76 |     }
 76 |  77 |     ctx := user.InjectOrgID(context.Background(), "user-1")
 77 |  78 |
 78 |     | -    t.Run("should fallback when vertical sharding is enabled", func(t *testing.T) {
 79 |     | -        finder := &blocksFinderMock{}
 80 |     | -        stores := createStore()
 81 |     | -
 82 |     | -        q := &blocksStoreQuerier{
 83 |     | -            minT:        minT,
 84 |     | -            maxT:        maxT,
 85 |     | -            finder:      finder,
 86 |     | -            stores:      stores,
 87 |     | -            consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil),
 88 |     | -            logger:      log.NewNopLogger(),
 89 |     | -            metrics:     newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()),
 90 |     | -            limits:      &blocksStoreLimitsMock{},
 91 |     | -
 92 |     | -            storeGatewayConsistencyCheckMaxAttempts: 3,
 93 |     | -        }
 94 |     | -
 95 |     | -        mParquetQuerier := &mockParquetQuerier{}
 96 |     | -        pq := &parquetQuerierWithFallback{
 97 |     | -            minT:                  minT,
 98 |     | -            maxT:                  maxT,
 99 |     | -            finder:                finder,
100 |     | -            blocksStoreQuerier:    q,
101 |     | -            parquetQuerier:        mParquetQuerier,
102 |     | -            metrics:               newParquetQueryableFallbackMetrics(prometheus.NewRegistry()),
103 |     | -            limits:                defaultOverrides(t, 4),
104 |     | -            logger:                log.NewNopLogger(),
105 |     | -            defaultBlockStoreType: parquetBlockStore,
106 |     | -        }
107 |     | -
108 |     | -        finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{
109 |     | -            &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}},
110 |     | -            &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}},
111 |     | -        }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil)
112 |     | -
113 |     | -        t.Run("select", func(t *testing.T) {
114 |     | -            ss := pq.Select(ctx, true, nil, matchers...)
115 |     | -            require.NoError(t, ss.Err())
116 |     | -            require.Len(t, stores.queriedBlocks, 2)
117 |     | -            require.Len(t, mParquetQuerier.queriedBlocks, 0)
118 |     | -        })
119 |     | -    })
120 |     | -
121 |  79 |     t.Run("should fallback all blocks", func(t *testing.T) {
122 |  80 |         finder := &blocksFinderMock{}
123 |  81 |         stores := createStore()
@@ -671,3 +629,90 @@ func (m *mockParquetQuerier) Reset() {
671 | 629 | func (mockParquetQuerier) Close() error {
672 | 630 |     return nil
673 | 631 | }
    | 632 | +
    | 633 | +func TestMaterializedLabelsFilterCallback(t *testing.T) {
    | 634 | +    tests := []struct {
    | 635 | +        name                     string
    | 636 | +        setupContext             func() context.Context
    | 637 | +        expectedFilterReturned   bool
    | 638 | +        expectedCallbackReturned bool
    | 639 | +    }{
    | 640 | +        {
    | 641 | +            name: "no shard matcher in context",
    | 642 | +            setupContext: func() context.Context {
    | 643 | +                return context.Background()
    | 644 | +            },
    | 645 | +            expectedFilterReturned:   false,
    | 646 | +            expectedCallbackReturned: false,
    | 647 | +        },
    | 648 | +        {
    | 649 | +            name: "shard matcher exists but is not sharded",
    | 650 | +            setupContext: func() context.Context {
    | 651 | +                // Create a ShardInfo with TotalShards = 0 (not sharded)
    | 652 | +                shardInfo := &storepb.ShardInfo{
    | 653 | +                    ShardIndex:  0,
    | 654 | +                    TotalShards: 0, // Not sharded
    | 655 | +                    By:          true,
    | 656 | +                    Labels:      []string{"__name__"},
    | 657 | +                }
    | 658 | +
    | 659 | +                buffers := &sync.Pool{New: func() interface{} {
    | 660 | +                    b := make([]byte, 0, 100)
    | 661 | +                    return &b
    | 662 | +                }}
    | 663 | +                shardMatcher := shardInfo.Matcher(buffers)
    | 664 | +
    | 665 | +                return injectShardMatcherIntoContext(context.Background(), shardMatcher)
    | 666 | +            },
    | 667 | +            expectedFilterReturned:   false,
    | 668 | +            expectedCallbackReturned: false,
    | 669 | +        },
    | 670 | +        {
    | 671 | +            name: "shard matcher exists and is sharded",
    | 672 | +            setupContext: func() context.Context {
    | 673 | +                // Create a ShardInfo with TotalShards > 0 (sharded)
    | 674 | +                shardInfo := &storepb.ShardInfo{
    | 675 | +                    ShardIndex:  0,
    | 676 | +                    TotalShards: 2, // Sharded
    | 677 | +                    By:          true,
    | 678 | +                    Labels:      []string{"__name__"},
    | 679 | +                }
    | 680 | +
    | 681 | +                buffers := &sync.Pool{New: func() interface{} {
    | 682 | +                    b := make([]byte, 0, 100)
    | 683 | +                    return &b
    | 684 | +                }}
    | 685 | +                shardMatcher := shardInfo.Matcher(buffers)
    | 686 | +
    | 687 | +                return injectShardMatcherIntoContext(context.Background(), shardMatcher)
    | 688 | +            },
    | 689 | +            expectedFilterReturned:   true,
    | 690 | +            expectedCallbackReturned: true,
    | 691 | +        },
    | 692 | +    }
    | 693 | +
    | 694 | +    for _, tt := range tests {
    | 695 | +        t.Run(tt.name, func(t *testing.T) {
    | 696 | +            ctx := tt.setupContext()
    | 697 | +
    | 698 | +            filter, exists := materializedLabelsFilterCallback(ctx, nil)
    | 699 | +
    | 700 | +            require.Equal(t, tt.expectedCallbackReturned, exists)
    | 701 | +
    | 702 | +            if tt.expectedFilterReturned {
    | 703 | +                require.NotNil(t, filter)
    | 704 | +
    | 705 | +                // Test that the filter can be used
    | 706 | +                testLabels := labels.FromStrings("__name__", "test_metric", "label1", "value1")
    | 707 | +                // We can't easily test the actual filtering logic without knowing the internal
    | 708 | +                // shard matching implementation, but we can at least verify the filter interface works.
    | 709 | +                _ = filter.Filter(testLabels)
    | 710 | +
    | 711 | +                // Cleanup
    | 712 | +                filter.Close()
    | 713 | +            } else {
    | 714 | +                require.Nil(t, filter)
    | 715 | +            }
    | 716 | +        })
    | 717 | +    }
    | 718 | +}