14 | 14 | import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse;
15 | 15 | import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse;
16 | 16 | import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
   | 17 | +import org.opensearch.action.search.SearchResponse;
17 | 18 | import org.opensearch.cluster.health.ClusterHealthStatus;
18 | 19 | import org.opensearch.common.settings.Settings;
19 | 20 | import org.opensearch.core.rest.RestStatus;

28 | 29 | import java.io.IOException;
29 | 30 | import java.util.Arrays;
30 | 31 | import java.util.Collection;
   | 32 | +import java.util.HashMap;
31 | 33 | import java.util.HashSet;
32 | 34 | import java.util.List;
33 | 35 | import java.util.Map;
34 | 36 | import java.util.Set;
35 | 37 | import java.util.stream.Collectors;
36 | 38 | import java.util.stream.Stream;
37 | 39 |
   | 40 | +import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
   | 41 | +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
38 | 42 | import static org.hamcrest.Matchers.equalTo;
39 | 43 |
40 | 44 | @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3)
@@ -857,4 +861,72 @@ public void testReadWriteWeightedRoutingMetadataOnNodeRestart() throws Exception
857 | 861 |         );
858 | 862 |
859 | 863 |     }
    | 864 | +
    | 865 | +    /**
    | 866 | +     * Regression test for https://github.com/opensearch-project/OpenSearch/issues/18817:
    | 867 | +     * custom string search preference with awareness attributes enabled.
    | 868 | +     * A custom preference string is expected to route requests for a given shard to the same replica
    | 869 | +     * on every search. However, when awareness attributes are configured, this does not hold.
    | 870 | +     */
    | 871 | +    public void testCustomPreferenceShardIdCombination() {
    | 872 | +        // Configure cluster with awareness attributes
    | 873 | +        Settings commonSettings = Settings.builder()
    | 874 | +            .put("cluster.routing.allocation.awareness.attributes", "rack")
    | 875 | +            .put("cluster.routing.allocation.awareness.force.rack.values", "rack1,rack2")
    | 876 | +            .put("cluster.routing.use_adaptive_replica_selection", false)
    | 877 | +            .put("cluster.search.ignore_awareness_attributes", false)
    | 878 | +            .build();
    | 879 | +
    | 880 | +        // Start cluster
    | 881 | +        internalCluster().startClusterManagerOnlyNode(commonSettings);
    | 882 | +        internalCluster().startDataOnlyNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack", "rack1").build());
    | 883 | +        internalCluster().startDataOnlyNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack", "rack2").build());
    | 884 | +
    | 885 | +        ensureStableCluster(5);
    | 886 | +        ensureGreen();
    | 887 | +
    | 888 | +        // Create index with specific shard configuration
    | 889 | +        assertAcked(
    | 890 | +            prepareCreate("test_index").setSettings(
    | 891 | +                Settings.builder().put("index.number_of_shards", 6).put("index.number_of_replicas", 1).build()
    | 892 | +            )
    | 893 | +        );
    | 894 | +
    | 895 | +        ensureGreen("test_index");
    | 896 | +
    | 897 | +        // Index test documents
    | 898 | +        for (int i = 0; i < 30; i++) {
    | 899 | +            client().prepareIndex("test_index").setId(String.valueOf(i)).setSource("field", "value" + i).get();
    | 900 | +        }
    | 901 | +        refreshAndWaitForReplication("test_index");
    | 902 | +
    | 903 | +        /*
    | 904 | +        Execute the same match-all query repeatedly with a custom string preference.
    | 905 | +        For each search, record the node that served each shard in the response.
    | 906 | +        Given the custom string preference, every search should report the exact same node id for a given shard.
    | 907 | +        Otherwise, the custom preference string is not producing consistent shard routing.
    | 908 | +        */
    | 909 | +        Map<String, Set<String>> shardToNodes = new HashMap<>();
    | 910 | +        for (int i = 0; i < 20; i++) {
    | 911 | +            SearchResponse response = client().prepareSearch("test_index")
    | 912 | +                .setQuery(matchAllQuery())
    | 913 | +                .setPreference("test_preference_123")
    | 914 | +                .setSize(30)
    | 915 | +                .get();
    | 916 | +            for (int j = 0; j < response.getHits().getHits().length; j++) {
    | 917 | +                String shardId = response.getHits().getAt(j).getShard().getShardId().toString();
    | 918 | +                String nodeId = response.getHits().getAt(j).getShard().getNodeId();
    | 919 | +                shardToNodes.computeIfAbsent(shardId, k -> new HashSet<>()).add(nodeId);
    | 920 | +            }
    | 921 | +        }
    | 922 | +
    | 923 | +        /*
    | 924 | +        If more than one node served requests for a given shard,
    | 925 | +        then custom preference routing has regressed.
    | 926 | +        */
    | 927 | +        logger.info("--> shard to node mappings: {}", shardToNodes);
    | 928 | +        for (Map.Entry<String, Set<String>> entry : shardToNodes.entrySet()) {
    | 929 | +            assertThat("Shard " + entry.getKey() + " should consistently route to the same node", entry.getValue().size(), equalTo(1));
    | 930 | +        }
    | 931 | +    }
860 | 932 | }
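
To make the invariant the test asserts concrete, here is a minimal, self-contained sketch of deterministic preference-based replica selection. It is an illustration under stated assumptions, not OpenSearch's actual routing code: `PreferenceRoutingSketch` and `pickNode` are hypothetical names, and `String.hashCode()` stands in for the stable hash OpenSearch applies to the preference string.

```java
import java.util.List;

// Hypothetical sketch; not OpenSearch's OperationRouting API.
public final class PreferenceRoutingSketch {

    /**
     * Deterministically pick one copy of a shard for a given preference string.
     * The invariant the test asserts: the same (preference, copies) input always
     * yields the same node, no matter how many times the search is repeated.
     */
    static String pickNode(String preference, List<String> copyNodeIds) {
        // Stable hash of the preference string; the hash function itself is a
        // stand-in, any stable hash preserves the invariant.
        int hash = preference.hashCode();
        return copyNodeIds.get(Math.floorMod(hash, copyNodeIds.size()));
    }

    public static void main(String[] args) {
        // Two copies of one shard, one per rack, mirroring the test's topology.
        List<String> copies = List.of("node-rack1-a", "node-rack2-a");
        for (int i = 0; i < 5; i++) {
            // Prints the same node id five times: routing is consistent.
            System.out.println(pickNode("test_preference_123", copies));
        }
    }
}
```

One plausible way such a regression manifests, consistent with what the test checks though the issue's root cause may differ, is that awareness-attribute handling filters or reorders the candidate copies differently per coordinating node before the hash-based pick, so the same preference string no longer lands on the same node.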