@@ -1310,3 +1310,109 @@ func TestQuerierMaxSamplesLimit(t *testing.T) {
 		Error: "query processing would load too many samples into memory in query execution",
 	})
 }
+
+func TestQuerierDistributedExecution(t *testing.T) {
+	// e2e test setup
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	consul := e2edb.NewConsulWithName("consul")
+	memcached := e2ecache.NewMemcached()
+	require.NoError(t, s.StartAndWaitReady(consul, memcached))
+
+	// initialize the flags
+	baseFlags := mergeFlags(AlertmanagerLocalFlags(), BlocksStorageFlags())
+	flags := mergeFlags(
+		baseFlags,
+		map[string]string{
+			"-blocks-storage.tsdb.head-compaction-interval":    "4m",
+			"-blocks-storage.tsdb.block-ranges-period":         "2h",
+			"-blocks-storage.tsdb.ship-interval":               "1h",
+			"-blocks-storage.bucket-store.sync-interval":       "1s",
+			"-blocks-storage.tsdb.retention-period":            "24h",
+			"-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory,
+			"-querier.query-store-for-labels-enabled":          "true",
+			// Ingester.
+			"-ring.store":      "consul",
+			"-consul.hostname": consul.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// Alertmanager.
+			"-alertmanager.web.external-url":      "http://localhost/alertmanager",
+			"-frontend.query-vertical-shard-size": "1",
+			"-frontend.max-cache-freshness":       "1m",
+			// enable experimental PromQL functions
+			"-querier.enable-promql-experimental-functions": "true",
+			// enable distributed execution (logical plan execution)
+			"-querier.distributed-exec-enabled": "true",
+		},
+	)
+
+	minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"])
+	require.NoError(t, s.StartAndWaitReady(minio))
+
+	// start services
+	queryScheduler := e2ecortex.NewQueryScheduler("query-scheduler", flags, "")
+	require.NoError(t, s.StartAndWaitReady(queryScheduler))
+	flags["-frontend.scheduler-address"] = queryScheduler.NetworkGRPCEndpoint()
+	flags["-querier.scheduler-address"] = queryScheduler.NetworkGRPCEndpoint()
+
+	queryFrontend := e2ecortex.NewQueryFrontend("query-frontend", flags, "")
+	require.NoError(t, s.Start(queryFrontend))
+
+	ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	querier1 := e2ecortex.NewQuerier("querier-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	querier2 := e2ecortex.NewQuerier("querier-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+
+	require.NoError(t, s.StartAndWaitReady(querier1, querier2, ingester, distributor))
+	require.NoError(t, s.WaitReady(queryFrontend))
+
+	// wait until the distributor and queriers have updated the ring.
+	require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+	require.NoError(t, querier1.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+	require.NoError(t, querier2.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+
+	// push some series to Cortex.
+	distClient, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", userID)
+	require.NoError(t, err)
+
+	series1Timestamp := time.Now()
+	series2Timestamp := series1Timestamp.Add(blockRangePeriod * 2)
+	series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"})
+	series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"})
+
+	res, err := distClient.Push(series1)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	res, err = distClient.Push(series2)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	// sanity-check that each querier can serve queries directly.
+	for _, q := range []*e2ecortex.CortexService{querier1, querier2} {
+		c, err := e2ecortex.NewClient("", q.HTTPEndpoint(), "", "", userID)
+		require.NoError(t, err)
+
+		_, err = c.Query("series_1", series1Timestamp)
+		require.NoError(t, err)
+	}
+
+	require.NoError(t, queryScheduler.WaitSumMetrics(e2e.Equals(2), "cortex_query_scheduler_connected_querier_clients"))
+
+	// main tests
+	// - make sure queries are still executable with distributed execution enabled,
+	//   going through the query frontend -> query scheduler -> querier path.
+	c, err := e2ecortex.NewClient("", queryFrontend.HTTPEndpoint(), "", "", userID)
+	require.NoError(t, err)
+
+	result, err := c.Query("series_1", series1Timestamp)
+	require.NoError(t, err)
+	require.Equal(t, model.ValVector, result.Type())
+	require.Equal(t, expectedVector1, result.(model.Vector))
+
+	result, err = c.Query("series_2", series2Timestamp)
+	require.NoError(t, err)
+	require.Equal(t, model.ValVector, result.Type())
+	require.Equal(t, expectedVector2, result.(model.Vector))
+}