@@ -352,6 +352,74 @@ fn can_copy_from() {
        );
    }

+    /// Check that we do not create the index on `block$` twice. There was a bug
+    /// that if an immutable entity type had a `block` field and index creation
+    /// was postponed, we would emit the index on `block$` twice, once from
+    /// `Table.create_time_travel_indexes` and once through
+    /// `IndexList.indexes_for_table`
+    #[test]
+    fn postponed_indexes_with_block_column() {
+        fn index_list() -> IndexList {
+            // To generate this list, print the output of `layout.as_ddl(None)`, run
+            // that in Postgres and do `select indexdef from pg_indexes where
+            // schemaname = 'sgd0815'`
+            const INDEX_DEFS: &[&str] = &[
+                "CREATE UNIQUE INDEX data_pkey ON sgd0815.data USING btree (vid)",
+                "CREATE UNIQUE INDEX data_id_key ON sgd0815.data USING btree (id)",
+                "CREATE INDEX data_block ON sgd0815.data USING btree (block$)",
+                "CREATE INDEX attr_1_0_data_block ON sgd0815.data USING btree (block, \"block$\")",
+            ];
+
+            let mut indexes: HashMap<String, Vec<CreateIndex>> = HashMap::new();
+            indexes.insert(
+                "data".to_string(),
+                INDEX_DEFS
+                    .iter()
+                    .map(|def| CreateIndex::parse(def.to_string()))
+                    .collect(),
+            );
+            IndexList { indexes }
+        }
+        // Names of the two indexes we are interested in. Note the leading space
+        // to guard a little against overlapping names
+        const BLOCK_IDX: &str = " data_block";
+        const ATTR_IDX: &str = " attr_1_0_data_block";
+
+        let layout = test_layout(BLOCK_GQL);
+
+        // Create everything
+        let sql = layout.as_ddl(None).unwrap();
+        assert!(sql.contains(BLOCK_IDX));
+        assert!(sql.contains(ATTR_IDX));
+
+        // Defer attribute indexes
+        let sql = layout.as_ddl(Some(index_list())).unwrap();
+        assert!(sql.contains(BLOCK_IDX));
+        assert!(!sql.contains(ATTR_IDX));
+        // This used to be duplicated
+        let count = sql.matches(BLOCK_IDX).count();
+        assert_eq!(1, count);
+
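+        // Creating the postponed indexes directly on the table should emit
+        // only the attribute index; the index on `block$` is not postponed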
+        let table = layout.table(&SqlName::from("Data")).unwrap();
+        let sql = table.create_postponed_indexes(vec![], false);
+        assert_eq!(1, sql.len());
+        assert!(!sql[0].contains(BLOCK_IDX));
+        assert!(sql[0].contains(ATTR_IDX));
+
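+        // `IndexList.indexes_for_table` should likewise return only the
+        // attribute index for the destination namespace, not the `block$` index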
+        let dst_nsp = Namespace::new("sgd2".to_string()).unwrap();
+        let arr = index_list()
+            .indexes_for_table(&dst_nsp, &table.name.to_string(), &table, true, false)
+            .unwrap();
+        assert_eq!(1, arr.len());
+        assert!(!arr[0].1.contains(BLOCK_IDX));
+        assert!(arr[0].1.contains(ATTR_IDX));
+
+        let arr = index_list()
+            .indexes_for_table(&dst_nsp, &table.name.to_string(), &table, false, false)
+            .unwrap();
+        assert_eq!(0, arr.len());
+    }
+
    const THING_GQL: &str = r#"
        type Thing @entity {
            id: ID!
@@ -1109,3 +1177,15 @@ on "sgd0815"."stats_3_day" using btree("volume");
create index stats_3_day_dims
    on "sgd0815"."stats_3_day"(group_2, group_1, timestamp);
"#;
+
+    const BLOCK_GQL: &str = r#"
+    type Block @entity(immutable: true) {
+        id: ID!
+        number: Int!
+    }
+
+    type Data @entity(immutable: true) {
+        id: ID!
+        block: Block!
+    }
+    "#;