diff --git a/internal/storage/bucket/migrations/19-transactions-fill-pcv/up.sql b/internal/storage/bucket/migrations/19-transactions-fill-pcv/up.sql
index d7865df07d..a57ae4cd1b 100644
--- a/internal/storage/bucket/migrations/19-transactions-fill-pcv/up.sql
+++ b/internal/storage/bucket/migrations/19-transactions-fill-pcv/up.sql
@@ -7,10 +7,10 @@ do $$
 
     drop table if exists moves_view;
 
-    create temp table moves_view as
+    create table moves_view as
     select transactions_seq, public.aggregate_objects(jsonb_build_object(accounts_address, volumes)) as volumes
     from (
-        select transactions_seq::numeric, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
+        select transactions_seq, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
         from (
             SELECT DISTINCT ON (moves.transactions_seq, accounts_address, asset)
                 moves.transactions_seq, accounts_address, asset, first_value(post_commit_volumes) OVER (
@@ -27,8 +27,11 @@ do $$
     group by transactions_seq;
 
     create index moves_view_idx on moves_view(transactions_seq);
+    -- speed up hash join when updating rows later
+    alter table moves_view add foreign key(transactions_seq) references transactions(seq);
 
     if (select count(*) from moves_view) = 0 then
+        drop table moves_view;
         return;
     end if;
 
@@ -46,7 +49,10 @@ do $$
         from data
         where transactions.seq = data.transactions_seq;
 
-        exit when not found;
+        if not found then
+            drop table moves_view;
+            exit;
+        end if;
 
         _offset = _offset + _batch_size;
 
diff --git a/internal/storage/bucket/migrations/27-fix-invalid-pcv/up.sql b/internal/storage/bucket/migrations/27-fix-invalid-pcv/up.sql
index 33d9751a72..de742a279c 100644
--- a/internal/storage/bucket/migrations/27-fix-invalid-pcv/up.sql
+++ b/internal/storage/bucket/migrations/27-fix-invalid-pcv/up.sql
@@ -7,10 +7,10 @@ do $$
 
     drop table if exists moves_view;
 
-    create temp table moves_view as
+    create table moves_view as
     select transactions_seq, public.aggregate_objects(jsonb_build_object(accounts_address, volumes)) as volumes
     from (
-        select transactions_seq::numeric, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
+        select transactions_seq, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
         from (
             SELECT DISTINCT ON (moves.transactions_seq, accounts_address, asset)
                 moves.transactions_seq, accounts_address, asset, first_value(post_commit_volumes) OVER (
@@ -27,8 +27,11 @@ do $$
     group by transactions_seq;
 
     create index moves_view_idx on moves_view(transactions_seq);
+    -- speed up hash join when updating rows later
+    alter table moves_view add foreign key(transactions_seq) references transactions(seq);
 
     if (select count(*) from moves_view) = 0 then
+        drop table moves_view;
         return;
     end if;
 
@@ -46,7 +49,10 @@ do $$
         from data
         where transactions.seq = data.transactions_seq;
 
-        exit when not found;
+        if not found then
+            drop table moves_view;
+            exit;
+        end if;
 
         _offset = _offset + _batch_size;
 
diff --git a/internal/storage/bucket/migrations/28-fix-pcv-missing-asset/up.sql b/internal/storage/bucket/migrations/28-fix-pcv-missing-asset/up.sql
index c8bc4f21f4..25166375bb 100644
--- a/internal/storage/bucket/migrations/28-fix-pcv-missing-asset/up.sql
+++ b/internal/storage/bucket/migrations/28-fix-pcv-missing-asset/up.sql
@@ -7,10 +7,10 @@ do $$
 
     drop table if exists moves_view;
 
-    create temp table moves_view as
+    create table moves_view as
     select transactions_seq, public.aggregate_objects(jsonb_build_object(accounts_address, volumes)) as volumes
     from (
-        select transactions_seq::numeric, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
+        select transactions_seq, accounts_address, public.aggregate_objects(json_build_object(asset, json_build_object('input', (post_commit_volumes).inputs, 'output', (post_commit_volumes).outputs))::jsonb) as volumes
         from (
             SELECT DISTINCT ON (moves.transactions_seq, accounts_address, asset)
                 moves.transactions_seq, accounts_address, asset, first_value(post_commit_volumes) OVER (
@@ -27,8 +27,11 @@ do $$
     group by transactions_seq;
 
     create index moves_view_idx on moves_view(transactions_seq);
+    -- speed up hash join when updating rows later
+    alter table moves_view add foreign key(transactions_seq) references transactions(seq);
 
     if (select count(*) from moves_view) = 0 then
+        drop table moves_view;
         return;
     end if;
 
@@ -46,7 +49,10 @@ do $$
         from data
         where transactions.seq = data.transactions_seq;
 
-        exit when not found;
+        if not found then
+            drop table moves_view;
+            exit;
+        end if;
 
         _offset = _offset + _batch_size;
 
diff --git a/internal/storage/bucket/migrations/31-fix-transaction-updated-at/up.sql b/internal/storage/bucket/migrations/31-fix-transaction-updated-at/up.sql
index 8fdf3bdedd..3b0e83fd4b 100644
--- a/internal/storage/bucket/migrations/31-fix-transaction-updated-at/up.sql
+++ b/internal/storage/bucket/migrations/31-fix-transaction-updated-at/up.sql
@@ -7,14 +7,18 @@ do $$
 
     drop table if exists txs_view;
 
-    create temp table txs_view as
+    create table txs_view as
     select *
    from transactions
    where updated_at is null;
 
     if (select count(*) from txs_view) = 0 then
+        drop table txs_view;
         return;
     end if;
+    -- speed up hash join when updating rows later
+    create index txs_view_seq_idx on txs_view(seq);
+    alter table txs_view add foreign key(seq) references transactions(seq);
 
     perform pg_notify('migrations-{{ .Schema }}', 'init: ' || (select count(*) from txs_view));
 
@@ -29,10 +33,12 @@ do $$
         update transactions
         set updated_at = transactions.inserted_at
         from data
-        where transactions.seq = data.seq and
-              transactions.ledger = data.ledger;
+        where transactions.seq = data.seq;
 
-        exit when not found;
+        if not found then
+            drop table txs_view;
+            exit;
+        end if;
 
         _offset = _offset + _batch_size;
 
diff --git a/internal/storage/bucket/migrations/34-fix-memento-format/up.sql b/internal/storage/bucket/migrations/34-fix-memento-format/up.sql
index 023b33bbfc..3972f03be4 100644
--- a/internal/storage/bucket/migrations/34-fix-memento-format/up.sql
+++ b/internal/storage/bucket/migrations/34-fix-memento-format/up.sql
@@ -1,7 +1,7 @@
 do $$
     declare
         _offset integer := 0;
-        _batch_size integer := 1000;
+        _batch_size integer := 10000;
     begin
         set search_path = '{{ .Schema }}';
 
@@ -15,9 +15,8 @@ do $$
         with data as (
             select *
             from logs
+            where seq >= _offset and seq < _offset + _batch_size
             order by seq
-            offset _offset
-            limit _batch_size
         )
         update logs
         set memento = convert_to(
@@ -82,7 +81,9 @@ do $$
         from data
         where logs.seq = data.seq;
 
-        exit when not found;
+        if _offset >= (select max(seq) from logs) then
+            exit;
+        end if;
 
         _offset = _offset + _batch_size;
 
@@ -90,7 +91,5 @@ do $$
 
         commit;
 
     end loop;
-
-    drop table if exists txs_view;
 end $$;
\ No newline at end of file
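
Context on the batching change in 34-fix-memento-format/up.sql: each pass now selects its batch with a range predicate on seq (where seq >= _offset and seq < _offset + _batch_size) instead of "order by seq offset ... limit ...", so an index on seq is used directly rather than scanning and discarding _offset rows on every iteration, and the loop exits once _offset passes max(seq). A minimal sketch of that pattern is below; the table name logs, the column seq, and the no-op "set memento = memento" payload stand in for the migration's real conversion expression and are illustrative only:

    do $$
        declare
            _offset integer := 0;
            _batch_size integer := 10000;
        begin
            loop
                -- each pass touches only the seq range [_offset, _offset + _batch_size),
                -- which an index on seq can satisfy without skipping rows
                update logs
                set memento = memento -- placeholder for the real rewrite expression
                where seq >= _offset and seq < _offset + _batch_size;

                -- stop once the offset has moved past the highest existing seq
                -- (coalesce guards the empty-table case)
                exit when _offset >= coalesce((select max(seq) from logs), 0);

                _offset = _offset + _batch_size;

                commit;
            end loop;
        end
    $$;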