fix: recreate account index concurrently
gfyrag committed Oct 24, 2024
1 parent 2e1536a commit aefade2
Showing 5 changed files with 60 additions and 4 deletions.
@@ -0,0 +1 @@
name: Recreate accounts unique index
@@ -0,0 +1,9 @@
-- There is already a covering index on the accounts table (including the seq column).
-- As we will remove the seq column in the next migration, we have to create a new index without it
-- (PG will drop the old index automatically once the column is removed).
-- We also create the index concurrently to avoid locking the table.
-- And, since an equivalent index already exists on this table, creating the new one should not fail.
--
-- We create this index in a dedicated migration because, as the doc mentions (https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-MULTI-STATEMENT),
-- multi-statement queries are automatically wrapped inside a transaction block, and it's forbidden
-- to create an index concurrently inside a transaction block.
create unique index concurrently accounts_ledger2 on "{{.Bucket}}".accounts (ledger, address)
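
For context, a minimal illustration (not part of the commit) of why this statement needs a dedicated migration file; demo_idx and demo_table are hypothetical names:

begin;
create index concurrently demo_idx on demo_table (col);
-- ERROR:  CREATE INDEX CONCURRENTLY cannot run inside a transaction block
rollback;

-- Sent on its own, outside any transaction block, the same statement succeeds:
create index concurrently demo_idx on demo_table (col);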
@@ -73,4 +73,54 @@ alter table accounts_metadata
drop column accounts_seq;

alter table transactions
drop column seq;

alter table accounts
drop column seq;

-- rename the index created in the previous migration: dropping the seq column of the accounts table automatically dropped the index accounts_ledger
alter index accounts_ledger2
rename to accounts_ledger;
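
An illustrative aside (not part of the commit) on why accounts_ledger disappeared: in PostgreSQL, dropping a column also drops every index that references it. The table t below is hypothetical:

create table t (a int, seq bigint);
create index t_idx on t (a, seq);
alter table t drop column seq; -- t_idx is dropped automatically together with the column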

create or replace function set_log_hash()
returns trigger
security definer
language plpgsql
as
$$
declare
previousHash bytea;
marshalledAsJSON varchar;
begin
select hash into previousHash
from logs
where ledger = new.ledger
order by id desc
limit 1;

-- select only fields participating in the hash on the backend and format json representation the same way
select '{' ||
'"type":"' || new.type || '",' ||
'"data":' || encode(new.memento, 'escape') || ',' ||
'"date":"' || (to_json(new.date::timestamp)#>>'{}') || 'Z",' ||
'"idempotencyKey":"' || coalesce(new.idempotency_key, '') || '",' ||
'"id":0,' ||
'"hash":null' ||
'}' into marshalledAsJSON;

new.hash = (
select public.digest(
case
when previousHash is null
then marshalledAsJSON::bytea
else '"' || encode(previousHash::bytea, 'base64')::bytea || E'"\n' || convert_to(marshalledAsJSON, 'LATIN1')::bytea
end || E'\n', 'sha256'::text
)
);

return new;
end;
$$ set search_path from current;
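
A sketch of the chaining rule the trigger implements, assuming pgcrypto is installed in the public schema (which the public.digest call implies); the payload below is a hypothetical placeholder:

-- First log of a ledger (no previous hash): hash = sha256(payload || '\n')
select public.digest('{"type":"demo"}'::bytea || E'\n', 'sha256'::text);

-- Subsequent logs chain on the previous hash:
-- hash = sha256('"' || base64(previousHash) || '"\n' || payload || '\n')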

alter table logs
drop column seq;
4 changes: 0 additions & 4 deletions internal/storage/driver/driver.go
@@ -35,10 +35,6 @@ type Driver struct {

func (d *Driver) CreateLedger(ctx context.Context, l *ledger.Ledger) (*ledgerstore.Store, error) {

if l.Metadata == nil {
l.Metadata = metadata.Metadata{}
}

b := bucket.New(d.db, l.Bucket)
if err := b.Migrate(ctx, d.tracer); err != nil {
return nil, fmt.Errorf("migrating bucket: %w", err)
