diff --git a/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/notes.yaml b/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/notes.yaml
new file mode 100644
index 000000000..4b7c24021
--- /dev/null
+++ b/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/notes.yaml
@@ -0,0 +1 @@
+name: Recreate accounts unique index
diff --git a/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/up.sql b/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/up.sql
new file mode 100644
index 000000000..1d8734023
--- /dev/null
+++ b/internal/storage/bucket/migrations/19-accounts-recreate-unique-index/up.sql
@@ -0,0 +1,9 @@
+-- There is already a covering unique index on the accounts table (including the seq column).
+-- As the next migration will remove the seq column, we have to create a new index without it
+-- (PostgreSQL will automatically drop the old index when the column is removed).
+-- We create the index concurrently to avoid locking the table, and since an equivalent unique index already exists, the creation should not fail.
+--
+-- This index gets a dedicated migration because, as the documentation mentions (https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-MULTI-STATEMENT),
+-- multi-statement queries are automatically wrapped inside a transaction block, and it is forbidden
+-- to create an index concurrently inside a transaction block.
+create unique index concurrently accounts_ledger2 on "{{.Bucket}}".accounts (ledger, address)
\ No newline at end of file
diff --git a/internal/storage/bucket/migrations/19-clean-database/notes.yaml b/internal/storage/bucket/migrations/20-clean-database/notes.yaml
similarity index 100%
rename from internal/storage/bucket/migrations/19-clean-database/notes.yaml
rename to internal/storage/bucket/migrations/20-clean-database/notes.yaml
diff --git a/internal/storage/bucket/migrations/19-clean-database/up.sql b/internal/storage/bucket/migrations/20-clean-database/up.sql
similarity index 79%
rename from internal/storage/bucket/migrations/19-clean-database/up.sql
rename to internal/storage/bucket/migrations/20-clean-database/up.sql
index 39597ae54..92aa70f53 100644
--- a/internal/storage/bucket/migrations/19-clean-database/up.sql
+++ b/internal/storage/bucket/migrations/20-clean-database/up.sql
@@ -73,4 +73,54 @@ alter table accounts_metadata
 drop column accounts_seq;
 
 alter table transactions
-drop column seq;
\ No newline at end of file
+drop column seq;
+
+alter table accounts
+drop column seq;
+
+-- Rename the index created in the previous migration: dropping the seq column of the accounts table automatically dropped the old accounts_ledger index.
+alter index accounts_ledger2
+rename to accounts_ledger;
+
+create or replace function set_log_hash()
+    returns trigger
+    security definer
+    language plpgsql
+as
+$$
+declare
+    previousHash bytea;
+    marshalledAsJSON varchar;
+begin
+    select hash into previousHash
+    from logs
+    where ledger = new.ledger
+    order by id desc
+    limit 1;
+
+    -- Select only the fields participating in the hash on the backend, and format the JSON representation the same way.
+    select '{' ||
+           '"type":"' || new.type || '",' ||
+           '"data":' || encode(new.memento, 'escape') || ',' ||
+           '"date":"' || (to_json(new.date::timestamp)#>>'{}') || 'Z",' ||
+           '"idempotencyKey":"' || coalesce(new.idempotency_key, '') || '",' ||
+           '"id":0,' ||
+           '"hash":null' ||
+           '}' into marshalledAsJSON;
+
+    new.hash = (
+        select public.digest(
+            case
+                when previousHash is null
+                then marshalledAsJSON::bytea
+                else '"' || encode(previousHash::bytea, 'base64')::bytea || E'"\n' ||
+                     convert_to(marshalledAsJSON, 'LATIN1')::bytea
+            end || E'\n', 'sha256'::text
+        )
+    );
+
+    return new;
+end;
+$$ set search_path from current;
+
+alter table logs drop column seq;
\ No newline at end of file
diff --git a/internal/storage/driver/driver.go b/internal/storage/driver/driver.go
index 08f6e8a3e..2e220743b 100644
--- a/internal/storage/driver/driver.go
+++ b/internal/storage/driver/driver.go
@@ -35,10 +35,6 @@ type Driver struct {
 
 func (d *Driver) CreateLedger(ctx context.Context, l *ledger.Ledger) (*ledgerstore.Store, error) {
-	if l.Metadata == nil {
-		l.Metadata = metadata.Metadata{}
-	}
-
 	b := bucket.New(d.db, l.Bucket)
 	if err := b.Migrate(ctx, d.tracer); err != nil {
 		return nil, fmt.Errorf("migrating bucket: %w", err)
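Note on the dedicated migration for the index build: the constraint described in the comments of 19-accounts-recreate-unique-index/up.sql comes from PostgreSQL itself, not from the migration tooling. A minimal sketch of the failure mode, assuming a plain psql session and an unqualified accounts table (the real migration targets the templated "{{.Bucket}}" schema); the error text is PostgreSQL's own:

```sql
-- Inside an explicit transaction block, the concurrent build is rejected outright:
begin;
create unique index concurrently accounts_ledger2 on accounts (ledger, address);
-- ERROR:  CREATE INDEX CONCURRENTLY cannot run inside a transaction block
rollback;

-- Issued as a standalone statement, the build proceeds without blocking
-- concurrent writes to the table:
create unique index concurrently accounts_ledger2 on accounts (ledger, address);
```

Since a multi-statement query is implicitly wrapped in a single transaction (per the protocol-flow documentation linked in the migration comment), keeping this statement alone in its own up.sql is what keeps it outside any transaction block.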