From be2497c29764d0a13bf34799293b787538448982 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Wed, 4 Dec 2024 12:14:41 +0000 Subject: [PATCH 01/26] feat: change postgres PK type use INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY instead of SERIAL ref: https://wiki.postgresql.org/wiki/Don%27t_Do_This#Don.27t_use_serial Signed-off-by: Michal Fiedorowicz --- .../migrations/00001_ingestion_logs.sql | 10 ++-- .../postgres/migrations/00002_change_sets.sql | 46 ++++++++++--------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql index b56982fa..3499c320 100644 --- a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql +++ b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql @@ -3,7 +3,7 @@ -- Create the ingestion_logs table CREATE TABLE IF NOT EXISTS ingestion_logs ( - id SERIAL PRIMARY KEY, + id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, ingestion_log_ksuid CHAR(27) NOT NULL, data_type VARCHAR(255), state INTEGER, @@ -21,10 +21,10 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_ingestion_log_ksuid ON ingestion_logs(ingestion_log_ksuid); -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_data_type ON ingestion_logs(data_type); -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_state ON ingestion_logs(state); -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_request_id ON ingestion_logs(request_id); +CREATE INDEX IF NOT EXISTS idx_ingestion_logs_ingestion_log_ksuid ON ingestion_logs (ingestion_log_ksuid); +CREATE INDEX IF NOT EXISTS idx_ingestion_logs_data_type ON ingestion_logs (data_type); +CREATE INDEX IF NOT EXISTS idx_ingestion_logs_state ON ingestion_logs (state); +CREATE INDEX IF NOT EXISTS idx_ingestion_logs_request_id ON ingestion_logs (request_id); -- +goose Down diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index 3944b49f..6da52de4 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -3,42 +3,44 @@ -- Create the change_sets table CREATE TABLE IF NOT EXISTS change_sets ( - id SERIAL PRIMARY KEY, + id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, change_set_ksuid CHAR(27) NOT NULL, - ingestion_log_id INTEGER NOT NULL, - branch_name VARCHAR(255), - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP + ingestion_log_id INTEGER NOT NULL, + branch_name VARCHAR(255), + created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_ksuid ON change_sets(change_set_ksuid); +CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_ksuid ON change_sets (change_set_ksuid); -- Create the changes table CREATE TABLE IF NOT EXISTS changes ( - id SERIAL PRIMARY KEY, - change_ksuid CHAR(27) NOT NULL, - change_set_id INTEGER NOT NULL, - change_type VARCHAR(50) NOT NULL, - object_type VARCHAR(100) NOT NULL, - object_id INTEGER, - object_version INTEGER, - data JSONB, + id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + change_ksuid CHAR(27) NOT NULL, + change_set_id INTEGER NOT NULL, + change_type VARCHAR(50) NOT NULL, + object_type VARCHAR(100) NOT NULL, + 
object_id INTEGER, + object_version INTEGER, + data JSONB, sequence_number INTEGER, - created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, - updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP + created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_changes_change_ksuid ON changes(change_ksuid); -CREATE INDEX IF NOT EXISTS idx_changes_change_set_id ON changes(change_set_id); -CREATE INDEX IF NOT EXISTS idx_changes_change_type ON changes(change_type); -CREATE INDEX IF NOT EXISTS idx_changes_object_type ON changes(object_type); +CREATE INDEX IF NOT EXISTS idx_changes_change_ksuid ON changes (change_ksuid); +CREATE INDEX IF NOT EXISTS idx_changes_change_set_id ON changes (change_set_id); +CREATE INDEX IF NOT EXISTS idx_changes_change_type ON changes (change_type); +CREATE INDEX IF NOT EXISTS idx_changes_object_type ON changes (object_type); -- Add foreign key constraints -ALTER TABLE change_sets ADD CONSTRAINT fk_change_sets_ingestion_logs FOREIGN KEY (ingestion_log_id) REFERENCES ingestion_logs(id); -ALTER TABLE changes ADD CONSTRAINT fk_changes_change_sets FOREIGN KEY (change_set_id) REFERENCES change_sets(id); +ALTER TABLE change_sets + ADD CONSTRAINT fk_change_sets_ingestion_logs FOREIGN KEY (ingestion_log_id) REFERENCES ingestion_logs (id); +ALTER TABLE changes + ADD CONSTRAINT fk_changes_change_sets FOREIGN KEY (change_set_id) REFERENCES change_sets (id); -- +goose Down From 0199542ec42b2731dbbf1b01c5fda4254a51972b Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Wed, 4 Dec 2024 12:22:39 +0000 Subject: [PATCH 02/26] feat: reconciler - implement postgres.CreateIngestionLog Signed-off-by: Michal Fiedorowicz --- diode-server/dbstore/postgres/repositories.go | 34 ++++++++++++++++--- .../reconciler/ingestion_processor.go | 6 ++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/diode-server/dbstore/postgres/repositories.go b/diode-server/dbstore/postgres/repositories.go index 89024cf1..a8f5561d 100644 --- a/diode-server/dbstore/postgres/repositories.go +++ b/diode-server/dbstore/postgres/repositories.go @@ -3,6 +3,11 @@ package postgres import ( "context" "errors" + "fmt" + + "github.com/jackc/pgx/v5/pgtype" + "github.com/jackc/pgx/v5/pgxpool" + "google.golang.org/protobuf/encoding/protojson" "github.com/netboxlabs/diode/diode-server/gen/dbstore/postgres" "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" @@ -10,19 +15,40 @@ import ( // IngestionLogRepository allows interacting with ingestion logs. type IngestionLogRepository struct { + pool *pgxpool.Pool queries *postgres.Queries } // NewIngestionLogRepository creates a new IngestionLogRepository. -func NewIngestionLogRepository(db postgres.DBTX) *IngestionLogRepository { +func NewIngestionLogRepository(pool *pgxpool.Pool) *IngestionLogRepository { return &IngestionLogRepository{ - queries: postgres.New(db), + pool: pool, + queries: postgres.New(pool), } } // CreateIngestionLog creates a new ingestion log. 
-func (r *IngestionLogRepository) CreateIngestionLog(_ context.Context, _ *reconcilerpb.IngestionLog, _ []byte) error { - return errors.New("not implemented") +func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) error { + entityJSON, err := protojson.Marshal(ingestionLog.Entity) + if err != nil { + return fmt.Errorf("failed to marshal entity: %w", err) + } + params := postgres.CreateIngestionLogParams{ + IngestionLogKsuid: ingestionLog.Id, + DataType: pgtype.Text{String: ingestionLog.DataType, Valid: true}, + State: pgtype.Int4{Int32: int32(ingestionLog.State), Valid: true}, + RequestID: pgtype.Text{String: ingestionLog.RequestId, Valid: true}, + IngestionTs: pgtype.Int8{Int64: ingestionLog.IngestionTs, Valid: true}, + ProducerAppName: pgtype.Text{String: ingestionLog.ProducerAppName, Valid: true}, + ProducerAppVersion: pgtype.Text{String: ingestionLog.ProducerAppVersion, Valid: true}, + SdkName: pgtype.Text{String: ingestionLog.SdkName, Valid: true}, + SdkVersion: pgtype.Text{String: ingestionLog.SdkVersion, Valid: true}, + Entity: entityJSON, + SourceMetadata: sourceMetadata, + } + + _, err = r.queries.CreateIngestionLog(ctx, params) + return err } // ChangeSetRepository allows interacting with change sets. diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index f61bd1fd..6befc467 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -439,6 +439,12 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq continue } + if err = p.ingestionLogRepository.CreateIngestionLog(ctx, ingestionLog, nil); err != nil { + p.logger.Debug("failed to create ingestion log in ingestion log repo", "error", err) + errs = append(errs, fmt.Errorf("failed to create ingestion log: %v", err)) + continue + } + generateIngestionLogChan <- IngestionLogToProcess{ key: key, ingestionLog: ingestionLog, From da907600f1ab97a9c86e8b098c2586d10e636309 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Wed, 4 Dec 2024 12:30:34 +0000 Subject: [PATCH 03/26] feat: change postgres char to varchar ref: https://wiki.postgresql.org/wiki/Don%27t_Do_This#Don.27t_use_char.28n.29 Signed-off-by: Michal Fiedorowicz --- .../dbstore/postgres/migrations/00001_ingestion_logs.sql | 2 +- .../dbstore/postgres/migrations/00002_change_sets.sql | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql index 3499c320..32da304b 100644 --- a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql +++ b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - ingestion_log_ksuid CHAR(27) NOT NULL, + ingestion_log_ksuid VARCHAR(27) NOT NULL, data_type VARCHAR(255), state INTEGER, request_id VARCHAR(255), diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index 6da52de4..ff75db24 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS change_sets ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_set_ksuid 
CHAR(27) NOT NULL, + change_set_ksuid VARCHAR(27) NOT NULL, ingestion_log_id INTEGER NOT NULL, branch_name VARCHAR(255), created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, @@ -18,7 +18,7 @@ CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_ksuid ON change_sets (chan CREATE TABLE IF NOT EXISTS changes ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_ksuid CHAR(27) NOT NULL, + change_ksuid VARCHAR(27) NOT NULL, change_set_id INTEGER NOT NULL, change_type VARCHAR(50) NOT NULL, object_type VARCHAR(100) NOT NULL, From b469c387f2289d60c029cf15a692daa4caedd670 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Thu, 5 Dec 2024 10:15:13 +0000 Subject: [PATCH 04/26] chore: SQL format Signed-off-by: Michal Fiedorowicz --- .../dbstore/postgres/migrations/00002_change_sets.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index ff75db24..82f76618 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -5,7 +5,7 @@ CREATE TABLE IF NOT EXISTS change_sets ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, change_set_ksuid VARCHAR(27) NOT NULL, - ingestion_log_id INTEGER NOT NULL, + ingestion_log_id INTEGER NOT NULL, branch_name VARCHAR(255), created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP @@ -18,7 +18,7 @@ CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_ksuid ON change_sets (chan CREATE TABLE IF NOT EXISTS changes ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_ksuid VARCHAR(27) NOT NULL, + change_ksuid VARCHAR(27) NOT NULL, change_set_id INTEGER NOT NULL, change_type VARCHAR(50) NOT NULL, object_type VARCHAR(100) NOT NULL, From 013e630c0df61698e32c35b89a8c362af9d2d109 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:07:44 +0000 Subject: [PATCH 05/26] feat: update postgres schemas and queries generate code with sqlc and mockery Signed-off-by: Michal Fiedorowicz --- .../migrations/00001_ingestion_logs.sql | 4 +- .../postgres/migrations/00002_change_sets.sql | 22 +- .../dbstore/postgres/queries/change_sets.sql | 4 +- .../postgres/queries/ingestion_logs.sql | 39 +++- .../gen/dbstore/postgres/change_sets.sql.go | 24 +- .../dbstore/postgres/ingestion_logs.sql.go | 212 +++++++++++++++++- diode-server/gen/dbstore/postgres/types.go | 22 +- .../reconciler/mocks/changesetrepository.go | 51 +++-- .../mocks/ingestionlogrepository.go | 196 +++++++++++++++- diode-server/reconciler/repositories.go | 8 +- 10 files changed, 520 insertions(+), 62 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql index 32da304b..05750377 100644 --- a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql +++ b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - ingestion_log_ksuid VARCHAR(27) NOT NULL, + ingestion_log_uuid VARCHAR(255) NOT NULL, data_type VARCHAR(255), state INTEGER, request_id VARCHAR(255), @@ -21,7 +21,7 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_ingestion_log_ksuid ON ingestion_logs (ingestion_log_ksuid); 
+CREATE INDEX IF NOT EXISTS idx_ingestion_logs_ingestion_log_uuid ON ingestion_logs (ingestion_log_uuid); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_data_type ON ingestion_logs (data_type); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_state ON ingestion_logs (state); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_request_id ON ingestion_logs (request_id); diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index 82f76618..d40c3a0b 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -4,21 +4,21 @@ CREATE TABLE IF NOT EXISTS change_sets ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_set_ksuid VARCHAR(27) NOT NULL, - ingestion_log_id INTEGER NOT NULL, - branch_name VARCHAR(255), + change_set_uuid VARCHAR(255) NOT NULL, + ingestion_log_id INTEGER NOT NULL, + branch_id VARCHAR(255), created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_ksuid ON change_sets (change_set_ksuid); +CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_uuid ON change_sets (change_set_uuid); -- Create the changes table CREATE TABLE IF NOT EXISTS changes ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_ksuid VARCHAR(27) NOT NULL, + change_uuid VARCHAR(255) NOT NULL, change_set_id INTEGER NOT NULL, change_type VARCHAR(50) NOT NULL, object_type VARCHAR(100) NOT NULL, @@ -31,7 +31,7 @@ CREATE TABLE IF NOT EXISTS changes ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_changes_change_ksuid ON changes (change_ksuid); +CREATE INDEX IF NOT EXISTS idx_changes_change_uuid ON changes (change_uuid); CREATE INDEX IF NOT EXISTS idx_changes_change_set_id ON changes (change_set_id); CREATE INDEX IF NOT EXISTS idx_changes_change_type ON changes (change_type); CREATE INDEX IF NOT EXISTS idx_changes_object_type ON changes (object_type); @@ -42,8 +42,18 @@ ALTER TABLE change_sets ALTER TABLE changes ADD CONSTRAINT fk_changes_change_sets FOREIGN KEY (change_set_id) REFERENCES change_sets (id); +CREATE VIEW changes_view AS +( +SELECT changes.* +FROM change_sets + LEFT JOIN changes on change_sets.id = changes.change_set_id + ); + -- +goose Down +-- Drop the changes_view view +DROP VIEW IF EXISTS changes_view; + -- Drop the changes table DROP TABLE changes; diff --git a/diode-server/dbstore/postgres/queries/change_sets.sql b/diode-server/dbstore/postgres/queries/change_sets.sql index 280f24c6..9b440c91 100644 --- a/diode-server/dbstore/postgres/queries/change_sets.sql +++ b/diode-server/dbstore/postgres/queries/change_sets.sql @@ -1,12 +1,12 @@ -- name: CreateChangeSet :one -INSERT INTO change_sets (change_set_ksuid, ingestion_log_id, branch_name) +INSERT INTO change_sets (change_set_uuid, ingestion_log_id, branch_id) VALUES ($1, $2, $3) RETURNING *; -- name: CreateChange :one -INSERT INTO changes (change_ksuid, change_set_id, change_type, object_type, object_id, object_version, data, +INSERT INTO changes (change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING *; diff --git a/diode-server/dbstore/postgres/queries/ingestion_logs.sql b/diode-server/dbstore/postgres/queries/ingestion_logs.sql index 50fc3e21..d5966f73 100644 --- 
a/diode-server/dbstore/postgres/queries/ingestion_logs.sql +++ b/diode-server/dbstore/postgres/queries/ingestion_logs.sql @@ -1,4 +1,39 @@ -- name: CreateIngestionLog :one -INSERT INTO ingestion_logs (ingestion_log_ksuid, data_type, state, request_id, ingestion_ts, producer_app_name, +INSERT INTO ingestion_logs (ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, source_metadata) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING *; +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) +RETURNING *; + +-- name: UpdateIngestionLogStateWithError :exec +UPDATE ingestion_logs +SET state = $2, + error = $3 +WHERE id = $1 +RETURNING *; + +-- name: CountIngestionLogsPerState :many +SELECT state, COUNT(*) AS count +FROM ingestion_logs +GROUP BY state; + +-- name: RetrieveIngestionLogs :many +SELECT * +FROM ingestion_logs +WHERE (state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) + AND (data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) + AND (ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) + AND (ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) +ORDER BY id DESC +LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); + +-- name: RetrieveIngestionLogsWithChangeSets :many +SELECT sqlc.embed(ingestion_logs), sqlc.embed(change_sets), sqlc.embed(changes_view) +FROM ingestion_logs + LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id + LEFT JOIN changes_view on change_sets.id = changes_view.change_set_id +WHERE (ingestion_logs.state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) + AND (ingestion_logs.data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) + AND (ingestion_logs.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) + AND (ingestion_logs.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) +ORDER BY ingestion_logs.id DESC, changes_view.sequence_number ASC +LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); diff --git a/diode-server/gen/dbstore/postgres/change_sets.sql.go b/diode-server/gen/dbstore/postgres/change_sets.sql.go index 4b00f0c3..09b3d06b 100644 --- a/diode-server/gen/dbstore/postgres/change_sets.sql.go +++ b/diode-server/gen/dbstore/postgres/change_sets.sql.go @@ -13,14 +13,14 @@ import ( const createChange = `-- name: CreateChange :one -INSERT INTO changes (change_ksuid, change_set_id, change_type, object_type, object_id, object_version, data, +INSERT INTO changes (change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) -RETURNING id, change_ksuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at +RETURNING id, change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at ` type CreateChangeParams struct { - ChangeKsuid string `json:"change_ksuid"` + ChangeUuid string `json:"change_uuid"` ChangeSetID int32 `json:"change_set_id"` ChangeType string `json:"change_type"` ObjectType string `json:"object_type"` @@ -32,7 +32,7 @@ type CreateChangeParams struct { func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Change, error) { row := q.db.QueryRow(ctx, createChange, - arg.ChangeKsuid, + arg.ChangeUuid, arg.ChangeSetID, 
arg.ChangeType, arg.ObjectType, @@ -44,7 +44,7 @@ func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Cha var i Change err := row.Scan( &i.ID, - &i.ChangeKsuid, + &i.ChangeUuid, &i.ChangeSetID, &i.ChangeType, &i.ObjectType, @@ -60,25 +60,25 @@ func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Cha const createChangeSet = `-- name: CreateChangeSet :one -INSERT INTO change_sets (change_set_ksuid, ingestion_log_id, branch_name) +INSERT INTO change_sets (change_set_uuid, ingestion_log_id, branch_id) VALUES ($1, $2, $3) -RETURNING id, change_set_ksuid, ingestion_log_id, branch_name, created_at, updated_at +RETURNING id, change_set_uuid, ingestion_log_id, branch_id, created_at, updated_at ` type CreateChangeSetParams struct { - ChangeSetKsuid string `json:"change_set_ksuid"` + ChangeSetUuid string `json:"change_set_uuid"` IngestionLogID int32 `json:"ingestion_log_id"` - BranchName pgtype.Text `json:"branch_name"` + BranchID pgtype.Text `json:"branch_id"` } func (q *Queries) CreateChangeSet(ctx context.Context, arg CreateChangeSetParams) (ChangeSet, error) { - row := q.db.QueryRow(ctx, createChangeSet, arg.ChangeSetKsuid, arg.IngestionLogID, arg.BranchName) + row := q.db.QueryRow(ctx, createChangeSet, arg.ChangeSetUuid, arg.IngestionLogID, arg.BranchID) var i ChangeSet err := row.Scan( &i.ID, - &i.ChangeSetKsuid, + &i.ChangeSetUuid, &i.IngestionLogID, - &i.BranchName, + &i.BranchID, &i.CreatedAt, &i.UpdatedAt, ) diff --git a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go index 4ce844a0..e7fed2d1 100644 --- a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go +++ b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go @@ -11,14 +11,46 @@ import ( "github.com/jackc/pgx/v5/pgtype" ) +const countIngestionLogsPerState = `-- name: CountIngestionLogsPerState :many +SELECT state, COUNT(*) AS count +FROM ingestion_logs +GROUP BY state +` + +type CountIngestionLogsPerStateRow struct { + State pgtype.Int4 `json:"state"` + Count int64 `json:"count"` +} + +func (q *Queries) CountIngestionLogsPerState(ctx context.Context) ([]CountIngestionLogsPerStateRow, error) { + rows, err := q.db.Query(ctx, countIngestionLogsPerState) + if err != nil { + return nil, err + } + defer rows.Close() + var items []CountIngestionLogsPerStateRow + for rows.Next() { + var i CountIngestionLogsPerStateRow + if err := rows.Scan(&i.State, &i.Count); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const createIngestionLog = `-- name: CreateIngestionLog :one -INSERT INTO ingestion_logs (ingestion_log_ksuid, data_type, state, request_id, ingestion_ts, producer_app_name, +INSERT INTO ingestion_logs (ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, source_metadata) -VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING id, ingestion_log_ksuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) +RETURNING id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at ` type CreateIngestionLogParams struct { - IngestionLogKsuid string 
`json:"ingestion_log_ksuid"` + IngestionLogUuid string `json:"ingestion_log_uuid"` DataType pgtype.Text `json:"data_type"` State pgtype.Int4 `json:"state"` RequestID pgtype.Text `json:"request_id"` @@ -33,7 +65,7 @@ type CreateIngestionLogParams struct { func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLogParams) (IngestionLog, error) { row := q.db.QueryRow(ctx, createIngestionLog, - arg.IngestionLogKsuid, + arg.IngestionLogUuid, arg.DataType, arg.State, arg.RequestID, @@ -48,7 +80,7 @@ func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLog var i IngestionLog err := row.Scan( &i.ID, - &i.IngestionLogKsuid, + &i.IngestionLogUuid, &i.DataType, &i.State, &i.RequestID, @@ -65,3 +97,173 @@ func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLog ) return i, err } + +const retrieveIngestionLogs = `-- name: RetrieveIngestionLogs :many +SELECT id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at +FROM ingestion_logs +WHERE (state = $1 OR $1 IS NULL) + AND (data_type = $2 OR $2 IS NULL) + AND (ingestion_ts >= $3 OR $3 IS NULL) + AND (ingestion_ts <= $4 OR $4 IS NULL) +ORDER BY id DESC +LIMIT $6 OFFSET $5 +` + +type RetrieveIngestionLogsParams struct { + State pgtype.Int4 `json:"state"` + DataType pgtype.Text `json:"data_type"` + IngestionTsStart pgtype.Int8 `json:"ingestion_ts_start"` + IngestionTsEnd pgtype.Int8 `json:"ingestion_ts_end"` + Offset int32 `json:"offset"` + Limit int32 `json:"limit"` +} + +func (q *Queries) RetrieveIngestionLogs(ctx context.Context, arg RetrieveIngestionLogsParams) ([]IngestionLog, error) { + rows, err := q.db.Query(ctx, retrieveIngestionLogs, + arg.State, + arg.DataType, + arg.IngestionTsStart, + arg.IngestionTsEnd, + arg.Offset, + arg.Limit, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []IngestionLog + for rows.Next() { + var i IngestionLog + if err := rows.Scan( + &i.ID, + &i.IngestionLogUuid, + &i.DataType, + &i.State, + &i.RequestID, + &i.IngestionTs, + &i.ProducerAppName, + &i.ProducerAppVersion, + &i.SdkName, + &i.SdkVersion, + &i.Entity, + &i.Error, + &i.SourceMetadata, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const retrieveIngestionLogsWithChangeSets = `-- name: RetrieveIngestionLogsWithChangeSets :many +SELECT ingestion_logs.id, ingestion_logs.ingestion_log_uuid, ingestion_logs.data_type, ingestion_logs.state, ingestion_logs.request_id, ingestion_logs.ingestion_ts, ingestion_logs.producer_app_name, ingestion_logs.producer_app_version, ingestion_logs.sdk_name, ingestion_logs.sdk_version, ingestion_logs.entity, ingestion_logs.error, ingestion_logs.source_metadata, ingestion_logs.created_at, ingestion_logs.updated_at, change_sets.id, change_sets.change_set_uuid, change_sets.ingestion_log_id, change_sets.branch_id, change_sets.created_at, change_sets.updated_at, changes_view.id, changes_view.change_uuid, changes_view.change_set_id, changes_view.change_type, changes_view.object_type, changes_view.object_id, changes_view.object_version, changes_view.data, changes_view.sequence_number, changes_view.created_at, changes_view.updated_at +FROM ingestion_logs + LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id + LEFT JOIN changes_view on change_sets.id = 
changes_view.change_set_id +WHERE (ingestion_logs.state = $1 OR $1 IS NULL) + AND (ingestion_logs.data_type = $2 OR $2 IS NULL) + AND (ingestion_logs.ingestion_ts >= $3 OR $3 IS NULL) + AND (ingestion_logs.ingestion_ts <= $4 OR $4 IS NULL) +ORDER BY ingestion_logs.id DESC, changes_view.sequence_number ASC +LIMIT $6 OFFSET $5 +` + +type RetrieveIngestionLogsWithChangeSetsParams struct { + State pgtype.Int4 `json:"state"` + DataType pgtype.Text `json:"data_type"` + IngestionTsStart pgtype.Int8 `json:"ingestion_ts_start"` + IngestionTsEnd pgtype.Int8 `json:"ingestion_ts_end"` + Offset int32 `json:"offset"` + Limit int32 `json:"limit"` +} + +type RetrieveIngestionLogsWithChangeSetsRow struct { + IngestionLog IngestionLog `json:"ingestion_log"` + ChangeSet ChangeSet `json:"change_set"` + ChangesView ChangesView `json:"changes_view"` +} + +func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg RetrieveIngestionLogsWithChangeSetsParams) ([]RetrieveIngestionLogsWithChangeSetsRow, error) { + rows, err := q.db.Query(ctx, retrieveIngestionLogsWithChangeSets, + arg.State, + arg.DataType, + arg.IngestionTsStart, + arg.IngestionTsEnd, + arg.Offset, + arg.Limit, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []RetrieveIngestionLogsWithChangeSetsRow + for rows.Next() { + var i RetrieveIngestionLogsWithChangeSetsRow + if err := rows.Scan( + &i.IngestionLog.ID, + &i.IngestionLog.IngestionLogUuid, + &i.IngestionLog.DataType, + &i.IngestionLog.State, + &i.IngestionLog.RequestID, + &i.IngestionLog.IngestionTs, + &i.IngestionLog.ProducerAppName, + &i.IngestionLog.ProducerAppVersion, + &i.IngestionLog.SdkName, + &i.IngestionLog.SdkVersion, + &i.IngestionLog.Entity, + &i.IngestionLog.Error, + &i.IngestionLog.SourceMetadata, + &i.IngestionLog.CreatedAt, + &i.IngestionLog.UpdatedAt, + &i.ChangeSet.ID, + &i.ChangeSet.ChangeSetUuid, + &i.ChangeSet.IngestionLogID, + &i.ChangeSet.BranchID, + &i.ChangeSet.CreatedAt, + &i.ChangeSet.UpdatedAt, + &i.ChangesView.ID, + &i.ChangesView.ChangeUuid, + &i.ChangesView.ChangeSetID, + &i.ChangesView.ChangeType, + &i.ChangesView.ObjectType, + &i.ChangesView.ObjectID, + &i.ChangesView.ObjectVersion, + &i.ChangesView.Data, + &i.ChangesView.SequenceNumber, + &i.ChangesView.CreatedAt, + &i.ChangesView.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const updateIngestionLogStateWithError = `-- name: UpdateIngestionLogStateWithError :exec +UPDATE ingestion_logs +SET state = $2, + error = $3 +WHERE id = $1 +RETURNING id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at +` + +type UpdateIngestionLogStateWithErrorParams struct { + ID int32 `json:"id"` + State pgtype.Int4 `json:"state"` + Error []byte `json:"error"` +} + +func (q *Queries) UpdateIngestionLogStateWithError(ctx context.Context, arg UpdateIngestionLogStateWithErrorParams) error { + _, err := q.db.Exec(ctx, updateIngestionLogStateWithError, arg.ID, arg.State, arg.Error) + return err +} diff --git a/diode-server/gen/dbstore/postgres/types.go b/diode-server/gen/dbstore/postgres/types.go index 9a9489a3..78b88c63 100644 --- a/diode-server/gen/dbstore/postgres/types.go +++ b/diode-server/gen/dbstore/postgres/types.go @@ -10,7 +10,7 @@ import ( type Change struct { ID int32 `json:"id"` - ChangeKsuid string `json:"change_ksuid"` + 
ChangeUuid string `json:"change_uuid"` ChangeSetID int32 `json:"change_set_id"` ChangeType string `json:"change_type"` ObjectType string `json:"object_type"` @@ -24,16 +24,30 @@ type Change struct { type ChangeSet struct { ID int32 `json:"id"` - ChangeSetKsuid string `json:"change_set_ksuid"` + ChangeSetUuid string `json:"change_set_uuid"` IngestionLogID int32 `json:"ingestion_log_id"` - BranchName pgtype.Text `json:"branch_name"` + BranchID pgtype.Text `json:"branch_id"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + UpdatedAt pgtype.Timestamptz `json:"updated_at"` +} + +type ChangesView struct { + ID pgtype.Int4 `json:"id"` + ChangeUuid pgtype.Text `json:"change_uuid"` + ChangeSetID pgtype.Int4 `json:"change_set_id"` + ChangeType pgtype.Text `json:"change_type"` + ObjectType pgtype.Text `json:"object_type"` + ObjectID pgtype.Int4 `json:"object_id"` + ObjectVersion pgtype.Int4 `json:"object_version"` + Data []byte `json:"data"` + SequenceNumber pgtype.Int4 `json:"sequence_number"` CreatedAt pgtype.Timestamptz `json:"created_at"` UpdatedAt pgtype.Timestamptz `json:"updated_at"` } type IngestionLog struct { ID int32 `json:"id"` - IngestionLogKsuid string `json:"ingestion_log_ksuid"` + IngestionLogUuid string `json:"ingestion_log_uuid"` DataType pgtype.Text `json:"data_type"` State pgtype.Int4 `json:"state"` RequestID pgtype.Text `json:"request_id"` diff --git a/diode-server/reconciler/mocks/changesetrepository.go b/diode-server/reconciler/mocks/changesetrepository.go index e80840a2..eb217f32 100644 --- a/diode-server/reconciler/mocks/changesetrepository.go +++ b/diode-server/reconciler/mocks/changesetrepository.go @@ -1,13 +1,13 @@ -// Code generated by mockery v2.49.1. DO NOT EDIT. +// Code generated by mockery v2.50.0. DO NOT EDIT. package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + changeset "github.com/netboxlabs/diode/diode-server/reconciler/changeset" - reconcilerpb "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" + mock "github.com/stretchr/testify/mock" ) // ChangeSetRepository is an autogenerated mock type for the ChangeSetRepository type @@ -23,22 +23,34 @@ func (_m *ChangeSetRepository) EXPECT() *ChangeSetRepository_Expecter { return &ChangeSetRepository_Expecter{mock: &_m.Mock} } -// CreateChangeSet provides a mock function with given fields: ctx, changeSet -func (_m *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet *reconcilerpb.ChangeSet) error { - ret := _m.Called(ctx, changeSet) +// CreateChangeSet provides a mock function with given fields: ctx, changeSet, ingestionLogID +func (_m *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { + ret := _m.Called(ctx, changeSet, ingestionLogID) if len(ret) == 0 { panic("no return value specified for CreateChangeSet") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.ChangeSet) error); ok { - r0 = rf(ctx, changeSet) + var r0 *int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) (*int32, error)); ok { + return rf(ctx, changeSet, ingestionLogID) + } + if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) *int32); ok { + r0 = rf(ctx, changeSet, ingestionLogID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*int32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, changeset.ChangeSet, int32) error); ok { + r1 = rf(ctx, changeSet, ingestionLogID) } else { - r0 = ret.Error(0) + 
r1 = ret.Error(1) } - return r0 + return r0, r1 } // ChangeSetRepository_CreateChangeSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateChangeSet' @@ -48,24 +60,25 @@ type ChangeSetRepository_CreateChangeSet_Call struct { // CreateChangeSet is a helper method to define mock.On call // - ctx context.Context -// - changeSet *reconcilerpb.ChangeSet -func (_e *ChangeSetRepository_Expecter) CreateChangeSet(ctx interface{}, changeSet interface{}) *ChangeSetRepository_CreateChangeSet_Call { - return &ChangeSetRepository_CreateChangeSet_Call{Call: _e.mock.On("CreateChangeSet", ctx, changeSet)} +// - changeSet changeset.ChangeSet +// - ingestionLogID int32 +func (_e *ChangeSetRepository_Expecter) CreateChangeSet(ctx interface{}, changeSet interface{}, ingestionLogID interface{}) *ChangeSetRepository_CreateChangeSet_Call { + return &ChangeSetRepository_CreateChangeSet_Call{Call: _e.mock.On("CreateChangeSet", ctx, changeSet, ingestionLogID)} } -func (_c *ChangeSetRepository_CreateChangeSet_Call) Run(run func(ctx context.Context, changeSet *reconcilerpb.ChangeSet)) *ChangeSetRepository_CreateChangeSet_Call { +func (_c *ChangeSetRepository_CreateChangeSet_Call) Run(run func(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32)) *ChangeSetRepository_CreateChangeSet_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*reconcilerpb.ChangeSet)) + run(args[0].(context.Context), args[1].(changeset.ChangeSet), args[2].(int32)) }) return _c } -func (_c *ChangeSetRepository_CreateChangeSet_Call) Return(_a0 error) *ChangeSetRepository_CreateChangeSet_Call { - _c.Call.Return(_a0) +func (_c *ChangeSetRepository_CreateChangeSet_Call) Return(_a0 *int32, _a1 error) *ChangeSetRepository_CreateChangeSet_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *ChangeSetRepository_CreateChangeSet_Call) RunAndReturn(run func(context.Context, *reconcilerpb.ChangeSet) error) *ChangeSetRepository_CreateChangeSet_Call { +func (_c *ChangeSetRepository_CreateChangeSet_Call) RunAndReturn(run func(context.Context, changeset.ChangeSet, int32) (*int32, error)) *ChangeSetRepository_CreateChangeSet_Call { _c.Call.Return(run) return _c } diff --git a/diode-server/reconciler/mocks/ingestionlogrepository.go b/diode-server/reconciler/mocks/ingestionlogrepository.go index c9371d2e..8b096854 100644 --- a/diode-server/reconciler/mocks/ingestionlogrepository.go +++ b/diode-server/reconciler/mocks/ingestionlogrepository.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.49.1. DO NOT EDIT. +// Code generated by mockery v2.50.0. DO NOT EDIT. 
package mocks @@ -23,22 +23,92 @@ func (_m *IngestionLogRepository) EXPECT() *IngestionLogRepository_Expecter { return &IngestionLogRepository_Expecter{mock: &_m.Mock} } +// CountIngestionLogsPerState provides a mock function with given fields: ctx +func (_m *IngestionLogRepository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for CountIngestionLogsPerState") + } + + var r0 map[reconcilerpb.State]int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (map[reconcilerpb.State]int32, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) map[reconcilerpb.State]int32); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[reconcilerpb.State]int32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IngestionLogRepository_CountIngestionLogsPerState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountIngestionLogsPerState' +type IngestionLogRepository_CountIngestionLogsPerState_Call struct { + *mock.Call +} + +// CountIngestionLogsPerState is a helper method to define mock.On call +// - ctx context.Context +func (_e *IngestionLogRepository_Expecter) CountIngestionLogsPerState(ctx interface{}) *IngestionLogRepository_CountIngestionLogsPerState_Call { + return &IngestionLogRepository_CountIngestionLogsPerState_Call{Call: _e.mock.On("CountIngestionLogsPerState", ctx)} +} + +func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) Run(run func(ctx context.Context)) *IngestionLogRepository_CountIngestionLogsPerState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) Return(_a0 map[reconcilerpb.State]int32, _a1 error) *IngestionLogRepository_CountIngestionLogsPerState_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) RunAndReturn(run func(context.Context) (map[reconcilerpb.State]int32, error)) *IngestionLogRepository_CountIngestionLogsPerState_Call { + _c.Call.Return(run) + return _c +} + // CreateIngestionLog provides a mock function with given fields: ctx, ingestionLog, sourceMetadata -func (_m *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) error { +func (_m *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { ret := _m.Called(ctx, ingestionLog, sourceMetadata) if len(ret) == 0 { panic("no return value specified for CreateIngestionLog") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) error); ok { + var r0 *int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)); ok { + return rf(ctx, ingestionLog, sourceMetadata) + } + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) *int32); ok { r0 = rf(ctx, ingestionLog, sourceMetadata) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*int32) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.IngestionLog, []byte) error); ok { + r1 = rf(ctx, ingestionLog, 
sourceMetadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // IngestionLogRepository_CreateIngestionLog_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateIngestionLog' @@ -61,12 +131,122 @@ func (_c *IngestionLogRepository_CreateIngestionLog_Call) Run(run func(ctx conte return _c } -func (_c *IngestionLogRepository_CreateIngestionLog_Call) Return(_a0 error) *IngestionLogRepository_CreateIngestionLog_Call { +func (_c *IngestionLogRepository_CreateIngestionLog_Call) Return(_a0 *int32, _a1 error) *IngestionLogRepository_CreateIngestionLog_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *IngestionLogRepository_CreateIngestionLog_Call) RunAndReturn(run func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)) *IngestionLogRepository_CreateIngestionLog_Call { + _c.Call.Return(run) + return _c +} + +// RetrieveIngestionLogs provides a mock function with given fields: ctx, filter, limit, offset +func (_m *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { + ret := _m.Called(ctx, filter, limit, offset) + + if len(ret) == 0 { + panic("no return value specified for RetrieveIngestionLogs") + } + + var r0 []*reconcilerpb.IngestionLog + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)); ok { + return rf(ctx, filter, limit, offset) + } + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) []*reconcilerpb.IngestionLog); ok { + r0 = rf(ctx, filter, limit, offset) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*reconcilerpb.IngestionLog) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) error); ok { + r1 = rf(ctx, filter, limit, offset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IngestionLogRepository_RetrieveIngestionLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveIngestionLogs' +type IngestionLogRepository_RetrieveIngestionLogs_Call struct { + *mock.Call +} + +// RetrieveIngestionLogs is a helper method to define mock.On call +// - ctx context.Context +// - filter *reconcilerpb.RetrieveIngestionLogsRequest +// - limit int32 +// - offset int32 +func (_e *IngestionLogRepository_Expecter) RetrieveIngestionLogs(ctx interface{}, filter interface{}, limit interface{}, offset interface{}) *IngestionLogRepository_RetrieveIngestionLogs_Call { + return &IngestionLogRepository_RetrieveIngestionLogs_Call{Call: _e.mock.On("RetrieveIngestionLogs", ctx, filter, limit, offset)} +} + +func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) Run(run func(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32)) *IngestionLogRepository_RetrieveIngestionLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*reconcilerpb.RetrieveIngestionLogsRequest), args[2].(int32), args[3].(int32)) + }) + return _c +} + +func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) Return(_a0 []*reconcilerpb.IngestionLog, _a1 error) *IngestionLogRepository_RetrieveIngestionLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) RunAndReturn(run 
func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)) *IngestionLogRepository_RetrieveIngestionLogs_Call { + _c.Call.Return(run) + return _c +} + +// UpdateIngestionLogStateWithError provides a mock function with given fields: ctx, id, state, ingestionError +func (_m *IngestionLogRepository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { + ret := _m.Called(ctx, id, state, ingestionError) + + if len(ret) == 0 { + panic("no return value specified for UpdateIngestionLogStateWithError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error); ok { + r0 = rf(ctx, id, state, ingestionError) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// IngestionLogRepository_UpdateIngestionLogStateWithError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateIngestionLogStateWithError' +type IngestionLogRepository_UpdateIngestionLogStateWithError_Call struct { + *mock.Call +} + +// UpdateIngestionLogStateWithError is a helper method to define mock.On call +// - ctx context.Context +// - id int32 +// - state reconcilerpb.State +// - ingestionError *reconcilerpb.IngestionError +func (_e *IngestionLogRepository_Expecter) UpdateIngestionLogStateWithError(ctx interface{}, id interface{}, state interface{}, ingestionError interface{}) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { + return &IngestionLogRepository_UpdateIngestionLogStateWithError_Call{Call: _e.mock.On("UpdateIngestionLogStateWithError", ctx, id, state, ingestionError)} +} + +func (_c *IngestionLogRepository_UpdateIngestionLogStateWithError_Call) Run(run func(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError)) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int32), args[2].(reconcilerpb.State), args[3].(*reconcilerpb.IngestionError)) + }) + return _c +} + +func (_c *IngestionLogRepository_UpdateIngestionLogStateWithError_Call) Return(_a0 error) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { _c.Call.Return(_a0) return _c } -func (_c *IngestionLogRepository_CreateIngestionLog_Call) RunAndReturn(run func(context.Context, *reconcilerpb.IngestionLog, []byte) error) *IngestionLogRepository_CreateIngestionLog_Call { +func (_c *IngestionLogRepository_UpdateIngestionLogStateWithError_Call) RunAndReturn(run func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { _c.Call.Return(run) return _c } diff --git a/diode-server/reconciler/repositories.go b/diode-server/reconciler/repositories.go index 339f994b..90c98d31 100644 --- a/diode-server/reconciler/repositories.go +++ b/diode-server/reconciler/repositories.go @@ -4,14 +4,18 @@ import ( "context" "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" + "github.com/netboxlabs/diode/diode-server/reconciler/changeset" ) // IngestionLogRepository is an interface for interacting with ingestion logs. 
type IngestionLogRepository interface { - CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) error + CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) + UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error + RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) + CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) } // ChangeSetRepository is an interface for interacting with change sets. type ChangeSetRepository interface { - CreateChangeSet(ctx context.Context, changeSet *reconcilerpb.ChangeSet) error + CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) } From ad4918c81835a299239d07a3533eef70620c7b23 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:08:48 +0000 Subject: [PATCH 06/26] chore: regenerate mocks Signed-off-by: Michal Fiedorowicz --- diode-server/netboxdiodeplugin/mocks/netboxapi.go | 2 +- diode-server/reconciler/mocks/client.go | 4 ++-- diode-server/reconciler/mocks/redisclient.go | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/diode-server/netboxdiodeplugin/mocks/netboxapi.go b/diode-server/netboxdiodeplugin/mocks/netboxapi.go index fb7ab9c2..48b610a1 100644 --- a/diode-server/netboxdiodeplugin/mocks/netboxapi.go +++ b/diode-server/netboxdiodeplugin/mocks/netboxapi.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.49.1. DO NOT EDIT. +// Code generated by mockery v2.50.0. DO NOT EDIT. package mocks diff --git a/diode-server/reconciler/mocks/client.go b/diode-server/reconciler/mocks/client.go index 842ecd68..f2c2e177 100644 --- a/diode-server/reconciler/mocks/client.go +++ b/diode-server/reconciler/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.49.1. DO NOT EDIT. +// Code generated by mockery v2.50.0. DO NOT EDIT. package mocks @@ -25,7 +25,7 @@ func (_m *Client) EXPECT() *Client_Expecter { return &Client_Expecter{mock: &_m.Mock} } -// Close provides a mock function with given fields: +// Close provides a mock function with no fields func (_m *Client) Close() error { ret := _m.Called() diff --git a/diode-server/reconciler/mocks/redisclient.go b/diode-server/reconciler/mocks/redisclient.go index 74586f30..e2ede4a9 100644 --- a/diode-server/reconciler/mocks/redisclient.go +++ b/diode-server/reconciler/mocks/redisclient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.49.1. DO NOT EDIT. +// Code generated by mockery v2.50.0. DO NOT EDIT. 
package mocks @@ -23,7 +23,7 @@ func (_m *RedisClient) EXPECT() *RedisClient_Expecter { return &RedisClient_Expecter{mock: &_m.Mock} } -// Close provides a mock function with given fields: +// Close provides a mock function with no fields func (_m *RedisClient) Close() error { ret := _m.Called() @@ -238,7 +238,7 @@ func (_c *RedisClient_Ping_Call) RunAndReturn(run func(context.Context) *redis.S return _c } -// Pipeline provides a mock function with given fields: +// Pipeline provides a mock function with no fields func (_m *RedisClient) Pipeline() redis.Pipeliner { ret := _m.Called() From 8bcc1994a71773c04f9c3c7a4f60d7b301761c24 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:09:26 +0000 Subject: [PATCH 07/26] feat: add branch ID to change set Signed-off-by: Michal Fiedorowicz --- diode-server/reconciler/changeset/changeset.go | 1 + 1 file changed, 1 insertion(+) diff --git a/diode-server/reconciler/changeset/changeset.go b/diode-server/reconciler/changeset/changeset.go index 7a79170c..15fae7be 100644 --- a/diode-server/reconciler/changeset/changeset.go +++ b/diode-server/reconciler/changeset/changeset.go @@ -12,6 +12,7 @@ const ( type ChangeSet struct { ChangeSetID string `json:"change_set_id"` ChangeSet []Change `json:"change_set"` + BranchID *string `json:"branch_id,omitempty"` } // Change represents a change for the change set From 413b6c2d7a903a351dcd535e4de8a0911609c0c0 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:17:51 +0000 Subject: [PATCH 08/26] feat: implement storage of ingestion logs and change sets in postgres database Signed-off-by: Michal Fiedorowicz --- diode-server/cmd/reconciler/main.go | 2 +- diode-server/dbstore/postgres/repositories.go | 240 +++++++++++++++++- .../reconciler/ingestion_processor.go | 98 +++---- .../ingestion_processor_internal_test.go | 24 +- diode-server/reconciler/server.go | 32 ++- 5 files changed, 297 insertions(+), 99 deletions(-) diff --git a/diode-server/cmd/reconciler/main.go b/diode-server/cmd/reconciler/main.go index acce1777..c2103e3a 100644 --- a/diode-server/cmd/reconciler/main.go +++ b/diode-server/cmd/reconciler/main.go @@ -55,7 +55,7 @@ func main() { os.Exit(1) } - gRPCServer, err := reconciler.NewServer(ctx, s.Logger()) + gRPCServer, err := reconciler.NewServer(ctx, s.Logger(), ingestionLogRepo, changeSetRepo) if err != nil { s.Logger().Error("failed to instantiate gRPC server", "error", err) os.Exit(1) diff --git a/diode-server/dbstore/postgres/repositories.go b/diode-server/dbstore/postgres/repositories.go index a8f5561d..fe66e9d3 100644 --- a/diode-server/dbstore/postgres/repositories.go +++ b/diode-server/dbstore/postgres/repositories.go @@ -2,7 +2,7 @@ package postgres import ( "context" - "errors" + "encoding/json" "fmt" "github.com/jackc/pgx/v5/pgtype" @@ -10,7 +10,9 @@ import ( "google.golang.org/protobuf/encoding/protojson" "github.com/netboxlabs/diode/diode-server/gen/dbstore/postgres" + "github.com/netboxlabs/diode/diode-server/gen/diode/v1/diodepb" "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" + "github.com/netboxlabs/diode/diode-server/reconciler/changeset" ) // IngestionLogRepository allows interacting with ingestion logs. @@ -28,13 +30,13 @@ func NewIngestionLogRepository(pool *pgxpool.Pool) *IngestionLogRepository { } // CreateIngestionLog creates a new ingestion log. 
-func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) error { +func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { entityJSON, err := protojson.Marshal(ingestionLog.Entity) if err != nil { - return fmt.Errorf("failed to marshal entity: %w", err) + return nil, fmt.Errorf("failed to marshal entity: %w", err) } params := postgres.CreateIngestionLogParams{ - IngestionLogKsuid: ingestionLog.Id, + IngestionLogUuid: ingestionLog.Id, DataType: pgtype.Text{String: ingestionLog.DataType, Valid: true}, State: pgtype.Int4{Int32: int32(ingestionLog.State), Valid: true}, RequestID: pgtype.Text{String: ingestionLog.RequestId, Valid: true}, @@ -47,23 +49,241 @@ func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingesti SourceMetadata: sourceMetadata, } - _, err = r.queries.CreateIngestionLog(ctx, params) - return err + createdIngestionLog, err := r.queries.CreateIngestionLog(ctx, params) + if err != nil { + return nil, err + } + return &createdIngestionLog.ID, nil +} + +// UpdateIngestionLogStateWithError updates an ingestion log with a new state and error. +func (r *IngestionLogRepository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { + params := postgres.UpdateIngestionLogStateWithErrorParams{ + ID: id, + State: pgtype.Int4{Int32: int32(state), Valid: true}, + } + + if ingestionError != nil { + ingestionErrJSON, err := json.Marshal(ingestionError) + if err != nil { + return fmt.Errorf("failed to marshal error: %w", err) + } + params.Error = ingestionErrJSON + } + return r.queries.UpdateIngestionLogStateWithError(ctx, params) +} + +// CountIngestionLogsPerState counts ingestion logs per state. +func (r *IngestionLogRepository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { + counts, err := r.queries.CountIngestionLogsPerState(ctx) + if err != nil { + return nil, err + } + + stateCounts := make(map[reconcilerpb.State]int32) + for _, stateCount := range counts { + stateCounts[reconcilerpb.State(stateCount.State.Int32)] = int32(stateCount.Count) + } + return stateCounts, nil +} + +// RetrieveIngestionLogs retrieves ingestion logs. 
+func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { + params := postgres.RetrieveIngestionLogsWithChangeSetsParams{ + Limit: limit, + Offset: offset, + } + if filter.State != nil { + params.State = pgtype.Int4{Int32: int32(*filter.State), Valid: true} + } + if filter.DataType != "" { + params.DataType = pgtype.Text{String: filter.DataType, Valid: true} + } + if filter.IngestionTsStart > 0 { + params.IngestionTsStart = pgtype.Int8{Int64: filter.IngestionTsStart, Valid: true} + } + if filter.IngestionTsEnd > 0 { + params.IngestionTsEnd = pgtype.Int8{Int64: filter.IngestionTsEnd, Valid: true} + } + + rawIngestionLogs, err := r.queries.RetrieveIngestionLogsWithChangeSets(ctx, params) + if err != nil { + return nil, err + } + + changeSetsMap := make(map[int32]*changeset.ChangeSet) + for _, row := range rawIngestionLogs { + var changeData map[string]any + if row.ChangesView.Data != nil { + if err := json.Unmarshal(row.ChangesView.Data, &changeData); err != nil { + return nil, fmt.Errorf("failed to unmarshal change data: %w", err) + } + } + + change := changeset.Change{ + ChangeID: row.ChangesView.ChangeUuid.String, + ChangeType: row.ChangesView.ChangeType.String, + ObjectType: row.ChangesView.ObjectType.String, + Data: changeData, + } + objID := int(row.ChangesView.ObjectID.Int32) + if row.ChangesView.ObjectID.Valid { + change.ObjectID = &objID + } + objVersion := int(row.ChangesView.ObjectVersion.Int32) + if row.ChangesView.ObjectVersion.Valid { + change.ObjectVersion = &objVersion + } + + changeSet, ok := changeSetsMap[row.ChangeSet.ID] + if !ok { + changes := make([]changeset.Change, 0) + changes = append(changes, change) + + changeSet = &changeset.ChangeSet{ + ChangeSetID: row.ChangeSet.ChangeSetUuid, + ChangeSet: changes, + } + if row.ChangeSet.BranchID.Valid { + changeSet.BranchID = &row.ChangeSet.BranchID.String + } + changeSetsMap[row.ChangeSet.ID] = changeSet + continue + } + + changeSet.ChangeSet = append(changeSet.ChangeSet, change) + } + + ingestionLogs := make([]*reconcilerpb.IngestionLog, 0, len(rawIngestionLogs)) + ingestionLogsMap := make(map[int32]*reconcilerpb.IngestionLog) + for _, row := range rawIngestionLogs { + if _, ok := ingestionLogsMap[row.IngestionLog.ID]; ok { + continue + } + + ingestionLog := row.IngestionLog + entity := &diodepb.Entity{} + if err := protojson.Unmarshal(ingestionLog.Entity, entity); err != nil { + return nil, fmt.Errorf("failed to unmarshal entity: %w", err) + } + var ingestionErr reconcilerpb.IngestionError + if ingestionLog.Error != nil { + if err := protojson.Unmarshal(ingestionLog.Error, &ingestionErr); err != nil { + return nil, fmt.Errorf("failed to unmarshal error: %w", err) + } + } + + changeSet, ok := changeSetsMap[row.ChangeSet.ID] + if !ok { + return nil, fmt.Errorf("change set not found for ingestion log: %d", row.IngestionLog.ID) + } + var compressedChangeSet []byte + if len(changeSet.ChangeSet) > 0 { + b, err := changeset.CompressChangeSet(changeSet) + if err != nil { + return nil, fmt.Errorf("failed to compress change set: %w", err) + } + compressedChangeSet = b + } + + log := &reconcilerpb.IngestionLog{ + Id: ingestionLog.IngestionLogUuid, + DataType: ingestionLog.DataType.String, + State: reconcilerpb.State(ingestionLog.State.Int32), + RequestId: ingestionLog.RequestID.String, + IngestionTs: ingestionLog.IngestionTs.Int64, + ProducerAppName: ingestionLog.ProducerAppName.String, + 
ProducerAppVersion: ingestionLog.ProducerAppVersion.String, + SdkName: ingestionLog.SdkName.String, + SdkVersion: ingestionLog.SdkVersion.String, + Entity: entity, + Error: &ingestionErr, + ChangeSet: &reconcilerpb.ChangeSet{ + Id: row.ChangeSet.ChangeSetUuid, + Data: compressedChangeSet, + }, + } + + ingestionLogsMap[ingestionLog.ID] = log + ingestionLogs = append(ingestionLogs, log) + } + + return ingestionLogs, nil } // ChangeSetRepository allows interacting with change sets. type ChangeSetRepository struct { + pool *pgxpool.Pool queries *postgres.Queries } // NewChangeSetRepository creates a new ChangeSetRepository. -func NewChangeSetRepository(db postgres.DBTX) *ChangeSetRepository { +func NewChangeSetRepository(pool *pgxpool.Pool) *ChangeSetRepository { return &ChangeSetRepository{ - queries: postgres.New(db), + pool: pool, + queries: postgres.New(pool), } } // CreateChangeSet creates a new change set. -func (r *ChangeSetRepository) CreateChangeSet(_ context.Context, _ *reconcilerpb.ChangeSet) error { - return errors.New("not implemented") +func (r *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { + tx, err := r.pool.Begin(ctx) + if err != nil { + return nil, fmt.Errorf("failed to start transaction: %w", err) + } + + rollback := func() { + if err := tx.Rollback(ctx); err != nil { + panic(fmt.Errorf("failed to rollback transaction: %w", err)) + } + } + + qtx := r.queries.WithTx(tx) + params := postgres.CreateChangeSetParams{ + ChangeSetUuid: changeSet.ChangeSetID, + IngestionLogID: ingestionLogID, + } + if changeSet.BranchID != nil { + params.BranchID = pgtype.Text{String: *changeSet.BranchID, Valid: true} + } + cs, err := qtx.CreateChangeSet(ctx, params) + + if err != nil { + rollback() + return nil, fmt.Errorf("failed to create change set: %w", err) + } + + for i, change := range changeSet.ChangeSet { + dataJSON, err := json.Marshal(change.Data) + if err != nil { + rollback() + return nil, fmt.Errorf("failed to marshal entity: %w", err) + } + + changeParams := postgres.CreateChangeParams{ + ChangeUuid: change.ChangeID, + ChangeSetID: cs.ID, + ChangeType: change.ChangeType, + ObjectType: change.ObjectType, + Data: dataJSON, + SequenceNumber: pgtype.Int4{Int32: int32(i), Valid: true}, + } + if change.ObjectID != nil { + changeParams.ObjectID = pgtype.Int4{Int32: int32(*change.ObjectID), Valid: true} + } + if change.ObjectVersion != nil { + changeParams.ObjectVersion = pgtype.Int4{Int32: int32(*change.ObjectVersion), Valid: true} + } + + if _, err = qtx.CreateChange(ctx, changeParams); err != nil { + rollback() + return nil, fmt.Errorf("failed to create change: %w", err) + } + } + + if err := tx.Commit(ctx); err != nil { + rollback() + return nil, fmt.Errorf("failed to commit transaction: %w", err) + } + return &cs.ID, nil } diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index 6befc467..1f76e585 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -1,9 +1,7 @@ package reconciler import ( - "bytes" "context" - "encoding/json" "errors" "fmt" "log/slog" @@ -11,7 +9,7 @@ import ( "regexp" "strconv" - "github.com/andybalholm/brotli" + "github.com/google/uuid" "github.com/kelseyhightower/envconfig" "github.com/redis/go-redis/v9" "github.com/segmentio/ksuid" @@ -69,10 +67,11 @@ type IngestionProcessor struct { // IngestionLogToProcess represents an ingestion log to process type 
IngestionLogToProcess struct { - key string - ingestionLog *reconcilerpb.IngestionLog - changeSet *changeset.ChangeSet - errors []error + key string + ingestionLogID int32 + ingestionLog *reconcilerpb.IngestionLog + changeSet *changeset.ChangeSet + errors []error } // NewIngestionProcessor creates a new ingestion processor @@ -297,10 +296,9 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to prepare change set: %v", err)) - ingestionLog.ingestionLog.State = reconcilerpb.State_FAILED - ingestionLog.ingestionLog.Error = extractIngestionError(err) + ingestionErr := extractIngestionError(err) - if _, err = p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { + if err = p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } break @@ -308,37 +306,21 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan ingestionLog.changeSet = changeSet - if len(changeSet.ChangeSet) > 0 { - csCompressed, err := compressChangeSet(changeSet) - if err != nil { - ingestionLog.ingestionLog.State = reconcilerpb.State_FAILED - ingestionLog.errors = append(ingestionLog.errors, err) - - if _, err = p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { - ingestionLog.errors = append(ingestionLog.errors, err) - } - break - } - - ingestionLog.ingestionLog.ChangeSet = &reconcilerpb.ChangeSet{ - Id: changeSet.ChangeSetID, - Data: csCompressed, - } - - if _, err = p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { - ingestionLog.errors = append(ingestionLog.errors, err) - } + if _, err = p.changeSetRepository.CreateChangeSet(ctx, *changeSet, ingestionLog.ingestionLogID); err != nil { + ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to create change set: %v", err)) + } + if len(changeSet.ChangeSet) > 0 { if applyChangeSetChan != nil { applyChangeSetChan <- IngestionLogToProcess{ - key: ingestionLog.key, - ingestionLog: ingestionLog.ingestionLog, - changeSet: ingestionLog.changeSet, + key: ingestionLog.key, + ingestionLogID: ingestionLog.ingestionLogID, + ingestionLog: ingestionLog.ingestionLog, + changeSet: ingestionLog.changeSet, } } } else { - ingestionLog.ingestionLog.State = reconcilerpb.State_NO_CHANGES - if _, err = p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { + if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_NO_CHANGES, nil); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } } @@ -379,17 +361,15 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha p.logger.Debug("failed to apply change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID, "error", err) ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to apply chang eset: %v", err)) - ingestionLog.ingestionLog.State = reconcilerpb.State_FAILED - ingestionLog.ingestionLog.Error = extractIngestionError(err) + ingestionErr := extractIngestionError(err) - if _, err = p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { + if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, 
ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } break } - ingestionLog.ingestionLog.State = reconcilerpb.State_RECONCILED - if _, err := p.writeIngestionLog(ctx, ingestionLog.key, ingestionLog.ingestionLog); err != nil { + if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } p.logger.Debug("change set applied", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID) @@ -416,13 +396,13 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq continue } - ingestionLogID := ksuid.New().String() + ingestionLogKSUID := ksuid.New().String() - key := fmt.Sprintf("ingest-entity:%s-%d-%s", objectType, ingestionTs, ingestionLogID) + key := fmt.Sprintf("ingest-entity:%s-%d-%s", objectType, ingestionTs, ingestionLogKSUID) p.logger.Debug("ingest entity key", "key", key) ingestionLog := &reconcilerpb.IngestionLog{ - Id: ingestionLogID, + Id: ingestionLogKSUID, RequestId: ingestReq.GetId(), ProducerAppName: ingestReq.GetProducerAppName(), ProducerAppVersion: ingestReq.GetProducerAppVersion(), @@ -434,20 +414,18 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq State: reconcilerpb.State_QUEUED, } - if _, err = p.writeIngestionLog(ctx, key, ingestionLog); err != nil { - errs = append(errs, fmt.Errorf("failed to write JSON: %v", err)) - continue - } + ingestionLog.Id = uuid.NewString() - if err = p.ingestionLogRepository.CreateIngestionLog(ctx, ingestionLog, nil); err != nil { - p.logger.Debug("failed to create ingestion log in ingestion log repo", "error", err) + ingestionLogID, err := p.ingestionLogRepository.CreateIngestionLog(ctx, ingestionLog, nil) + if err != nil { errs = append(errs, fmt.Errorf("failed to create ingestion log: %v", err)) continue } generateIngestionLogChan <- IngestionLogToProcess{ - key: key, - ingestionLog: ingestionLog, + key: key, + ingestionLogID: *ingestionLogID, + ingestionLog: ingestionLog, } } @@ -492,24 +470,6 @@ func normalizeIngestionLog(l []byte) []byte { return re.ReplaceAll(l, []byte(`"ingestionTs":$1`)) } -func compressChangeSet(cs *changeset.ChangeSet) ([]byte, error) { - csJSON, err := json.Marshal(cs) - if err != nil { - return nil, fmt.Errorf("failed to marshal change set JSON: %v", err) - } - - var brotliBuf bytes.Buffer - brotliWriter := brotli.NewWriter(&brotliBuf) - if _, err = brotliWriter.Write(csJSON); err != nil { - return nil, fmt.Errorf("failed to compress change set: %v", err) - } - if err = brotliWriter.Close(); err != nil { - return nil, fmt.Errorf("failed to compress change set: %v", err) - } - - return brotliBuf.Bytes(), nil -} - func extractObjectType(in *diodepb.Entity) (string, error) { switch in.GetEntity().(type) { case *diodepb.Entity_Device: diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go b/diode-server/reconciler/ingestion_processor_internal_test.go index 06a9cf2c..dc0c0cef 100644 --- a/diode-server/reconciler/ingestion_processor_internal_test.go +++ b/diode-server/reconciler/ingestion_processor_internal_test.go @@ -28,6 +28,7 @@ import ( mr "github.com/netboxlabs/diode/diode-server/reconciler/mocks" ) +func int32Ptr(i int32) *int32 { return &i } func strPtr(s string) *string { return &s } func TestWriteIngestionLog(t *testing.T) { @@ -235,6 +236,8 @@ func 
TestHandleStreamMessage(t *testing.T) { mockRedisClient := new(mr.RedisClient) mockRedisStreamClient := new(mr.RedisClient) mockNbClient := new(mnp.NetBoxAPI) + mockIngestionLogRepo := new(mr.IngestionLogRepository) + mockChangeSetRepo := new(mr.ChangeSetRepository) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) p := &IngestionProcessor{ @@ -247,6 +250,8 @@ func TestHandleStreamMessage(t *testing.T) { ReconcilerRateLimiterRPS: 20, ReconcilerRateLimiterBurst: 1, }, + ingestionLogRepository: mockIngestionLogRepo, + changeSetRepository: mockChangeSetRepo, } request := redis.XMessage{} @@ -286,6 +291,7 @@ func TestHandleStreamMessage(t *testing.T) { mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.changeSetResponse, tt.changeSetError) if tt.entities[0].Entity != nil { mockRedisClient.On("Do", ctx, "JSON.SET", mock.Anything, "$", mock.Anything).Return(redis.NewCmd(ctx)) + mockIngestionLogRepo.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) } mockRedisStreamClient.On("XAck", ctx, mock.Anything, mock.Anything, mock.Anything).Return(redis.NewIntCmd(ctx)) mockRedisStreamClient.On("XDel", ctx, mock.Anything, mock.Anything).Return(redis.NewIntCmd(ctx)) @@ -299,6 +305,7 @@ func TestHandleStreamMessage(t *testing.T) { if tt.validMsg { mockRedisClient.AssertExpectations(t) + mockIngestionLogRepo.AssertExpectations(t) } }) } @@ -421,7 +428,7 @@ func TestCompressChangeSet(t *testing.T) { }, }, } - compressed, err := compressChangeSet(&cs) + compressed, err := changeset.CompressChangeSet(&cs) require.NoError(t, err) // Decompress the compressed data @@ -522,6 +529,8 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { ctx := context.Background() mockRedisClient := new(mr.RedisClient) mockNbClient := new(mnp.NetBoxAPI) + mockIngestionLogRepo := new(mr.IngestionLogRepository) + mockChangeSetRepo := new(mr.ChangeSetRepository) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) p := &IngestionProcessor{ @@ -533,6 +542,8 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { ReconcilerRateLimiterRPS: 20, ReconcilerRateLimiterBurst: 1, }, + ingestionLogRepository: mockIngestionLogRepo, + changeSetRepository: mockChangeSetRepo, } // Set up the mock expectation @@ -541,11 +552,12 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { cmd.SetErr(errors.New("error")) } redisKey := fmt.Sprintf("ingest-entity:%s-%d-%s", tt.ingestionLog.DataType, tt.ingestionLog.IngestionTs, tt.ingestionLog.Id) - mockRedisClient.On("Do", ctx, "JSON.SET", redisKey, "$", mock.Anything). 
- Return(cmd) + mockRedisClient.On("Do", ctx, "JSON.SET", redisKey, "$", mock.Anything).Return(cmd) + ingestionLogID := int32(1) mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(tt.mockRetrieveObjectStateResponse, nil) if tt.autoApplyChangesets { + mockIngestionLogRepo.On("UpdateIngestionLogStateWithError", ctx, ingestionLogID, tt.expectedStatus, mock.Anything).Return(nil) mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.mockApplyChangeSetResponse, nil) } @@ -565,8 +577,9 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { } generateChangeSetChannel <- IngestionLogToProcess{ - key: redisKey, - ingestionLog: tt.ingestionLog, + key: redisKey, + ingestionLogID: ingestionLogID, + ingestionLog: tt.ingestionLog, } close(generateChangeSetChannel) @@ -576,6 +589,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { } mockRedisClient.AssertExpectations(t) + mockIngestionLogRepo.AssertExpectations(t) require.NotNil(t, tt.ingestionLog.ChangeSet) require.Equal(t, tt.expectedStatus, tt.ingestionLog.State) }) diff --git a/diode-server/reconciler/server.go b/diode-server/reconciler/server.go index ca1c7856..420a020d 100644 --- a/diode-server/reconciler/server.go +++ b/diode-server/reconciler/server.go @@ -30,16 +30,18 @@ const ( type Server struct { reconcilerpb.UnimplementedReconcilerServiceServer - config Config - logger *slog.Logger - grpcListener net.Listener - grpcServer *grpc.Server - redisClient RedisClient - apiKeys APIKeys + config Config + logger *slog.Logger + grpcListener net.Listener + grpcServer *grpc.Server + redisClient RedisClient + ingestionLogRepository IngestionLogRepository + changeSetRepository ChangeSetRepository + apiKeys APIKeys } // NewServer creates a new reconciler server -func NewServer(ctx context.Context, logger *slog.Logger) (*Server, error) { +func NewServer(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, changeSetRepo ChangeSetRepository) (*Server, error) { var cfg Config envconfig.MustProcess("", &cfg) @@ -67,12 +69,14 @@ func NewServer(ctx context.Context, logger *slog.Logger) (*Server, error) { grpcServer := grpc.NewServer(grpc.ChainUnaryInterceptor(auth)) component := &Server{ - config: cfg, - logger: logger, - grpcListener: grpcListener, - grpcServer: grpcServer, - redisClient: redisClient, - apiKeys: apiKeys, + config: cfg, + logger: logger, + grpcListener: grpcListener, + grpcServer: grpcServer, + redisClient: redisClient, + ingestionLogRepository: ingestionLogRepo, + changeSetRepository: changeSetRepo, + apiKeys: apiKeys, } reconcilerpb.RegisterReconcilerServiceServer(grpcServer, component) @@ -140,7 +144,7 @@ func (s *Server) RetrieveIngestionDataSources(_ context.Context, in *reconcilerp // RetrieveIngestionLogs retrieves logs func (s *Server) RetrieveIngestionLogs(ctx context.Context, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { - return retrieveIngestionLogs(ctx, s.logger, s.redisClient, in) + return retrieveIngestionLogs(ctx, s.logger, s.ingestionLogRepository, s.changeSetRepository, in) } func validateRetrieveIngestionDataSourcesRequest(in *reconcilerpb.RetrieveIngestionDataSourcesRequest) error { From df235eba3514a7faf2ba46051d30e49339c60c0e Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:18:22 +0000 Subject: [PATCH 09/26] feat: retrieve ingestion logs from postgres Signed-off-by: Michal Fiedorowicz --- .../reconciler/changeset/changeset.go | 27 ++ 
diode-server/reconciler/logs_retriever.go | 262 ++++-------------- .../reconciler/logs_retriever_test.go | 49 ---- 3 files changed, 84 insertions(+), 254 deletions(-) delete mode 100644 diode-server/reconciler/logs_retriever_test.go diff --git a/diode-server/reconciler/changeset/changeset.go b/diode-server/reconciler/changeset/changeset.go index 15fae7be..99975317 100644 --- a/diode-server/reconciler/changeset/changeset.go +++ b/diode-server/reconciler/changeset/changeset.go @@ -1,5 +1,13 @@ package changeset +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/andybalholm/brotli" +) + const ( // ChangeTypeCreate is the change type for a creation ChangeTypeCreate = "create" @@ -24,3 +32,22 @@ type Change struct { ObjectVersion *int `json:"object_version,omitempty"` Data any `json:"data"` } + +// CompressChangeSet compresses a change set +func CompressChangeSet(cs *ChangeSet) ([]byte, error) { + csJSON, err := json.Marshal(cs) + if err != nil { + return nil, fmt.Errorf("failed to marshal change set JSON: %v", err) + } + + var brotliBuf bytes.Buffer + brotliWriter := brotli.NewWriter(&brotliBuf) + if _, err = brotliWriter.Write(csJSON); err != nil { + return nil, fmt.Errorf("failed to compress change set: %v", err) + } + if err = brotliWriter.Close(); err != nil { + return nil, fmt.Errorf("failed to compress change set: %v", err) + } + + return brotliBuf.Bytes(), nil +} diff --git a/diode-server/reconciler/logs_retriever.go b/diode-server/reconciler/logs_retriever.go index fbda1674..f423c578 100644 --- a/diode-server/reconciler/logs_retriever.go +++ b/diode-server/reconciler/logs_retriever.go @@ -5,196 +5,66 @@ import ( "context" "encoding/base64" "encoding/binary" - "encoding/json" "fmt" "log/slog" - "strings" - - "github.com/redis/go-redis/v9" - "google.golang.org/protobuf/encoding/protojson" "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" ) -type extraAttributesWrapper struct { - ExtraAttributes string `json:"$"` - IngestionTs string `json:"ingestion_ts"` -} -type redisLogResult struct { - ExtraAttributes extraAttributesWrapper `json:"extra_attributes"` - ID string `json:"id"` -} - -type redisLogsResponse struct { - Results []redisLogResult `json:"results"` - TotalResults int32 `json:"total_results"` -} - -func convertMapInterface(data interface{}) interface{} { - switch v := data.(type) { - case map[interface{}]interface{}: - converted := make(map[string]interface{}) - for key, value := range v { - converted[fmt.Sprintf("%v", key)] = convertMapInterface(value) // Recursive conversion for nested maps - } - return converted - case []interface{}: - // If the value is a slice, apply conversion recursively to each element - for i, value := range v { - v[i] = convertMapInterface(value) - } - return v - default: - return v - } -} - -func encodeIntToBase64(num int32) string { - // Create a buffer to hold the binary representation - buf := new(bytes.Buffer) - - // Write the int value as a binary value into the buffer - if err := binary.Write(buf, binary.BigEndian, num); err != nil { - fmt.Println("error writing binary:", err) - } - - // Encode the binary data to base64 - return base64.StdEncoding.EncodeToString(buf.Bytes()) -} +func retrieveIngestionMetrics(ctx context.Context, ingestionLogRepo IngestionLogRepository) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { + var metrics reconcilerpb.IngestionMetrics -func decodeBase64ToInt(encoded string) (int32, error) { - // Decode the base64 string back to bytes - decoded, err := 
base64.StdEncoding.DecodeString(encoded) + ingestionLogsPerState, err := ingestionLogRepo.CountIngestionLogsPerState(ctx) if err != nil { - return 0, err - } - - // Convert the byte slice back to int64 - buf := bytes.NewReader(decoded) - var num int32 - if err := binary.Read(buf, binary.BigEndian, &num); err != nil { - return 0, err - } - - return num, nil -} - -func retrieveIngestionMetrics(ctx context.Context, client RedisClient) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { - pipe := client.Pipeline() - - results := []*redis.Cmd{ - pipe.Do(ctx, "FT.SEARCH", "ingest-entity", "*", "LIMIT", 0, 0), - } - for s := reconcilerpb.State_QUEUED; s <= reconcilerpb.State_NO_CHANGES; s++ { - stateName, ok := reconcilerpb.State_name[int32(s)] - if !ok { - return nil, fmt.Errorf("failed to retrieve ingestion logs: failed to get state name of %d", s) + return nil, err + } + + var totalIngestionLogs int32 + + for state, count := range ingestionLogsPerState { + totalIngestionLogs += count + switch state { + case reconcilerpb.State_QUEUED: + metrics.Queued = count + case reconcilerpb.State_RECONCILED: + metrics.Reconciled = count + case reconcilerpb.State_FAILED: + metrics.Failed = count + case reconcilerpb.State_NO_CHANGES: + metrics.NoChanges = count } - stateName = escapeSpecialChars(stateName) - results = append(results, pipe.Do(ctx, "FT.SEARCH", "ingest-entity", fmt.Sprintf("@state:{%s}", stateName), "LIMIT", 0, 0)) } - if _, err := pipe.Exec(ctx); err != nil { - return nil, fmt.Errorf("failed to retrieve ingestion logs: %w", err) - } - - var metrics reconcilerpb.IngestionMetrics - - for q := range results { - res, err := results[q].Result() - if err != nil { - return nil, fmt.Errorf("failed to retrieve ingestion logs: %w", err) - } + metrics.Total = totalIngestionLogs - conv := convertMapInterface(res) - totalRes, ok := conv.(map[string]interface{})["total_results"].(int64) - if !ok { - return nil, fmt.Errorf("failed to retrieve ingestion logs: failed to parse total_results") - } - total := int32(totalRes) - if q == int(reconcilerpb.State_QUEUED) { - metrics.Queued = total - } else if q == int(reconcilerpb.State_RECONCILED) { - metrics.Reconciled = total - } else if q == int(reconcilerpb.State_FAILED) { - metrics.Failed = total - } else if q == int(reconcilerpb.State_NO_CHANGES) { - metrics.NoChanges = total - } else { - metrics.Total = total - } - } - return &reconcilerpb.RetrieveIngestionLogsResponse{Logs: nil, Metrics: &metrics, NextPageToken: ""}, nil + return &reconcilerpb.RetrieveIngestionLogsResponse{Metrics: &metrics}, nil } -func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, client RedisClient, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { +func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, changeSetRepo ChangeSetRepository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { if in.GetOnlyMetrics() { logger.Debug("retrieving only ingestion metrics") - return retrieveIngestionMetrics(ctx, client) + return retrieveIngestionMetrics(ctx, ingestionLogRepo) } pageSize := in.GetPageSize() if in.PageSize == nil || pageSize >= 1000 { - pageSize = 100 // Default to 100 + pageSize = 100 } - query := buildQueryFilter(in) - - // Construct the base FT.SEARCH query - queryArgs := []interface{}{ - "FT.SEARCH", - "ingest-entity", // Index name - query, - } - - // Apply sorting by id in descending order - queryArgs = 
append(queryArgs, "SORTBY", "id", "DESC") - - var err error - - // Apply limit for pagination - var offset int32 + offset := int32(0) if in.PageToken != "" { - offset, err = decodeBase64ToInt(in.PageToken) + decodedPageToken, err := decodeBase64ToInt(in.PageToken) if err != nil { - logger.Warn("error decoding page token", "error", err) + return nil, err } + offset = decodedPageToken } - queryArgs = append(queryArgs, "LIMIT", offset, pageSize) - logger.Debug("retrieving ingestion logs", "query", queryArgs) - - // Execute the query using Redis - result, err := client.Do(ctx, queryArgs...).Result() + logs, err := ingestionLogRepo.RetrieveIngestionLogs(ctx, in, pageSize, offset) if err != nil { return nil, fmt.Errorf("failed to retrieve ingestion logs: %w", err) } - res := convertMapInterface(result) - - jsonBytes, err := json.Marshal(res) - if err != nil { - return nil, fmt.Errorf("error marshaling ingestion logs: %w", err) - } - - var response redisLogsResponse - - // Unmarshal the result into the struct - if err = json.Unmarshal(jsonBytes, &response); err != nil { - return nil, fmt.Errorf("error parsing JSON: %w", err) - } - - logs := make([]*reconcilerpb.IngestionLog, 0) - - for _, logsResult := range response.Results { - ingestionLog := &reconcilerpb.IngestionLog{} - if err := protojson.Unmarshal([]byte(logsResult.ExtraAttributes.ExtraAttributes), ingestionLog); err != nil { - return nil, fmt.Errorf("error parsing ExtraAttributes JSON: %v", err) - } - - logs = append(logs, ingestionLog) - } - var nextPageToken string if len(logs) == int(pageSize) { @@ -204,70 +74,52 @@ func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, client Redi // Fill metrics var metrics reconcilerpb.IngestionMetrics + total := int32(len(logs)) if in.State != nil { if in.GetState() == reconcilerpb.State_UNSPECIFIED { - metrics.Total = response.TotalResults + metrics.Total = total } else if in.GetState() == reconcilerpb.State_QUEUED { - metrics.Queued = response.TotalResults + metrics.Queued = total } else if in.GetState() == reconcilerpb.State_RECONCILED { - metrics.Reconciled = response.TotalResults + metrics.Reconciled = total } else if in.GetState() == reconcilerpb.State_FAILED { - metrics.Failed = response.TotalResults + metrics.Failed = total } else if in.GetState() == reconcilerpb.State_NO_CHANGES { - metrics.NoChanges = response.TotalResults + metrics.NoChanges = total } } else { - metrics.Total = response.TotalResults + metrics.Total = total } return &reconcilerpb.RetrieveIngestionLogsResponse{Logs: logs, Metrics: &metrics, NextPageToken: nextPageToken}, nil } -func buildQueryFilter(req *reconcilerpb.RetrieveIngestionLogsRequest) string { - queryFilter := "*" - - // apply optional filters for ingestion timestamps (start and end) - if req.GetIngestionTsStart() > 0 || req.GetIngestionTsEnd() > 0 { - ingestionTsFilter := fmt.Sprintf("@ingestion_ts:[%d inf]", req.GetIngestionTsStart()) - - if req.GetIngestionTsEnd() > 0 { - ingestionTsFilter = fmt.Sprintf("@ingestion_ts:[%d %d]", req.GetIngestionTsStart(), req.GetIngestionTsEnd()) - } - - queryFilter = ingestionTsFilter - } - - // apply optional filters for ingestion state - if req.State != nil { - state := escapeSpecialChars(req.GetState().String()) - stateFilter := fmt.Sprintf("@state:{%s}", state) - if queryFilter == "*" { - queryFilter = stateFilter - } else { - queryFilter = fmt.Sprintf("%s %s", queryFilter, stateFilter) - } +func decodeBase64ToInt(encoded string) (int32, error) { + // Decode the base64 string back to bytes + decoded, err := 
base64.StdEncoding.DecodeString(encoded) + if err != nil { + return 0, err } - if req.GetDataType() != "" { - dataType := escapeSpecialChars(req.GetDataType()) - dataTypeFilter := fmt.Sprintf("@data_type:{%s}", dataType) - if queryFilter == "*" { - queryFilter = dataTypeFilter - } else { - queryFilter = fmt.Sprintf("%s %s", queryFilter, dataTypeFilter) - } + // Convert the byte slice back to int64 + buf := bytes.NewReader(decoded) + var num int32 + if err := binary.Read(buf, binary.BigEndian, &num); err != nil { + return 0, err } - return queryFilter + return num, nil } -func escapeSpecialChars(s string) string { - //replace ,.<>{}[]"':;!@#$%^&*()-+=~ with double backslash - //ref: https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/escaping/ - oldNew := []string{ - ",", "\\,", ".", "\\.", "<", "\\<", ">", "\\>", "{", "\\{", "}", "\\}", "[", "\\[", "]", "\\]", "\"", "\\\"", - "'", "\\'", ":", "\\:", ";", "\\;", "!", "\\!", "@", "\\@", "#", "\\#", "$", "\\$", "%", "\\%", "^", "\\^", - "&", "\\&", "*", "\\*", "(", "\\(", ")", "\\)", "-", "\\-", "+", "\\+", "=", "\\=", "~", "\\~", +func encodeIntToBase64(num int32) string { + // Create a buffer to hold the binary representation + buf := new(bytes.Buffer) + + // Write the int value as a binary value into the buffer + if err := binary.Write(buf, binary.BigEndian, num); err != nil { + fmt.Println("error writing binary:", err) } - return strings.NewReplacer(oldNew...).Replace(s) + + // Encode the binary data to base64 + return base64.StdEncoding.EncodeToString(buf.Bytes()) } diff --git a/diode-server/reconciler/logs_retriever_test.go b/diode-server/reconciler/logs_retriever_test.go deleted file mode 100644 index c15137a7..00000000 --- a/diode-server/reconciler/logs_retriever_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package reconciler - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEscapeSpecialChars(t *testing.T) { - tests := []struct { - name string - input string - want string - }{ - { - name: "empty string", - input: "", - want: "", - }, - { - name: "string with all considered special characters", - input: `,.<>{}[]"':;!@#$%^&*()-+=~`, - want: `\,\.\<\>\{\}\[\]\"\'\:\;\!\@\#\$\%\^\&\*\(\)\-\+\=\~`, - }, - { - name: "producer app name", - input: "example-app", - want: "example\\-app", - }, - { - name: "producer app version", - input: "0.1.0", - want: "0\\.1\\.0", - }, - { - name: "request ID", - input: "123e4567-e89b-12d3-a456-426614174000", - want: "123e4567\\-e89b\\-12d3\\-a456\\-426614174000", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := escapeSpecialChars(tt.input); got != tt.want { - assert.Equal(t, tt.want, got) - } - }) - } -} From a72bdc4c5298b2f64223d447ffc952b89f73d1f3 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 15:55:22 +0000 Subject: [PATCH 10/26] chore: adjust unit tests Signed-off-by: Michal Fiedorowicz --- diode-server/ingester/component_test.go | 5 ++++- diode-server/reconciler/ingestion_processor.go | 1 + .../reconciler/ingestion_processor_internal_test.go | 7 ++----- diode-server/reconciler/logs_retriever.go | 2 +- diode-server/reconciler/server_test.go | 9 +++++++-- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/diode-server/ingester/component_test.go b/diode-server/ingester/component_test.go index bbe24ab7..1deb4d78 100644 --- a/diode-server/ingester/component_test.go +++ b/diode-server/ingester/component_test.go @@ -18,6 +18,7 @@ import ( pb 
"github.com/netboxlabs/diode/diode-server/gen/diode/v1/diodepb" "github.com/netboxlabs/diode/diode-server/ingester" "github.com/netboxlabs/diode/diode-server/reconciler" + "github.com/netboxlabs/diode/diode-server/reconciler/mocks" ) func getFreePort() (string, error) { @@ -70,7 +71,9 @@ const bufSize = 1024 * 1024 func startReconcilerServer(ctx context.Context, t *testing.T) *reconciler.Server { logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - server, err := reconciler.NewServer(ctx, logger) + ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) + changeSetRepoMock := mocks.NewChangeSetRepository(t) + server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) require.NoError(t, err) errChan := make(chan error, 1) diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index 1f76e585..50d22dd2 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -369,6 +369,7 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha break } + ingestionLog.ingestionLog.State = reconcilerpb.State_RECONCILED if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go b/diode-server/reconciler/ingestion_processor_internal_test.go index dc0c0cef..b4d33210 100644 --- a/diode-server/reconciler/ingestion_processor_internal_test.go +++ b/diode-server/reconciler/ingestion_processor_internal_test.go @@ -290,7 +290,6 @@ func TestHandleStreamMessage(t *testing.T) { } mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.changeSetResponse, tt.changeSetError) if tt.entities[0].Entity != nil { - mockRedisClient.On("Do", ctx, "JSON.SET", mock.Anything, "$", mock.Anything).Return(redis.NewCmd(ctx)) mockIngestionLogRepo.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) } mockRedisStreamClient.On("XAck", ctx, mock.Anything, mock.Anything, mock.Anything).Return(redis.NewIntCmd(ctx)) @@ -304,7 +303,6 @@ func TestHandleStreamMessage(t *testing.T) { } if tt.validMsg { - mockRedisClient.AssertExpectations(t) mockIngestionLogRepo.AssertExpectations(t) } }) @@ -552,7 +550,6 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { cmd.SetErr(errors.New("error")) } redisKey := fmt.Sprintf("ingest-entity:%s-%d-%s", tt.ingestionLog.DataType, tt.ingestionLog.IngestionTs, tt.ingestionLog.Id) - mockRedisClient.On("Do", ctx, "JSON.SET", redisKey, "$", mock.Anything).Return(cmd) ingestionLogID := int32(1) mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(tt.mockRetrieveObjectStateResponse, nil) @@ -560,6 +557,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { mockIngestionLogRepo.On("UpdateIngestionLogStateWithError", ctx, ingestionLogID, tt.expectedStatus, mock.Anything).Return(nil) mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.mockApplyChangeSetResponse, nil) } + mockChangeSetRepo.On("CreateChangeSet", ctx, mock.Anything, ingestionLogID).Return(int32Ptr(1), nil) bufCapacity := 1 @@ -588,9 +586,8 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { <-applyChangeSetDone } - mockRedisClient.AssertExpectations(t) 
mockIngestionLogRepo.AssertExpectations(t) - require.NotNil(t, tt.ingestionLog.ChangeSet) + mockChangeSetRepo.AssertExpectations(t) require.Equal(t, tt.expectedStatus, tt.ingestionLog.State) }) } diff --git a/diode-server/reconciler/logs_retriever.go b/diode-server/reconciler/logs_retriever.go index f423c578..e34a2b8f 100644 --- a/diode-server/reconciler/logs_retriever.go +++ b/diode-server/reconciler/logs_retriever.go @@ -40,7 +40,7 @@ func retrieveIngestionMetrics(ctx context.Context, ingestionLogRepo IngestionLog return &reconcilerpb.RetrieveIngestionLogsResponse{Metrics: &metrics}, nil } -func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, changeSetRepo ChangeSetRepository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { +func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, _ ChangeSetRepository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { if in.GetOnlyMetrics() { logger.Debug("retrieving only ingestion metrics") return retrieveIngestionMetrics(ctx, ingestionLogRepo) diff --git a/diode-server/reconciler/server_test.go b/diode-server/reconciler/server_test.go index 1a625b05..d008bded 100644 --- a/diode-server/reconciler/server_test.go +++ b/diode-server/reconciler/server_test.go @@ -16,6 +16,7 @@ import ( pb "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" "github.com/netboxlabs/diode/diode-server/reconciler" + "github.com/netboxlabs/diode/diode-server/reconciler/mocks" ) func startTestServer(ctx context.Context, t *testing.T, redisAddr string) (*reconciler.Server, *grpc.ClientConn) { @@ -26,7 +27,9 @@ func startTestServer(ctx context.Context, t *testing.T, redisAddr string) (*reco s := grpc.NewServer() logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - server, err := reconciler.NewServer(ctx, logger) + ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) + changeSetRepoMock := mocks.NewChangeSetRepository(t) + server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) require.NoError(t, err) pb.RegisterReconcilerServiceServer(s, server) @@ -60,7 +63,9 @@ func TestNewServer(t *testing.T) { defer teardownEnv() logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - server, err := reconciler.NewServer(ctx, logger) + ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) + changeSetRepoMock := mocks.NewChangeSetRepository(t) + server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) require.NoError(t, err) require.NotNil(t, server) From 35b87cdadf1a7983f12e60a56fc240cd624d0be6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 09:26:31 +0000 Subject: [PATCH 11/26] chore(deps): bump golang.org/x/crypto in /diode-server (#200) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.28.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.28.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- diode-server/go.mod | 8 ++++---- diode-server/go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/diode-server/go.mod b/diode-server/go.mod index 60457df1..e551ed36 100644 --- a/diode-server/go.mod +++ b/diode-server/go.mod @@ -48,11 +48,11 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.22.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/diode-server/go.sum b/diode-server/go.sum index dd9dac85..1ca9ba18 100644 --- a/diode-server/go.sum +++ b/diode-server/go.sum @@ -97,19 +97,19 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= From 19897cc6cb2a127ccaf94caf8a79b1307779130b Mon Sep 17 00:00:00 2001 From: Luke Tucker 
<64618+ltucker@users.noreply.github.com> Date: Thu, 12 Dec 2024 12:05:25 -0500 Subject: [PATCH 12/26] chore: docker compose development overrides (#201) --- .gitignore | 1 + diode-server/Makefile | 4 ++-- diode-server/docker/dev.env | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 diode-server/docker/dev.env diff --git a/.gitignore b/.gitignore index e085b141..7fb83e3d 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ coverage.txt # diode-server diode-server/.coverage/ diode-server/build/ +diode-server/docker/dev.env # Python __pycache__/ diff --git a/diode-server/Makefile b/diode-server/Makefile index 78617d90..6a5da8f4 100644 --- a/diode-server/Makefile +++ b/diode-server/Makefile @@ -85,12 +85,12 @@ docker-compose-down: .PHONY: docker-compose-dev-up docker-compose-dev-up: docker-all @DIODE_VERSION=$(DIODE_VERSION) COMMIT_SHA=$(COMMIT_SHA) DIODE_TAG=$(DIODE_VERSION)-$(COMMIT_SHA) PROJECT_NAME=diode-dev \ - $(DOCKER_COMPOSE) --env-file docker/sample.env -f docker/docker-compose.yaml -f docker/docker-compose.dev.yaml up -d --build + $(DOCKER_COMPOSE) --env-file docker/sample.env --env-file docker/dev.env -f docker/docker-compose.yaml -f docker/docker-compose.dev.yaml up -d --build .PHONY: docker-compose-dev-down docker-compose-dev-down: @DIODE_VERSION=$(DIODE_VERSION) COMMIT_SHA=$(COMMIT_SHA) DIODE_TAG=$(DIODE_VERSION)-$(COMMIT_SHA) PROJECT_NAME=diode-dev \ - $(DOCKER_COMPOSE) --env-file docker/sample.env -f docker/docker-compose.yaml -f docker/docker-compose.dev.yaml down --remove-orphans + $(DOCKER_COMPOSE) --env-file docker/sample.env --env-file docker/dev.env -f docker/docker-compose.yaml -f docker/docker-compose.dev.yaml down --remove-orphans .PHONY: docker-compose-netbox-up docker-compose-netbox-up: diff --git a/diode-server/docker/dev.env b/diode-server/docker/dev.env new file mode 100644 index 00000000..a7c1c905 --- /dev/null +++ b/diode-server/docker/dev.env @@ -0,0 +1 @@ +NETBOX_DIODE_PLUGIN_API_BASE_URL=http://host.docker.internal:8000/netbox/api/plugins/diode From 57d9c37e3b55e41330146cdc97ca1727e20e46ef Mon Sep 17 00:00:00 2001 From: Luke Tucker <64618+ltucker@users.noreply.github.com> Date: Mon, 16 Dec 2024 09:22:44 -0500 Subject: [PATCH 13/26] feat: add branch support (#202) Co-authored-by: Michal Fiedorowicz --- diode-server/netboxdiodeplugin/client.go | 17 ++++++ diode-server/netboxdiodeplugin/client_test.go | 56 +++++++++++++++++++ diode-server/reconciler/applier/applier.go | 4 +- .../reconciler/applier/applier_test.go | 2 +- diode-server/reconciler/differ/differ.go | 8 ++- .../reconciler/differ/differ_dcim_test.go | 2 +- .../reconciler/differ/differ_ipam_test.go | 2 +- .../reconciler/differ/differ_virt_test.go | 2 +- .../reconciler/ingestion_processor.go | 4 +- 9 files changed, 87 insertions(+), 10 deletions(-) diff --git a/diode-server/netboxdiodeplugin/client.go b/diode-server/netboxdiodeplugin/client.go index 9e4fc3d4..24aaa1c2 100644 --- a/diode-server/netboxdiodeplugin/client.go +++ b/diode-server/netboxdiodeplugin/client.go @@ -15,6 +15,7 @@ import ( "os" "reflect" "strconv" + "strings" "time" "github.com/mitchellh/mapstructure" @@ -42,6 +43,11 @@ const ( defaultBaseURL = "http://127.0.0.1:8080/api/plugins/diode" defaultHTTPTimeoutSeconds = 5 + + // NetBoxBranchHeader is an HTTP header that indicates the NetBox branch to target + NetBoxBranchHeader = "X-NetBox-Branch" + // NetBoxBranchParam is a query parameter that indicates the NetBox branch to target + NetBoxBranchParam = "_branch" ) var ( @@ -265,6 +271,7 @@ type ObjectState 
struct { type RetrieveObjectStateQueryParams struct { ObjectType string ObjectID int + BranchID string Params map[string]string } @@ -280,6 +287,10 @@ func (c *Client) RetrieveObjectState(ctx context.Context, params RetrieveObjectS if params.ObjectID > 0 { queryParams.Set("object_id", strconv.Itoa(params.ObjectID)) } + branchID := strings.TrimSpace(params.BranchID) + if branchID != "" { + queryParams.Set(NetBoxBranchParam, branchID) + } for k, v := range params.Params { queryParams.Set(k, v) } @@ -391,6 +402,7 @@ func statusMapToStringHookFunc() mapstructure.DecodeHookFunc { type ChangeSetRequest struct { ChangeSetID string `json:"change_set_id"` ChangeSet []Change `json:"change_set"` + BranchID string `json:"-"` // Supplied as header } // Change represents a change @@ -430,6 +442,11 @@ func (c *Client) ApplyChangeSet(ctx context.Context, payload ChangeSetRequest) ( } req.Header.Set("Content-Type", "application/json") + branchID := strings.TrimSpace(payload.BranchID) + if branchID != "" { + req.Header.Set(NetBoxBranchHeader, branchID) + } + resp, err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/diode-server/netboxdiodeplugin/client_test.go b/diode-server/netboxdiodeplugin/client_test.go index 37ac2e5e..cc33bfb4 100644 --- a/diode-server/netboxdiodeplugin/client_test.go +++ b/diode-server/netboxdiodeplugin/client_test.go @@ -202,6 +202,25 @@ func TestRetrieveObjectState(t *testing.T) { tlsSkipVerify: true, shouldError: false, }, + { + name: "valid response for DCIM site with branch", + params: netboxdiodeplugin.RetrieveObjectStateQueryParams{ObjectType: netbox.DcimSiteObjectType, ObjectID: 1, BranchID: "branch_id"}, + mockServerResponse: `{"object_type":"dcim.site","object_change_id":1,"object":{"id":1,"name":"site 01", "slug": "site-01"}}`, + apiKey: "foobar", + response: &netboxdiodeplugin.ObjectState{ + ObjectType: netbox.DcimSiteObjectType, + ObjectChangeID: 1, + Object: &netbox.DcimSiteDataWrapper{ + Site: &netbox.DcimSite{ + ID: 1, + Name: "site 01", + Slug: "site-01", + }, + }, + }, + tlsSkipVerify: true, + shouldError: false, + }, { name: "valid response for DCIM DeviceRole", params: netboxdiodeplugin.RetrieveObjectStateQueryParams{ObjectType: netbox.DcimDeviceRoleObjectType, ObjectID: 1}, @@ -552,6 +571,11 @@ func TestRetrieveObjectState(t *testing.T) { assert.Equal(t, r.URL.Query().Get("object_id"), objectID) assert.Equal(t, r.Header.Get("Authorization"), fmt.Sprintf("Token %s", tt.apiKey)) assert.Equal(t, r.Header.Get("User-Agent"), fmt.Sprintf("%s/%s", netboxdiodeplugin.SDKName, netboxdiodeplugin.SDKVersion)) + if tt.params.BranchID != "" { + assert.Equal(t, r.URL.Query().Get(netboxdiodeplugin.NetBoxBranchParam), tt.params.BranchID) + } else { + assert.False(t, r.URL.Query().Has(netboxdiodeplugin.NetBoxBranchParam)) + } _, _ = w.Write([]byte(tt.mockServerResponse)) } @@ -624,6 +648,33 @@ func TestApplyChangeSet(t *testing.T) { }, shouldError: false, }, + { + name: "valid apply change set response with branch", + apiKey: "foobar", + changeSetRequest: netboxdiodeplugin.ChangeSetRequest{ + ChangeSetID: "00000000-0000-0000-0000-000000000000", + BranchID: "test-branch", + ChangeSet: []netboxdiodeplugin.Change{ + { + ChangeID: "00000000-0000-0000-0000-000000000001", + ChangeType: "create", + ObjectType: "dcim.device", + ObjectID: nil, + ObjectVersion: nil, + Data: &netbox.DcimDevice{ + Name: "test", + }, + }, + }, + }, + mockServerResponse: `{"change_set_id":"00000000-0000-0000-0000-000000000000","result":"success"}`, + mockStatusCode: http.StatusOK, + 
response: &netboxdiodeplugin.ChangeSetResponse{ + ChangeSetID: "00000000-0000-0000-0000-000000000000", + Result: "success", + }, + shouldError: false, + }, { name: "invalid request", apiKey: "foobar", @@ -722,6 +773,11 @@ func TestApplyChangeSet(t *testing.T) { assert.Equal(t, r.Header.Get("Authorization"), fmt.Sprintf("Token %s", tt.apiKey)) assert.Equal(t, r.Header.Get("User-Agent"), fmt.Sprintf("%s/%s", netboxdiodeplugin.SDKName, netboxdiodeplugin.SDKVersion)) assert.Equal(t, r.Header.Get("Content-Type"), "application/json") + if tt.changeSetRequest.BranchID != "" { + assert.Equal(t, r.Header.Get(netboxdiodeplugin.NetBoxBranchHeader), tt.changeSetRequest.BranchID) + } else { + assert.Len(t, r.Header.Values(netboxdiodeplugin.NetBoxBranchHeader), 0) + } w.WriteHeader(tt.mockStatusCode) _, _ = w.Write([]byte(tt.mockServerResponse)) } diff --git a/diode-server/reconciler/applier/applier.go b/diode-server/reconciler/applier/applier.go index 70501441..e3aa71aa 100644 --- a/diode-server/reconciler/applier/applier.go +++ b/diode-server/reconciler/applier/applier.go @@ -9,7 +9,7 @@ import ( ) // ApplyChangeSet applies a change set to NetBox -func ApplyChangeSet(ctx context.Context, logger *slog.Logger, cs changeset.ChangeSet, nbClient netboxdiodeplugin.NetBoxAPI) error { +func ApplyChangeSet(ctx context.Context, logger *slog.Logger, cs changeset.ChangeSet, branchID string, nbClient netboxdiodeplugin.NetBoxAPI) error { changes := make([]netboxdiodeplugin.Change, 0) for _, change := range cs.ChangeSet { changes = append(changes, netboxdiodeplugin.Change{ @@ -25,6 +25,8 @@ func ApplyChangeSet(ctx context.Context, logger *slog.Logger, cs changeset.Chang req := netboxdiodeplugin.ChangeSetRequest{ ChangeSetID: cs.ChangeSetID, ChangeSet: changes, + // TODO(mfiedorowicz): take branch from ChangeSet, remove parameter + BranchID: branchID, } resp, err := nbClient.ApplyChangeSet(ctx, req) diff --git a/diode-server/reconciler/applier/applier_test.go b/diode-server/reconciler/applier/applier_test.go index d70c5893..caf9683f 100644 --- a/diode-server/reconciler/applier/applier_test.go +++ b/diode-server/reconciler/applier/applier_test.go @@ -61,7 +61,7 @@ func TestApplyChangeSet(t *testing.T) { mockNetBoxAPI.On("ApplyChangeSet", ctx, req).Return(resp, nil) - err := applier.ApplyChangeSet(ctx, logger, cs, mockNetBoxAPI) + err := applier.ApplyChangeSet(ctx, logger, cs, "", mockNetBoxAPI) assert.NoError(t, err) mockNetBoxAPI.AssertExpectations(t) } diff --git a/diode-server/reconciler/differ/differ.go b/diode-server/reconciler/differ/differ.go index 7ccb9033..e7f00ade 100644 --- a/diode-server/reconciler/differ/differ.go +++ b/diode-server/reconciler/differ/differ.go @@ -30,7 +30,7 @@ type ObjectState struct { } // Diff compares ingested entity with the intended state in NetBox and returns a change set -func Diff(ctx context.Context, entity IngestEntity, netboxAPI netboxdiodeplugin.NetBoxAPI) (*changeset.ChangeSet, error) { +func Diff(ctx context.Context, entity IngestEntity, branchID string, netboxAPI netboxdiodeplugin.NetBoxAPI) (*changeset.ChangeSet, error) { // extract ingested entity (actual) actual, err := extractIngestEntityData(entity) if err != nil { @@ -52,7 +52,7 @@ func Diff(ctx context.Context, entity IngestEntity, netboxAPI netboxdiodeplugin. 
// retrieve root object all its nested objects from NetBox (intended) intendedNestedObjectsMap := make(map[string]netbox.ComparableData) for _, obj := range actualNestedObjects { - intended, err := retrieveObjectState(ctx, netboxAPI, obj) + intended, err := retrieveObjectState(ctx, netboxAPI, obj, branchID) if err != nil { return nil, err } @@ -93,16 +93,18 @@ func Diff(ctx context.Context, entity IngestEntity, netboxAPI netboxdiodeplugin. ObjectID: objectID, ObjectVersion: nil, Data: obj.Data(), + // TODO(mfiedorowicz): include branchID }) } return &changeset.ChangeSet{ChangeSetID: uuid.NewString(), ChangeSet: changes}, nil } -func retrieveObjectState(ctx context.Context, netboxAPI netboxdiodeplugin.NetBoxAPI, change netbox.ComparableData) (netbox.ComparableData, error) { +func retrieveObjectState(ctx context.Context, netboxAPI netboxdiodeplugin.NetBoxAPI, change netbox.ComparableData, branchID string) (netbox.ComparableData, error) { params := netboxdiodeplugin.RetrieveObjectStateQueryParams{ ObjectID: 0, ObjectType: change.DataType(), + BranchID: branchID, Params: change.ObjectStateQueryParams(), } resp, err := netboxAPI.RetrieveObjectState(ctx, params) diff --git a/diode-server/reconciler/differ/differ_dcim_test.go b/diode-server/reconciler/differ/differ_dcim_test.go index c61599dc..3dbc74c5 100644 --- a/diode-server/reconciler/differ/differ_dcim_test.go +++ b/diode-server/reconciler/differ/differ_dcim_test.go @@ -4348,7 +4348,7 @@ func TestDcimPrepare(t *testing.T) { }, nil) } - cs, err := differ.Diff(ctx, tt.ingestEntity, mockClient) + cs, err := differ.Diff(ctx, tt.ingestEntity, "", mockClient) if tt.wantErr { require.Error(t, err) return diff --git a/diode-server/reconciler/differ/differ_ipam_test.go b/diode-server/reconciler/differ/differ_ipam_test.go index 8f044232..7b30c92b 100644 --- a/diode-server/reconciler/differ/differ_ipam_test.go +++ b/diode-server/reconciler/differ/differ_ipam_test.go @@ -1858,7 +1858,7 @@ func TestIpamPrepare(t *testing.T) { }, nil) } - cs, err := differ.Diff(ctx, tt.ingestEntity, mockClient) + cs, err := differ.Diff(ctx, tt.ingestEntity, "", mockClient) if tt.wantErr { require.Error(t, err) return diff --git a/diode-server/reconciler/differ/differ_virt_test.go b/diode-server/reconciler/differ/differ_virt_test.go index f6bbee4f..09e8278c 100644 --- a/diode-server/reconciler/differ/differ_virt_test.go +++ b/diode-server/reconciler/differ/differ_virt_test.go @@ -1644,7 +1644,7 @@ func TestVirtualizationPrepare(t *testing.T) { }, nil) } - cs, err := differ.Diff(ctx, tt.ingestEntity, mockClient) + cs, err := differ.Diff(ctx, tt.ingestEntity, "", mockClient) if tt.wantErr { require.Error(t, err) return diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index 50d22dd2..a0dcd5f3 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -282,7 +282,7 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan State: int(ingestionLog.ingestionLog.GetState()), } - changeSet, err := differ.Diff(ctx, ingestEntity, p.nbClient) + changeSet, err := differ.Diff(ctx, ingestEntity, "", p.nbClient) if err != nil { tags := map[string]string{ "request_id": ingestEntity.RequestID, @@ -357,7 +357,7 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha p.logger.Debug("applying change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID) - if err := 
applier.ApplyChangeSet(ctx, p.logger, *ingestionLog.changeSet, p.nbClient); err != nil { + if err := applier.ApplyChangeSet(ctx, p.logger, *ingestionLog.changeSet, "", p.nbClient); err != nil { p.logger.Debug("failed to apply change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID, "error", err) ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to apply chang eset: %v", err)) From 9a3b23569a0baf59793916ec0922f13dfff2e123 Mon Sep 17 00:00:00 2001 From: Luke Tucker <64618+ltucker@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:43:03 -0500 Subject: [PATCH 14/26] feat: ActionIngestionLog rpc (#203) --- diode-proto/diode/v1/reconciler.proto | 27 + .../gen/diode/v1/diodepb/ingester.pb.go | 422 +++--------- .../diode/v1/reconcilerpb/reconciler.pb.go | 645 ++++++++++-------- .../v1/reconcilerpb/reconciler.pb.validate.go | 384 +++++++++++ .../v1/reconcilerpb/reconciler_grpc.pb.go | 39 ++ 5 files changed, 904 insertions(+), 613 deletions(-) diff --git a/diode-proto/diode/v1/reconciler.proto b/diode-proto/diode/v1/reconciler.proto index b4ea156b..e8cba9fa 100644 --- a/diode-proto/diode/v1/reconciler.proto +++ b/diode-proto/diode/v1/reconciler.proto @@ -111,10 +111,37 @@ message RetrieveIngestionLogsResponse { string next_page_token = 3; // Token for the next page of results, if any } +enum ActionType { + ACTION_UNSPECIFIED = 0; + ACTION_DIFF = 1; + ACTION_APPLY = 2; + ACTION_REJECT = 3; +} + +// The request to take action on an ingestion log +message ActionIngestionLogRequest { + ActionType action = 1; + string ingestion_log_id = 2; + optional string branch_id = 3; +} + +// The response from the ActionIngestionLog request +message ActionIngestionLogResponse { + message Error { + string message = 1; + int32 code = 2; + } + + IngestionLog log = 1; + repeated Error errors = 2; +} + // Reconciler service API service ReconcilerService { // Retrieves ingestion data sources rpc RetrieveIngestionDataSources(RetrieveIngestionDataSourcesRequest) returns (RetrieveIngestionDataSourcesResponse) {} // Retrieves ingestion logs rpc RetrieveIngestionLogs(RetrieveIngestionLogsRequest) returns (RetrieveIngestionLogsResponse); + // Takes action on an ingestion log + rpc ActionIngestionLog(ActionIngestionLogRequest) returns (ActionIngestionLogResponse); } diff --git a/diode-server/gen/diode/v1/diodepb/ingester.pb.go b/diode-server/gen/diode/v1/diodepb/ingester.pb.go index 7bff5d5e..e8f9a734 100644 --- a/diode-server/gen/diode/v1/diodepb/ingester.pb.go +++ b/diode-server/gen/diode/v1/diodepb/ingester.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: diode/v1/ingester.proto @@ -46,11 +46,9 @@ type Device struct { func (x *Device) Reset() { *x = Device{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Device) String() string { @@ -61,7 +59,7 @@ func (*Device) ProtoMessage() {} func (x *Device) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -198,11 +196,9 @@ type Interface struct { func (x *Interface) Reset() { *x = Interface{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Interface) String() string { @@ -213,7 +209,7 @@ func (*Interface) ProtoMessage() {} func (x *Interface) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -343,11 +339,9 @@ type Cluster struct { func (x *Cluster) Reset() { *x = Cluster{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Cluster) String() string { @@ -358,7 +352,7 @@ func (*Cluster) ProtoMessage() {} func (x *Cluster) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -436,11 +430,9 @@ type ClusterType struct { func (x *ClusterType) Reset() { *x = ClusterType{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClusterType) String() string { @@ -451,7 +443,7 @@ func (*ClusterType) ProtoMessage() {} func (x *ClusterType) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -508,11 +500,9 @@ type ClusterGroup struct { func (x *ClusterGroup) Reset() { *x = ClusterGroup{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ClusterGroup) String() string { @@ -523,7 +513,7 @@ func (*ClusterGroup) ProtoMessage() {} func (x *ClusterGroup) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -591,11 +581,9 @@ type VirtualMachine struct { func (x *VirtualMachine) Reset() { *x = VirtualMachine{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VirtualMachine) String() string { @@ -606,7 +594,7 @@ func (*VirtualMachine) ProtoMessage() {} func (x *VirtualMachine) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -743,11 +731,9 @@ type VMInterface struct { func (x *VMInterface) Reset() { *x = VMInterface{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VMInterface) String() string { @@ -758,7 +744,7 @@ func (*VMInterface) ProtoMessage() {} func (x *VMInterface) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -837,11 +823,9 @@ type VirtualDisk struct { func (x *VirtualDisk) Reset() { *x = VirtualDisk{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VirtualDisk) String() string { @@ -852,7 +836,7 @@ func (*VirtualDisk) ProtoMessage() {} func (x *VirtualDisk) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -923,11 +907,9 @@ type IPAddress struct { func (x *IPAddress) Reset() { *x = IPAddress{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IPAddress) String() string { @@ -938,7 +920,7 @@ func (*IPAddress) ProtoMessage() {} func (x *IPAddress) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1043,11 +1025,9 @@ type 
DeviceType struct { func (x *DeviceType) Reset() { *x = DeviceType{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeviceType) String() string { @@ -1058,7 +1038,7 @@ func (*DeviceType) ProtoMessage() {} func (x *DeviceType) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1136,11 +1116,9 @@ type Manufacturer struct { func (x *Manufacturer) Reset() { *x = Manufacturer{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Manufacturer) String() string { @@ -1151,7 +1129,7 @@ func (*Manufacturer) ProtoMessage() {} func (x *Manufacturer) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1209,11 +1187,9 @@ type Platform struct { func (x *Platform) Reset() { *x = Platform{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Platform) String() string { @@ -1224,7 +1200,7 @@ func (*Platform) ProtoMessage() {} func (x *Platform) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1292,11 +1268,9 @@ type Prefix struct { func (x *Prefix) Reset() { *x = Prefix{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Prefix) String() string { @@ -1307,7 +1281,7 @@ func (*Prefix) ProtoMessage() {} func (x *Prefix) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1393,11 +1367,9 @@ type Role struct { func (x *Role) Reset() { *x = Role{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Role) String() string { @@ -1408,7 +1380,7 @@ func (*Role) ProtoMessage() {} func (x *Role) 
ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1476,11 +1448,9 @@ type Site struct { func (x *Site) Reset() { *x = Site{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Site) String() string { @@ -1491,7 +1461,7 @@ func (*Site) ProtoMessage() {} func (x *Site) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1575,11 +1545,9 @@ type Tag struct { func (x *Tag) Reset() { *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Tag) String() string { @@ -1590,7 +1558,7 @@ func (*Tag) ProtoMessage() {} func (x *Tag) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1656,11 +1624,9 @@ type Entity struct { func (x *Entity) Reset() { *x = Entity{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Entity) String() string { @@ -1671,7 +1637,7 @@ func (*Entity) ProtoMessage() {} func (x *Entity) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1916,11 +1882,9 @@ type IngestRequest struct { func (x *IngestRequest) Reset() { *x = IngestRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestRequest) String() string { @@ -1931,7 +1895,7 @@ func (*IngestRequest) ProtoMessage() {} func (x *IngestRequest) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2006,11 +1970,9 @@ type IngestResponse struct { func (x *IngestResponse) Reset() { *x = IngestResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_ingester_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_ingester_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestResponse) String() string { @@ -2021,7 +1983,7 @@ func (*IngestResponse) ProtoMessage() {} func (x *IngestResponse) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_ingester_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2736,236 +2698,6 @@ func file_diode_v1_ingester_proto_init() { if File_diode_v1_ingester_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_diode_v1_ingester_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Device); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Interface); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Cluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ClusterType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ClusterGroup); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*VirtualMachine); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*VMInterface); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*VirtualDisk); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*IPAddress); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*DeviceType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Manufacturer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*Platform); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*Prefix); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*Role); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*Site); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*Entity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*IngestRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_ingester_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*IngestResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_diode_v1_ingester_proto_msgTypes[0].OneofWrappers = []any{} file_diode_v1_ingester_proto_msgTypes[1].OneofWrappers = []any{} file_diode_v1_ingester_proto_msgTypes[2].OneofWrappers = []any{} diff --git a/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.go b/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.go index df6dc146..053f4091 100644 --- a/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.go +++ b/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: diode/v1/reconciler.proto @@ -77,6 +77,58 @@ func (State) EnumDescriptor() ([]byte, []int) { return file_diode_v1_reconciler_proto_rawDescGZIP(), []int{0} } +type ActionType int32 + +const ( + ActionType_ACTION_UNSPECIFIED ActionType = 0 + ActionType_ACTION_DIFF ActionType = 1 + ActionType_ACTION_APPLY ActionType = 2 + ActionType_ACTION_REJECT ActionType = 3 +) + +// Enum value maps for ActionType. 
+var ( + ActionType_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ACTION_DIFF", + 2: "ACTION_APPLY", + 3: "ACTION_REJECT", + } + ActionType_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ACTION_DIFF": 1, + "ACTION_APPLY": 2, + "ACTION_REJECT": 3, + } +) + +func (x ActionType) Enum() *ActionType { + p := new(ActionType) + *p = x + return p +} + +func (x ActionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ActionType) Descriptor() protoreflect.EnumDescriptor { + return file_diode_v1_reconciler_proto_enumTypes[1].Descriptor() +} + +func (ActionType) Type() protoreflect.EnumType { + return &file_diode_v1_reconciler_proto_enumTypes[1] +} + +func (x ActionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ActionType.Descriptor instead. +func (ActionType) EnumDescriptor() ([]byte, []int) { + return file_diode_v1_reconciler_proto_rawDescGZIP(), []int{1} +} + // An ingestion data source type IngestionDataSource struct { state protoimpl.MessageState @@ -89,11 +141,9 @@ type IngestionDataSource struct { func (x *IngestionDataSource) Reset() { *x = IngestionDataSource{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionDataSource) String() string { @@ -104,7 +154,7 @@ func (*IngestionDataSource) ProtoMessage() {} func (x *IngestionDataSource) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +196,9 @@ type RetrieveIngestionDataSourcesRequest struct { func (x *RetrieveIngestionDataSourcesRequest) Reset() { *x = RetrieveIngestionDataSourcesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RetrieveIngestionDataSourcesRequest) String() string { @@ -161,7 +209,7 @@ func (*RetrieveIngestionDataSourcesRequest) ProtoMessage() {} func (x *RetrieveIngestionDataSourcesRequest) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,11 +256,9 @@ type RetrieveIngestionDataSourcesResponse struct { func (x *RetrieveIngestionDataSourcesResponse) Reset() { *x = RetrieveIngestionDataSourcesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RetrieveIngestionDataSourcesResponse) String() string { @@ -223,7 +269,7 @@ func (*RetrieveIngestionDataSourcesResponse) ProtoMessage() {} func (x *RetrieveIngestionDataSourcesResponse) ProtoReflect() 
protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -258,11 +304,9 @@ type IngestionError struct { func (x *IngestionError) Reset() { *x = IngestionError{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionError) String() string { @@ -273,7 +317,7 @@ func (*IngestionError) ProtoMessage() {} func (x *IngestionError) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -324,11 +368,9 @@ type IngestionMetrics struct { func (x *IngestionMetrics) Reset() { *x = IngestionMetrics{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionMetrics) String() string { @@ -339,7 +381,7 @@ func (*IngestionMetrics) ProtoMessage() {} func (x *IngestionMetrics) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -401,11 +443,9 @@ type ChangeSet struct { func (x *ChangeSet) Reset() { *x = ChangeSet{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChangeSet) String() string { @@ -416,7 +456,7 @@ func (*ChangeSet) ProtoMessage() {} func (x *ChangeSet) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -467,11 +507,9 @@ type IngestionLog struct { func (x *IngestionLog) Reset() { *x = IngestionLog{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionLog) String() string { @@ -482,7 +520,7 @@ func (*IngestionLog) ProtoMessage() {} func (x *IngestionLog) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -599,11 +637,9 @@ type RetrieveIngestionLogsRequest struct { func (x *RetrieveIngestionLogsRequest) Reset() { *x = 
RetrieveIngestionLogsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RetrieveIngestionLogsRequest) String() string { @@ -614,7 +650,7 @@ func (*RetrieveIngestionLogsRequest) ProtoMessage() {} func (x *RetrieveIngestionLogsRequest) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -698,11 +734,9 @@ type RetrieveIngestionLogsResponse struct { func (x *RetrieveIngestionLogsResponse) Reset() { *x = RetrieveIngestionLogsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RetrieveIngestionLogsResponse) String() string { @@ -713,7 +747,7 @@ func (*RetrieveIngestionLogsResponse) ProtoMessage() {} func (x *RetrieveIngestionLogsResponse) ProtoReflect() protoreflect.Message { mi := &file_diode_v1_reconciler_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -749,6 +783,122 @@ func (x *RetrieveIngestionLogsResponse) GetNextPageToken() string { return "" } +// The request to take action on an ingestion log +type ActionIngestionLogRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Action ActionType `protobuf:"varint,1,opt,name=action,proto3,enum=diode.v1.ActionType" json:"action,omitempty"` + IngestionLogId string `protobuf:"bytes,2,opt,name=ingestion_log_id,json=ingestionLogId,proto3" json:"ingestion_log_id,omitempty"` + BranchId *string `protobuf:"bytes,3,opt,name=branch_id,json=branchId,proto3,oneof" json:"branch_id,omitempty"` +} + +func (x *ActionIngestionLogRequest) Reset() { + *x = ActionIngestionLogRequest{} + mi := &file_diode_v1_reconciler_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActionIngestionLogRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActionIngestionLogRequest) ProtoMessage() {} + +func (x *ActionIngestionLogRequest) ProtoReflect() protoreflect.Message { + mi := &file_diode_v1_reconciler_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActionIngestionLogRequest.ProtoReflect.Descriptor instead. 
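// Editor's note: the block below is an illustrative sketch added for this
// review, not part of the generated file. It shows how a caller in this
// package might construct the new ActionIngestionLogRequest; the function
// name, log ID and branch name are hypothetical placeholders. Because
// branch_id is declared optional in the proto, the Go field is a *string
// and can be left nil to act without a branch.
func exampleActionRequest() *ActionIngestionLogRequest {
	branch := "example-branch" // placeholder; leave BranchId nil to act without a branch
	return &ActionIngestionLogRequest{
		Action:         ActionType_ACTION_APPLY,
		IngestionLogId: "example-ingestion-log-id", // placeholder identifier of an existing ingestion log
		BranchId:       &branch,
	}
}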
+func (*ActionIngestionLogRequest) Descriptor() ([]byte, []int) { + return file_diode_v1_reconciler_proto_rawDescGZIP(), []int{9} +} + +func (x *ActionIngestionLogRequest) GetAction() ActionType { + if x != nil { + return x.Action + } + return ActionType_ACTION_UNSPECIFIED +} + +func (x *ActionIngestionLogRequest) GetIngestionLogId() string { + if x != nil { + return x.IngestionLogId + } + return "" +} + +func (x *ActionIngestionLogRequest) GetBranchId() string { + if x != nil && x.BranchId != nil { + return *x.BranchId + } + return "" +} + +// The response from the ActionIngestionLog request +type ActionIngestionLogResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Log *IngestionLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` + Errors []*ActionIngestionLogResponse_Error `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ActionIngestionLogResponse) Reset() { + *x = ActionIngestionLogResponse{} + mi := &file_diode_v1_reconciler_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActionIngestionLogResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActionIngestionLogResponse) ProtoMessage() {} + +func (x *ActionIngestionLogResponse) ProtoReflect() protoreflect.Message { + mi := &file_diode_v1_reconciler_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActionIngestionLogResponse.ProtoReflect.Descriptor instead. +func (*ActionIngestionLogResponse) Descriptor() ([]byte, []int) { + return file_diode_v1_reconciler_proto_rawDescGZIP(), []int{10} +} + +func (x *ActionIngestionLogResponse) GetLog() *IngestionLog { + if x != nil { + return x.Log + } + return nil +} + +func (x *ActionIngestionLogResponse) GetErrors() []*ActionIngestionLogResponse_Error { + if x != nil { + return x.Errors + } + return nil +} + type IngestionError_Details struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -761,11 +911,9 @@ type IngestionError_Details struct { func (x *IngestionError_Details) Reset() { *x = IngestionError_Details{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionError_Details) String() string { @@ -775,8 +923,8 @@ func (x *IngestionError_Details) String() string { func (*IngestionError_Details) ProtoMessage() {} func (x *IngestionError_Details) ProtoReflect() protoreflect.Message { - mi := &file_diode_v1_reconciler_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_diode_v1_reconciler_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -823,11 +971,9 @@ type IngestionError_Details_Error struct { func (x *IngestionError_Details_Error) Reset() { *x = IngestionError_Details_Error{} - if protoimpl.UnsafeEnabled { - mi := &file_diode_v1_reconciler_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_diode_v1_reconciler_proto_msgTypes[12] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IngestionError_Details_Error) String() string { @@ -837,8 +983,8 @@ func (x *IngestionError_Details_Error) String() string { func (*IngestionError_Details_Error) ProtoMessage() {} func (x *IngestionError_Details_Error) ProtoReflect() protoreflect.Message { - mi := &file_diode_v1_reconciler_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_diode_v1_reconciler_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -867,6 +1013,59 @@ func (x *IngestionError_Details_Error) GetChangeId() string { return "" } +type ActionIngestionLogResponse_Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` +} + +func (x *ActionIngestionLogResponse_Error) Reset() { + *x = ActionIngestionLogResponse_Error{} + mi := &file_diode_v1_reconciler_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActionIngestionLogResponse_Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActionIngestionLogResponse_Error) ProtoMessage() {} + +func (x *ActionIngestionLogResponse_Error) ProtoReflect() protoreflect.Message { + mi := &file_diode_v1_reconciler_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActionIngestionLogResponse_Error.ProtoReflect.Descriptor instead. 
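// Editor's note: another illustrative sketch, not generated output. Assuming
// the standard protoc-gen-go-grpc client stub added in reconciler_grpc.pb.go
// (NewReconcilerServiceClient / ActionIngestionLog), a caller could invoke
// the new RPC and unpack ActionIngestionLogResponse roughly as follows. The
// function name is hypothetical, and the context/fmt imports are assumed on
// the caller's side.
func exampleCallAction(ctx context.Context, client ReconcilerServiceClient, req *ActionIngestionLogRequest) (*IngestionLog, error) {
	resp, err := client.ActionIngestionLog(ctx, req)
	if err != nil {
		return nil, err // transport or RPC-level failure
	}
	if errs := resp.GetErrors(); len(errs) > 0 {
		// application-level errors reported alongside the (possibly updated) log
		return resp.GetLog(), fmt.Errorf("action failed: %s (code %d)", errs[0].GetMessage(), errs[0].GetCode())
	}
	return resp.GetLog(), nil
}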
+func (*ActionIngestionLogResponse_Error) Descriptor() ([]byte, []int) { + return file_diode_v1_reconciler_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *ActionIngestionLogResponse_Error) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ActionIngestionLogResponse_Error) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + var File_diode_v1_reconciler_proto protoreflect.FileDescriptor var file_diode_v1_reconciler_proto_rawDesc = []byte{ @@ -994,39 +1193,73 @@ var file_diode_v1_reconciler_proto_rawDesc = []byte{ 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x2a, 0x50, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, - 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x43, 0x4f, 0x4e, - 0x43, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, - 0x53, 0x10, 0x04, 0x32, 0xfe, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, - 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7f, 0x0a, 0x1c, 0x52, 0x65, 0x74, - 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x64, 0x69, 0x6f, 0x64, + 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x19, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, + 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2c, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x10, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, + 0x68, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x62, 0x72, + 0x61, 0x6e, 0x63, 0x68, 0x49, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x62, 0x72, + 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x22, 0xc1, 0x01, 0x0a, 0x1a, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, + 0x12, 0x42, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 
0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x35, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x2a, 0x50, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x43, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x4e, 0x4f, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x53, 0x10, 0x04, 0x2a, 0x5a, 0x0a, + 0x0a, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x49, + 0x46, 0x46, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, + 0x50, 0x50, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x03, 0x32, 0xdf, 0x02, 0x0a, 0x11, 0x52, 0x65, + 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x7f, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2d, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, + 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, + 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x68, 0x0a, 0x15, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, + 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, - 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, - 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x15, 0x52, 0x65, - 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x6f, 0x67, 0x73, 0x12, 0x26, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x27, 0x2e, 0x64, 0x69, - 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, - 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xa4, 0x01, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x64, 0x69, 0x6f, - 0x64, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x65, 0x74, 0x62, 0x6f, 0x78, 0x6c, 0x61, 0x62, 0x73, 0x2f, - 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2f, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2d, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x31, 0x2f, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, 0x72, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x44, - 0x58, 0x58, 0xaa, 0x02, 0x08, 0x44, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, - 0x44, 0x69, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x14, 0x44, 0x69, 0x6f, 0x64, 0x65, - 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x09, 0x44, 0x69, 0x6f, 0x64, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x76, 0x65, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x12, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, + 0x12, 0x23, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xa4, 0x01, 0x0a, 0x0c, + 0x63, 0x6f, 0x6d, 0x2e, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x52, 0x65, + 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x65, 0x74, 0x62, + 0x6f, 0x78, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x64, 0x69, 0x6f, 0x64, 0x65, 0x2f, 0x64, 0x69, 0x6f, + 0x64, 0x65, 0x2d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x64, 0x69, + 0x6f, 0x64, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x65, + 0x72, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x44, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x44, 0x69, 0x6f, 0x64, + 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x44, 0x69, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0xe2, + 0x02, 0x14, 0x44, 0x69, 0x6f, 0x64, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09, 0x44, 0x69, 0x6f, 0x64, 0x65, 0x3a, 0x3a, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1041,43 +1274,52 @@ func file_diode_v1_reconciler_proto_rawDescGZIP() []byte { return file_diode_v1_reconciler_proto_rawDescData } -var file_diode_v1_reconciler_proto_enumTypes = 
make([]protoimpl.EnumInfo, 1) -var file_diode_v1_reconciler_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_diode_v1_reconciler_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_diode_v1_reconciler_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_diode_v1_reconciler_proto_goTypes = []any{ (State)(0), // 0: diode.v1.State - (*IngestionDataSource)(nil), // 1: diode.v1.IngestionDataSource - (*RetrieveIngestionDataSourcesRequest)(nil), // 2: diode.v1.RetrieveIngestionDataSourcesRequest - (*RetrieveIngestionDataSourcesResponse)(nil), // 3: diode.v1.RetrieveIngestionDataSourcesResponse - (*IngestionError)(nil), // 4: diode.v1.IngestionError - (*IngestionMetrics)(nil), // 5: diode.v1.IngestionMetrics - (*ChangeSet)(nil), // 6: diode.v1.ChangeSet - (*IngestionLog)(nil), // 7: diode.v1.IngestionLog - (*RetrieveIngestionLogsRequest)(nil), // 8: diode.v1.RetrieveIngestionLogsRequest - (*RetrieveIngestionLogsResponse)(nil), // 9: diode.v1.RetrieveIngestionLogsResponse - (*IngestionError_Details)(nil), // 10: diode.v1.IngestionError.Details - (*IngestionError_Details_Error)(nil), // 11: diode.v1.IngestionError.Details.Error - (*diodepb.Entity)(nil), // 12: diode.v1.Entity + (ActionType)(0), // 1: diode.v1.ActionType + (*IngestionDataSource)(nil), // 2: diode.v1.IngestionDataSource + (*RetrieveIngestionDataSourcesRequest)(nil), // 3: diode.v1.RetrieveIngestionDataSourcesRequest + (*RetrieveIngestionDataSourcesResponse)(nil), // 4: diode.v1.RetrieveIngestionDataSourcesResponse + (*IngestionError)(nil), // 5: diode.v1.IngestionError + (*IngestionMetrics)(nil), // 6: diode.v1.IngestionMetrics + (*ChangeSet)(nil), // 7: diode.v1.ChangeSet + (*IngestionLog)(nil), // 8: diode.v1.IngestionLog + (*RetrieveIngestionLogsRequest)(nil), // 9: diode.v1.RetrieveIngestionLogsRequest + (*RetrieveIngestionLogsResponse)(nil), // 10: diode.v1.RetrieveIngestionLogsResponse + (*ActionIngestionLogRequest)(nil), // 11: diode.v1.ActionIngestionLogRequest + (*ActionIngestionLogResponse)(nil), // 12: diode.v1.ActionIngestionLogResponse + (*IngestionError_Details)(nil), // 13: diode.v1.IngestionError.Details + (*IngestionError_Details_Error)(nil), // 14: diode.v1.IngestionError.Details.Error + (*ActionIngestionLogResponse_Error)(nil), // 15: diode.v1.ActionIngestionLogResponse.Error + (*diodepb.Entity)(nil), // 16: diode.v1.Entity } var file_diode_v1_reconciler_proto_depIdxs = []int32{ - 1, // 0: diode.v1.RetrieveIngestionDataSourcesResponse.ingestion_data_sources:type_name -> diode.v1.IngestionDataSource - 10, // 1: diode.v1.IngestionError.details:type_name -> diode.v1.IngestionError.Details + 2, // 0: diode.v1.RetrieveIngestionDataSourcesResponse.ingestion_data_sources:type_name -> diode.v1.IngestionDataSource + 13, // 1: diode.v1.IngestionError.details:type_name -> diode.v1.IngestionError.Details 0, // 2: diode.v1.IngestionLog.state:type_name -> diode.v1.State - 12, // 3: diode.v1.IngestionLog.entity:type_name -> diode.v1.Entity - 4, // 4: diode.v1.IngestionLog.error:type_name -> diode.v1.IngestionError - 6, // 5: diode.v1.IngestionLog.change_set:type_name -> diode.v1.ChangeSet + 16, // 3: diode.v1.IngestionLog.entity:type_name -> diode.v1.Entity + 5, // 4: diode.v1.IngestionLog.error:type_name -> diode.v1.IngestionError + 7, // 5: diode.v1.IngestionLog.change_set:type_name -> diode.v1.ChangeSet 0, // 6: diode.v1.RetrieveIngestionLogsRequest.state:type_name -> diode.v1.State - 7, // 7: diode.v1.RetrieveIngestionLogsResponse.logs:type_name -> diode.v1.IngestionLog - 5, // 8: 
diode.v1.RetrieveIngestionLogsResponse.metrics:type_name -> diode.v1.IngestionMetrics - 11, // 9: diode.v1.IngestionError.Details.errors:type_name -> diode.v1.IngestionError.Details.Error - 2, // 10: diode.v1.ReconcilerService.RetrieveIngestionDataSources:input_type -> diode.v1.RetrieveIngestionDataSourcesRequest - 8, // 11: diode.v1.ReconcilerService.RetrieveIngestionLogs:input_type -> diode.v1.RetrieveIngestionLogsRequest - 3, // 12: diode.v1.ReconcilerService.RetrieveIngestionDataSources:output_type -> diode.v1.RetrieveIngestionDataSourcesResponse - 9, // 13: diode.v1.ReconcilerService.RetrieveIngestionLogs:output_type -> diode.v1.RetrieveIngestionLogsResponse - 12, // [12:14] is the sub-list for method output_type - 10, // [10:12] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 8, // 7: diode.v1.RetrieveIngestionLogsResponse.logs:type_name -> diode.v1.IngestionLog + 6, // 8: diode.v1.RetrieveIngestionLogsResponse.metrics:type_name -> diode.v1.IngestionMetrics + 1, // 9: diode.v1.ActionIngestionLogRequest.action:type_name -> diode.v1.ActionType + 8, // 10: diode.v1.ActionIngestionLogResponse.log:type_name -> diode.v1.IngestionLog + 15, // 11: diode.v1.ActionIngestionLogResponse.errors:type_name -> diode.v1.ActionIngestionLogResponse.Error + 14, // 12: diode.v1.IngestionError.Details.errors:type_name -> diode.v1.IngestionError.Details.Error + 3, // 13: diode.v1.ReconcilerService.RetrieveIngestionDataSources:input_type -> diode.v1.RetrieveIngestionDataSourcesRequest + 9, // 14: diode.v1.ReconcilerService.RetrieveIngestionLogs:input_type -> diode.v1.RetrieveIngestionLogsRequest + 11, // 15: diode.v1.ReconcilerService.ActionIngestionLog:input_type -> diode.v1.ActionIngestionLogRequest + 4, // 16: diode.v1.ReconcilerService.RetrieveIngestionDataSources:output_type -> diode.v1.RetrieveIngestionDataSourcesResponse + 10, // 17: diode.v1.ReconcilerService.RetrieveIngestionLogs:output_type -> diode.v1.RetrieveIngestionLogsResponse + 12, // 18: diode.v1.ReconcilerService.ActionIngestionLog:output_type -> diode.v1.ActionIngestionLogResponse + 16, // [16:19] is the sub-list for method output_type + 13, // [13:16] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_diode_v1_reconciler_proto_init() } @@ -1085,148 +1327,15 @@ func file_diode_v1_reconciler_proto_init() { if File_diode_v1_reconciler_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_diode_v1_reconciler_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*IngestionDataSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RetrieveIngestionDataSourcesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*RetrieveIngestionDataSourcesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[3].Exporter 
= func(v any, i int) any { - switch v := v.(*IngestionError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*IngestionMetrics); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ChangeSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*IngestionLog); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*RetrieveIngestionLogsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*RetrieveIngestionLogsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*IngestionError_Details); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_diode_v1_reconciler_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*IngestionError_Details_Error); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_diode_v1_reconciler_proto_msgTypes[7].OneofWrappers = []any{} + file_diode_v1_reconciler_proto_msgTypes[9].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_diode_v1_reconciler_proto_rawDesc, - NumEnums: 1, - NumMessages: 11, + NumEnums: 2, + NumMessages: 14, NumExtensions: 0, NumServices: 1, }, diff --git a/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.validate.go b/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.validate.go index 0545e34c..79187394 100644 --- a/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.validate.go +++ b/diode-server/gen/diode/v1/reconcilerpb/reconciler.pb.validate.go @@ -1280,6 +1280,281 @@ var _ interface { ErrorName() string } = RetrieveIngestionLogsResponseValidationError{} +// Validate checks the field values on ActionIngestionLogRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ActionIngestionLogRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ActionIngestionLogRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ActionIngestionLogRequestMultiError, or nil if none found. 
+func (m *ActionIngestionLogRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ActionIngestionLogRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Action + + // no validation rules for IngestionLogId + + if m.BranchId != nil { + // no validation rules for BranchId + } + + if len(errors) > 0 { + return ActionIngestionLogRequestMultiError(errors) + } + + return nil +} + +// ActionIngestionLogRequestMultiError is an error wrapping multiple validation +// errors returned by ActionIngestionLogRequest.ValidateAll() if the +// designated constraints aren't met. +type ActionIngestionLogRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActionIngestionLogRequestMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActionIngestionLogRequestMultiError) AllErrors() []error { return m } + +// ActionIngestionLogRequestValidationError is the validation error returned by +// ActionIngestionLogRequest.Validate if the designated constraints aren't met. +type ActionIngestionLogRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActionIngestionLogRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActionIngestionLogRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActionIngestionLogRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActionIngestionLogRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ActionIngestionLogRequestValidationError) ErrorName() string { + return "ActionIngestionLogRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ActionIngestionLogRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sActionIngestionLogRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActionIngestionLogRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActionIngestionLogRequestValidationError{} + +// Validate checks the field values on ActionIngestionLogResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ActionIngestionLogResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ActionIngestionLogResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ActionIngestionLogResponseMultiError, or nil if none found. 
+func (m *ActionIngestionLogResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ActionIngestionLogResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetLog()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ActionIngestionLogResponseValidationError{ + field: "Log", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ActionIngestionLogResponseValidationError{ + field: "Log", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLog()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ActionIngestionLogResponseValidationError{ + field: "Log", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetErrors() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ActionIngestionLogResponseValidationError{ + field: fmt.Sprintf("Errors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ActionIngestionLogResponseValidationError{ + field: fmt.Sprintf("Errors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ActionIngestionLogResponseValidationError{ + field: fmt.Sprintf("Errors[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return ActionIngestionLogResponseMultiError(errors) + } + + return nil +} + +// ActionIngestionLogResponseMultiError is an error wrapping multiple +// validation errors returned by ActionIngestionLogResponse.ValidateAll() if +// the designated constraints aren't met. +type ActionIngestionLogResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActionIngestionLogResponseMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActionIngestionLogResponseMultiError) AllErrors() []error { return m } + +// ActionIngestionLogResponseValidationError is the validation error returned +// by ActionIngestionLogResponse.Validate if the designated constraints aren't met. +type ActionIngestionLogResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActionIngestionLogResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActionIngestionLogResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ActionIngestionLogResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActionIngestionLogResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ActionIngestionLogResponseValidationError) ErrorName() string { + return "ActionIngestionLogResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ActionIngestionLogResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sActionIngestionLogResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActionIngestionLogResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActionIngestionLogResponseValidationError{} + // Validate checks the field values on IngestionError_Details with the rules // defined in the proto definition for this message. If any rules are // violated, the first error encountered is returned, or nil if there are no violations. @@ -1526,3 +1801,112 @@ var _ interface { Cause() error ErrorName() string } = IngestionError_Details_ErrorValidationError{} + +// Validate checks the field values on ActionIngestionLogResponse_Error with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *ActionIngestionLogResponse_Error) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ActionIngestionLogResponse_Error with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// ActionIngestionLogResponse_ErrorMultiError, or nil if none found. +func (m *ActionIngestionLogResponse_Error) ValidateAll() error { + return m.validate(true) +} + +func (m *ActionIngestionLogResponse_Error) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Message + + // no validation rules for Code + + if len(errors) > 0 { + return ActionIngestionLogResponse_ErrorMultiError(errors) + } + + return nil +} + +// ActionIngestionLogResponse_ErrorMultiError is an error wrapping multiple +// validation errors returned by +// ActionIngestionLogResponse_Error.ValidateAll() if the designated +// constraints aren't met. +type ActionIngestionLogResponse_ErrorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ActionIngestionLogResponse_ErrorMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ActionIngestionLogResponse_ErrorMultiError) AllErrors() []error { return m } + +// ActionIngestionLogResponse_ErrorValidationError is the validation error +// returned by ActionIngestionLogResponse_Error.Validate if the designated +// constraints aren't met. +type ActionIngestionLogResponse_ErrorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ActionIngestionLogResponse_ErrorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ActionIngestionLogResponse_ErrorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ActionIngestionLogResponse_ErrorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ActionIngestionLogResponse_ErrorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ActionIngestionLogResponse_ErrorValidationError) ErrorName() string { + return "ActionIngestionLogResponse_ErrorValidationError" +} + +// Error satisfies the builtin error interface +func (e ActionIngestionLogResponse_ErrorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sActionIngestionLogResponse_Error.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ActionIngestionLogResponse_ErrorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ActionIngestionLogResponse_ErrorValidationError{} diff --git a/diode-server/gen/diode/v1/reconcilerpb/reconciler_grpc.pb.go b/diode-server/gen/diode/v1/reconcilerpb/reconciler_grpc.pb.go index 1721feae..9f5f83c0 100644 --- a/diode-server/gen/diode/v1/reconcilerpb/reconciler_grpc.pb.go +++ b/diode-server/gen/diode/v1/reconcilerpb/reconciler_grpc.pb.go @@ -21,6 +21,7 @@ const _ = grpc.SupportPackageIsVersion7 const ( ReconcilerService_RetrieveIngestionDataSources_FullMethodName = "/diode.v1.ReconcilerService/RetrieveIngestionDataSources" ReconcilerService_RetrieveIngestionLogs_FullMethodName = "/diode.v1.ReconcilerService/RetrieveIngestionLogs" + ReconcilerService_ActionIngestionLog_FullMethodName = "/diode.v1.ReconcilerService/ActionIngestionLog" ) // ReconcilerServiceClient is the client API for ReconcilerService service. @@ -31,6 +32,8 @@ type ReconcilerServiceClient interface { RetrieveIngestionDataSources(ctx context.Context, in *RetrieveIngestionDataSourcesRequest, opts ...grpc.CallOption) (*RetrieveIngestionDataSourcesResponse, error) // Retrieves ingestion logs RetrieveIngestionLogs(ctx context.Context, in *RetrieveIngestionLogsRequest, opts ...grpc.CallOption) (*RetrieveIngestionLogsResponse, error) + // Takes action on an ingestion log + ActionIngestionLog(ctx context.Context, in *ActionIngestionLogRequest, opts ...grpc.CallOption) (*ActionIngestionLogResponse, error) } type reconcilerServiceClient struct { @@ -59,6 +62,15 @@ func (c *reconcilerServiceClient) RetrieveIngestionLogs(ctx context.Context, in return out, nil } +func (c *reconcilerServiceClient) ActionIngestionLog(ctx context.Context, in *ActionIngestionLogRequest, opts ...grpc.CallOption) (*ActionIngestionLogResponse, error) { + out := new(ActionIngestionLogResponse) + err := c.cc.Invoke(ctx, ReconcilerService_ActionIngestionLog_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ReconcilerServiceServer is the server API for ReconcilerService service. 
// All implementations must embed UnimplementedReconcilerServiceServer // for forward compatibility @@ -67,6 +79,8 @@ type ReconcilerServiceServer interface { RetrieveIngestionDataSources(context.Context, *RetrieveIngestionDataSourcesRequest) (*RetrieveIngestionDataSourcesResponse, error) // Retrieves ingestion logs RetrieveIngestionLogs(context.Context, *RetrieveIngestionLogsRequest) (*RetrieveIngestionLogsResponse, error) + // Takes action on an ingestion log + ActionIngestionLog(context.Context, *ActionIngestionLogRequest) (*ActionIngestionLogResponse, error) mustEmbedUnimplementedReconcilerServiceServer() } @@ -80,6 +94,9 @@ func (UnimplementedReconcilerServiceServer) RetrieveIngestionDataSources(context func (UnimplementedReconcilerServiceServer) RetrieveIngestionLogs(context.Context, *RetrieveIngestionLogsRequest) (*RetrieveIngestionLogsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RetrieveIngestionLogs not implemented") } +func (UnimplementedReconcilerServiceServer) ActionIngestionLog(context.Context, *ActionIngestionLogRequest) (*ActionIngestionLogResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ActionIngestionLog not implemented") +} func (UnimplementedReconcilerServiceServer) mustEmbedUnimplementedReconcilerServiceServer() {} // UnsafeReconcilerServiceServer may be embedded to opt out of forward compatibility for this service. @@ -129,6 +146,24 @@ func _ReconcilerService_RetrieveIngestionLogs_Handler(srv interface{}, ctx conte return interceptor(ctx, in, info, handler) } +func _ReconcilerService_ActionIngestionLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ActionIngestionLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReconcilerServiceServer).ActionIngestionLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ReconcilerService_ActionIngestionLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReconcilerServiceServer).ActionIngestionLog(ctx, req.(*ActionIngestionLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + // ReconcilerService_ServiceDesc is the grpc.ServiceDesc for ReconcilerService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -144,6 +179,10 @@ var ReconcilerService_ServiceDesc = grpc.ServiceDesc{ MethodName: "RetrieveIngestionLogs", Handler: _ReconcilerService_RetrieveIngestionLogs_Handler, }, + { + MethodName: "ActionIngestionLog", + Handler: _ReconcilerService_ActionIngestionLog_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "diode/v1/reconciler.proto", From 78d16a0756c5447abf79e8df37b8e40d7da8e89b Mon Sep 17 00:00:00 2001 From: Luke Tucker <64618+ltucker@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:49:37 -0500 Subject: [PATCH 15/26] chore: enable gofumpt lint (#204) --- .github/golangci.yaml | 3 +++ diode-server/Makefile | 4 ++++ diode-server/cmd/ingester/main.go | 2 +- diode-server/cmd/reconciler/main.go | 2 +- diode-server/netboxdiodeplugin/client.go | 4 ++-- diode-server/netboxdiodeplugin/client_test.go | 3 ++- diode-server/reconciler/differ/differ_virt_test.go | 1 - diode-server/reconciler/ingestion_processor.go | 2 +- .../reconciler/ingestion_processor_internal_test.go | 6 ++++-- 9 files changed, 18 insertions(+), 9 deletions(-) diff --git a/.github/golangci.yaml b/.github/golangci.yaml index ee66519f..5513d15b 100644 --- a/.github/golangci.yaml +++ b/.github/golangci.yaml @@ -16,6 +16,7 @@ linters: - gosimple - bodyclose - gci + - gofumpt issues: exclude-use-default: false @@ -35,3 +36,5 @@ linters-settings: - default - prefix(github.com/netboxlabs/diode) custom-order: true + go-fumpt: + extra-rules: true diff --git a/diode-server/Makefile b/diode-server/Makefile index 6a5da8f4..daa2712c 100644 --- a/diode-server/Makefile +++ b/diode-server/Makefile @@ -36,6 +36,10 @@ deps: lint: @golangci-lint run ./... --config ../.github/golangci.yaml +.PHONY: fix-lint +fix-lint: + @golangci-lint run ./... --config ../.github/golangci.yaml --fix + .PHONY: test test: @go test -race ./... 
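For context on the hunks that follow: gofumpt is a stricter superset of gofmt, and the Go changes in this commit are the mechanical result of running it via golangci-lint. Below is a minimal illustrative sketch of the main rewrite it enforces; the function and names are made up for illustration and are not part of the patch.

package main

import "fmt"

// gofumpt rewrites a multi-line signature written as
//
//	func join(first string,
//		second string) string {
//
//		...
//
// into the form below: a trailing comma after the last parameter, the
// closing parenthesis on its own line, and no blank line after the
// opening brace — the same change applied to netboxdiodeplugin/client.go
// later in this commit.
func join(first string,
	second string,
) string {
	return first + " " + second
}

func main() {
	fmt.Println(join("diode", "reconciler"))
}
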
diff --git a/diode-server/cmd/ingester/main.go b/diode-server/cmd/ingester/main.go index 7c8e8087..07ad686a 100644 --- a/diode-server/cmd/ingester/main.go +++ b/diode-server/cmd/ingester/main.go @@ -27,7 +27,7 @@ func main() { os.Exit(1) } - //TODO: instantiate prometheus server + // TODO: instantiate prometheus server if err := s.Run(); err != nil { s.Logger().Error("server failure", "serverName", s.Name(), "error", err) diff --git a/diode-server/cmd/reconciler/main.go b/diode-server/cmd/reconciler/main.go index c2103e3a..a7397335 100644 --- a/diode-server/cmd/reconciler/main.go +++ b/diode-server/cmd/reconciler/main.go @@ -66,7 +66,7 @@ func main() { os.Exit(1) } - //TODO: instantiate prometheus server + // TODO: instantiate prometheus server if err := s.Run(); err != nil { s.Logger().Error("server failure", "serverName", s.Name(), "error", err) diff --git a/diode-server/netboxdiodeplugin/client.go b/diode-server/netboxdiodeplugin/client.go index 24aaa1c2..f866c058 100644 --- a/diode-server/netboxdiodeplugin/client.go +++ b/diode-server/netboxdiodeplugin/client.go @@ -373,8 +373,8 @@ func statusMapToStringHookFunc() mapstructure.DecodeHookFunc { return func( f reflect.Kind, t reflect.Kind, - data interface{}) (interface{}, error) { - + data interface{}, + ) (interface{}, error) { if f != reflect.Map { return data, nil } diff --git a/diode-server/netboxdiodeplugin/client_test.go b/diode-server/netboxdiodeplugin/client_test.go index cc33bfb4..5318c084 100644 --- a/diode-server/netboxdiodeplugin/client_test.go +++ b/diode-server/netboxdiodeplugin/client_test.go @@ -478,7 +478,8 @@ func TestRetrieveObjectState(t *testing.T) { params: netboxdiodeplugin.RetrieveObjectStateQueryParams{ ObjectType: netbox.DcimDeviceObjectType, ObjectID: 1, - Params: map[string]string{"q": "dev1", "attr_name": "site.id", "attr_value": "2"}}, + Params: map[string]string{"q": "dev1", "attr_name": "site.id", "attr_value": "2"}, + }, mockServerResponse: `{"object_type":"dcim.device","object_change_id":1,"object":{"id":1,"name":"dev1", "site": {"id": 2}}}`, apiKey: "foobar", response: &netboxdiodeplugin.ObjectState{ diff --git a/diode-server/reconciler/differ/differ_virt_test.go b/diode-server/reconciler/differ/differ_virt_test.go index 09e8278c..bd76e5b9 100644 --- a/diode-server/reconciler/differ/differ_virt_test.go +++ b/diode-server/reconciler/differ/differ_virt_test.go @@ -743,7 +743,6 @@ func TestVirtualizationPrepare(t *testing.T) { wantChangeSet: changeset.ChangeSet{ ChangeSetID: "5663a77e-9bad-4981-afe9-77d8a9f2b8b5", ChangeSet: []changeset.Change{ - { ChangeID: "5663a77e-9bad-4981-afe9-77d8a9f2b8b5", ChangeType: changeset.ChangeTypeCreate, diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index a0dcd5f3..428e6f0c 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -466,7 +466,7 @@ func (p *IngestionProcessor) writeIngestionLog(ctx context.Context, key string, } func normalizeIngestionLog(l []byte) []byte { - //replace ingestionTs string value as integer, see: https://github.com/golang/protobuf/issues/1414 + // replace ingestionTs string value as integer, see: https://github.com/golang/protobuf/issues/1414 re := regexp.MustCompile(`"ingestionTs":"(\d+)"`) return re.ReplaceAll(l, []byte(`"ingestionTs":$1`)) } diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go b/diode-server/reconciler/ingestion_processor_internal_test.go index b4d33210..93fad3a1 100644 --- 
a/diode-server/reconciler/ingestion_processor_internal_test.go +++ b/diode-server/reconciler/ingestion_processor_internal_test.go @@ -281,12 +281,14 @@ func TestHandleStreamMessage(t *testing.T) { if tt.reconcilerError { mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(&netboxdiodeplugin.ObjectState{}, errors.New("prepare error")) } else { - mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(&netboxdiodeplugin.ObjectState{ObjectType: "dcim.site", + mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(&netboxdiodeplugin.ObjectState{ + ObjectType: "dcim.site", ObjectID: 0, ObjectChangeID: 0, Object: &netbox.DcimSiteDataWrapper{ Site: nil, - }}, nil) + }, + }, nil) } mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.changeSetResponse, tt.changeSetError) if tt.entities[0].Entity != nil { From e3012cc3bb2f5dbaf7c81ff65fcd6d3c739cd7ad Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 16:45:09 +0000 Subject: [PATCH 16/26] chore: gofumpt Signed-off-by: Michal Fiedorowicz --- diode-server/dbstore/postgres/repositories.go | 1 - 1 file changed, 1 deletion(-) diff --git a/diode-server/dbstore/postgres/repositories.go b/diode-server/dbstore/postgres/repositories.go index fe66e9d3..4791755d 100644 --- a/diode-server/dbstore/postgres/repositories.go +++ b/diode-server/dbstore/postgres/repositories.go @@ -247,7 +247,6 @@ func (r *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet cha params.BranchID = pgtype.Text{String: *changeSet.BranchID, Valid: true} } cs, err := qtx.CreateChangeSet(ctx, params) - if err != nil { rollback() return nil, fmt.Errorf("failed to create change set: %w", err) From c12f1d1154de6006928846fadebd2940de103bd5 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 20:09:52 +0000 Subject: [PATCH 17/26] chore: remove redis migrations Signed-off-by: Michal Fiedorowicz --- diode-server/cmd/reconciler/main.go | 8 +- .../reconciler/ingestion_processor.go | 7 - diode-server/reconciler/migration.go | 164 ------------------ diode-server/reconciler/migration_test.go | 127 -------------- 4 files changed, 5 insertions(+), 301 deletions(-) delete mode 100644 diode-server/reconciler/migration.go delete mode 100644 diode-server/reconciler/migration_test.go diff --git a/diode-server/cmd/reconciler/main.go b/diode-server/cmd/reconciler/main.go index a7397335..1b24a442 100644 --- a/diode-server/cmd/reconciler/main.go +++ b/diode-server/cmd/reconciler/main.go @@ -29,9 +29,11 @@ func main() { dbURL := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", cfg.PostgresHost, cfg.PostgresPort, cfg.PostgresUser, cfg.PostgresPassword, cfg.PostgresDBName) - if err := runDBMigrations(ctx, s.Logger(), dbURL); err != nil { - s.Logger().Error("failed to run db migrations", "error", err) - os.Exit(1) + if cfg.MigrationEnabled { + if err := runDBMigrations(ctx, s.Logger(), dbURL); err != nil { + s.Logger().Error("failed to run db migrations", "error", err) + os.Exit(1) + } } dbPool, err := pgxpool.New(ctx, dbURL) diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index 428e6f0c..d13d679b 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -131,13 +131,6 @@ func (p *IngestionProcessor) Name() string { // Start starts the component func (p *IngestionProcessor) Start(ctx context.Context) error { p.logger.Info("starting component", "name", p.Name()) - - if 
p.Config.MigrationEnabled { - if err := migrate(ctx, p.logger, p.redisClient); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - } - return p.consumeIngestionStream(ctx, redisStreamID, redisConsumerGroup, fmt.Sprintf("%s-%s", redisConsumerGroup, p.hostname)) } diff --git a/diode-server/reconciler/migration.go b/diode-server/reconciler/migration.go deleted file mode 100644 index dbecbbbb..00000000 --- a/diode-server/reconciler/migration.go +++ /dev/null @@ -1,164 +0,0 @@ -package reconciler - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "log/slog" - "time" - - "github.com/redis/go-redis/v9" -) - -const ( - // RedisDiodeMigrationsKey is the key for the redis diode migrations - RedisDiodeMigrationsKey = "diode.migrations" -) - -// AppliedMigrations is a list of applied migrations -type AppliedMigrations []MigrationLog - -// MigrationLog is a log of a migration -type MigrationLog struct { - Name string `json:"name"` - ApplyTs int64 `json:"apply_ts"` -} - -type migration struct { - name string - run func(context.Context, *slog.Logger, RedisClient) error -} - -func migrate(ctx context.Context, logger *slog.Logger, redisClient RedisClient) error { - res, err := redisClient.Do(ctx, "JSON.GET", RedisDiodeMigrationsKey).Result() - if err != nil && !errors.Is(err, redis.Nil) { - return fmt.Errorf("failed to get JSON redis key %s: %v", RedisDiodeMigrationsKey, err) - } - - var appliedMigrations AppliedMigrations - if res != nil { - _ = json.Unmarshal([]byte(res.(string)), &appliedMigrations) - } - - logger.Debug("migrations", "appliedMigrations", appliedMigrations) - - if len(appliedMigrations) == 0 { - logger.Debug("no applied migrations found") - } - - migrations := []migration{ - { - name: "0001_initial", - run: initialMigration(), - }, - } - - for _, m := range migrations { - var found bool - for _, am := range appliedMigrations { - if am.Name == m.name { - found = true - break - } - } - - if !found { - logger.Debug("applying migration", "name", m.name) - - if err := m.run(ctx, logger, redisClient); err != nil { - return fmt.Errorf("failed to run migration %s: %v", m.name, err) - } - - logger.Debug("migration applied", "name", m.name) - - appliedMigrations = append(appliedMigrations, MigrationLog{ - Name: m.name, - ApplyTs: time.Now().UnixNano(), - }) - } - } - - appliedMigrationsJSON, err := json.Marshal(appliedMigrations) - if err != nil { - return fmt.Errorf("failed to marshal applied migrations %#v: %v", appliedMigrations, err) - } - - if _, err = redisClient.Do(ctx, "JSON.SET", RedisDiodeMigrationsKey, "$", appliedMigrationsJSON).Result(); err != nil { - return fmt.Errorf("failed to set JSON redis key %s with value %s: %v", RedisDiodeMigrationsKey, appliedMigrationsJSON, err) - } - - return nil -} - -func initialMigration() func(context.Context, *slog.Logger, RedisClient) error { - return func(ctx context.Context, logger *slog.Logger, redisClient RedisClient) error { - // Drop FT index ingest-entity due to schema change - logger.Debug("dropping index", "name", RedisIngestEntityIndexName) - _, err := redisClient.Do(ctx, "FT.DROPINDEX", RedisIngestEntityIndexName).Result() - if err != nil && !errors.Is(err, redis.Nil) && err.Error() != "Unknown Index name" { - return fmt.Errorf("failed to drop FT index %s: %v", RedisIngestEntityIndexName, err) - } - - // Delete all keys with prefix ingest-entity - logger.Debug("deleting keys with prefix", "prefix", "ingest-entity:*") - iter := redisClient.Scan(ctx, 0, "ingest-entity:*", 10).Iterator() - for iter.Next(ctx) { - 
if err := redisClient.Del(ctx, iter.Val()).Err(); err != nil { - return fmt.Errorf("failed to delete key %s: %v", iter.Val(), err) - } - } - if err := iter.Err(); err != nil { - return fmt.Errorf("failed to iterate over keys with prefix %s: %v", RedisIngestEntityIndexName, err) - } - - // Create new FT index ingest-entity - logger.Debug("creating index", "name", RedisIngestEntityIndexName) - queryArgs := []interface{}{ - "FT.CREATE", - RedisIngestEntityIndexName, - "ON", - "JSON", - "PREFIX", - "1", - "ingest-entity:", - "SCHEMA", - "$.id", - "AS", - "id", - "TEXT", - "SORTABLE", - "$.dataType", - "AS", - "data_type", - "TAG", - "$.state", - "AS", - "state", - "TAG", - "$.requestId", - "AS", - "request_id", - "TAG", - "$.producerAppName", - "AS", - "producer_app_name", - "TAG", - "$.producerAppVersion", - "AS", - "producer_app_version", - "TAG", - "$.ingestionTs", - "AS", - "ingestion_ts", - "NUMERIC", - "SORTABLE", - } - - if _, err = redisClient.Do(ctx, queryArgs...).Result(); err != nil { - return fmt.Errorf("failed to create FT index %s: %v", RedisIngestEntityIndexName, err) - } - - return nil - } -} diff --git a/diode-server/reconciler/migration_test.go b/diode-server/reconciler/migration_test.go deleted file mode 100644 index 06f1c91a..00000000 --- a/diode-server/reconciler/migration_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package reconciler - -import ( - "context" - "encoding/json" - "errors" - "log/slog" - "os" - "testing" - "time" - - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - mr "github.com/netboxlabs/diode/diode-server/reconciler/mocks" -) - -func TestMigrate(t *testing.T) { - tests := []struct { - name string - appliedMigrations []MigrationLog - err error - }{ - { - name: "no applied migrations found", - appliedMigrations: nil, - err: nil, - }, - { - name: "applied migrations found", - appliedMigrations: []MigrationLog{{Name: "0001_initial", ApplyTs: time.Now().Unix()}}, - err: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockRedisClient := new(mr.RedisClient) - - processor := &IngestionProcessor{ - redisClient: mockRedisClient, - logger: slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})), - Config: Config{ - AutoApplyChangesets: true, - ReconcilerRateLimiterRPS: 20, - ReconcilerRateLimiterBurst: 1, - }, - } - - ctx := context.Background() - - if tt.appliedMigrations == nil { - cmd := redis.NewCmd(ctx) - if tt.err != nil { - cmd.SetErr(errors.New("error")) - } else { - cmd.SetVal(nil) - cmd.SetErr(nil) - } - mockRedisClient.On("Do", context.Background(), "JSON.GET", RedisDiodeMigrationsKey).Return(cmd) - mockRedisClient.On("Do", context.Background(), "FT.DROPINDEX", RedisIngestEntityIndexName).Return(cmd) - scanResults := []string{"ingest-entity:1", "ingest-entity:2", "ingest-entity:3"} - mockRedisClient.On("Scan", context.Background(), uint64(0), "ingest-entity:*", int64(10)).Return(redis.NewScanCmdResult(scanResults, 0, nil)) - for _, key := range scanResults { - mockRedisClient.On("Del", context.Background(), key).Return(redis.NewIntResult(0, nil)) - } - mockRedisClient.On("Do", context.Background(), - "FT.CREATE", - RedisIngestEntityIndexName, - "ON", - "JSON", - "PREFIX", - "1", - "ingest-entity:", - "SCHEMA", - "$.id", - "AS", - "id", - "TEXT", - "SORTABLE", - "$.dataType", - "AS", - "data_type", - "TAG", - "$.state", - "AS", - "state", - "TAG", - "$.requestId", - "AS", - "request_id", - "TAG", - 
"$.producerAppName", - "AS", - "producer_app_name", - "TAG", - "$.producerAppVersion", - "AS", - "producer_app_version", - "TAG", - "$.ingestionTs", - "AS", - "ingestion_ts", - "NUMERIC", - "SORTABLE", - ).Return(cmd) - mockRedisClient.On("Do", context.Background(), "JSON.SET", RedisDiodeMigrationsKey, "$", mock.Anything).Return(cmd) - } else { - getAppliedMigrationsRespCmd := redis.NewCmd(ctx) - appliedMigrationsJSON, _ := json.Marshal(tt.appliedMigrations) - getAppliedMigrationsRespCmd.SetVal(string(appliedMigrationsJSON)) - getAppliedMigrationsRespCmd.SetErr(nil) - mockRedisClient.On("Do", context.Background(), "JSON.GET", RedisDiodeMigrationsKey).Return(getAppliedMigrationsRespCmd) - mockRedisClient.On("Do", context.Background(), "JSON.SET", RedisDiodeMigrationsKey, "$", appliedMigrationsJSON).Return(redis.NewCmd(ctx)) - } - - err := migrate(ctx, processor.logger, mockRedisClient) - if tt.err != nil { - assert.Error(t, err) - assert.Equal(t, tt.err, err) - } else { - assert.NoError(t, err) - } - }) - } -} From c2048dc6f7d3a91be8fade3bc6b2c2e30875c4ee Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 21:34:34 +0000 Subject: [PATCH 18/26] chore: remove writing ingestion log to redis Signed-off-by: Michal Fiedorowicz --- .../reconciler/ingestion_processor.go | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index d13d679b..1a123c72 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -6,7 +6,6 @@ import ( "fmt" "log/slog" "os" - "regexp" "strconv" "github.com/google/uuid" @@ -14,7 +13,6 @@ import ( "github.com/redis/go-redis/v9" "github.com/segmentio/ksuid" "golang.org/x/time/rate" - "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" "github.com/netboxlabs/diode/diode-server/gen/diode/v1/diodepb" @@ -443,27 +441,6 @@ func extractIngestionError(err error) *reconcilerpb.IngestionError { return ingestionErr } -func (p *IngestionProcessor) writeIngestionLog(ctx context.Context, key string, ingestionLog *reconcilerpb.IngestionLog) ([]byte, error) { - ingestionLogJSON, err := protojson.Marshal(ingestionLog) - if err != nil { - return nil, fmt.Errorf("failed to marshal JSON: %v", err) - } - - ingestionLogJSON = normalizeIngestionLog(ingestionLogJSON) - - if _, err := p.redisClient.Do(ctx, "JSON.SET", key, "$", ingestionLogJSON).Result(); err != nil { - return nil, fmt.Errorf("failed to set JSON redis key: %v", err) - } - - return ingestionLogJSON, nil -} - -func normalizeIngestionLog(l []byte) []byte { - // replace ingestionTs string value as integer, see: https://github.com/golang/protobuf/issues/1414 - re := regexp.MustCompile(`"ingestionTs":"(\d+)"`) - return re.ReplaceAll(l, []byte(`"ingestionTs":$1`)) -} - func extractObjectType(in *diodepb.Entity) (string, error) { switch in.GetEntity().(type) { case *diodepb.Entity_Device: From 5269c7692fd0de1304ad09820585ffd883c2e059 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 21:35:01 +0000 Subject: [PATCH 19/26] fix: adjust unit tests Signed-off-by: Michal Fiedorowicz --- .../ingestion_processor_internal_test.go | 84 --- .../reconciler/ingestion_processor_test.go | 7 + .../reconciler/server_internal_test.go | 653 ++++++++---------- 3 files changed, 281 insertions(+), 463 deletions(-) diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go 
b/diode-server/reconciler/ingestion_processor_internal_test.go index 93fad3a1..a273a936 100644 --- a/diode-server/reconciler/ingestion_processor_internal_test.go +++ b/diode-server/reconciler/ingestion_processor_internal_test.go @@ -31,90 +31,6 @@ import ( func int32Ptr(i int32) *int32 { return &i } func strPtr(s string) *string { return &s } -func TestWriteIngestionLog(t *testing.T) { - tests := []struct { - name string - ingestionLog *reconcilerpb.IngestionLog - hasError bool - hasMock bool - }{ - { - name: "write successful", - ingestionLog: &reconcilerpb.IngestionLog{ - RequestId: "cfa0f129-125c-440d-9e41-e87583cd7d89", - DataType: "dcim.site", - Entity: &diodepb.Entity{ - Entity: &diodepb.Entity_Site{ - Site: &diodepb.Site{ - Name: "Site A", - }, - }, - }, - }, - hasError: false, - hasMock: true, - }, - { - name: "redis error", - ingestionLog: &reconcilerpb.IngestionLog{ - RequestId: "cfa0f129-125c-440d-9e41-e87583cd7d89", - DataType: "dcim.site", - Entity: &diodepb.Entity{ - Entity: &diodepb.Entity_Site{ - Site: &diodepb.Site{ - Name: "Site A", - }, - }, - }, - IngestionTs: time.Now().UnixNano(), - }, - hasError: true, - hasMock: true, - }, - } - for i := range tests { - tt := tests[i] - - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - key := "test-key" - - // Create a mock Redis client - mockRedisClient := new(mr.RedisClient) - p := &IngestionProcessor{ - redisClient: mockRedisClient, - logger: slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})), - Config: Config{ - AutoApplyChangesets: true, - ReconcilerRateLimiterRPS: 20, - ReconcilerRateLimiterBurst: 1, - }, - } - - // Set up the mock expectation - cmd := redis.NewCmd(ctx) - if tt.hasError { - cmd.SetErr(errors.New("error")) - } - mockRedisClient.On("Do", ctx, "JSON.SET", "test-key", "$", mock.Anything). 
- Return(cmd) - - // Call the method - _, err := p.writeIngestionLog(ctx, key, tt.ingestionLog) - - if tt.hasError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - // Assert the expectations - if tt.hasMock { - mockRedisClient.AssertExpectations(t) - } - }) - } -} - func TestHandleStreamMessage(t *testing.T) { tests := []struct { name string diff --git a/diode-server/reconciler/ingestion_processor_test.go b/diode-server/reconciler/ingestion_processor_test.go index 15568019..833526bf 100644 --- a/diode-server/reconciler/ingestion_processor_test.go +++ b/diode-server/reconciler/ingestion_processor_test.go @@ -2,6 +2,7 @@ package reconciler_test import ( "context" + "github.com/stretchr/testify/mock" "log/slog" "os" "testing" @@ -18,6 +19,8 @@ import ( "github.com/netboxlabs/diode/diode-server/reconciler/mocks" ) +func int32Ptr(i int32) *int32 { return &i } + func TestNewIngestionProcessor(t *testing.T) { ctx := context.Background() s := miniredis.RunT(t) @@ -222,6 +225,9 @@ func TestIngestionProcessorStart(t *testing.T) { // Wait server time.Sleep(50 * time.Millisecond) + ingestionLogRepoMock.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) + ingestionLogRepoMock.On("UpdateIngestionLogStateWithError", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + redisClient := redis.NewClient(&redis.Options{ Addr: s.Addr(), DB: 1, @@ -244,4 +250,5 @@ func TestIngestionProcessorStart(t *testing.T) { // Stop the processor err = processor.Stop() assert.NoError(t, err) + ingestionLogRepoMock.AssertExpectations(t) } diff --git a/diode-server/reconciler/server_internal_test.go b/diode-server/reconciler/server_internal_test.go index d2df1165..7c3a697e 100644 --- a/diode-server/reconciler/server_internal_test.go +++ b/diode-server/reconciler/server_internal_test.go @@ -7,7 +7,6 @@ import ( "os" "testing" - "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -17,25 +16,6 @@ import ( mr "github.com/netboxlabs/diode/diode-server/reconciler/mocks" ) -// MockPipeliner is a mock implementation of the redis Pipeliner interface. -type MockPipeliner struct { - mock.Mock - redis.Pipeliner -} - -// Do is a mock of Pipeliner's Do method. -func (m *MockPipeliner) Do(ctx context.Context, args ...interface{}) *redis.Cmd { - calledArgs := m.Called(ctx, args) - return calledArgs.Get(0).(*redis.Cmd) -} - -// Exec is a mock of Pipeliner's Exec method. 
-func (m *MockPipeliner) Exec(ctx context.Context) ([]redis.Cmder, error) { - args := m.Called(ctx) - cmds := make([]redis.Cmder, 0) - return cmds, args.Error(0) -} - func TestIsAuthenticated(t *testing.T) { tests := []struct { name string @@ -128,42 +108,66 @@ func TestIsAuthenticated(t *testing.T) { func TestRetrieveLogs(t *testing.T) { tests := []struct { - name string - in reconcilerpb.RetrieveIngestionLogsRequest - result interface{} - response *reconcilerpb.RetrieveIngestionLogsResponse - queryFilter string - queryLimitOffset int32 - failCmd bool - hasError bool + name string + in reconcilerpb.RetrieveIngestionLogsRequest + ingestionLogs []*reconcilerpb.IngestionLog + response *reconcilerpb.RetrieveIngestionLogsResponse + hasError bool }{ { name: "valid request", in: reconcilerpb.RetrieveIngestionLogsRequest{}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_RECONCILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.device","entity":{"device":{"name":"Conference_Room_AP_02","deviceType":{"model":"Cisco Aironet 3802","manufacturer":{"name":"Cisco"}},"role":{"name":"Wireless_AP"},"serial":"PQR456789012","site":{"name":"HQ"}}},"id":"2mC8GVBGFg6NyLsQxuS4IYMB6FI","ingestionTs":1725552654541975975,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"bc1052e3-656a-42f0-b364-27b385e02a0c","sdkName":"diode-sdk-python","sdkVersion":"0.0.1","state":2}`, - "ingestion_ts": "1725552654541976064", + Error: nil, + }, + { + Id: "2mC8GVBGFg6NyLsQxuS4IYMB6FI", + DataType: "dcim.device", + State: reconcilerpb.State_RECONCILED, + RequestId: "bc1052e3-656a-42f0-b364-27b385e02a0c", + IngestionTs: 1725552654541975975, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-python", + SdkVersion: "0.0.1", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Device{ + Device: &diodepb.Device{ + Name: "Conference_Room_AP_02", + DeviceType: &diodepb.DeviceType{ + Model: "Cisco Aironet 3802", + Manufacturer: &diodepb.Manufacturer{ + Name: "Cisco", + }, + }, + Role: &diodepb.Role{Name: "Wireless_AP"}, + Serial: strPtr("PQR456789012"), + Site: &diodepb.Site{Name: "HQ"}, + }, }, - "id": "ingest-entity:dcim.device-1725552654541975975-2mC8GVBGFg6NyLsQxuS4IYMB6FI", - "values": []interface{}{}, }, }, - "total_results": 2, - "warning": []interface{}{}, - }), + }, response: 
&reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -220,30 +224,45 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "F/Jk/zc08gA=", }, - queryFilter: "*", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "request with reconciliation error", in: reconcilerpb.RetrieveIngestionLogsRequest{}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"ipam.ipaddress","entity":{"ip_address":{"address":"192.168.1.1","interface":null,"description":"Vendor: HUAWEI TECHNOLOGIES"}},"error":{"message":"failed to apply change set","code":400,"details":{"change_set_id":"6304c706-f955-4bcb-a1cc-514293d53d07","result":"failed","errors":[{"error":"address: Duplicate IP address found in global table: 192.168.1.1/32","change_id":"ff9e29b2-7a64-40ba-99a8-21f44768f60a"}]}},"id":"2mC8KCvHNasrYlfxSASk9hatfYC","ingestionTs":1725046967777525928,"producerAppName":"example-app","producerAppVersion":"0.1.0","request_id":"e03c4892-5b7e-4c39-b5e6-0225a264ab8b","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":3}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + DataType: "ipam.ipaddress", + State: reconcilerpb.State_FAILED, + RequestId: "e03c4892-5b7e-4c39-b5e6-0225a264ab8b", + IngestionTs: 1725046967777525928, + ProducerAppName: "example-app", + ProducerAppVersion: "0.1.0", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_IpAddress{ + IpAddress: &diodepb.IPAddress{ + Address: "192.168.1.1", + Description: strPtr("Vendor: HUAWEI TECHNOLOGIES"), + }, + }, + }, + Error: &reconcilerpb.IngestionError{ + Message: "failed to apply change set", + Code: 400, + Details: &reconcilerpb.IngestionError_Details{ + ChangeSetId: "6304c706-f955-4bcb-a1cc-514293d53d07", + Result: "failed", + Errors: []*reconcilerpb.IngestionError_Details_Error{ + { + ChangeId: "ff9e29b2-7a64-40ba-99a8-21f44768f60a", + Error: "address: Duplicate IP address found in global table: 192.168.1.1/32", + }, + }, }, - "id": "ingest-entity:ipam.ipaddress-1725046967777525928-2mC8KCvHNasrYlfxSASk9hatfYC", - "values": []interface{}{}, }, }, - "total_results": 2, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -280,34 +299,38 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - Total: 2, + Total: 1, }, NextPageToken: "AAAFlw==", }, - queryFilter: "*", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by new state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_QUEUED.Enum()}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mC8NYwfIKM5rFDibDBuytASSOi","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":1}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + DataType: 
"dcim.interface", + State: reconcilerpb.State_QUEUED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mC8NYwfIKM5rFDibDBuytASSOi", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -337,30 +360,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@state:{QUEUED}", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by reconciled state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_RECONCILED.Enum()}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_RECONCILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -391,30 +419,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@state:{RECONCILED}", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by failed state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_FAILED.Enum()}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":3}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_FAILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", 
+ ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -445,30 +478,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@state:{FAILED}", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by no changes state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_NO_CHANGES.Enum()}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":4}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_NO_CHANGES, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -499,30 +537,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@state:{NO_CHANGES}", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by data type", in: reconcilerpb.RetrieveIngestionLogsRequest{DataType: "dcim.interface"}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_RECONCILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: 
&diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -553,30 +596,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@data_type:{dcim\\.interface}", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "filter by timestamp", in: reconcilerpb.RetrieveIngestionLogsRequest{IngestionTsStart: 1725552914392208639}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_RECONCILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", + }, + Name: "Gig 2", + }, }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, }, + Error: nil, }, - "total_results": 1, - "warning": []interface{}{}, - }), + }, response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -607,108 +655,35 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "@ingestion_ts:[1725552914392208639 inf]", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { name: "pagination check", in: reconcilerpb.RetrieveIngestionLogsRequest{PageToken: "AAAFlg=="}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", - }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, - }, - }, - "total_results": 1, - "warning": []interface{}{}, - }), - response: &reconcilerpb.RetrieveIngestionLogsResponse{ - Logs: []*reconcilerpb.IngestionLog{ - { - Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", - DataType: "dcim.interface", - State: reconcilerpb.State_RECONCILED, - RequestId: "req-id", - IngestionTs: 1725552914392208722, - ProducerAppName: "diode-agent", - ProducerAppVersion: "0.0.1", - SdkName: "diode-sdk-go", - 
SdkVersion: "0.1.0", - Entity: &diodepb.Entity{ - Entity: &diodepb.Entity_Interface{ - Interface: &diodepb.Interface{ - Device: &diodepb.Device{ - Name: "my_dev", - }, - Name: "Gig 2", + ingestionLogs: []*reconcilerpb.IngestionLog{ + { + Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", + DataType: "dcim.interface", + State: reconcilerpb.State_RECONCILED, + RequestId: "req-id", + IngestionTs: 1725552914392208722, + ProducerAppName: "diode-agent", + ProducerAppVersion: "0.0.1", + SdkName: "diode-sdk-go", + SdkVersion: "0.1.0", + Entity: &diodepb.Entity{ + Entity: &diodepb.Entity_Interface{ + Interface: &diodepb.Interface{ + Device: &diodepb.Device{ + Name: "my_dev", }, + Name: "Gig 2", }, }, - Error: nil, }, + Error: nil, }, - Metrics: &reconcilerpb.IngestionMetrics{ - Total: 1, - }, - NextPageToken: "AAAFlw==", }, - queryFilter: "*", - queryLimitOffset: 1430, - failCmd: false, - hasError: false, - }, - { - name: "error parsing extra attributes", - in: reconcilerpb.RetrieveIngestionLogsRequest{PageToken: "AAAFlg=="}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `"extra":is":"invalid"`, - "ingestion_ts": "1725552914392208640", - }, - "id": "ingest-entity:dcim.interface", - "values": []interface{}{}, - }, - }, - "total_results": 1, - "warning": []interface{}{}, - }), - queryFilter: "*", - queryLimitOffset: 1430, - failCmd: false, - hasError: true, - }, - { - name: "error decoding page token", - in: reconcilerpb.RetrieveIngestionLogsRequest{PageToken: "invalid"}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": "1725552914392208640", - }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, - }, - }, - "total_results": 1, - "warning": []interface{}{}, - }), response: &reconcilerpb.RetrieveIngestionLogsResponse{ Logs: []*reconcilerpb.IngestionLog{ { @@ -739,40 +714,12 @@ func TestRetrieveLogs(t *testing.T) { }, NextPageToken: "AAAFlw==", }, - queryFilter: "*", - queryLimitOffset: 0, - failCmd: false, - hasError: false, + hasError: false, }, { - name: "error parsing response json", - in: reconcilerpb.RetrieveIngestionLogsRequest{}, - result: interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{ - "extra_attributes": map[interface{}]interface{}{ - "$": `{"dataType":"dcim.interface","entity":{"interface":{"device":{"name":"my_dev"},"name":"Gig 2"}},"id":"2mAT7vZ38H4ttI0i5dBebwJbSnZ","ingestionTs":1725552914392208722,"producerAppName":"diode-agent","producerAppVersion":"0.0.1","request_id":"req-id","sdkName":"diode-sdk-go","sdkVersion":"0.1.0","state":2}`, - "ingestion_ts": 123, - }, - "id": "ingest-entity:dcim.interface-1725552914392208722-2mAT7vZ38H4ttI0i5dBebwJbSnZ", - "values": []interface{}{}, - }, - }, - "total_results": 1, - "warning": []interface{}{}, - }), - queryFilter: "*", - 
failCmd: false, - hasError: true, - }, - { - name: "redis error", - in: reconcilerpb.RetrieveIngestionLogsRequest{}, - queryFilter: "*", - failCmd: true, - hasError: true, + name: "error decoding page token", + in: reconcilerpb.RetrieveIngestionLogsRequest{PageToken: "invalid"}, + hasError: true, }, } for i := range tests { @@ -782,18 +729,22 @@ func TestRetrieveLogs(t *testing.T) { logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) mockRedisClient := new(mr.RedisClient) + mockIngestionLogRepo := new(mr.IngestionLogRepository) + mockChangeSetRepo := new(mr.ChangeSetRepository) + server := &Server{ + redisClient: mockRedisClient, + logger: logger, + ingestionLogRepository: mockIngestionLogRepo, + changeSetRepository: mockChangeSetRepo, + } - cmd := redis.NewCmd(ctx) - cmd.SetVal(tt.result) - if tt.failCmd { - cmd.SetErr(errors.New("error")) + var retrieveErr error + if tt.hasError { + retrieveErr = errors.New("failed to retrieve ingestion logs") } - mockRedisClient.On("Do", ctx, "FT.SEARCH", "ingest-entity", tt.queryFilter, "SORTBY", "id", "DESC", "LIMIT", tt.queryLimitOffset, int32(100)). - Return(cmd) - server := &Server{ - redisClient: mockRedisClient, - logger: logger, + if !tt.hasError { + mockIngestionLogRepo.On("RetrieveIngestionLogs", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.ingestionLogs, retrieveErr) } response, err := server.RetrieveIngestionLogs(ctx, &tt.in) @@ -815,6 +766,7 @@ func TestRetrieveLogs(t *testing.T) { } require.Equal(t, tt.response.Metrics, response.Metrics) } + mockIngestionLogRepo.AssertExpectations(t) }) } } @@ -823,34 +775,27 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { tests := []struct { name string expectedTotal interface{} - cmdError bool - execError error hasError bool errorMsg string }{ { name: "valid request", expectedTotal: int64(10), - cmdError: false, hasError: false, }, { name: "query error", - cmdError: true, hasError: true, errorMsg: "failed to retrieve ingestion logs: cmd error", }, { - name: "exec error", - cmdError: false, - execError: errors.New("exec error"), - hasError: true, - errorMsg: "failed to retrieve ingestion logs: exec error", + name: "exec error", + hasError: true, + errorMsg: "failed to retrieve ingestion logs: exec error", }, { name: "error getting total results", expectedTotal: nil, - cmdError: false, hasError: true, errorMsg: "failed to retrieve ingestion logs: failed to parse total_results", }, @@ -870,82 +815,31 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { } mockRedisClient := new(mr.RedisClient) - - mockPipeliner := new(MockPipeliner) - - cmdTotal := redis.NewCmd(ctx) - if tt.cmdError { - cmdTotal.SetErr(errors.New("cmd error")) + mockIngestionLogRepo := new(mr.IngestionLogRepository) + mockChangeSetRepo := new(mr.ChangeSetRepository) + server := &Server{ + redisClient: mockRedisClient, + logger: logger, + ingestionLogRepository: mockIngestionLogRepo, + changeSetRepository: mockChangeSetRepo, } - cmdTotal.SetVal(interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{}, - }, - "total_results": tt.expectedTotal, - "warning": []interface{}{}, - })) - mockPipeliner.On("Do", ctx, []interface{}{"FT.SEARCH", "ingest-entity", "*", "LIMIT", 0, 0}).Return(cmdTotal) - - cmdNew := redis.NewCmd(ctx) - cmdNew.SetVal(interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": 
"STRING", - "results": []interface{}{ - map[interface{}]interface{}{}, - }, - "total_results": int64(expected.Queued), - "warning": []interface{}{}, - })) - mockPipeliner.On("Do", ctx, []interface{}{"FT.SEARCH", "ingest-entity", "@state:{QUEUED}", "LIMIT", 0, 0}).Return(cmdNew) - cmdReconciled := redis.NewCmd(ctx) - cmdReconciled.SetVal(interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{}, - }, - "total_results": int64(expected.Reconciled), - "warning": []interface{}{}, - })) - mockPipeliner.On("Do", ctx, []interface{}{"FT.SEARCH", "ingest-entity", "@state:{RECONCILED}", "LIMIT", 0, 0}).Return(cmdReconciled) - - cmdFailed := redis.NewCmd(ctx) - cmdFailed.SetVal(interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{}, - }, - "total_results": int64(expected.Failed), - "warning": []interface{}{}, - })) - mockPipeliner.On("Do", ctx, []interface{}{"FT.SEARCH", "ingest-entity", "@state:{FAILED}", "LIMIT", 0, 0}).Return(cmdFailed) + ingestionLogStateMetricsMap := map[reconcilerpb.State]int32{ + reconcilerpb.State_QUEUED: expected.Queued, + reconcilerpb.State_RECONCILED: expected.Reconciled, + reconcilerpb.State_FAILED: expected.Failed, + reconcilerpb.State_NO_CHANGES: expected.NoChanges, + } - cmdNoChanges := redis.NewCmd(ctx) - cmdNoChanges.SetVal(interface{}(map[interface{}]interface{}{ - "attributes": []interface{}{}, - "format": "STRING", - "results": []interface{}{ - map[interface{}]interface{}{}, - }, - "total_results": int64(expected.NoChanges), - "warning": []interface{}{}, - })) - mockPipeliner.On("Do", ctx, []interface{}{"FT.SEARCH", "ingest-entity", "@state:{NO_CHANGES}", "LIMIT", 0, 0}).Return(cmdNoChanges) + var countErr error + if tt.hasError { + countErr = errors.New(tt.errorMsg) + } - mockPipeliner.On("Exec", ctx).Return(tt.execError) - mockRedisClient.On("Pipeline").Return(mockPipeliner) + mockIngestionLogRepo.On("CountIngestionLogsPerState", ctx).Return(ingestionLogStateMetricsMap, countErr) in := reconcilerpb.RetrieveIngestionLogsRequest{OnlyMetrics: true} - server := &Server{ - redisClient: mockRedisClient, - logger: logger, - } - response, err := server.RetrieveIngestionLogs(ctx, &in) if tt.hasError { require.Error(t, err) @@ -954,6 +848,7 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { require.NoError(t, err) require.Equal(t, expected, response.Metrics) } + mockIngestionLogRepo.AssertExpectations(t) }) } } From 394698b7fba2f019c8adfd39cc8577c98e1f93bc Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Tue, 17 Dec 2024 21:42:55 +0000 Subject: [PATCH 20/26] chore: gofumpt Signed-off-by: Michal Fiedorowicz --- diode-server/reconciler/ingestion_processor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diode-server/reconciler/ingestion_processor_test.go b/diode-server/reconciler/ingestion_processor_test.go index 833526bf..b0c5cae2 100644 --- a/diode-server/reconciler/ingestion_processor_test.go +++ b/diode-server/reconciler/ingestion_processor_test.go @@ -2,7 +2,6 @@ package reconciler_test import ( "context" - "github.com/stretchr/testify/mock" "log/slog" "os" "testing" @@ -11,6 +10,7 @@ import ( "github.com/alicebob/miniredis/v2" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" From 
02f13148e151d53953287f21687eacbc84d60a5d Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Thu, 19 Dec 2024 17:27:20 +0000 Subject: [PATCH 21/26] fix: retrieving ingestion logs with or without change sets Signed-off-by: Michal Fiedorowicz --- .../postgres/migrations/00002_change_sets.sql | 18 +++++- .../postgres/queries/ingestion_logs.sql | 8 +-- diode-server/dbstore/postgres/repositories.go | 64 ++++++++++--------- .../dbstore/postgres/ingestion_logs.sql.go | 48 +++++++------- diode-server/gen/dbstore/postgres/types.go | 37 +++++++---- 5 files changed, 100 insertions(+), 75 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index d40c3a0b..c9f0c0a5 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -42,7 +42,16 @@ ALTER TABLE change_sets ALTER TABLE changes ADD CONSTRAINT fk_changes_change_sets FOREIGN KEY (change_set_id) REFERENCES change_sets (id); -CREATE VIEW changes_view AS +-- Create a view to join ingestion_logs with change_sets +CREATE VIEW v_ingestion_logs_change_sets AS +( +SELECT change_sets.* +FROM ingestion_logs + LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id + ); + +-- Create a view to join change_sets with changes +CREATE VIEW v_change_sets_changes AS ( SELECT changes.* FROM change_sets @@ -51,8 +60,11 @@ FROM change_sets -- +goose Down --- Drop the changes_view view -DROP VIEW IF EXISTS changes_view; +-- Drop the v_ingestion_logs_change_sets view +DROP VIEW IF EXISTS v_ingestion_logs_change_sets; + +-- Drop the v_change_sets_with_changes view +DROP VIEW IF EXISTS v_change_sets_with_changes; -- Drop the changes table DROP TABLE changes; diff --git a/diode-server/dbstore/postgres/queries/ingestion_logs.sql b/diode-server/dbstore/postgres/queries/ingestion_logs.sql index d5966f73..b114cb26 100644 --- a/diode-server/dbstore/postgres/queries/ingestion_logs.sql +++ b/diode-server/dbstore/postgres/queries/ingestion_logs.sql @@ -27,13 +27,13 @@ ORDER BY id DESC LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); -- name: RetrieveIngestionLogsWithChangeSets :many -SELECT sqlc.embed(ingestion_logs), sqlc.embed(change_sets), sqlc.embed(changes_view) +SELECT sqlc.embed(ingestion_logs), sqlc.embed(v_ingestion_logs_change_sets), sqlc.embed(v_change_sets_changes) FROM ingestion_logs - LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id - LEFT JOIN changes_view on change_sets.id = changes_view.change_set_id + LEFT JOIN v_ingestion_logs_change_sets on ingestion_logs.id = v_ingestion_logs_change_sets.ingestion_log_id + LEFT JOIN v_change_sets_changes on v_ingestion_logs_change_sets.id = v_change_sets_changes.change_set_id WHERE (ingestion_logs.state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) AND (ingestion_logs.data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) AND (ingestion_logs.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) AND (ingestion_logs.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) -ORDER BY ingestion_logs.id DESC, changes_view.sequence_number ASC +ORDER BY ingestion_logs.id DESC, v_change_sets_changes.sequence_number ASC LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); diff --git a/diode-server/dbstore/postgres/repositories.go b/diode-server/dbstore/postgres/repositories.go index 4791755d..68bfc22a 100644 --- 
a/diode-server/dbstore/postgres/repositories.go +++ b/diode-server/dbstore/postgres/repositories.go @@ -113,41 +113,45 @@ func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filt changeSetsMap := make(map[int32]*changeset.ChangeSet) for _, row := range rawIngestionLogs { + if !row.VIngestionLogsChangeSet.ID.Valid || !row.VChangeSetsChange.ChangeUuid.Valid { + continue + } + var changeData map[string]any - if row.ChangesView.Data != nil { - if err := json.Unmarshal(row.ChangesView.Data, &changeData); err != nil { + if row.VChangeSetsChange.Data != nil { + if err := json.Unmarshal(row.VChangeSetsChange.Data, &changeData); err != nil { return nil, fmt.Errorf("failed to unmarshal change data: %w", err) } } change := changeset.Change{ - ChangeID: row.ChangesView.ChangeUuid.String, - ChangeType: row.ChangesView.ChangeType.String, - ObjectType: row.ChangesView.ObjectType.String, + ChangeID: row.VChangeSetsChange.ChangeUuid.String, + ChangeType: row.VChangeSetsChange.ChangeType.String, + ObjectType: row.VChangeSetsChange.ObjectType.String, Data: changeData, } - objID := int(row.ChangesView.ObjectID.Int32) - if row.ChangesView.ObjectID.Valid { + objID := int(row.VChangeSetsChange.ObjectID.Int32) + if row.VChangeSetsChange.ObjectID.Valid { change.ObjectID = &objID } - objVersion := int(row.ChangesView.ObjectVersion.Int32) - if row.ChangesView.ObjectVersion.Valid { + objVersion := int(row.VChangeSetsChange.ObjectVersion.Int32) + if row.VChangeSetsChange.ObjectVersion.Valid { change.ObjectVersion = &objVersion } - changeSet, ok := changeSetsMap[row.ChangeSet.ID] + changeSet, ok := changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] if !ok { changes := make([]changeset.Change, 0) changes = append(changes, change) changeSet = &changeset.ChangeSet{ - ChangeSetID: row.ChangeSet.ChangeSetUuid, + ChangeSetID: row.VIngestionLogsChangeSet.ChangeSetUuid.String, ChangeSet: changes, } - if row.ChangeSet.BranchID.Valid { - changeSet.BranchID = &row.ChangeSet.BranchID.String + if row.VIngestionLogsChangeSet.BranchID.Valid { + changeSet.BranchID = &row.VIngestionLogsChangeSet.BranchID.String } - changeSetsMap[row.ChangeSet.ID] = changeSet + changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] = changeSet continue } @@ -173,19 +177,6 @@ func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filt } } - changeSet, ok := changeSetsMap[row.ChangeSet.ID] - if !ok { - return nil, fmt.Errorf("change set not found for ingestion log: %d", row.IngestionLog.ID) - } - var compressedChangeSet []byte - if len(changeSet.ChangeSet) > 0 { - b, err := changeset.CompressChangeSet(changeSet) - if err != nil { - return nil, fmt.Errorf("failed to compress change set: %w", err) - } - compressedChangeSet = b - } - log := &reconcilerpb.IngestionLog{ Id: ingestionLog.IngestionLogUuid, DataType: ingestionLog.DataType.String, @@ -198,10 +189,23 @@ func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filt SdkVersion: ingestionLog.SdkVersion.String, Entity: entity, Error: &ingestionErr, - ChangeSet: &reconcilerpb.ChangeSet{ - Id: row.ChangeSet.ChangeSetUuid, + } + + changeSet, ok := changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] + if ok { + var compressedChangeSet []byte + if len(changeSet.ChangeSet) > 0 { + b, err := changeset.CompressChangeSet(changeSet) + if err != nil { + return nil, fmt.Errorf("failed to compress change set: %w", err) + } + compressedChangeSet = b + } + + log.ChangeSet = &reconcilerpb.ChangeSet{ + Id: row.VIngestionLogsChangeSet.ChangeSetUuid.String, 
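+			// compressedChangeSet stays nil when the change set carries no changes,
+			// so ChangeSet.Data is only populated for non-empty change sets.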
Data: compressedChangeSet, - }, + } } ingestionLogsMap[ingestionLog.ID] = log diff --git a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go index e7fed2d1..e9147533 100644 --- a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go +++ b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go @@ -162,15 +162,15 @@ func (q *Queries) RetrieveIngestionLogs(ctx context.Context, arg RetrieveIngesti } const retrieveIngestionLogsWithChangeSets = `-- name: RetrieveIngestionLogsWithChangeSets :many -SELECT ingestion_logs.id, ingestion_logs.ingestion_log_uuid, ingestion_logs.data_type, ingestion_logs.state, ingestion_logs.request_id, ingestion_logs.ingestion_ts, ingestion_logs.producer_app_name, ingestion_logs.producer_app_version, ingestion_logs.sdk_name, ingestion_logs.sdk_version, ingestion_logs.entity, ingestion_logs.error, ingestion_logs.source_metadata, ingestion_logs.created_at, ingestion_logs.updated_at, change_sets.id, change_sets.change_set_uuid, change_sets.ingestion_log_id, change_sets.branch_id, change_sets.created_at, change_sets.updated_at, changes_view.id, changes_view.change_uuid, changes_view.change_set_id, changes_view.change_type, changes_view.object_type, changes_view.object_id, changes_view.object_version, changes_view.data, changes_view.sequence_number, changes_view.created_at, changes_view.updated_at +SELECT ingestion_logs.id, ingestion_logs.ingestion_log_uuid, ingestion_logs.data_type, ingestion_logs.state, ingestion_logs.request_id, ingestion_logs.ingestion_ts, ingestion_logs.producer_app_name, ingestion_logs.producer_app_version, ingestion_logs.sdk_name, ingestion_logs.sdk_version, ingestion_logs.entity, ingestion_logs.error, ingestion_logs.source_metadata, ingestion_logs.created_at, ingestion_logs.updated_at, v_ingestion_logs_change_sets.id, v_ingestion_logs_change_sets.change_set_uuid, v_ingestion_logs_change_sets.ingestion_log_id, v_ingestion_logs_change_sets.branch_id, v_ingestion_logs_change_sets.created_at, v_ingestion_logs_change_sets.updated_at, v_change_sets_changes.id, v_change_sets_changes.change_uuid, v_change_sets_changes.change_set_id, v_change_sets_changes.change_type, v_change_sets_changes.object_type, v_change_sets_changes.object_id, v_change_sets_changes.object_version, v_change_sets_changes.data, v_change_sets_changes.sequence_number, v_change_sets_changes.created_at, v_change_sets_changes.updated_at FROM ingestion_logs - LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id - LEFT JOIN changes_view on change_sets.id = changes_view.change_set_id + LEFT JOIN v_ingestion_logs_change_sets on ingestion_logs.id = v_ingestion_logs_change_sets.ingestion_log_id + LEFT JOIN v_change_sets_changes on v_ingestion_logs_change_sets.id = v_change_sets_changes.change_set_id WHERE (ingestion_logs.state = $1 OR $1 IS NULL) AND (ingestion_logs.data_type = $2 OR $2 IS NULL) AND (ingestion_logs.ingestion_ts >= $3 OR $3 IS NULL) AND (ingestion_logs.ingestion_ts <= $4 OR $4 IS NULL) -ORDER BY ingestion_logs.id DESC, changes_view.sequence_number ASC +ORDER BY ingestion_logs.id DESC, v_change_sets_changes.sequence_number ASC LIMIT $6 OFFSET $5 ` @@ -184,9 +184,9 @@ type RetrieveIngestionLogsWithChangeSetsParams struct { } type RetrieveIngestionLogsWithChangeSetsRow struct { - IngestionLog IngestionLog `json:"ingestion_log"` - ChangeSet ChangeSet `json:"change_set"` - ChangesView ChangesView `json:"changes_view"` + IngestionLog IngestionLog `json:"ingestion_log"` + VIngestionLogsChangeSet 
VIngestionLogsChangeSet `json:"vingestion_logs_change_set"` + VChangeSetsChange VChangeSetsChange `json:"vchange_sets_change"` } func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg RetrieveIngestionLogsWithChangeSetsParams) ([]RetrieveIngestionLogsWithChangeSetsRow, error) { @@ -221,23 +221,23 @@ func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg R &i.IngestionLog.SourceMetadata, &i.IngestionLog.CreatedAt, &i.IngestionLog.UpdatedAt, - &i.ChangeSet.ID, - &i.ChangeSet.ChangeSetUuid, - &i.ChangeSet.IngestionLogID, - &i.ChangeSet.BranchID, - &i.ChangeSet.CreatedAt, - &i.ChangeSet.UpdatedAt, - &i.ChangesView.ID, - &i.ChangesView.ChangeUuid, - &i.ChangesView.ChangeSetID, - &i.ChangesView.ChangeType, - &i.ChangesView.ObjectType, - &i.ChangesView.ObjectID, - &i.ChangesView.ObjectVersion, - &i.ChangesView.Data, - &i.ChangesView.SequenceNumber, - &i.ChangesView.CreatedAt, - &i.ChangesView.UpdatedAt, + &i.VIngestionLogsChangeSet.ID, + &i.VIngestionLogsChangeSet.ChangeSetUuid, + &i.VIngestionLogsChangeSet.IngestionLogID, + &i.VIngestionLogsChangeSet.BranchID, + &i.VIngestionLogsChangeSet.CreatedAt, + &i.VIngestionLogsChangeSet.UpdatedAt, + &i.VChangeSetsChange.ID, + &i.VChangeSetsChange.ChangeUuid, + &i.VChangeSetsChange.ChangeSetID, + &i.VChangeSetsChange.ChangeType, + &i.VChangeSetsChange.ObjectType, + &i.VChangeSetsChange.ObjectID, + &i.VChangeSetsChange.ObjectVersion, + &i.VChangeSetsChange.Data, + &i.VChangeSetsChange.SequenceNumber, + &i.VChangeSetsChange.CreatedAt, + &i.VChangeSetsChange.UpdatedAt, ); err != nil { return nil, err } diff --git a/diode-server/gen/dbstore/postgres/types.go b/diode-server/gen/dbstore/postgres/types.go index 78b88c63..1ad8c25d 100644 --- a/diode-server/gen/dbstore/postgres/types.go +++ b/diode-server/gen/dbstore/postgres/types.go @@ -31,20 +31,6 @@ type ChangeSet struct { UpdatedAt pgtype.Timestamptz `json:"updated_at"` } -type ChangesView struct { - ID pgtype.Int4 `json:"id"` - ChangeUuid pgtype.Text `json:"change_uuid"` - ChangeSetID pgtype.Int4 `json:"change_set_id"` - ChangeType pgtype.Text `json:"change_type"` - ObjectType pgtype.Text `json:"object_type"` - ObjectID pgtype.Int4 `json:"object_id"` - ObjectVersion pgtype.Int4 `json:"object_version"` - Data []byte `json:"data"` - SequenceNumber pgtype.Int4 `json:"sequence_number"` - CreatedAt pgtype.Timestamptz `json:"created_at"` - UpdatedAt pgtype.Timestamptz `json:"updated_at"` -} - type IngestionLog struct { ID int32 `json:"id"` IngestionLogUuid string `json:"ingestion_log_uuid"` @@ -62,3 +48,26 @@ type IngestionLog struct { CreatedAt pgtype.Timestamptz `json:"created_at"` UpdatedAt pgtype.Timestamptz `json:"updated_at"` } + +type VChangeSetsChange struct { + ID pgtype.Int4 `json:"id"` + ChangeUuid pgtype.Text `json:"change_uuid"` + ChangeSetID pgtype.Int4 `json:"change_set_id"` + ChangeType pgtype.Text `json:"change_type"` + ObjectType pgtype.Text `json:"object_type"` + ObjectID pgtype.Int4 `json:"object_id"` + ObjectVersion pgtype.Int4 `json:"object_version"` + Data []byte `json:"data"` + SequenceNumber pgtype.Int4 `json:"sequence_number"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + UpdatedAt pgtype.Timestamptz `json:"updated_at"` +} + +type VIngestionLogsChangeSet struct { + ID pgtype.Int4 `json:"id"` + ChangeSetUuid pgtype.Text `json:"change_set_uuid"` + IngestionLogID pgtype.Int4 `json:"ingestion_log_id"` + BranchID pgtype.Text `json:"branch_id"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + UpdatedAt pgtype.Timestamptz 
`json:"updated_at"` +} From d835510ca591f13d206f511a9a08cbb03fc5d8bd Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Thu, 19 Dec 2024 18:50:51 +0000 Subject: [PATCH 22/26] feat: use single repository for ingestion logs and change sets Signed-off-by: Michal Fiedorowicz --- diode-server/cmd/reconciler/main.go | 7 +- .../{repositories.go => repository.go} | 34 +- diode-server/ingester/component_test.go | 5 +- .../reconciler/ingestion_processor.go | 44 ++- .../ingestion_processor_internal_test.go | 23 +- .../reconciler/ingestion_processor_test.go | 16 +- diode-server/reconciler/logs_retriever.go | 10 +- .../reconciler/mocks/changesetrepository.go | 98 ------ .../mocks/ingestionlogrepository.go | 266 -------------- diode-server/reconciler/mocks/repository.go | 328 ++++++++++++++++++ .../{repositories.go => repository.go} | 8 +- diode-server/reconciler/server.go | 34 +- .../reconciler/server_internal_test.go | 32 +- diode-server/reconciler/server_test.go | 10 +- 14 files changed, 421 insertions(+), 494 deletions(-) rename diode-server/dbstore/postgres/{repositories.go => repository.go} (85%) delete mode 100644 diode-server/reconciler/mocks/changesetrepository.go delete mode 100644 diode-server/reconciler/mocks/ingestionlogrepository.go create mode 100644 diode-server/reconciler/mocks/repository.go rename diode-server/reconciler/{repositories.go => repository.go} (77%) diff --git a/diode-server/cmd/reconciler/main.go b/diode-server/cmd/reconciler/main.go index 1b24a442..dd5a2cd2 100644 --- a/diode-server/cmd/reconciler/main.go +++ b/diode-server/cmd/reconciler/main.go @@ -43,10 +43,9 @@ func main() { } defer dbPool.Close() - ingestionLogRepo := postgres.NewIngestionLogRepository(dbPool) - changeSetRepo := postgres.NewChangeSetRepository(dbPool) + repository := postgres.NewRepository(dbPool) - ingestionProcessor, err := reconciler.NewIngestionProcessor(ctx, s.Logger(), ingestionLogRepo, changeSetRepo) + ingestionProcessor, err := reconciler.NewIngestionProcessor(ctx, s.Logger(), repository) if err != nil { s.Logger().Error("failed to instantiate ingestion processor", "error", err) os.Exit(1) @@ -57,7 +56,7 @@ func main() { os.Exit(1) } - gRPCServer, err := reconciler.NewServer(ctx, s.Logger(), ingestionLogRepo, changeSetRepo) + gRPCServer, err := reconciler.NewServer(ctx, s.Logger(), repository) if err != nil { s.Logger().Error("failed to instantiate gRPC server", "error", err) os.Exit(1) diff --git a/diode-server/dbstore/postgres/repositories.go b/diode-server/dbstore/postgres/repository.go similarity index 85% rename from diode-server/dbstore/postgres/repositories.go rename to diode-server/dbstore/postgres/repository.go index 68bfc22a..687fd653 100644 --- a/diode-server/dbstore/postgres/repositories.go +++ b/diode-server/dbstore/postgres/repository.go @@ -15,22 +15,22 @@ import ( "github.com/netboxlabs/diode/diode-server/reconciler/changeset" ) -// IngestionLogRepository allows interacting with ingestion logs. -type IngestionLogRepository struct { +// Repository is an interface for interacting with ingestion logs and change sets. +type Repository struct { pool *pgxpool.Pool queries *postgres.Queries } -// NewIngestionLogRepository creates a new IngestionLogRepository. -func NewIngestionLogRepository(pool *pgxpool.Pool) *IngestionLogRepository { - return &IngestionLogRepository{ +// NewRepository creates a new Repository. 
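+// It wires the sqlc-generated queries to the supplied pgx connection pool, so a
+// single pool backs both ingestion log and change set access. A minimal
+// construction sketch (the connection string is illustrative only):
+//
+//	pool, err := pgxpool.New(ctx, "postgres://diode:diode@localhost:5432/diode")
+//	if err != nil {
+//		// handle the connection error
+//	}
+//	repo := postgres.NewRepository(pool)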
+func NewRepository(pool *pgxpool.Pool) *Repository { + return &Repository{ pool: pool, queries: postgres.New(pool), } } // CreateIngestionLog creates a new ingestion log. -func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { +func (r *Repository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { entityJSON, err := protojson.Marshal(ingestionLog.Entity) if err != nil { return nil, fmt.Errorf("failed to marshal entity: %w", err) @@ -57,7 +57,7 @@ func (r *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingesti } // UpdateIngestionLogStateWithError updates an ingestion log with a new state and error. -func (r *IngestionLogRepository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { +func (r *Repository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { params := postgres.UpdateIngestionLogStateWithErrorParams{ ID: id, State: pgtype.Int4{Int32: int32(state), Valid: true}, @@ -74,7 +74,7 @@ func (r *IngestionLogRepository) UpdateIngestionLogStateWithError(ctx context.Co } // CountIngestionLogsPerState counts ingestion logs per state. -func (r *IngestionLogRepository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { +func (r *Repository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { counts, err := r.queries.CountIngestionLogsPerState(ctx) if err != nil { return nil, err @@ -88,7 +88,7 @@ func (r *IngestionLogRepository) CountIngestionLogsPerState(ctx context.Context) } // RetrieveIngestionLogs retrieves ingestion logs. -func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { +func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { params := postgres.RetrieveIngestionLogsWithChangeSetsParams{ Limit: limit, Offset: offset, @@ -215,22 +215,8 @@ func (r *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filt return ingestionLogs, nil } -// ChangeSetRepository allows interacting with change sets. -type ChangeSetRepository struct { - pool *pgxpool.Pool - queries *postgres.Queries -} - -// NewChangeSetRepository creates a new ChangeSetRepository. -func NewChangeSetRepository(pool *pgxpool.Pool) *ChangeSetRepository { - return &ChangeSetRepository{ - pool: pool, - queries: postgres.New(pool), - } -} - // CreateChangeSet creates a new change set. 
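+// The change set row and its individual changes are stored together; note the
+// r.pool.Begin call below, which opens one transaction for the inserts tied to
+// the given ingestion log. A hedged call sketch (logID is the database ID
+// returned by CreateIngestionLog, cs a changeset.ChangeSet built by the
+// reconciler):
+//
+//	changeSetID, err := repo.CreateChangeSet(ctx, cs, logID)
+//	if err != nil {
+//		// handle the error
+//	}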
-func (r *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { +func (r *Repository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { tx, err := r.pool.Begin(ctx) if err != nil { return nil, fmt.Errorf("failed to start transaction: %w", err) diff --git a/diode-server/ingester/component_test.go b/diode-server/ingester/component_test.go index 1deb4d78..edc8b805 100644 --- a/diode-server/ingester/component_test.go +++ b/diode-server/ingester/component_test.go @@ -71,9 +71,8 @@ const bufSize = 1024 * 1024 func startReconcilerServer(ctx context.Context, t *testing.T) *reconciler.Server { logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) - changeSetRepoMock := mocks.NewChangeSetRepository(t) - server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) + mockRepository := mocks.NewRepository(t) + server, err := reconciler.NewServer(ctx, logger, mockRepository) require.NoError(t, err) errChan := make(chan error, 1) diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go index 1a123c72..49670587 100644 --- a/diode-server/reconciler/ingestion_processor.go +++ b/diode-server/reconciler/ingestion_processor.go @@ -53,14 +53,13 @@ type RedisClient interface { // IngestionProcessor processes ingested data type IngestionProcessor struct { - Config Config - logger *slog.Logger - hostname string - redisClient RedisClient - redisStreamClient RedisClient - nbClient netboxdiodeplugin.NetBoxAPI - ingestionLogRepository IngestionLogRepository - changeSetRepository ChangeSetRepository + Config Config + logger *slog.Logger + hostname string + redisClient RedisClient + redisStreamClient RedisClient + nbClient netboxdiodeplugin.NetBoxAPI + repository Repository } // IngestionLogToProcess represents an ingestion log to process @@ -73,7 +72,7 @@ type IngestionLogToProcess struct { } // NewIngestionProcessor creates a new ingestion processor -func NewIngestionProcessor(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, changeSetRepo ChangeSetRepository) (*IngestionProcessor, error) { +func NewIngestionProcessor(ctx context.Context, logger *slog.Logger, repository Repository) (*IngestionProcessor, error) { var cfg Config envconfig.MustProcess("", &cfg) @@ -108,14 +107,13 @@ func NewIngestionProcessor(ctx context.Context, logger *slog.Logger, ingestionLo } component := &IngestionProcessor{ - Config: cfg, - logger: logger, - hostname: hostname, - redisClient: redisClient, - redisStreamClient: redisStreamClient, - nbClient: nbClient, - ingestionLogRepository: ingestionLogRepo, - changeSetRepository: changeSetRepo, + Config: cfg, + logger: logger, + hostname: hostname, + redisClient: redisClient, + redisStreamClient: redisStreamClient, + nbClient: nbClient, + repository: repository, } return component, nil @@ -289,7 +287,7 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan ingestionErr := extractIngestionError(err) - if err = p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { + if err = p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { 
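+				// Marking the ingestion log as FAILED did not succeed either; the error is
+				// collected below so it is reported together with the original failure.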
ingestionLog.errors = append(ingestionLog.errors, err) } break @@ -297,7 +295,7 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan ingestionLog.changeSet = changeSet - if _, err = p.changeSetRepository.CreateChangeSet(ctx, *changeSet, ingestionLog.ingestionLogID); err != nil { + if _, err = p.repository.CreateChangeSet(ctx, *changeSet, ingestionLog.ingestionLogID); err != nil { ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to create change set: %v", err)) } @@ -311,7 +309,7 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan } } } else { - if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_NO_CHANGES, nil); err != nil { + if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_NO_CHANGES, nil); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } } @@ -354,14 +352,14 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha ingestionErr := extractIngestionError(err) - if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { + if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } break } ingestionLog.ingestionLog.State = reconcilerpb.State_RECONCILED - if err := p.ingestionLogRepository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil { + if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil { ingestionLog.errors = append(ingestionLog.errors, err) } p.logger.Debug("change set applied", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID) @@ -408,7 +406,7 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq ingestionLog.Id = uuid.NewString() - ingestionLogID, err := p.ingestionLogRepository.CreateIngestionLog(ctx, ingestionLog, nil) + ingestionLogID, err := p.repository.CreateIngestionLog(ctx, ingestionLog, nil) if err != nil { errs = append(errs, fmt.Errorf("failed to create ingestion log: %v", err)) continue diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go b/diode-server/reconciler/ingestion_processor_internal_test.go index a273a936..0487b3e1 100644 --- a/diode-server/reconciler/ingestion_processor_internal_test.go +++ b/diode-server/reconciler/ingestion_processor_internal_test.go @@ -152,8 +152,7 @@ func TestHandleStreamMessage(t *testing.T) { mockRedisClient := new(mr.RedisClient) mockRedisStreamClient := new(mr.RedisClient) mockNbClient := new(mnp.NetBoxAPI) - mockIngestionLogRepo := new(mr.IngestionLogRepository) - mockChangeSetRepo := new(mr.ChangeSetRepository) + mockRepository := new(mr.Repository) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) p := &IngestionProcessor{ @@ -166,8 +165,7 @@ func TestHandleStreamMessage(t *testing.T) { ReconcilerRateLimiterRPS: 20, ReconcilerRateLimiterBurst: 1, }, - ingestionLogRepository: mockIngestionLogRepo, - changeSetRepository: mockChangeSetRepo, + repository: mockRepository, } request := redis.XMessage{} @@ -208,7 +206,7 @@ func 
TestHandleStreamMessage(t *testing.T) { } mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.changeSetResponse, tt.changeSetError) if tt.entities[0].Entity != nil { - mockIngestionLogRepo.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) + mockRepository.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) } mockRedisStreamClient.On("XAck", ctx, mock.Anything, mock.Anything, mock.Anything).Return(redis.NewIntCmd(ctx)) mockRedisStreamClient.On("XDel", ctx, mock.Anything, mock.Anything).Return(redis.NewIntCmd(ctx)) @@ -221,7 +219,7 @@ func TestHandleStreamMessage(t *testing.T) { } if tt.validMsg { - mockIngestionLogRepo.AssertExpectations(t) + mockRepository.AssertExpectations(t) } }) } @@ -445,8 +443,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { ctx := context.Background() mockRedisClient := new(mr.RedisClient) mockNbClient := new(mnp.NetBoxAPI) - mockIngestionLogRepo := new(mr.IngestionLogRepository) - mockChangeSetRepo := new(mr.ChangeSetRepository) + mockRepository := new(mr.Repository) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) p := &IngestionProcessor{ @@ -458,8 +455,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { ReconcilerRateLimiterRPS: 20, ReconcilerRateLimiterBurst: 1, }, - ingestionLogRepository: mockIngestionLogRepo, - changeSetRepository: mockChangeSetRepo, + repository: mockRepository, } // Set up the mock expectation @@ -472,10 +468,10 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(tt.mockRetrieveObjectStateResponse, nil) if tt.autoApplyChangesets { - mockIngestionLogRepo.On("UpdateIngestionLogStateWithError", ctx, ingestionLogID, tt.expectedStatus, mock.Anything).Return(nil) + mockRepository.On("UpdateIngestionLogStateWithError", ctx, ingestionLogID, tt.expectedStatus, mock.Anything).Return(nil) mockNbClient.On("ApplyChangeSet", ctx, mock.Anything).Return(tt.mockApplyChangeSetResponse, nil) } - mockChangeSetRepo.On("CreateChangeSet", ctx, mock.Anything, ingestionLogID).Return(int32Ptr(1), nil) + mockRepository.On("CreateChangeSet", ctx, mock.Anything, ingestionLogID).Return(int32Ptr(1), nil) bufCapacity := 1 @@ -504,8 +500,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) { <-applyChangeSetDone } - mockIngestionLogRepo.AssertExpectations(t) - mockChangeSetRepo.AssertExpectations(t) + mockRepository.AssertExpectations(t) require.Equal(t, tt.expectedStatus, tt.ingestionLog.State) }) } diff --git a/diode-server/reconciler/ingestion_processor_test.go b/diode-server/reconciler/ingestion_processor_test.go index b0c5cae2..698b386b 100644 --- a/diode-server/reconciler/ingestion_processor_test.go +++ b/diode-server/reconciler/ingestion_processor_test.go @@ -29,11 +29,10 @@ func TestNewIngestionProcessor(t *testing.T) { setupEnv(s.Addr()) defer teardownEnv() - ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) - changeSetRepoMock := mocks.NewChangeSetRepository(t) + mockRepository := mocks.NewRepository(t) logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - processor, err := reconciler.NewIngestionProcessor(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) + processor, err := reconciler.NewIngestionProcessor(ctx, logger, mockRepository) require.NoError(t, err) require.NotNil(t, processor) @@ 
-49,13 +48,12 @@ func TestIngestionProcessorStart(t *testing.T) { setupEnv(s.Addr()) defer teardownEnv() - ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) - changeSetRepoMock := mocks.NewChangeSetRepository(t) + mockRepository := mocks.NewRepository(t) logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) ctx := context.Background() - processor, err := reconciler.NewIngestionProcessor(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) + processor, err := reconciler.NewIngestionProcessor(ctx, logger, mockRepository) require.NoError(t, err) require.NotNil(t, processor) @@ -225,8 +223,8 @@ func TestIngestionProcessorStart(t *testing.T) { // Wait server time.Sleep(50 * time.Millisecond) - ingestionLogRepoMock.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) - ingestionLogRepoMock.On("UpdateIngestionLogStateWithError", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRepository.On("CreateIngestionLog", ctx, mock.Anything, mock.Anything).Return(int32Ptr(1), nil) + mockRepository.On("UpdateIngestionLogStateWithError", ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil) redisClient := redis.NewClient(&redis.Options{ Addr: s.Addr(), @@ -250,5 +248,5 @@ func TestIngestionProcessorStart(t *testing.T) { // Stop the processor err = processor.Stop() assert.NoError(t, err) - ingestionLogRepoMock.AssertExpectations(t) + mockRepository.AssertExpectations(t) } diff --git a/diode-server/reconciler/logs_retriever.go b/diode-server/reconciler/logs_retriever.go index e34a2b8f..f677fa57 100644 --- a/diode-server/reconciler/logs_retriever.go +++ b/diode-server/reconciler/logs_retriever.go @@ -11,10 +11,10 @@ import ( "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" ) -func retrieveIngestionMetrics(ctx context.Context, ingestionLogRepo IngestionLogRepository) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { +func retrieveIngestionMetrics(ctx context.Context, repository Repository) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { var metrics reconcilerpb.IngestionMetrics - ingestionLogsPerState, err := ingestionLogRepo.CountIngestionLogsPerState(ctx) + ingestionLogsPerState, err := repository.CountIngestionLogsPerState(ctx) if err != nil { return nil, err } @@ -40,10 +40,10 @@ func retrieveIngestionMetrics(ctx context.Context, ingestionLogRepo IngestionLog return &reconcilerpb.RetrieveIngestionLogsResponse{Metrics: &metrics}, nil } -func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, _ ChangeSetRepository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { +func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, repository Repository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { if in.GetOnlyMetrics() { logger.Debug("retrieving only ingestion metrics") - return retrieveIngestionMetrics(ctx, ingestionLogRepo) + return retrieveIngestionMetrics(ctx, repository) } pageSize := in.GetPageSize() @@ -60,7 +60,7 @@ func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, ingestionLo offset = decodedPageToken } - logs, err := ingestionLogRepo.RetrieveIngestionLogs(ctx, in, pageSize, offset) + logs, err := repository.RetrieveIngestionLogs(ctx, in, pageSize, offset) if err != nil { return nil, fmt.Errorf("failed to retrieve ingestion logs: %w", err) } diff --git a/diode-server/reconciler/mocks/changesetrepository.go 
b/diode-server/reconciler/mocks/changesetrepository.go deleted file mode 100644 index eb217f32..00000000 --- a/diode-server/reconciler/mocks/changesetrepository.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by mockery v2.50.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - changeset "github.com/netboxlabs/diode/diode-server/reconciler/changeset" - - mock "github.com/stretchr/testify/mock" -) - -// ChangeSetRepository is an autogenerated mock type for the ChangeSetRepository type -type ChangeSetRepository struct { - mock.Mock -} - -type ChangeSetRepository_Expecter struct { - mock *mock.Mock -} - -func (_m *ChangeSetRepository) EXPECT() *ChangeSetRepository_Expecter { - return &ChangeSetRepository_Expecter{mock: &_m.Mock} -} - -// CreateChangeSet provides a mock function with given fields: ctx, changeSet, ingestionLogID -func (_m *ChangeSetRepository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { - ret := _m.Called(ctx, changeSet, ingestionLogID) - - if len(ret) == 0 { - panic("no return value specified for CreateChangeSet") - } - - var r0 *int32 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) (*int32, error)); ok { - return rf(ctx, changeSet, ingestionLogID) - } - if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) *int32); ok { - r0 = rf(ctx, changeSet, ingestionLogID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*int32) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, changeset.ChangeSet, int32) error); ok { - r1 = rf(ctx, changeSet, ingestionLogID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ChangeSetRepository_CreateChangeSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateChangeSet' -type ChangeSetRepository_CreateChangeSet_Call struct { - *mock.Call -} - -// CreateChangeSet is a helper method to define mock.On call -// - ctx context.Context -// - changeSet changeset.ChangeSet -// - ingestionLogID int32 -func (_e *ChangeSetRepository_Expecter) CreateChangeSet(ctx interface{}, changeSet interface{}, ingestionLogID interface{}) *ChangeSetRepository_CreateChangeSet_Call { - return &ChangeSetRepository_CreateChangeSet_Call{Call: _e.mock.On("CreateChangeSet", ctx, changeSet, ingestionLogID)} -} - -func (_c *ChangeSetRepository_CreateChangeSet_Call) Run(run func(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32)) *ChangeSetRepository_CreateChangeSet_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(changeset.ChangeSet), args[2].(int32)) - }) - return _c -} - -func (_c *ChangeSetRepository_CreateChangeSet_Call) Return(_a0 *int32, _a1 error) *ChangeSetRepository_CreateChangeSet_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ChangeSetRepository_CreateChangeSet_Call) RunAndReturn(run func(context.Context, changeset.ChangeSet, int32) (*int32, error)) *ChangeSetRepository_CreateChangeSet_Call { - _c.Call.Return(run) - return _c -} - -// NewChangeSetRepository creates a new instance of ChangeSetRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewChangeSetRepository(t interface { - mock.TestingT - Cleanup(func()) -}) *ChangeSetRepository { - mock := &ChangeSetRepository{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/diode-server/reconciler/mocks/ingestionlogrepository.go b/diode-server/reconciler/mocks/ingestionlogrepository.go deleted file mode 100644 index 8b096854..00000000 --- a/diode-server/reconciler/mocks/ingestionlogrepository.go +++ /dev/null @@ -1,266 +0,0 @@ -// Code generated by mockery v2.50.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - reconcilerpb "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" -) - -// IngestionLogRepository is an autogenerated mock type for the IngestionLogRepository type -type IngestionLogRepository struct { - mock.Mock -} - -type IngestionLogRepository_Expecter struct { - mock *mock.Mock -} - -func (_m *IngestionLogRepository) EXPECT() *IngestionLogRepository_Expecter { - return &IngestionLogRepository_Expecter{mock: &_m.Mock} -} - -// CountIngestionLogsPerState provides a mock function with given fields: ctx -func (_m *IngestionLogRepository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for CountIngestionLogsPerState") - } - - var r0 map[reconcilerpb.State]int32 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (map[reconcilerpb.State]int32, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) map[reconcilerpb.State]int32); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[reconcilerpb.State]int32) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IngestionLogRepository_CountIngestionLogsPerState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountIngestionLogsPerState' -type IngestionLogRepository_CountIngestionLogsPerState_Call struct { - *mock.Call -} - -// CountIngestionLogsPerState is a helper method to define mock.On call -// - ctx context.Context -func (_e *IngestionLogRepository_Expecter) CountIngestionLogsPerState(ctx interface{}) *IngestionLogRepository_CountIngestionLogsPerState_Call { - return &IngestionLogRepository_CountIngestionLogsPerState_Call{Call: _e.mock.On("CountIngestionLogsPerState", ctx)} -} - -func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) Run(run func(ctx context.Context)) *IngestionLogRepository_CountIngestionLogsPerState_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) Return(_a0 map[reconcilerpb.State]int32, _a1 error) *IngestionLogRepository_CountIngestionLogsPerState_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *IngestionLogRepository_CountIngestionLogsPerState_Call) RunAndReturn(run func(context.Context) (map[reconcilerpb.State]int32, error)) *IngestionLogRepository_CountIngestionLogsPerState_Call { - _c.Call.Return(run) - return _c -} - -// CreateIngestionLog provides a mock function with given fields: ctx, ingestionLog, sourceMetadata -func (_m *IngestionLogRepository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { - ret := _m.Called(ctx, 
ingestionLog, sourceMetadata) - - if len(ret) == 0 { - panic("no return value specified for CreateIngestionLog") - } - - var r0 *int32 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)); ok { - return rf(ctx, ingestionLog, sourceMetadata) - } - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) *int32); ok { - r0 = rf(ctx, ingestionLog, sourceMetadata) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*int32) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.IngestionLog, []byte) error); ok { - r1 = rf(ctx, ingestionLog, sourceMetadata) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IngestionLogRepository_CreateIngestionLog_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateIngestionLog' -type IngestionLogRepository_CreateIngestionLog_Call struct { - *mock.Call -} - -// CreateIngestionLog is a helper method to define mock.On call -// - ctx context.Context -// - ingestionLog *reconcilerpb.IngestionLog -// - sourceMetadata []byte -func (_e *IngestionLogRepository_Expecter) CreateIngestionLog(ctx interface{}, ingestionLog interface{}, sourceMetadata interface{}) *IngestionLogRepository_CreateIngestionLog_Call { - return &IngestionLogRepository_CreateIngestionLog_Call{Call: _e.mock.On("CreateIngestionLog", ctx, ingestionLog, sourceMetadata)} -} - -func (_c *IngestionLogRepository_CreateIngestionLog_Call) Run(run func(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte)) *IngestionLogRepository_CreateIngestionLog_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*reconcilerpb.IngestionLog), args[2].([]byte)) - }) - return _c -} - -func (_c *IngestionLogRepository_CreateIngestionLog_Call) Return(_a0 *int32, _a1 error) *IngestionLogRepository_CreateIngestionLog_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *IngestionLogRepository_CreateIngestionLog_Call) RunAndReturn(run func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)) *IngestionLogRepository_CreateIngestionLog_Call { - _c.Call.Return(run) - return _c -} - -// RetrieveIngestionLogs provides a mock function with given fields: ctx, filter, limit, offset -func (_m *IngestionLogRepository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { - ret := _m.Called(ctx, filter, limit, offset) - - if len(ret) == 0 { - panic("no return value specified for RetrieveIngestionLogs") - } - - var r0 []*reconcilerpb.IngestionLog - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)); ok { - return rf(ctx, filter, limit, offset) - } - if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) []*reconcilerpb.IngestionLog); ok { - r0 = rf(ctx, filter, limit, offset) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*reconcilerpb.IngestionLog) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) error); ok { - r1 = rf(ctx, filter, limit, offset) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IngestionLogRepository_RetrieveIngestionLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'RetrieveIngestionLogs' -type IngestionLogRepository_RetrieveIngestionLogs_Call struct { - *mock.Call -} - -// RetrieveIngestionLogs is a helper method to define mock.On call -// - ctx context.Context -// - filter *reconcilerpb.RetrieveIngestionLogsRequest -// - limit int32 -// - offset int32 -func (_e *IngestionLogRepository_Expecter) RetrieveIngestionLogs(ctx interface{}, filter interface{}, limit interface{}, offset interface{}) *IngestionLogRepository_RetrieveIngestionLogs_Call { - return &IngestionLogRepository_RetrieveIngestionLogs_Call{Call: _e.mock.On("RetrieveIngestionLogs", ctx, filter, limit, offset)} -} - -func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) Run(run func(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32)) *IngestionLogRepository_RetrieveIngestionLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*reconcilerpb.RetrieveIngestionLogsRequest), args[2].(int32), args[3].(int32)) - }) - return _c -} - -func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) Return(_a0 []*reconcilerpb.IngestionLog, _a1 error) *IngestionLogRepository_RetrieveIngestionLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *IngestionLogRepository_RetrieveIngestionLogs_Call) RunAndReturn(run func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)) *IngestionLogRepository_RetrieveIngestionLogs_Call { - _c.Call.Return(run) - return _c -} - -// UpdateIngestionLogStateWithError provides a mock function with given fields: ctx, id, state, ingestionError -func (_m *IngestionLogRepository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { - ret := _m.Called(ctx, id, state, ingestionError) - - if len(ret) == 0 { - panic("no return value specified for UpdateIngestionLogStateWithError") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error); ok { - r0 = rf(ctx, id, state, ingestionError) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IngestionLogRepository_UpdateIngestionLogStateWithError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateIngestionLogStateWithError' -type IngestionLogRepository_UpdateIngestionLogStateWithError_Call struct { - *mock.Call -} - -// UpdateIngestionLogStateWithError is a helper method to define mock.On call -// - ctx context.Context -// - id int32 -// - state reconcilerpb.State -// - ingestionError *reconcilerpb.IngestionError -func (_e *IngestionLogRepository_Expecter) UpdateIngestionLogStateWithError(ctx interface{}, id interface{}, state interface{}, ingestionError interface{}) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { - return &IngestionLogRepository_UpdateIngestionLogStateWithError_Call{Call: _e.mock.On("UpdateIngestionLogStateWithError", ctx, id, state, ingestionError)} -} - -func (_c *IngestionLogRepository_UpdateIngestionLogStateWithError_Call) Run(run func(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError)) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(int32), args[2].(reconcilerpb.State), args[3].(*reconcilerpb.IngestionError)) - }) - return _c -} - -func (_c 
*IngestionLogRepository_UpdateIngestionLogStateWithError_Call) Return(_a0 error) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *IngestionLogRepository_UpdateIngestionLogStateWithError_Call) RunAndReturn(run func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error) *IngestionLogRepository_UpdateIngestionLogStateWithError_Call { - _c.Call.Return(run) - return _c -} - -// NewIngestionLogRepository creates a new instance of IngestionLogRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewIngestionLogRepository(t interface { - mock.TestingT - Cleanup(func()) -}) *IngestionLogRepository { - mock := &IngestionLogRepository{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/diode-server/reconciler/mocks/repository.go b/diode-server/reconciler/mocks/repository.go new file mode 100644 index 00000000..176915e0 --- /dev/null +++ b/diode-server/reconciler/mocks/repository.go @@ -0,0 +1,328 @@ +// Code generated by mockery v2.50.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + changeset "github.com/netboxlabs/diode/diode-server/reconciler/changeset" + + mock "github.com/stretchr/testify/mock" + + reconcilerpb "github.com/netboxlabs/diode/diode-server/gen/diode/v1/reconcilerpb" +) + +// Repository is an autogenerated mock type for the Repository type +type Repository struct { + mock.Mock +} + +type Repository_Expecter struct { + mock *mock.Mock +} + +func (_m *Repository) EXPECT() *Repository_Expecter { + return &Repository_Expecter{mock: &_m.Mock} +} + +// CountIngestionLogsPerState provides a mock function with given fields: ctx +func (_m *Repository) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for CountIngestionLogsPerState") + } + + var r0 map[reconcilerpb.State]int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (map[reconcilerpb.State]int32, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) map[reconcilerpb.State]int32); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[reconcilerpb.State]int32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Repository_CountIngestionLogsPerState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CountIngestionLogsPerState' +type Repository_CountIngestionLogsPerState_Call struct { + *mock.Call +} + +// CountIngestionLogsPerState is a helper method to define mock.On call +// - ctx context.Context +func (_e *Repository_Expecter) CountIngestionLogsPerState(ctx interface{}) *Repository_CountIngestionLogsPerState_Call { + return &Repository_CountIngestionLogsPerState_Call{Call: _e.mock.On("CountIngestionLogsPerState", ctx)} +} + +func (_c *Repository_CountIngestionLogsPerState_Call) Run(run func(ctx context.Context)) *Repository_CountIngestionLogsPerState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Repository_CountIngestionLogsPerState_Call) Return(_a0 map[reconcilerpb.State]int32, _a1 error) *Repository_CountIngestionLogsPerState_Call { + _c.Call.Return(_a0, 
_a1) + return _c +} + +func (_c *Repository_CountIngestionLogsPerState_Call) RunAndReturn(run func(context.Context) (map[reconcilerpb.State]int32, error)) *Repository_CountIngestionLogsPerState_Call { + _c.Call.Return(run) + return _c +} + +// CreateChangeSet provides a mock function with given fields: ctx, changeSet, ingestionLogID +func (_m *Repository) CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) { + ret := _m.Called(ctx, changeSet, ingestionLogID) + + if len(ret) == 0 { + panic("no return value specified for CreateChangeSet") + } + + var r0 *int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) (*int32, error)); ok { + return rf(ctx, changeSet, ingestionLogID) + } + if rf, ok := ret.Get(0).(func(context.Context, changeset.ChangeSet, int32) *int32); ok { + r0 = rf(ctx, changeSet, ingestionLogID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*int32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, changeset.ChangeSet, int32) error); ok { + r1 = rf(ctx, changeSet, ingestionLogID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Repository_CreateChangeSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateChangeSet' +type Repository_CreateChangeSet_Call struct { + *mock.Call +} + +// CreateChangeSet is a helper method to define mock.On call +// - ctx context.Context +// - changeSet changeset.ChangeSet +// - ingestionLogID int32 +func (_e *Repository_Expecter) CreateChangeSet(ctx interface{}, changeSet interface{}, ingestionLogID interface{}) *Repository_CreateChangeSet_Call { + return &Repository_CreateChangeSet_Call{Call: _e.mock.On("CreateChangeSet", ctx, changeSet, ingestionLogID)} +} + +func (_c *Repository_CreateChangeSet_Call) Run(run func(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32)) *Repository_CreateChangeSet_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(changeset.ChangeSet), args[2].(int32)) + }) + return _c +} + +func (_c *Repository_CreateChangeSet_Call) Return(_a0 *int32, _a1 error) *Repository_CreateChangeSet_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Repository_CreateChangeSet_Call) RunAndReturn(run func(context.Context, changeset.ChangeSet, int32) (*int32, error)) *Repository_CreateChangeSet_Call { + _c.Call.Return(run) + return _c +} + +// CreateIngestionLog provides a mock function with given fields: ctx, ingestionLog, sourceMetadata +func (_m *Repository) CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) { + ret := _m.Called(ctx, ingestionLog, sourceMetadata) + + if len(ret) == 0 { + panic("no return value specified for CreateIngestionLog") + } + + var r0 *int32 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)); ok { + return rf(ctx, ingestionLog, sourceMetadata) + } + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.IngestionLog, []byte) *int32); ok { + r0 = rf(ctx, ingestionLog, sourceMetadata) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*int32) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.IngestionLog, []byte) error); ok { + r1 = rf(ctx, ingestionLog, sourceMetadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Repository_CreateIngestionLog_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'CreateIngestionLog' +type Repository_CreateIngestionLog_Call struct { + *mock.Call +} + +// CreateIngestionLog is a helper method to define mock.On call +// - ctx context.Context +// - ingestionLog *reconcilerpb.IngestionLog +// - sourceMetadata []byte +func (_e *Repository_Expecter) CreateIngestionLog(ctx interface{}, ingestionLog interface{}, sourceMetadata interface{}) *Repository_CreateIngestionLog_Call { + return &Repository_CreateIngestionLog_Call{Call: _e.mock.On("CreateIngestionLog", ctx, ingestionLog, sourceMetadata)} +} + +func (_c *Repository_CreateIngestionLog_Call) Run(run func(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte)) *Repository_CreateIngestionLog_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*reconcilerpb.IngestionLog), args[2].([]byte)) + }) + return _c +} + +func (_c *Repository_CreateIngestionLog_Call) Return(_a0 *int32, _a1 error) *Repository_CreateIngestionLog_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Repository_CreateIngestionLog_Call) RunAndReturn(run func(context.Context, *reconcilerpb.IngestionLog, []byte) (*int32, error)) *Repository_CreateIngestionLog_Call { + _c.Call.Return(run) + return _c +} + +// RetrieveIngestionLogs provides a mock function with given fields: ctx, filter, limit, offset +func (_m *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) { + ret := _m.Called(ctx, filter, limit, offset) + + if len(ret) == 0 { + panic("no return value specified for RetrieveIngestionLogs") + } + + var r0 []*reconcilerpb.IngestionLog + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)); ok { + return rf(ctx, filter, limit, offset) + } + if rf, ok := ret.Get(0).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) []*reconcilerpb.IngestionLog); ok { + r0 = rf(ctx, filter, limit, offset) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*reconcilerpb.IngestionLog) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) error); ok { + r1 = rf(ctx, filter, limit, offset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Repository_RetrieveIngestionLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveIngestionLogs' +type Repository_RetrieveIngestionLogs_Call struct { + *mock.Call +} + +// RetrieveIngestionLogs is a helper method to define mock.On call +// - ctx context.Context +// - filter *reconcilerpb.RetrieveIngestionLogsRequest +// - limit int32 +// - offset int32 +func (_e *Repository_Expecter) RetrieveIngestionLogs(ctx interface{}, filter interface{}, limit interface{}, offset interface{}) *Repository_RetrieveIngestionLogs_Call { + return &Repository_RetrieveIngestionLogs_Call{Call: _e.mock.On("RetrieveIngestionLogs", ctx, filter, limit, offset)} +} + +func (_c *Repository_RetrieveIngestionLogs_Call) Run(run func(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32)) *Repository_RetrieveIngestionLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*reconcilerpb.RetrieveIngestionLogsRequest), args[2].(int32), args[3].(int32)) + }) + return _c +} + +func (_c 
*Repository_RetrieveIngestionLogs_Call) Return(_a0 []*reconcilerpb.IngestionLog, _a1 error) *Repository_RetrieveIngestionLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Repository_RetrieveIngestionLogs_Call) RunAndReturn(run func(context.Context, *reconcilerpb.RetrieveIngestionLogsRequest, int32, int32) ([]*reconcilerpb.IngestionLog, error)) *Repository_RetrieveIngestionLogs_Call { + _c.Call.Return(run) + return _c +} + +// UpdateIngestionLogStateWithError provides a mock function with given fields: ctx, id, state, ingestionError +func (_m *Repository) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error { + ret := _m.Called(ctx, id, state, ingestionError) + + if len(ret) == 0 { + panic("no return value specified for UpdateIngestionLogStateWithError") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error); ok { + r0 = rf(ctx, id, state, ingestionError) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Repository_UpdateIngestionLogStateWithError_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateIngestionLogStateWithError' +type Repository_UpdateIngestionLogStateWithError_Call struct { + *mock.Call +} + +// UpdateIngestionLogStateWithError is a helper method to define mock.On call +// - ctx context.Context +// - id int32 +// - state reconcilerpb.State +// - ingestionError *reconcilerpb.IngestionError +func (_e *Repository_Expecter) UpdateIngestionLogStateWithError(ctx interface{}, id interface{}, state interface{}, ingestionError interface{}) *Repository_UpdateIngestionLogStateWithError_Call { + return &Repository_UpdateIngestionLogStateWithError_Call{Call: _e.mock.On("UpdateIngestionLogStateWithError", ctx, id, state, ingestionError)} +} + +func (_c *Repository_UpdateIngestionLogStateWithError_Call) Run(run func(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError)) *Repository_UpdateIngestionLogStateWithError_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(int32), args[2].(reconcilerpb.State), args[3].(*reconcilerpb.IngestionError)) + }) + return _c +} + +func (_c *Repository_UpdateIngestionLogStateWithError_Call) Return(_a0 error) *Repository_UpdateIngestionLogStateWithError_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Repository_UpdateIngestionLogStateWithError_Call) RunAndReturn(run func(context.Context, int32, reconcilerpb.State, *reconcilerpb.IngestionError) error) *Repository_UpdateIngestionLogStateWithError_Call { + _c.Call.Return(run) + return _c +} + +// NewRepository creates a new instance of Repository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRepository(t interface { + mock.TestingT + Cleanup(func()) +}) *Repository { + mock := &Repository{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/diode-server/reconciler/repositories.go b/diode-server/reconciler/repository.go similarity index 77% rename from diode-server/reconciler/repositories.go rename to diode-server/reconciler/repository.go index 90c98d31..db1fb611 100644 --- a/diode-server/reconciler/repositories.go +++ b/diode-server/reconciler/repository.go @@ -7,15 +7,11 @@ import ( "github.com/netboxlabs/diode/diode-server/reconciler/changeset" ) -// IngestionLogRepository is an interface for interacting with ingestion logs. -type IngestionLogRepository interface { +// Repository is an interface for interacting with ingestion logs and change sets. +type Repository interface { CreateIngestionLog(ctx context.Context, ingestionLog *reconcilerpb.IngestionLog, sourceMetadata []byte) (*int32, error) UpdateIngestionLogStateWithError(ctx context.Context, id int32, state reconcilerpb.State, ingestionError *reconcilerpb.IngestionError) error RetrieveIngestionLogs(ctx context.Context, filter *reconcilerpb.RetrieveIngestionLogsRequest, limit int32, offset int32) ([]*reconcilerpb.IngestionLog, error) CountIngestionLogsPerState(ctx context.Context) (map[reconcilerpb.State]int32, error) -} - -// ChangeSetRepository is an interface for interacting with change sets. -type ChangeSetRepository interface { CreateChangeSet(ctx context.Context, changeSet changeset.ChangeSet, ingestionLogID int32) (*int32, error) } diff --git a/diode-server/reconciler/server.go b/diode-server/reconciler/server.go index 420a020d..84484e8f 100644 --- a/diode-server/reconciler/server.go +++ b/diode-server/reconciler/server.go @@ -30,18 +30,17 @@ const ( type Server struct { reconcilerpb.UnimplementedReconcilerServiceServer - config Config - logger *slog.Logger - grpcListener net.Listener - grpcServer *grpc.Server - redisClient RedisClient - ingestionLogRepository IngestionLogRepository - changeSetRepository ChangeSetRepository - apiKeys APIKeys + config Config + logger *slog.Logger + grpcListener net.Listener + grpcServer *grpc.Server + redisClient RedisClient + repository Repository + apiKeys APIKeys } // NewServer creates a new reconciler server -func NewServer(ctx context.Context, logger *slog.Logger, ingestionLogRepo IngestionLogRepository, changeSetRepo ChangeSetRepository) (*Server, error) { +func NewServer(ctx context.Context, logger *slog.Logger, repository Repository) (*Server, error) { var cfg Config envconfig.MustProcess("", &cfg) @@ -69,14 +68,13 @@ func NewServer(ctx context.Context, logger *slog.Logger, ingestionLogRepo Ingest grpcServer := grpc.NewServer(grpc.ChainUnaryInterceptor(auth)) component := &Server{ - config: cfg, - logger: logger, - grpcListener: grpcListener, - grpcServer: grpcServer, - redisClient: redisClient, - ingestionLogRepository: ingestionLogRepo, - changeSetRepository: changeSetRepo, - apiKeys: apiKeys, + config: cfg, + logger: logger, + grpcListener: grpcListener, + grpcServer: grpcServer, + redisClient: redisClient, + repository: repository, + apiKeys: apiKeys, } reconcilerpb.RegisterReconcilerServiceServer(grpcServer, component) @@ -144,7 +142,7 @@ func (s *Server) RetrieveIngestionDataSources(_ context.Context, in *reconcilerp // RetrieveIngestionLogs retrieves logs func (s *Server) RetrieveIngestionLogs(ctx context.Context, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, 
error) { - return retrieveIngestionLogs(ctx, s.logger, s.ingestionLogRepository, s.changeSetRepository, in) + return retrieveIngestionLogs(ctx, s.logger, s.repository, in) } func validateRetrieveIngestionDataSourcesRequest(in *reconcilerpb.RetrieveIngestionDataSourcesRequest) error { diff --git a/diode-server/reconciler/server_internal_test.go b/diode-server/reconciler/server_internal_test.go index 7c3a697e..e34b8e96 100644 --- a/diode-server/reconciler/server_internal_test.go +++ b/diode-server/reconciler/server_internal_test.go @@ -728,14 +728,12 @@ func TestRetrieveLogs(t *testing.T) { ctx := context.Background() logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - mockRedisClient := new(mr.RedisClient) - mockIngestionLogRepo := new(mr.IngestionLogRepository) - mockChangeSetRepo := new(mr.ChangeSetRepository) + mockRedisClient := mr.NewRedisClient(t) + mockRepository := mr.NewRepository(t) server := &Server{ - redisClient: mockRedisClient, - logger: logger, - ingestionLogRepository: mockIngestionLogRepo, - changeSetRepository: mockChangeSetRepo, + redisClient: mockRedisClient, + logger: logger, + repository: mockRepository, } var retrieveErr error @@ -744,7 +742,7 @@ func TestRetrieveLogs(t *testing.T) { } if !tt.hasError { - mockIngestionLogRepo.On("RetrieveIngestionLogs", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.ingestionLogs, retrieveErr) + mockRepository.On("RetrieveIngestionLogs", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.ingestionLogs, retrieveErr) } response, err := server.RetrieveIngestionLogs(ctx, &tt.in) @@ -766,7 +764,7 @@ func TestRetrieveLogs(t *testing.T) { } require.Equal(t, tt.response.Metrics, response.Metrics) } - mockIngestionLogRepo.AssertExpectations(t) + mockRepository.AssertExpectations(t) }) } } @@ -814,14 +812,12 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { Total: 10, } - mockRedisClient := new(mr.RedisClient) - mockIngestionLogRepo := new(mr.IngestionLogRepository) - mockChangeSetRepo := new(mr.ChangeSetRepository) + mockRedisClient := mr.NewRedisClient(t) + mockRepository := mr.NewRepository(t) server := &Server{ - redisClient: mockRedisClient, - logger: logger, - ingestionLogRepository: mockIngestionLogRepo, - changeSetRepository: mockChangeSetRepo, + redisClient: mockRedisClient, + logger: logger, + repository: mockRepository, } ingestionLogStateMetricsMap := map[reconcilerpb.State]int32{ @@ -836,7 +832,7 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { countErr = errors.New(tt.errorMsg) } - mockIngestionLogRepo.On("CountIngestionLogsPerState", ctx).Return(ingestionLogStateMetricsMap, countErr) + mockRepository.On("CountIngestionLogsPerState", ctx).Return(ingestionLogStateMetricsMap, countErr) in := reconcilerpb.RetrieveIngestionLogsRequest{OnlyMetrics: true} @@ -848,7 +844,7 @@ func TestRetrieveIngestionLogsMetricsOnly(t *testing.T) { require.NoError(t, err) require.Equal(t, expected, response.Metrics) } - mockIngestionLogRepo.AssertExpectations(t) + mockRepository.AssertExpectations(t) }) } } diff --git a/diode-server/reconciler/server_test.go b/diode-server/reconciler/server_test.go index d008bded..fbc1f9e0 100644 --- a/diode-server/reconciler/server_test.go +++ b/diode-server/reconciler/server_test.go @@ -27,9 +27,8 @@ func startTestServer(ctx context.Context, t *testing.T, redisAddr string) (*reco s := grpc.NewServer() logger := slog.New(slog.NewJSONHandler(os.Stdout, 
&slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) - changeSetRepoMock := mocks.NewChangeSetRepository(t) - server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) + mockRepository := mocks.NewRepository(t) + server, err := reconciler.NewServer(ctx, logger, mockRepository) require.NoError(t, err) pb.RegisterReconcilerServiceServer(s, server) @@ -63,9 +62,8 @@ func TestNewServer(t *testing.T) { defer teardownEnv() logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug, AddSource: false})) - ingestionLogRepoMock := mocks.NewIngestionLogRepository(t) - changeSetRepoMock := mocks.NewChangeSetRepository(t) - server, err := reconciler.NewServer(ctx, logger, ingestionLogRepoMock, changeSetRepoMock) + mockRepository := mocks.NewRepository(t) + server, err := reconciler.NewServer(ctx, logger, mockRepository) require.NoError(t, err) require.NotNil(t, server) From e8c32fcd8a6bc348672ea6d1a89038d7448b9809 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Fri, 20 Dec 2024 16:16:51 +0000 Subject: [PATCH 23/26] feat: add postgres view retrieving ingestion log with latest change set object and array of changes Signed-off-by: Michal Fiedorowicz --- .../postgres/migrations/00002_change_sets.sql | 24 ++---- .../postgres/queries/ingestion_logs.sql | 17 ++-- .../gen/dbstore/postgres/change_sets.sql.go | 2 +- .../dbstore/postgres/ingestion_logs.sql.go | 77 +++++++------------ diode-server/gen/dbstore/postgres/types.go | 41 +++++----- diode-server/sqlc.yaml | 7 ++ 6 files changed, 69 insertions(+), 99 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index c9f0c0a5..ed517633 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -42,29 +42,19 @@ ALTER TABLE change_sets ALTER TABLE changes ADD CONSTRAINT fk_changes_change_sets FOREIGN KEY (change_set_id) REFERENCES change_sets (id); --- Create a view to join ingestion_logs with change_sets -CREATE VIEW v_ingestion_logs_change_sets AS -( -SELECT change_sets.* +-- Create a view to join ingestion_logs with aggregated change_set and changes +CREATE VIEW v_ingestion_logs_with_change_set AS +SELECT ingestion_logs.*, row_to_json(change_sets.*) AS change_set, JSON_AGG(changes.* ORDER BY changes.sequence_number ASC) FILTER ( WHERE changes.id IS NOT NULL ) AS changes FROM ingestion_logs LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id - ); - --- Create a view to join change_sets with changes -CREATE VIEW v_change_sets_changes AS -( -SELECT changes.* -FROM change_sets LEFT JOIN changes on change_sets.id = changes.change_set_id - ); +GROUP BY ingestion_logs.id, change_sets.id +ORDER BY ingestion_logs.id DESC, change_sets.id DESC; -- +goose Down --- Drop the v_ingestion_logs_change_sets view -DROP VIEW IF EXISTS v_ingestion_logs_change_sets; - --- Drop the v_change_sets_with_changes view -DROP VIEW IF EXISTS v_change_sets_with_changes; +-- Drop the v_ingestion_logs_with_change_sets view +DROP VIEW IF EXISTS v_ingestion_logs_with_change_sets; -- Drop the changes table DROP TABLE changes; diff --git a/diode-server/dbstore/postgres/queries/ingestion_logs.sql b/diode-server/dbstore/postgres/queries/ingestion_logs.sql index b114cb26..4a9aafaa 100644 --- a/diode-server/dbstore/postgres/queries/ingestion_logs.sql 
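The FILTER clause in the v_ingestion_logs_with_change_set view defined in the migration above is easy to overlook: json_agg over a LEFT JOIN otherwise yields a one-element array containing null for ingestion logs that have no change set yet, rather than a SQL NULL. The following is a minimal, self-contained sketch of the same aggregation pattern; the toy table names are illustrative only and are not part of this patch series.

-- Sketch of the JSON_AGG ... FILTER aggregation pattern (illustrative table names).
CREATE TABLE parent_logs
(
    id    INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
    label TEXT NOT NULL
);

CREATE TABLE child_changes
(
    id              INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
    parent_log_id   INTEGER NOT NULL REFERENCES parent_logs (id),
    sequence_number INTEGER NOT NULL,
    payload         JSONB
);

INSERT INTO parent_logs (label)
VALUES ('with children'),
       ('without children');

INSERT INTO child_changes (parent_log_id, sequence_number, payload)
VALUES (1, 2, '{"op": "update"}'),
       (1, 1, '{"op": "create"}');

-- Without the FILTER clause the second parent row would get '[null]';
-- with it, the aggregate is NULL when no children match the LEFT JOIN.
SELECT parent_logs.id,
       parent_logs.label,
       JSON_AGG(child_changes.* ORDER BY child_changes.sequence_number ASC)
           FILTER ( WHERE child_changes.id IS NOT NULL ) AS changes
FROM parent_logs
         LEFT JOIN child_changes ON parent_logs.id = child_changes.parent_log_id
GROUP BY parent_logs.id
ORDER BY parent_logs.id;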
+++ b/diode-server/dbstore/postgres/queries/ingestion_logs.sql @@ -27,13 +27,12 @@ ORDER BY id DESC LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); -- name: RetrieveIngestionLogsWithChangeSets :many -SELECT sqlc.embed(ingestion_logs), sqlc.embed(v_ingestion_logs_change_sets), sqlc.embed(v_change_sets_changes) -FROM ingestion_logs - LEFT JOIN v_ingestion_logs_change_sets on ingestion_logs.id = v_ingestion_logs_change_sets.ingestion_log_id - LEFT JOIN v_change_sets_changes on v_ingestion_logs_change_sets.id = v_change_sets_changes.change_set_id -WHERE (ingestion_logs.state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) - AND (ingestion_logs.data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) - AND (ingestion_logs.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) - AND (ingestion_logs.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) -ORDER BY ingestion_logs.id DESC, v_change_sets_changes.sequence_number ASC +SELECT v_ingestion_logs_with_change_set.* +FROM v_ingestion_logs_with_change_set +WHERE (v_ingestion_logs_with_change_set.state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) + AND (v_ingestion_logs_with_change_set.data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) +ORDER BY v_ingestion_logs_with_change_set.id DESC LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); + diff --git a/diode-server/gen/dbstore/postgres/change_sets.sql.go b/diode-server/gen/dbstore/postgres/change_sets.sql.go index 09b3d06b..57fb6934 100644 --- a/diode-server/gen/dbstore/postgres/change_sets.sql.go +++ b/diode-server/gen/dbstore/postgres/change_sets.sql.go @@ -26,7 +26,7 @@ type CreateChangeParams struct { ObjectType string `json:"object_type"` ObjectID pgtype.Int4 `json:"object_id"` ObjectVersion pgtype.Int4 `json:"object_version"` - Data []byte `json:"data"` + Data any `json:"data"` SequenceNumber pgtype.Int4 `json:"sequence_number"` } diff --git a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go index e9147533..811efc84 100644 --- a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go +++ b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go @@ -162,15 +162,13 @@ func (q *Queries) RetrieveIngestionLogs(ctx context.Context, arg RetrieveIngesti } const retrieveIngestionLogsWithChangeSets = `-- name: RetrieveIngestionLogsWithChangeSets :many -SELECT ingestion_logs.id, ingestion_logs.ingestion_log_uuid, ingestion_logs.data_type, ingestion_logs.state, ingestion_logs.request_id, ingestion_logs.ingestion_ts, ingestion_logs.producer_app_name, ingestion_logs.producer_app_version, ingestion_logs.sdk_name, ingestion_logs.sdk_version, ingestion_logs.entity, ingestion_logs.error, ingestion_logs.source_metadata, ingestion_logs.created_at, ingestion_logs.updated_at, v_ingestion_logs_change_sets.id, v_ingestion_logs_change_sets.change_set_uuid, v_ingestion_logs_change_sets.ingestion_log_id, v_ingestion_logs_change_sets.branch_id, v_ingestion_logs_change_sets.created_at, v_ingestion_logs_change_sets.updated_at, v_change_sets_changes.id, v_change_sets_changes.change_uuid, v_change_sets_changes.change_set_id, v_change_sets_changes.change_type, 
v_change_sets_changes.object_type, v_change_sets_changes.object_id, v_change_sets_changes.object_version, v_change_sets_changes.data, v_change_sets_changes.sequence_number, v_change_sets_changes.created_at, v_change_sets_changes.updated_at -FROM ingestion_logs - LEFT JOIN v_ingestion_logs_change_sets on ingestion_logs.id = v_ingestion_logs_change_sets.ingestion_log_id - LEFT JOIN v_change_sets_changes on v_ingestion_logs_change_sets.id = v_change_sets_changes.change_set_id -WHERE (ingestion_logs.state = $1 OR $1 IS NULL) - AND (ingestion_logs.data_type = $2 OR $2 IS NULL) - AND (ingestion_logs.ingestion_ts >= $3 OR $3 IS NULL) - AND (ingestion_logs.ingestion_ts <= $4 OR $4 IS NULL) -ORDER BY ingestion_logs.id DESC, v_change_sets_changes.sequence_number ASC +SELECT v_ingestion_logs_with_change_set.id, v_ingestion_logs_with_change_set.ingestion_log_uuid, v_ingestion_logs_with_change_set.data_type, v_ingestion_logs_with_change_set.state, v_ingestion_logs_with_change_set.request_id, v_ingestion_logs_with_change_set.ingestion_ts, v_ingestion_logs_with_change_set.producer_app_name, v_ingestion_logs_with_change_set.producer_app_version, v_ingestion_logs_with_change_set.sdk_name, v_ingestion_logs_with_change_set.sdk_version, v_ingestion_logs_with_change_set.entity, v_ingestion_logs_with_change_set.error, v_ingestion_logs_with_change_set.source_metadata, v_ingestion_logs_with_change_set.created_at, v_ingestion_logs_with_change_set.updated_at, v_ingestion_logs_with_change_set.change_set, v_ingestion_logs_with_change_set.changes +FROM v_ingestion_logs_with_change_set +WHERE (v_ingestion_logs_with_change_set.state = $1 OR $1 IS NULL) + AND (v_ingestion_logs_with_change_set.data_type = $2 OR $2 IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts >= $3 OR $3 IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts <= $4 OR $4 IS NULL) +ORDER BY v_ingestion_logs_with_change_set.id DESC LIMIT $6 OFFSET $5 ` @@ -183,13 +181,7 @@ type RetrieveIngestionLogsWithChangeSetsParams struct { Limit int32 `json:"limit"` } -type RetrieveIngestionLogsWithChangeSetsRow struct { - IngestionLog IngestionLog `json:"ingestion_log"` - VIngestionLogsChangeSet VIngestionLogsChangeSet `json:"vingestion_logs_change_set"` - VChangeSetsChange VChangeSetsChange `json:"vchange_sets_change"` -} - -func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg RetrieveIngestionLogsWithChangeSetsParams) ([]RetrieveIngestionLogsWithChangeSetsRow, error) { +func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg RetrieveIngestionLogsWithChangeSetsParams) ([]VIngestionLogsWithChangeSet, error) { rows, err := q.db.Query(ctx, retrieveIngestionLogsWithChangeSets, arg.State, arg.DataType, @@ -202,42 +194,27 @@ func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg R return nil, err } defer rows.Close() - var items []RetrieveIngestionLogsWithChangeSetsRow + var items []VIngestionLogsWithChangeSet for rows.Next() { - var i RetrieveIngestionLogsWithChangeSetsRow + var i VIngestionLogsWithChangeSet if err := rows.Scan( - &i.IngestionLog.ID, - &i.IngestionLog.IngestionLogUuid, - &i.IngestionLog.DataType, - &i.IngestionLog.State, - &i.IngestionLog.RequestID, - &i.IngestionLog.IngestionTs, - &i.IngestionLog.ProducerAppName, - &i.IngestionLog.ProducerAppVersion, - &i.IngestionLog.SdkName, - &i.IngestionLog.SdkVersion, - &i.IngestionLog.Entity, - &i.IngestionLog.Error, - &i.IngestionLog.SourceMetadata, - &i.IngestionLog.CreatedAt, - &i.IngestionLog.UpdatedAt, - 
&i.VIngestionLogsChangeSet.ID, - &i.VIngestionLogsChangeSet.ChangeSetUuid, - &i.VIngestionLogsChangeSet.IngestionLogID, - &i.VIngestionLogsChangeSet.BranchID, - &i.VIngestionLogsChangeSet.CreatedAt, - &i.VIngestionLogsChangeSet.UpdatedAt, - &i.VChangeSetsChange.ID, - &i.VChangeSetsChange.ChangeUuid, - &i.VChangeSetsChange.ChangeSetID, - &i.VChangeSetsChange.ChangeType, - &i.VChangeSetsChange.ObjectType, - &i.VChangeSetsChange.ObjectID, - &i.VChangeSetsChange.ObjectVersion, - &i.VChangeSetsChange.Data, - &i.VChangeSetsChange.SequenceNumber, - &i.VChangeSetsChange.CreatedAt, - &i.VChangeSetsChange.UpdatedAt, + &i.ID, + &i.IngestionLogUuid, + &i.DataType, + &i.State, + &i.RequestID, + &i.IngestionTs, + &i.ProducerAppName, + &i.ProducerAppVersion, + &i.SdkName, + &i.SdkVersion, + &i.Entity, + &i.Error, + &i.SourceMetadata, + &i.CreatedAt, + &i.UpdatedAt, + &i.ChangeSet, + &i.Changes, ); err != nil { return nil, err } diff --git a/diode-server/gen/dbstore/postgres/types.go b/diode-server/gen/dbstore/postgres/types.go index 1ad8c25d..8917cee7 100644 --- a/diode-server/gen/dbstore/postgres/types.go +++ b/diode-server/gen/dbstore/postgres/types.go @@ -16,7 +16,7 @@ type Change struct { ObjectType string `json:"object_type"` ObjectID pgtype.Int4 `json:"object_id"` ObjectVersion pgtype.Int4 `json:"object_version"` - Data []byte `json:"data"` + Data any `json:"data"` SequenceNumber pgtype.Int4 `json:"sequence_number"` CreatedAt pgtype.Timestamptz `json:"created_at"` UpdatedAt pgtype.Timestamptz `json:"updated_at"` @@ -49,25 +49,22 @@ type IngestionLog struct { UpdatedAt pgtype.Timestamptz `json:"updated_at"` } -type VChangeSetsChange struct { - ID pgtype.Int4 `json:"id"` - ChangeUuid pgtype.Text `json:"change_uuid"` - ChangeSetID pgtype.Int4 `json:"change_set_id"` - ChangeType pgtype.Text `json:"change_type"` - ObjectType pgtype.Text `json:"object_type"` - ObjectID pgtype.Int4 `json:"object_id"` - ObjectVersion pgtype.Int4 `json:"object_version"` - Data []byte `json:"data"` - SequenceNumber pgtype.Int4 `json:"sequence_number"` - CreatedAt pgtype.Timestamptz `json:"created_at"` - UpdatedAt pgtype.Timestamptz `json:"updated_at"` -} - -type VIngestionLogsChangeSet struct { - ID pgtype.Int4 `json:"id"` - ChangeSetUuid pgtype.Text `json:"change_set_uuid"` - IngestionLogID pgtype.Int4 `json:"ingestion_log_id"` - BranchID pgtype.Text `json:"branch_id"` - CreatedAt pgtype.Timestamptz `json:"created_at"` - UpdatedAt pgtype.Timestamptz `json:"updated_at"` +type VIngestionLogsWithChangeSet struct { + ID int32 `json:"id"` + IngestionLogUuid string `json:"ingestion_log_uuid"` + DataType pgtype.Text `json:"data_type"` + State pgtype.Int4 `json:"state"` + RequestID pgtype.Text `json:"request_id"` + IngestionTs pgtype.Int8 `json:"ingestion_ts"` + ProducerAppName pgtype.Text `json:"producer_app_name"` + ProducerAppVersion pgtype.Text `json:"producer_app_version"` + SdkName pgtype.Text `json:"sdk_name"` + SdkVersion pgtype.Text `json:"sdk_version"` + Entity []byte `json:"entity"` + Error []byte `json:"error"` + SourceMetadata []byte `json:"source_metadata"` + CreatedAt pgtype.Timestamptz `json:"created_at"` + UpdatedAt pgtype.Timestamptz `json:"updated_at"` + ChangeSet ChangeSet `json:"change_set"` + Changes []byte `json:"changes"` } diff --git a/diode-server/sqlc.yaml b/diode-server/sqlc.yaml index 5a2f896e..36d45791 100644 --- a/diode-server/sqlc.yaml +++ b/diode-server/sqlc.yaml @@ -10,3 +10,10 @@ sql: sql_package: "pgx/v5" output_models_file_name: "types.go" emit_json_tags: true + overrides: + - column: 
v_ingestion_logs_with_change_set.change_set + go_type: + type: ChangeSet + - column: changes.data + go_type: + type: any From bb7fe254cc40909cdba03f9e84b1fddfce6b0c2a Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Fri, 20 Dec 2024 16:18:44 +0000 Subject: [PATCH 24/26] feat: refactor retrieving ingestion logs Signed-off-by: Michal Fiedorowicz --- diode-server/dbstore/postgres/repository.go | 93 +++++++------------ diode-server/reconciler/logs_retriever.go | 27 ++---- .../reconciler/server_internal_test.go | 59 ++++++++++-- 3 files changed, 92 insertions(+), 87 deletions(-) diff --git a/diode-server/dbstore/postgres/repository.go b/diode-server/dbstore/postgres/repository.go index 687fd653..e33c9dc8 100644 --- a/diode-server/dbstore/postgres/repository.go +++ b/diode-server/dbstore/postgres/repository.go @@ -111,61 +111,9 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil return nil, err } - changeSetsMap := make(map[int32]*changeset.ChangeSet) - for _, row := range rawIngestionLogs { - if !row.VIngestionLogsChangeSet.ID.Valid || !row.VChangeSetsChange.ChangeUuid.Valid { - continue - } - - var changeData map[string]any - if row.VChangeSetsChange.Data != nil { - if err := json.Unmarshal(row.VChangeSetsChange.Data, &changeData); err != nil { - return nil, fmt.Errorf("failed to unmarshal change data: %w", err) - } - } - - change := changeset.Change{ - ChangeID: row.VChangeSetsChange.ChangeUuid.String, - ChangeType: row.VChangeSetsChange.ChangeType.String, - ObjectType: row.VChangeSetsChange.ObjectType.String, - Data: changeData, - } - objID := int(row.VChangeSetsChange.ObjectID.Int32) - if row.VChangeSetsChange.ObjectID.Valid { - change.ObjectID = &objID - } - objVersion := int(row.VChangeSetsChange.ObjectVersion.Int32) - if row.VChangeSetsChange.ObjectVersion.Valid { - change.ObjectVersion = &objVersion - } - - changeSet, ok := changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] - if !ok { - changes := make([]changeset.Change, 0) - changes = append(changes, change) - - changeSet = &changeset.ChangeSet{ - ChangeSetID: row.VIngestionLogsChangeSet.ChangeSetUuid.String, - ChangeSet: changes, - } - if row.VIngestionLogsChangeSet.BranchID.Valid { - changeSet.BranchID = &row.VIngestionLogsChangeSet.BranchID.String - } - changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] = changeSet - continue - } - - changeSet.ChangeSet = append(changeSet.ChangeSet, change) - } - ingestionLogs := make([]*reconcilerpb.IngestionLog, 0, len(rawIngestionLogs)) - ingestionLogsMap := make(map[int32]*reconcilerpb.IngestionLog) for _, row := range rawIngestionLogs { - if _, ok := ingestionLogsMap[row.IngestionLog.ID]; ok { - continue - } - - ingestionLog := row.IngestionLog + ingestionLog := row entity := &diodepb.Entity{} if err := protojson.Unmarshal(ingestionLog.Entity, entity); err != nil { return nil, fmt.Errorf("failed to unmarshal entity: %w", err) @@ -191,10 +139,40 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil Error: &ingestionErr, } - changeSet, ok := changeSetsMap[row.VIngestionLogsChangeSet.ID.Int32] - if ok { + if row.Changes != nil { + var dbChanges []postgres.Change + if err := json.Unmarshal(row.Changes, &dbChanges); err != nil { + return nil, fmt.Errorf("failed to unmarshal changes: %w", err) + } + + changes := make([]changeset.Change, 0, len(dbChanges)) + for _, dbChange := range dbChanges { + change := changeset.Change{ + ChangeID: dbChange.ChangeUuid, + ChangeType: dbChange.ChangeType, + ObjectType: dbChange.ObjectType, + 
Data: dbChange.Data, + } + + objID := int(dbChange.ObjectID.Int32) + if dbChange.ObjectID.Valid { + change.ObjectID = &objID + } + objVersion := int(dbChange.ObjectVersion.Int32) + if dbChange.ObjectVersion.Valid { + change.ObjectVersion = &objVersion + } + + changes = append(changes, change) + } + + changeSet := &changeset.ChangeSet{ + ChangeSetID: row.ChangeSet.ChangeSetUuid, + ChangeSet: changes, + } + var compressedChangeSet []byte - if len(changeSet.ChangeSet) > 0 { + if len(changes) > 0 { b, err := changeset.CompressChangeSet(changeSet) if err != nil { return nil, fmt.Errorf("failed to compress change set: %w", err) @@ -203,12 +181,11 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil } log.ChangeSet = &reconcilerpb.ChangeSet{ - Id: row.VIngestionLogsChangeSet.ChangeSetUuid.String, + Id: row.ChangeSet.ChangeSetUuid, Data: compressedChangeSet, } } - ingestionLogsMap[ingestionLog.ID] = log ingestionLogs = append(ingestionLogs, log) } diff --git a/diode-server/reconciler/logs_retriever.go b/diode-server/reconciler/logs_retriever.go index f677fa57..6abffe25 100644 --- a/diode-server/reconciler/logs_retriever.go +++ b/diode-server/reconciler/logs_retriever.go @@ -41,9 +41,14 @@ func retrieveIngestionMetrics(ctx context.Context, repository Repository) (*reco } func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, repository Repository, in *reconcilerpb.RetrieveIngestionLogsRequest) (*reconcilerpb.RetrieveIngestionLogsResponse, error) { + ingestionLogsMetricsResponse, err := retrieveIngestionMetrics(ctx, repository) + if err != nil { + return nil, err + } + if in.GetOnlyMetrics() { logger.Debug("retrieving only ingestion metrics") - return retrieveIngestionMetrics(ctx, repository) + return ingestionLogsMetricsResponse, nil } pageSize := in.GetPageSize() @@ -73,25 +78,9 @@ func retrieveIngestionLogs(ctx context.Context, logger *slog.Logger, repository } // Fill metrics - var metrics reconcilerpb.IngestionMetrics - total := int32(len(logs)) - if in.State != nil { - if in.GetState() == reconcilerpb.State_UNSPECIFIED { - metrics.Total = total - } else if in.GetState() == reconcilerpb.State_QUEUED { - metrics.Queued = total - } else if in.GetState() == reconcilerpb.State_RECONCILED { - metrics.Reconciled = total - } else if in.GetState() == reconcilerpb.State_FAILED { - metrics.Failed = total - } else if in.GetState() == reconcilerpb.State_NO_CHANGES { - metrics.NoChanges = total - } - } else { - metrics.Total = total - } + metrics := ingestionLogsMetricsResponse.GetMetrics() - return &reconcilerpb.RetrieveIngestionLogsResponse{Logs: logs, Metrics: &metrics, NextPageToken: nextPageToken}, nil + return &reconcilerpb.RetrieveIngestionLogsResponse{Logs: logs, Metrics: metrics, NextPageToken: nextPageToken}, nil } func decodeBase64ToInt(encoded string) (int32, error) { diff --git a/diode-server/reconciler/server_internal_test.go b/diode-server/reconciler/server_internal_test.go index e34b8e96..f80f438c 100644 --- a/diode-server/reconciler/server_internal_test.go +++ b/diode-server/reconciler/server_internal_test.go @@ -108,15 +108,19 @@ func TestIsAuthenticated(t *testing.T) { func TestRetrieveLogs(t *testing.T) { tests := []struct { - name string - in reconcilerpb.RetrieveIngestionLogsRequest - ingestionLogs []*reconcilerpb.IngestionLog - response *reconcilerpb.RetrieveIngestionLogsResponse - hasError bool + name string + in reconcilerpb.RetrieveIngestionLogsRequest + ingestionLogsPerState map[reconcilerpb.State]int32 + ingestionLogs 
[]*reconcilerpb.IngestionLog + response *reconcilerpb.RetrieveIngestionLogsResponse + hasError bool }{ { name: "valid request", in: reconcilerpb.RetrieveIngestionLogsRequest{}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_RECONCILED: 2, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -220,7 +224,8 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - Total: 2, + Total: 2, + Reconciled: 2, }, NextPageToken: "F/Jk/zc08gA=", }, @@ -229,6 +234,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "request with reconciliation error", in: reconcilerpb.RetrieveIngestionLogsRequest{}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_FAILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { DataType: "ipam.ipaddress", @@ -299,7 +307,8 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - Total: 1, + Total: 1, + Failed: 1, }, NextPageToken: "AAAFlw==", }, @@ -308,6 +317,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by new state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_QUEUED.Enum()}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_QUEUED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { DataType: "dcim.interface", @@ -356,6 +368,7 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ + Total: 1, Queued: 1, }, NextPageToken: "AAAFlw==", @@ -365,6 +378,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by reconciled state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_RECONCILED.Enum()}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_RECONCILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -415,6 +431,7 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ + Total: 1, Reconciled: 1, }, NextPageToken: "AAAFlw==", @@ -424,6 +441,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by failed state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_FAILED.Enum()}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_FAILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -474,6 +494,7 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ + Total: 1, Failed: 1, }, NextPageToken: "AAAFlw==", @@ -483,6 +504,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by no changes state", in: reconcilerpb.RetrieveIngestionLogsRequest{State: reconcilerpb.State_NO_CHANGES.Enum()}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_NO_CHANGES: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -533,6 +557,7 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ + Total: 1, NoChanges: 1, }, NextPageToken: "AAAFlw==", @@ -542,6 +567,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by data type", in: reconcilerpb.RetrieveIngestionLogsRequest{DataType: "dcim.interface"}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_RECONCILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -592,7 +620,8 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - 
Total: 1, + Total: 1, + Reconciled: 1, }, NextPageToken: "AAAFlw==", }, @@ -601,6 +630,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "filter by timestamp", in: reconcilerpb.RetrieveIngestionLogsRequest{IngestionTsStart: 1725552914392208639}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_RECONCILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -651,7 +683,8 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - Total: 1, + Total: 1, + Reconciled: 1, }, NextPageToken: "AAAFlw==", }, @@ -660,6 +693,9 @@ func TestRetrieveLogs(t *testing.T) { { name: "pagination check", in: reconcilerpb.RetrieveIngestionLogsRequest{PageToken: "AAAFlg=="}, + ingestionLogsPerState: map[reconcilerpb.State]int32{ + reconcilerpb.State_RECONCILED: 1, + }, ingestionLogs: []*reconcilerpb.IngestionLog{ { Id: "2mAT7vZ38H4ttI0i5dBebwJbSnZ", @@ -710,7 +746,8 @@ func TestRetrieveLogs(t *testing.T) { }, }, Metrics: &reconcilerpb.IngestionMetrics{ - Total: 1, + Total: 1, + Reconciled: 1, }, NextPageToken: "AAAFlw==", }, @@ -741,6 +778,8 @@ func TestRetrieveLogs(t *testing.T) { retrieveErr = errors.New("failed to retrieve ingestion logs") } + mockRepository.On("CountIngestionLogsPerState", ctx).Return(tt.ingestionLogsPerState, nil) + if !tt.hasError { mockRepository.On("RetrieveIngestionLogs", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(tt.ingestionLogs, retrieveErr) } From 0ad52e5dfe3314e4b7ca2010109cd77f526a4c22 Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Fri, 20 Dec 2024 17:16:39 +0000 Subject: [PATCH 25/26] chore: go mod tidy Signed-off-by: Michal Fiedorowicz --- diode-server/go.sum | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/diode-server/go.sum b/diode-server/go.sum index 272ba161..9ec65645 100644 --- a/diode-server/go.sum +++ b/diode-server/go.sum @@ -99,10 +99,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= @@ -114,8 +112,6 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= 
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= From 4145fedaa9d7fa2963239aa3c8165e3f5968ddcb Mon Sep 17 00:00:00 2001 From: Michal Fiedorowicz Date: Fri, 20 Dec 2024 20:51:38 +0000 Subject: [PATCH 26/26] feat: postgres tables with hybrid IDs id integer for PK external_id UUID for external exposure Signed-off-by: Michal Fiedorowicz --- .../migrations/00001_ingestion_logs.sql | 4 +- .../postgres/migrations/00002_change_sets.sql | 13 +-- .../dbstore/postgres/queries/change_sets.sql | 4 +- .../postgres/queries/ingestion_logs.sql | 9 ++- diode-server/dbstore/postgres/repository.go | 14 ++-- .../gen/dbstore/postgres/change_sets.sql.go | 20 ++--- .../dbstore/postgres/ingestion_logs.sql.go | 26 +++--- diode-server/gen/dbstore/postgres/types.go | 8 +- diode-server/go.mod | 1 - diode-server/go.sum | 2 - .../reconciler/ingestion_processor.go | 81 ++++++++----------- .../ingestion_processor_internal_test.go | 14 +--- 12 files changed, 90 insertions(+), 106 deletions(-) diff --git a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql index 05750377..2914c41b 100644 --- a/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql +++ b/diode-server/dbstore/postgres/migrations/00001_ingestion_logs.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - ingestion_log_uuid VARCHAR(255) NOT NULL, + external_id VARCHAR(255) NOT NULL, data_type VARCHAR(255), state INTEGER, request_id VARCHAR(255), @@ -21,7 +21,7 @@ CREATE TABLE IF NOT EXISTS ingestion_logs ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_ingestion_logs_ingestion_log_uuid ON ingestion_logs (ingestion_log_uuid); +CREATE INDEX IF NOT EXISTS idx_ingestion_logs_external_id ON ingestion_logs (external_id); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_data_type ON ingestion_logs (data_type); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_state ON ingestion_logs (state); CREATE INDEX IF NOT EXISTS idx_ingestion_logs_request_id ON ingestion_logs (request_id); diff --git a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql index ed517633..1fe7766b 100644 --- a/diode-server/dbstore/postgres/migrations/00002_change_sets.sql +++ b/diode-server/dbstore/postgres/migrations/00002_change_sets.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS change_sets ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_set_uuid VARCHAR(255) NOT NULL, + external_id VARCHAR(255) NOT NULL, ingestion_log_id INTEGER NOT NULL, branch_id VARCHAR(255), created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP, @@ -12,13 +12,14 @@ CREATE TABLE IF NOT EXISTS change_sets ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_change_sets_change_set_uuid ON change_sets (change_set_uuid); +CREATE INDEX IF NOT EXISTS idx_change_sets_external_id ON change_sets (external_id); +CREATE INDEX IF NOT EXISTS idx_change_sets_ingestion_log_id ON change_sets (ingestion_log_id); -- Create the changes table CREATE TABLE IF NOT EXISTS changes ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - change_uuid VARCHAR(255) NOT NULL, + external_id VARCHAR(255) NOT NULL, change_set_id INTEGER NOT NULL, change_type VARCHAR(50) NOT NULL, object_type VARCHAR(100) NOT NULL, @@ -31,7 +32,7 @@ CREATE TABLE IF NOT 
EXISTS changes ); -- Create indices -CREATE INDEX IF NOT EXISTS idx_changes_change_uuid ON changes (change_uuid); +CREATE INDEX IF NOT EXISTS idx_changes_external_id ON changes (external_id); CREATE INDEX IF NOT EXISTS idx_changes_change_set_id ON changes (change_set_id); CREATE INDEX IF NOT EXISTS idx_changes_change_type ON changes (change_type); CREATE INDEX IF NOT EXISTS idx_changes_object_type ON changes (object_type); @@ -44,7 +45,9 @@ ALTER TABLE changes -- Create a view to join ingestion_logs with aggregated change_set and changes CREATE VIEW v_ingestion_logs_with_change_set AS -SELECT ingestion_logs.*, row_to_json(change_sets.*) AS change_set, JSON_AGG(changes.* ORDER BY changes.sequence_number ASC) FILTER ( WHERE changes.id IS NOT NULL ) AS changes +SELECT ingestion_logs.*, + row_to_json(change_sets.*) AS change_set, + JSON_AGG(changes.* ORDER BY changes.sequence_number ASC) FILTER ( WHERE changes.id IS NOT NULL ) AS changes FROM ingestion_logs LEFT JOIN change_sets on ingestion_logs.id = change_sets.ingestion_log_id LEFT JOIN changes on change_sets.id = changes.change_set_id diff --git a/diode-server/dbstore/postgres/queries/change_sets.sql b/diode-server/dbstore/postgres/queries/change_sets.sql index 9b440c91..e1101b24 100644 --- a/diode-server/dbstore/postgres/queries/change_sets.sql +++ b/diode-server/dbstore/postgres/queries/change_sets.sql @@ -1,12 +1,12 @@ -- name: CreateChangeSet :one -INSERT INTO change_sets (change_set_uuid, ingestion_log_id, branch_id) +INSERT INTO change_sets (external_id, ingestion_log_id, branch_id) VALUES ($1, $2, $3) RETURNING *; -- name: CreateChange :one -INSERT INTO changes (change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, +INSERT INTO changes (external_id, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING *; diff --git a/diode-server/dbstore/postgres/queries/ingestion_logs.sql b/diode-server/dbstore/postgres/queries/ingestion_logs.sql index 4a9aafaa..9f8334ac 100644 --- a/diode-server/dbstore/postgres/queries/ingestion_logs.sql +++ b/diode-server/dbstore/postgres/queries/ingestion_logs.sql @@ -1,5 +1,5 @@ -- name: CreateIngestionLog :one -INSERT INTO ingestion_logs (ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, +INSERT INTO ingestion_logs (external_id, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, source_metadata) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING *; @@ -31,8 +31,9 @@ SELECT v_ingestion_logs_with_change_set.* FROM v_ingestion_logs_with_change_set WHERE (v_ingestion_logs_with_change_set.state = sqlc.narg('state') OR sqlc.narg('state') IS NULL) AND (v_ingestion_logs_with_change_set.data_type = sqlc.narg('data_type') OR sqlc.narg('data_type') IS NULL) - AND (v_ingestion_logs_with_change_set.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR sqlc.narg('ingestion_ts_start') IS NULL) - AND (v_ingestion_logs_with_change_set.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR sqlc.narg('ingestion_ts_end') IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts >= sqlc.narg('ingestion_ts_start') OR + sqlc.narg('ingestion_ts_start') IS NULL) + AND (v_ingestion_logs_with_change_set.ingestion_ts <= sqlc.narg('ingestion_ts_end') OR + sqlc.narg('ingestion_ts_end') IS NULL) ORDER BY v_ingestion_logs_with_change_set.id DESC LIMIT sqlc.arg('limit') OFFSET sqlc.arg('offset'); - diff --git 
a/diode-server/dbstore/postgres/repository.go b/diode-server/dbstore/postgres/repository.go index e33c9dc8..adb4ec07 100644 --- a/diode-server/dbstore/postgres/repository.go +++ b/diode-server/dbstore/postgres/repository.go @@ -36,7 +36,7 @@ func (r *Repository) CreateIngestionLog(ctx context.Context, ingestionLog *recon return nil, fmt.Errorf("failed to marshal entity: %w", err) } params := postgres.CreateIngestionLogParams{ - IngestionLogUuid: ingestionLog.Id, + ExternalID: ingestionLog.Id, DataType: pgtype.Text{String: ingestionLog.DataType, Valid: true}, State: pgtype.Int4{Int32: int32(ingestionLog.State), Valid: true}, RequestID: pgtype.Text{String: ingestionLog.RequestId, Valid: true}, @@ -126,7 +126,7 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil } log := &reconcilerpb.IngestionLog{ - Id: ingestionLog.IngestionLogUuid, + Id: ingestionLog.ExternalID, DataType: ingestionLog.DataType.String, State: reconcilerpb.State(ingestionLog.State.Int32), RequestId: ingestionLog.RequestID.String, @@ -148,7 +148,7 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil changes := make([]changeset.Change, 0, len(dbChanges)) for _, dbChange := range dbChanges { change := changeset.Change{ - ChangeID: dbChange.ChangeUuid, + ChangeID: dbChange.ExternalID, ChangeType: dbChange.ChangeType, ObjectType: dbChange.ObjectType, Data: dbChange.Data, @@ -167,7 +167,7 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil } changeSet := &changeset.ChangeSet{ - ChangeSetID: row.ChangeSet.ChangeSetUuid, + ChangeSetID: row.ChangeSet.ExternalID, ChangeSet: changes, } @@ -181,7 +181,7 @@ func (r *Repository) RetrieveIngestionLogs(ctx context.Context, filter *reconcil } log.ChangeSet = &reconcilerpb.ChangeSet{ - Id: row.ChangeSet.ChangeSetUuid, + Id: row.ChangeSet.ExternalID, Data: compressedChangeSet, } } @@ -207,7 +207,7 @@ func (r *Repository) CreateChangeSet(ctx context.Context, changeSet changeset.Ch qtx := r.queries.WithTx(tx) params := postgres.CreateChangeSetParams{ - ChangeSetUuid: changeSet.ChangeSetID, + ExternalID: changeSet.ChangeSetID, IngestionLogID: ingestionLogID, } if changeSet.BranchID != nil { @@ -227,7 +227,7 @@ func (r *Repository) CreateChangeSet(ctx context.Context, changeSet changeset.Ch } changeParams := postgres.CreateChangeParams{ - ChangeUuid: change.ChangeID, + ExternalID: change.ChangeID, ChangeSetID: cs.ID, ChangeType: change.ChangeType, ObjectType: change.ObjectType, diff --git a/diode-server/gen/dbstore/postgres/change_sets.sql.go b/diode-server/gen/dbstore/postgres/change_sets.sql.go index 57fb6934..683e3fca 100644 --- a/diode-server/gen/dbstore/postgres/change_sets.sql.go +++ b/diode-server/gen/dbstore/postgres/change_sets.sql.go @@ -13,14 +13,14 @@ import ( const createChange = `-- name: CreateChange :one -INSERT INTO changes (change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, +INSERT INTO changes (external_id, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) -RETURNING id, change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at +RETURNING id, external_id, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at ` type CreateChangeParams struct { - ChangeUuid string `json:"change_uuid"` + ExternalID string `json:"external_id"` ChangeSetID int32 
diff --git a/diode-server/gen/dbstore/postgres/change_sets.sql.go b/diode-server/gen/dbstore/postgres/change_sets.sql.go
index 57fb6934..683e3fca 100644
--- a/diode-server/gen/dbstore/postgres/change_sets.sql.go
+++ b/diode-server/gen/dbstore/postgres/change_sets.sql.go
@@ -13,14 +13,14 @@ import (
 )
 
 const createChange = `-- name: CreateChange :one
-INSERT INTO changes (change_uuid, change_set_id, change_type, object_type, object_id, object_version, data,
+INSERT INTO changes (external_id, change_set_id, change_type, object_type, object_id, object_version, data,
                      sequence_number)
 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
-RETURNING id, change_uuid, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at
+RETURNING id, external_id, change_set_id, change_type, object_type, object_id, object_version, data, sequence_number, created_at, updated_at
 `
 
 type CreateChangeParams struct {
-	ChangeUuid  string `json:"change_uuid"`
+	ExternalID  string `json:"external_id"`
 	ChangeSetID int32  `json:"change_set_id"`
 	ChangeType  string `json:"change_type"`
 	ObjectType  string `json:"object_type"`
@@ -32,7 +32,7 @@ type CreateChangeParams struct {
 
 func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Change, error) {
 	row := q.db.QueryRow(ctx, createChange,
-		arg.ChangeUuid,
+		arg.ExternalID,
 		arg.ChangeSetID,
 		arg.ChangeType,
 		arg.ObjectType,
@@ -44,7 +44,7 @@ func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Cha
 	var i Change
 	err := row.Scan(
 		&i.ID,
-		&i.ChangeUuid,
+		&i.ExternalID,
 		&i.ChangeSetID,
 		&i.ChangeType,
 		&i.ObjectType,
@@ -60,23 +60,23 @@ func (q *Queries) CreateChange(ctx context.Context, arg CreateChangeParams) (Cha
 }
 
 const createChangeSet = `-- name: CreateChangeSet :one
-INSERT INTO change_sets (change_set_uuid, ingestion_log_id, branch_id)
+INSERT INTO change_sets (external_id, ingestion_log_id, branch_id)
 VALUES ($1, $2, $3)
-RETURNING id, change_set_uuid, ingestion_log_id, branch_id, created_at, updated_at
+RETURNING id, external_id, ingestion_log_id, branch_id, created_at, updated_at
 `
 
 type CreateChangeSetParams struct {
-	ChangeSetUuid  string      `json:"change_set_uuid"`
+	ExternalID     string      `json:"external_id"`
 	IngestionLogID int32       `json:"ingestion_log_id"`
 	BranchID       pgtype.Text `json:"branch_id"`
 }
 
 func (q *Queries) CreateChangeSet(ctx context.Context, arg CreateChangeSetParams) (ChangeSet, error) {
-	row := q.db.QueryRow(ctx, createChangeSet, arg.ChangeSetUuid, arg.IngestionLogID, arg.BranchID)
+	row := q.db.QueryRow(ctx, createChangeSet, arg.ExternalID, arg.IngestionLogID, arg.BranchID)
 	var i ChangeSet
 	err := row.Scan(
 		&i.ID,
-		&i.ChangeSetUuid,
+		&i.ExternalID,
 		&i.IngestionLogID,
 		&i.BranchID,
 		&i.CreatedAt,
diff --git a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go
index 811efc84..da019b51 100644
--- a/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go
+++ b/diode-server/gen/dbstore/postgres/ingestion_logs.sql.go
@@ -43,14 +43,14 @@ func (q *Queries) CountIngestionLogsPerState(ctx context.Context) ([]CountIngest
 }
 
 const createIngestionLog = `-- name: CreateIngestionLog :one
-INSERT INTO ingestion_logs (ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name,
+INSERT INTO ingestion_logs (external_id, data_type, state, request_id, ingestion_ts, producer_app_name,
                             producer_app_version, sdk_name, sdk_version, entity, source_metadata)
 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
-RETURNING id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
+RETURNING id, external_id, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
 `
 
 type CreateIngestionLogParams struct {
-	IngestionLogUuid string      `json:"ingestion_log_uuid"`
+	ExternalID       string      `json:"external_id"`
 	DataType         pgtype.Text `json:"data_type"`
 	State            pgtype.Int4 `json:"state"`
 	RequestID        pgtype.Text `json:"request_id"`
@@ -65,7 +65,7 @@ type CreateIngestionLogParams struct {
 
 func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLogParams) (IngestionLog, error) {
 	row := q.db.QueryRow(ctx, createIngestionLog,
-		arg.IngestionLogUuid,
+		arg.ExternalID,
 		arg.DataType,
 		arg.State,
 		arg.RequestID,
@@ -80,7 +80,7 @@ func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLog
 	var i IngestionLog
 	err := row.Scan(
 		&i.ID,
-		&i.IngestionLogUuid,
+		&i.ExternalID,
 		&i.DataType,
 		&i.State,
 		&i.RequestID,
@@ -99,7 +99,7 @@ func (q *Queries) CreateIngestionLog(ctx context.Context, arg CreateIngestionLog
 }
 
 const retrieveIngestionLogs = `-- name: RetrieveIngestionLogs :many
-SELECT id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
+SELECT id, external_id, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
 FROM ingestion_logs
 WHERE (state = $1 OR $1 IS NULL)
   AND (data_type = $2 OR $2 IS NULL)
@@ -136,7 +136,7 @@ func (q *Queries) RetrieveIngestionLogs(ctx context.Context, arg RetrieveIngesti
 		var i IngestionLog
 		if err := rows.Scan(
 			&i.ID,
-			&i.IngestionLogUuid,
+			&i.ExternalID,
 			&i.DataType,
 			&i.State,
 			&i.RequestID,
@@ -162,12 +162,14 @@ func (q *Queries) RetrieveIngestionLogs(ctx context.Context, arg RetrieveIngesti
 }
 
 const retrieveIngestionLogsWithChangeSets = `-- name: RetrieveIngestionLogsWithChangeSets :many
-SELECT v_ingestion_logs_with_change_set.id, v_ingestion_logs_with_change_set.ingestion_log_uuid, v_ingestion_logs_with_change_set.data_type, v_ingestion_logs_with_change_set.state, v_ingestion_logs_with_change_set.request_id, v_ingestion_logs_with_change_set.ingestion_ts, v_ingestion_logs_with_change_set.producer_app_name, v_ingestion_logs_with_change_set.producer_app_version, v_ingestion_logs_with_change_set.sdk_name, v_ingestion_logs_with_change_set.sdk_version, v_ingestion_logs_with_change_set.entity, v_ingestion_logs_with_change_set.error, v_ingestion_logs_with_change_set.source_metadata, v_ingestion_logs_with_change_set.created_at, v_ingestion_logs_with_change_set.updated_at, v_ingestion_logs_with_change_set.change_set, v_ingestion_logs_with_change_set.changes
+SELECT v_ingestion_logs_with_change_set.id, v_ingestion_logs_with_change_set.external_id, v_ingestion_logs_with_change_set.data_type, v_ingestion_logs_with_change_set.state, v_ingestion_logs_with_change_set.request_id, v_ingestion_logs_with_change_set.ingestion_ts, v_ingestion_logs_with_change_set.producer_app_name, v_ingestion_logs_with_change_set.producer_app_version, v_ingestion_logs_with_change_set.sdk_name, v_ingestion_logs_with_change_set.sdk_version, v_ingestion_logs_with_change_set.entity, v_ingestion_logs_with_change_set.error, v_ingestion_logs_with_change_set.source_metadata, v_ingestion_logs_with_change_set.created_at, v_ingestion_logs_with_change_set.updated_at, v_ingestion_logs_with_change_set.change_set, v_ingestion_logs_with_change_set.changes
 FROM v_ingestion_logs_with_change_set
 WHERE (v_ingestion_logs_with_change_set.state = $1 OR $1 IS NULL)
   AND (v_ingestion_logs_with_change_set.data_type = $2 OR $2 IS NULL)
-  AND (v_ingestion_logs_with_change_set.ingestion_ts >= $3 OR $3 IS NULL)
-  AND (v_ingestion_logs_with_change_set.ingestion_ts <= $4 OR $4 IS NULL)
+  AND (v_ingestion_logs_with_change_set.ingestion_ts >= $3 OR
+       $3 IS NULL)
+  AND (v_ingestion_logs_with_change_set.ingestion_ts <= $4 OR
+       $4 IS NULL)
 ORDER BY v_ingestion_logs_with_change_set.id DESC
 LIMIT $6 OFFSET $5
 `
@@ -199,7 +201,7 @@ func (q *Queries) RetrieveIngestionLogsWithChangeSets(ctx context.Context, arg R
 		var i VIngestionLogsWithChangeSet
 		if err := rows.Scan(
 			&i.ID,
-			&i.IngestionLogUuid,
+			&i.ExternalID,
 			&i.DataType,
 			&i.State,
 			&i.RequestID,
@@ -231,7 +233,7 @@ UPDATE ingestion_logs
 SET state = $2,
     error = $3
 WHERE id = $1
-RETURNING id, ingestion_log_uuid, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
+RETURNING id, external_id, data_type, state, request_id, ingestion_ts, producer_app_name, producer_app_version, sdk_name, sdk_version, entity, error, source_metadata, created_at, updated_at
 `
 
 type UpdateIngestionLogStateWithErrorParams struct {
diff --git a/diode-server/gen/dbstore/postgres/types.go b/diode-server/gen/dbstore/postgres/types.go
index 8917cee7..cf331d3b 100644
--- a/diode-server/gen/dbstore/postgres/types.go
+++ b/diode-server/gen/dbstore/postgres/types.go
@@ -10,7 +10,7 @@ import (
 
 type Change struct {
 	ID          int32  `json:"id"`
-	ChangeUuid  string `json:"change_uuid"`
+	ExternalID  string `json:"external_id"`
 	ChangeSetID int32  `json:"change_set_id"`
 	ChangeType  string `json:"change_type"`
 	ObjectType  string `json:"object_type"`
@@ -24,7 +24,7 @@ type Change struct {
 
 type ChangeSet struct {
 	ID             int32              `json:"id"`
-	ChangeSetUuid  string             `json:"change_set_uuid"`
+	ExternalID     string             `json:"external_id"`
 	IngestionLogID int32              `json:"ingestion_log_id"`
 	BranchID       pgtype.Text        `json:"branch_id"`
 	CreatedAt      pgtype.Timestamptz `json:"created_at"`
@@ -33,7 +33,7 @@ type ChangeSet struct {
 
 type IngestionLog struct {
 	ID               int32       `json:"id"`
-	IngestionLogUuid string      `json:"ingestion_log_uuid"`
+	ExternalID       string      `json:"external_id"`
 	DataType         pgtype.Text `json:"data_type"`
 	State            pgtype.Int4 `json:"state"`
 	RequestID        pgtype.Text `json:"request_id"`
@@ -51,7 +51,7 @@ type IngestionLog struct {
 
 type VIngestionLogsWithChangeSet struct {
 	ID               int32       `json:"id"`
-	IngestionLogUuid string      `json:"ingestion_log_uuid"`
+	ExternalID       string      `json:"external_id"`
 	DataType         pgtype.Text `json:"data_type"`
 	State            pgtype.Int4 `json:"state"`
 	RequestID        pgtype.Text `json:"request_id"`
diff --git a/diode-server/go.mod b/diode-server/go.mod
index ba08939f..65e68b2f 100644
--- a/diode-server/go.mod
+++ b/diode-server/go.mod
@@ -18,7 +18,6 @@ require (
 	github.com/oklog/run v1.1.0
 	github.com/pressly/goose/v3 v3.23.0
 	github.com/redis/go-redis/v9 v9.5.1
-	github.com/segmentio/ksuid v1.0.4
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/time v0.5.0
 	google.golang.org/grpc v1.64.1
diff --git a/diode-server/go.sum b/diode-server/go.sum
index 9ec65645..a1deba33 100644
--- a/diode-server/go.sum
+++ b/diode-server/go.sum
@@ -80,8 +80,6 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
-github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
 github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
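
The reconciler changes that follow rework the ingestion pipeline: IngestionLogToProcess no longer carries the ingest-entity key string, only the database ids handed back by the repository (ingestionLogID, and now changeSetID), and external ids are minted with google/uuid instead of the removed segmentio/ksuid. A stripped-down sketch of that staged-channel shape, with placeholder types and stage bodies rather than the real implementation:

package example

import (
	"context"
	"fmt"

	"github.com/google/uuid"
)

// message plays the role of IngestionLogToProcess: database ids and the payload
// travel together between stages, so no shared key store is needed.
type message struct {
	ingestionLogID int32
	changeSetID    int32
	externalID     string
}

// runPipeline wires a "generate" stage and an "apply" stage with channels.
// The real repository and NetBox calls are replaced by placeholders.
func runPipeline(ctx context.Context, in <-chan message) <-chan message {
	applyCh := make(chan message)
	done := make(chan message)

	go func() { // generate stage: records the new change set id on the message
		defer close(applyCh)
		for msg := range in {
			if msg.externalID == "" {
				msg.externalID = uuid.NewString() // external ids are UUIDs now
			}
			msg.changeSetID = msg.ingestionLogID + 1 // placeholder for the repository insert
			select {
			case applyCh <- msg:
			case <-ctx.Done():
				return
			}
		}
	}()

	go func() { // apply stage: consumes fully identified messages
		defer close(done)
		for msg := range applyCh {
			fmt.Printf("apply change set %d (external %s) for ingestion log %d\n",
				msg.changeSetID, msg.externalID, msg.ingestionLogID)
			select {
			case done <- msg:
			case <-ctx.Done():
				return
			}
		}
	}()

	return done
}
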
diff --git a/diode-server/reconciler/ingestion_processor.go b/diode-server/reconciler/ingestion_processor.go
index 49670587..09a8914b 100644
--- a/diode-server/reconciler/ingestion_processor.go
+++ b/diode-server/reconciler/ingestion_processor.go
@@ -11,7 +11,6 @@ import (
 	"github.com/google/uuid"
 	"github.com/kelseyhightower/envconfig"
 	"github.com/redis/go-redis/v9"
-	"github.com/segmentio/ksuid"
 	"golang.org/x/time/rate"
 	"google.golang.org/protobuf/proto"
 
@@ -64,9 +63,9 @@ type IngestionProcessor struct {
 
 // IngestionLogToProcess represents an ingestion log to process
 type IngestionLogToProcess struct {
-	key            string
 	ingestionLogID int32
 	ingestionLog   *reconcilerpb.IngestionLog
+	changeSetID    int32
 	changeSet      *changeset.ChangeSet
 	errors         []error
 }
@@ -253,7 +252,7 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan
 			case <-ctx.Done():
 				p.logger.Debug("context cancelled", "error", ctx.Err())
 				return
-			case ingestionLog, ok := <-generateChangeSetChan:
+			case msg, ok := <-generateChangeSetChan:
 				if !ok {
 					return
 				}
@@ -262,13 +261,11 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan
 					return
 				}
 
-				p.logger.Debug("generating change set", "ingestionLogID", ingestionLog.ingestionLog.GetId())
-
 				ingestEntity := differ.IngestEntity{
-					RequestID: ingestionLog.ingestionLog.GetId(),
-					DataType:  ingestionLog.ingestionLog.GetDataType(),
-					Entity:    ingestionLog.ingestionLog.GetEntity(),
-					State:     int(ingestionLog.ingestionLog.GetState()),
+					RequestID: msg.ingestionLog.GetId(),
+					DataType:  msg.ingestionLog.GetDataType(),
+					Entity:    msg.ingestionLog.GetEntity(),
+					State:     int(msg.ingestionLog.GetState()),
 				}
 
 				changeSet, err := differ.Diff(ctx, ingestEntity, "", p.nbClient)
@@ -281,39 +278,40 @@ func (p *IngestionProcessor) GenerateChangeSet(ctx context.Context, generateChan
 						"data_type": ingestEntity.DataType,
 					}
 					sentry.CaptureError(err, tags, "Ingest Entity", contextMap)
-					p.logger.Debug("failed to prepare change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "error", err)
+					p.logger.Debug("failed to prepare change set", "ingestionLogID", msg.ingestionLog.GetId(), "error", err)
 
-					ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to prepare change set: %v", err))
+					msg.errors = append(msg.errors, fmt.Errorf("failed to prepare change set: %v", err))
 
 					ingestionErr := extractIngestionError(err)
-					if err = p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil {
-						ingestionLog.errors = append(ingestionLog.errors, err)
+					if err = p.repository.UpdateIngestionLogStateWithError(ctx, msg.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil {
+						msg.errors = append(msg.errors, err)
 					}
 					break
 				}
 
-				ingestionLog.changeSet = changeSet
+				msg.changeSet = changeSet
 
-				if _, err = p.repository.CreateChangeSet(ctx, *changeSet, ingestionLog.ingestionLogID); err != nil {
-					ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to create change set: %v", err))
+				id, err := p.repository.CreateChangeSet(ctx, *changeSet, msg.ingestionLogID)
+				if err != nil {
+					msg.errors = append(msg.errors, fmt.Errorf("failed to create change set: %v", err))
 				}
 
 				if len(changeSet.ChangeSet) > 0 {
 					if applyChangeSetChan != nil {
 						applyChangeSetChan <- IngestionLogToProcess{
-							key:            ingestionLog.key,
-							ingestionLogID: ingestionLog.ingestionLogID,
-							ingestionLog:   ingestionLog.ingestionLog,
-							changeSet:      ingestionLog.changeSet,
+							ingestionLogID: msg.ingestionLogID,
+							ingestionLog:   msg.ingestionLog,
+							changeSetID:    *id,
+							changeSet:      msg.changeSet,
 						}
 					}
 				} else {
-					if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_NO_CHANGES, nil); err != nil {
-						ingestionLog.errors = append(ingestionLog.errors, err)
+					if err := p.repository.UpdateIngestionLogStateWithError(ctx, msg.ingestionLogID, reconcilerpb.State_NO_CHANGES, nil); err != nil {
+						msg.errors = append(msg.errors, err)
 					}
 				}
 
-				p.logger.Debug("change set generated", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID)
+				p.logger.Debug("change set generated", "id", id, "externalID", msg.changeSet.ChangeSetID, "ingestionLogID", msg.ingestionLogID)
 			}
 		}
 	}()
@@ -335,7 +333,7 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha
 			case <-ctx.Done():
 				p.logger.Debug("context cancelled", "error", ctx.Err())
 				return
-			case ingestionLog, ok := <-applyChan:
+			case msg, ok := <-applyChan:
 				if !ok {
 					return
 				}
@@ -344,25 +342,23 @@ func (p *IngestionProcessor) ApplyChangeSet(ctx context.Context, applyChan <-cha
 					return
 				}
 
-				p.logger.Debug("applying change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID)
-
-				if err := applier.ApplyChangeSet(ctx, p.logger, *ingestionLog.changeSet, "", p.nbClient); err != nil {
-					p.logger.Debug("failed to apply change set", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID, "error", err)
-					ingestionLog.errors = append(ingestionLog.errors, fmt.Errorf("failed to apply chang eset: %v", err))
+				if err := applier.ApplyChangeSet(ctx, p.logger, *msg.changeSet, "", p.nbClient); err != nil {
+					p.logger.Debug("failed to apply change set", "id", msg.changeSetID, "externalID", msg.changeSet.ChangeSetID, "ingestionLogID", msg.ingestionLogID, "error", err)
+					msg.errors = append(msg.errors, fmt.Errorf("failed to apply change set: %v", err))
 
 					ingestionErr := extractIngestionError(err)
-					if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil {
-						ingestionLog.errors = append(ingestionLog.errors, err)
+					if err := p.repository.UpdateIngestionLogStateWithError(ctx, msg.ingestionLogID, reconcilerpb.State_FAILED, ingestionErr); err != nil {
+						msg.errors = append(msg.errors, err)
 					}
 					break
 				}
 
-				ingestionLog.ingestionLog.State = reconcilerpb.State_RECONCILED
-				if err := p.repository.UpdateIngestionLogStateWithError(ctx, ingestionLog.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil {
-					ingestionLog.errors = append(ingestionLog.errors, err)
+				msg.ingestionLog.State = reconcilerpb.State_RECONCILED
+				if err := p.repository.UpdateIngestionLogStateWithError(ctx, msg.ingestionLogID, reconcilerpb.State_RECONCILED, nil); err != nil {
+					msg.errors = append(msg.errors, err)
 				}
 
-				p.logger.Debug("change set applied", "ingestionLogID", ingestionLog.ingestionLog.GetId(), "changeSetID", ingestionLog.changeSet.ChangeSetID)
+				p.logger.Debug("change set applied", "id", msg.changeSetID, "externalID", msg.changeSet.ChangeSetID, "ingestionLogID", msg.ingestionLogID)
 			}
 		}
 	}()
@@ -386,13 +382,8 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq
 				continue
 			}
 
-			ingestionLogKSUID := ksuid.New().String()
-
-			key := fmt.Sprintf("ingest-entity:%s-%d-%s", objectType, ingestionTs, ingestionLogKSUID)
-			p.logger.Debug("ingest entity key", "key", key)
-
 			ingestionLog := &reconcilerpb.IngestionLog{
-				Id:                 ingestionLogKSUID,
+				Id:                 uuid.NewString(),
 				RequestId:          ingestReq.GetId(),
 				ProducerAppName:    ingestReq.GetProducerAppName(),
 				ProducerAppVersion: ingestReq.GetProducerAppVersion(),
@@ -404,17 +395,15 @@ func (p *IngestionProcessor) CreateIngestionLogs(ctx context.Context, ingestReq
 				State:              reconcilerpb.State_QUEUED,
 			}
 
-			ingestionLog.Id = uuid.NewString()
-
-			ingestionLogID, err := p.repository.CreateIngestionLog(ctx, ingestionLog, nil)
+			id, err := p.repository.CreateIngestionLog(ctx, ingestionLog, nil)
 			if err != nil {
 				errs = append(errs, fmt.Errorf("failed to create ingestion log: %v", err))
 				continue
 			}
 
+			p.logger.Debug("created ingestion log", "id", id, "externalID", ingestionLog.GetId())
 			generateIngestionLogChan <- IngestionLogToProcess{
-				key:            key,
-				ingestionLogID: *ingestionLogID,
+				ingestionLogID: *id,
 				ingestionLog:   ingestionLog,
 			}
 		}
diff --git a/diode-server/reconciler/ingestion_processor_internal_test.go b/diode-server/reconciler/ingestion_processor_internal_test.go
index 0487b3e1..69328610 100644
--- a/diode-server/reconciler/ingestion_processor_internal_test.go
+++ b/diode-server/reconciler/ingestion_processor_internal_test.go
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
 	"io"
 	"log/slog"
 	"os"
@@ -13,8 +12,8 @@ import (
 	"time"
 
 	"github.com/andybalholm/brotli"
+	"github.com/google/uuid"
 	"github.com/redis/go-redis/v9"
-	"github.com/segmentio/ksuid"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/protobuf/proto"
@@ -373,7 +372,7 @@ func TestIngestionProcessor_GenerateAndApplyChangeSet(t *testing.T) {
 		{
 			name: "generate and apply change set",
 			ingestionLog: &reconcilerpb.IngestionLog{
-				Id:                 ksuid.New().String(),
+				Id:                 uuid.NewString(),
 				RequestId:          "cfa0f129-125c-440d-9e41-e87583cd7d89",
 				ProducerAppName:    "test-app",
 				ProducerAppVersion: "0.1.0",
@@ -408,7 +407,7 @@
 		{
 			name: "generate change set only",
 			ingestionLog: &reconcilerpb.IngestionLog{
-				Id:                 ksuid.New().String(),
+				Id:                 uuid.NewString(),
 				RequestId:          "cfa0f129-125c-440d-9e41-e87583cd7d89",
 				ProducerAppName:    "test-app",
 				ProducerAppVersion: "0.1.0",
@@ -458,12 +457,6 @@
 				repository: mockRepository,
 			}
 
-			// Set up the mock expectation
-			cmd := redis.NewCmd(ctx)
-			if tt.expectedError {
-				cmd.SetErr(errors.New("error"))
-			}
-			redisKey := fmt.Sprintf("ingest-entity:%s-%d-%s", tt.ingestionLog.DataType, tt.ingestionLog.IngestionTs, tt.ingestionLog.Id)
 			ingestionLogID := int32(1)
 			mockNbClient.On("RetrieveObjectState", ctx, mock.Anything).Return(tt.mockRetrieveObjectStateResponse, nil)
@@ -489,7 +482,6 @@
 			}
 
 			generateChangeSetChannel <- IngestionLogToProcess{
-				key:            redisKey,
 				ingestionLogID: ingestionLogID,
 				ingestionLog:   tt.ingestionLog,
 			}