From 325a9f806ef86e12775e9d76bf699d51a4d3ec16 Mon Sep 17 00:00:00 2001
From: Daniel Bell
Date: Mon, 24 Jul 2023 15:54:31 +0200
Subject: [PATCH 1/6] Add a dedicated workflow for the app module (#4101)

* Split app workflow
* use sbt aliases to simplify workflows
* rename app workflow
* re-add static analysis to plugins, clean correctly
* fix sourcing psql name
* always do full clean
* fix sbt commands
* use stripmargin
---
 .github/workflows/ci-delta-app.yml     | 41 ++++++++++++++++++++++
 .github/workflows/ci-delta-core.yml    | 33 ++++++-------
 .github/workflows/ci-delta-plugins.yml | 29 +++++++++++++---
 build.sbt                              | 48 ++++++++++++++++++++++++--
 4 files changed, 122 insertions(+), 29 deletions(-)
 create mode 100644 .github/workflows/ci-delta-app.yml

diff --git a/.github/workflows/ci-delta-app.yml b/.github/workflows/ci-delta-app.yml
new file mode 100644
index 0000000000..de38da5fd2
--- /dev/null
+++ b/.github/workflows/ci-delta-app.yml
@@ -0,0 +1,41 @@
+name: Delta App
+on:
+  pull_request:
+    paths:
+      - 'delta/**'
+      - 'build.sbt'
+      - 'project/**'
+      - '.github/workflows/ci-delta-app.yml'
+jobs:
+  static-analysis:
+    if: github.event_name == 'pull_request'
+    runs-on: it
+    timeout-minutes: 10
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Static analysis
+        run: |
+          sbt -Dsbt.color=always -Dsbt.supershell=false \
+            clean \
+            app-static-analysis
+  unit-tests:
+    if: github.event_name == 'pull_request'
+    runs-on: it
+    timeout-minutes: 20
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Unit tests
+        run: |
+          sbt -Dsbt.color=always -Dsbt.supershell=false \
+            clean \
+            app-unit-tests-with-coverage
\ No newline at end of file

diff --git a/.github/workflows/ci-delta-core.yml b/.github/workflows/ci-delta-core.yml
index b3998c7a3c..0c5091a63c 100644
--- a/.github/workflows/ci-delta-core.yml
+++ b/.github/workflows/ci-delta-core.yml
@@ -2,7 +2,12 @@ name: Delta Core
 on:
   pull_request:
     paths:
-      - 'delta/**'
+      - 'delta/kernel/**'
+      - 'delta/plugins/**'
+      - 'delta/rdf/**'
+      - 'delta/sdk/**'
+      - 'delta/sourcing-psql/**'
+      - 'delta/testkit/**'
       - 'build.sbt'
       - 'project/**'
       - '.github/workflows/ci-delta-core.yml'
@@ -18,16 +23,12 @@ jobs:
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - name: Static Analysis
+      - name: Static analysis
         run: |
           sbt -Dsbt.color=always -Dsbt.supershell=false \
-            "project delta" \
             clean \
-            scalafmtCheck \
-            Test/scalafmtCheck \
-            scalafmtSbtCheck \
-            scapegoat \
-            doc
+            core-static-analysis
+
   unit-tests:
     if: github.event_name == 'pull_request'
     runs-on: it
     timeout-minutes: 20
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - name: Tests
+      - name: Unit tests
         run: |
           sbt -Dsbt.color=always -Dsbt.supershell=false \
             clean \
-            coverage \
-            app/test \
-            kernel/test \
-            rdf/test \
-            sdk/test \
-            sourcingPsql/test \
-            testkit/test \
-            app/coverageReport \
-            kernel/coverageReport \
-            rdf/coverageReport \
-            sdk/coverageReport \
-            sourcingPsql/coverageReport \
-            testkit/coverageReport
+            core-unit-tests-with-coverage

diff --git a/.github/workflows/ci-delta-plugins.yml b/.github/workflows/ci-delta-plugins.yml
index 291bc4c2f2..cd3724c84e 100644
--- a/.github/workflows/ci-delta-plugins.yml
+++ b/.github/workflows/ci-delta-plugins.yml
@@ -2,11 +2,32 @@ name: Delta Plugins
 on:
   pull_request:
     paths:
-      - 'delta/**'
+      - 'delta/kernel/**'
+      - 'delta/plugins/**'
+      - 'delta/rdf/**'
+      - 'delta/sdk/**'
+      - 'delta/sourcing-psql/**'
+      - 'delta/testkit/**'
       - 'build.sbt'
       - 'project/**'
       - '.github/workflows/ci-delta-plugins.yml'
 jobs:
+  static-analysis:
+    if: github.event_name == 'pull_request'
+    runs-on: it
+    timeout-minutes: 10
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Static analysis
+        run: |
+          sbt -Dsbt.color=always -Dsbt.supershell=false \
+            clean \
+            plugins-static-analysis
   unit-tests:
     if: github.event_name == 'pull_request'
     runs-on: it
     timeout-minutes: 20
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
         with:
           fetch-depth: 0
-      - name: Tests
+      - name: Unit tests
         run: |
           sbt -Dsbt.color=always -Dsbt.supershell=false \
             clean \
-            coverage \
-            plugins/test \
-            plugins/coverageReport
+            plugins-unit-tests-with-coverage

diff --git a/build.sbt b/build.sbt
index 05678d0ff4..97eed7caa3 100755
--- a/build.sbt
+++ b/build.sbt
@@ -1005,10 +1005,54 @@ Global / excludeLintKeys += docs / paradoxRoots
 Global / excludeLintKeys += docs / Paradox / paradoxNavigationDepth
 Global / concurrentRestrictions += Tags.limit(Tags.Test, 1)

-addCommandAlias("review", ";clean;scalafmtCheck;test:scalafmtCheck;scalafmtSbtCheck;coverage;scapegoat;test;coverageReport;coverageAggregate")
+addCommandAlias("review",
+  s"""
+     |;clean
+     |;scalafmtCheck
+     |;test:scalafmtCheck
+     |;scalafmtSbtCheck
+     |;coverage
+     |;scapegoat
+     |;test
+     |;coverageReport
+     |;coverageAggregate
+     |""".stripMargin
+)
 addCommandAlias(
   "deltaReview",
-  ";delta/clean;delta/scalafmtCheck;delta/test:scalafmtCheck;scalafmtSbtCheck;coverage;delta/scapegoat;delta/test;delta/coverageReport;delta/coverageAggregate"
+  """
+    |;delta/clean
+    |;delta/scalafmtCheck
+    |;delta/test:scalafmtCheck
+    |;scalafmtSbtCheck;coverage
+    |;delta/scapegoat
+    |;delta/test
+    |;delta/coverageReport
+    |;delta/coverageAggregate
+    |""".stripMargin
 )
 addCommandAlias("build-docs", ";docs/clean;docs/makeSite")
 addCommandAlias("preview-docs", ";docs/clean;docs/previewSite")
+
+val coreModules = List("kernel", "rdf", "sdk", "sourcingPsql", "testkit")
+def staticAnalysisCommandForModule(module: String) =
+  s"""
+     |;$module/scalafmtCheck
+     |;$module/Test/scalafmtCheck
+     |;$module/scalafmtSbtCheck
+     |;$module/scapegoat
+     |;$module/doc
+     |""".stripMargin
+
+addCommandAlias("core-static-analysis", coreModules.map(staticAnalysisCommandForModule).mkString)
+addCommandAlias("app-static-analysis", staticAnalysisCommandForModule("app"))
+addCommandAlias("plugins-static-analysis", staticAnalysisCommandForModule("plugins"))
+
+def unitTestsWithCoverageCommandsForModules(modules: List[String]) = {
+  ";coverage" +
+    modules.map(module => s";$module/test").mkString +
+    modules.map(module => s";$module/coverageReport").mkString
+}
+addCommandAlias("core-unit-tests-with-coverage", unitTestsWithCoverageCommandsForModules(coreModules))
+addCommandAlias("app-unit-tests-with-coverage", unitTestsWithCoverageCommandsForModules(List("app")))
+addCommandAlias("plugins-unit-tests-with-coverage", unitTestsWithCoverageCommandsForModules(List("plugins")))

From 7ee5332ee2b5e656bbc7521e1fed5eb3443ed637 Mon Sep 17 00:00:00 2001
From: Daniel Bell
Date: Wed, 26 Jul 2023 11:51:01 +0200
Subject: [PATCH 2/6] fix bug where lenient decoding would never occur (#4108)

* fix bug where lenient decoding would never occur
* make two instances of decoder so it's not created every time / is clearer
* change the way an either is constructed
* scalafmt
---
 .../delta/routes/ResourcesRoutesSpec.scala |  8 ++--
 .../delta/sdk/resources/NexusSource.scala  | 41 ++++++++++---------
 2 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/ResourcesRoutesSpec.scala b/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/ResourcesRoutesSpec.scala
index 1d43894bb8..337ae755b8 100644
--- a/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/ResourcesRoutesSpec.scala
+++ b/delta/app/src/test/scala/ch/epfl/bluebrain/nexus/delta/routes/ResourcesRoutesSpec.scala
@@ -64,12 +64,13 @@ class ResourcesRoutesSpec extends BaseRouteSpec {
   private val myId2 = nxv + "myid2" // Resource created against schema1 with id present on the payload
   private val myId3 = nxv + "myid3" // Resource created against no schema with id passed and present on the payload
   private val myId4 = nxv + "myid4" // Resource created against schema1 with id passed and present on the payload
+  private val myId5 = nxv + "myid5" // Resource created against schema1 with id passed and present on the payload
   private val myIdEncoded = UrlUtils.encode(myId.toString)
   private val myId2Encoded = UrlUtils.encode(myId2.toString)
   private val payload = jsonContentOf("resources/resource.json", "id" -> myId)
   private val payloadWithBlankId = jsonContentOf("resources/resource.json", "id" -> "")
   private val payloadWithUnderscoreFields =
-    jsonContentOf("resources/resource-with-underscore-fields.json", "id" -> myId)
+    jsonContentOf("resources/resource-with-underscore-fields.json", "id" -> myId5)
   private val payloadWithMetadata = jsonContentOf("resources/resource-with-metadata.json", "id" -> myId)

   private val aclCheck = AclSimpleCheck().accepted
@@ -195,10 +196,7 @@ class ResourcesRoutesSpec extends BaseRouteSpec {
       val lenientDecodingRoutes = routesWithDecodingOption(DecodingOption.Lenient)

       Post("/v1/resources/myorg/myproject/_/", payloadWithUnderscoreFields.toEntity) ~> lenientDecodingRoutes ~> check {
-        response.status shouldEqual StatusCodes.BadRequest
-        response.asJson shouldEqual jsonContentOf(
-          "/resources/errors/underscore-fields.json"
-        )
+        response.status shouldEqual StatusCodes.Created
       }
     }

diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resources/NexusSource.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resources/NexusSource.scala
index acd3471599..79faa5707e 100644
--- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resources/NexusSource.scala
+++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resources/NexusSource.scala
@@ -42,29 +42,30 @@ object NexusSource {
     }
   }

-  implicit def nexusSourceDecoder(implicit decodingOption: DecodingOption): Decoder[NexusSource] = {
-
-    new Decoder[NexusSource] {
-      private val decoder = implicitly[Decoder[Json]]
-
-      println(decodingOption)
+  private val strictDecoder = new Decoder[NexusSource] {
+    private val decoder = implicitly[Decoder[Json]]

-      override def apply(c: HCursor): Result[NexusSource] = {
-        decoder(c).flatMap { json =>
-          val underscoreFields = json.asObject.toList.flatMap(_.keys).filter(_.startsWith("_"))
-          if (underscoreFields.nonEmpty) {
-            Left(
-              DecodingFailure(
-                s"Field(s) starting with _ found in payload: ${underscoreFields.mkString(", ")}",
-                c.history
-              )
-            )
-          } else {
-            Right(NexusSource(json))
-          }
-        }
+    override def apply(c: HCursor): Result[NexusSource] = {
+      decoder(c).flatMap { json =>
+        val underscoreFields = json.asObject.toList.flatMap(_.keys).filter(_.startsWith("_"))
+        Either.cond(
+          underscoreFields.isEmpty,
+          NexusSource(json),
+          DecodingFailure(
+            s"Field(s) starting with _ found in
payload: ${underscoreFields.mkString(", ")}", + c.history + ) + ) } } } + private val lenientDecoder = implicitly[Decoder[Json]].map(NexusSource(_)) + + implicit def nexusSourceDecoder(implicit decodingOption: DecodingOption): Decoder[NexusSource] = { + decodingOption match { + case DecodingOption.Lenient => lenientDecoder + case DecodingOption.Strict => strictDecoder + } + } } From a13c3b1c8a6657aa2a29b549915abd02ff521310 Mon Sep 17 00:00:00 2001 From: Simon Date: Wed, 26 Jul 2023 12:30:31 +0200 Subject: [PATCH 3/6] Implement listing of projection errors (#4106) * Implement listing of projection errors * Fix compilation --------- Co-authored-by: Simon Dumas --- .../plugins/blazegraph/BlazegraphViews.scala | 6 +- .../compositeviews/CompositeViews.scala | 10 +- .../projections/CompositeProjections.scala | 2 +- .../elasticsearch/ElasticSearchViews.scala | 6 +- .../EventMetricsProjection.scala | 5 +- .../query/DefaultSearchRequest.scala | 12 +- .../query/DefaultViewsQuery.scala | 12 +- .../query/DefaultViewsStore.scala | 10 +- .../query/DefaultViewsQuerySuite.scala | 14 +- .../query/DefaultViewsStoreSuite.scala | 10 +- .../delta/plugins/storage/files/Files.scala | 2 +- .../plugins/storage/storages/Storages.scala | 6 +- .../delta/sdk/projects/ProjectsImpl.scala | 6 +- .../delta/sdk/resolvers/ResolversImpl.scala | 4 +- .../nexus/delta/sdk/sse/SseEventLog.scala | 19 +- .../nexus/delta/sdk/views/ViewsStore.scala | 10 +- .../delta/sourcing/FragmentEncoder.scala | 21 ++ .../nexus/delta/sourcing/Predicate.scala | 39 ---- .../nexus/delta/sourcing/Scope.scala | 38 ++++ .../nexus/delta/sourcing/ScopedEventLog.scala | 48 ++--- .../delta/sourcing/event/EventStreaming.scala | 20 +- .../sourcing/event/GlobalEventStore.scala | 2 +- .../sourcing/event/ScopedEventStore.scala | 24 +-- .../implicits/TimeRangeInstances.scala | 22 +++ .../delta/sourcing/implicits/package.scala | 10 +- .../sourcing/model/FailedElemLogRow.scala | 94 +++++++++ .../nexus/delta/sourcing/offset/Offset.scala | 16 +- .../projections/FailedElemLogStore.scala | 73 ++++++- .../projections/ProjectionErrors.scala | 9 +- .../delta/sourcing/query/StreamingQuery.scala | 9 +- .../sourcing/state/ScopedStateStore.scala | 59 +++--- .../sourcing/stream/ProjectionStore.scala | 86 +-------- .../delta/sourcing/syntax/DoobieSyntax.scala | 20 ++ .../delta/sourcing/ScopedEventLogSuite.scala | 2 +- .../sourcing/event/EventStreamingSuite.scala | 14 +- .../event/ScopedEventStoreSuite.scala | 14 +- .../projections/FailedElemLogStoreSuite.scala | 182 ++++++++++-------- .../state/ScopedStateStoreSuite.scala | 22 +-- .../nexus/testkit/MutableClock.scala | 29 +++ 39 files changed, 586 insertions(+), 401 deletions(-) create mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/FragmentEncoder.scala delete mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Predicate.scala create mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Scope.scala create mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/TimeRangeInstances.scala create mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala create mode 100644 delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/syntax/DoobieSyntax.scala create mode 100644 delta/testkit/src/main/scala/ch/epfl/bluebrain/nexus/testkit/MutableClock.scala diff --git 
a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphViews.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphViews.scala index a5a9f51d1e..ca16cc06ce 100644 --- a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphViews.scala +++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphViews.scala @@ -293,7 +293,7 @@ final class BlazegraphViews( * Return the existing indexing views in a project in a finite stream */ def currentIndexingViews(project: ProjectRef): ElemStream[IndexingViewDef] = - log.currentStates(Predicate.Project(project)).evalMapFilter { envelope => + log.currentStates(Scope.Project(project)).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } @@ -301,7 +301,7 @@ final class BlazegraphViews( * Return all existing indexing views in a finite stream */ def currentIndexingViews: ElemStream[IndexingViewDef] = - log.currentStates(Predicate.Root).evalMapFilter { envelope => + log.currentStates(Scope.Root).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } @@ -309,7 +309,7 @@ final class BlazegraphViews( * Return the indexing views in a non-ending stream */ def indexingViews(start: Offset): ElemStream[IndexingViewDef] = - log.states(Predicate.Root, start).evalMapFilter { envelope => + log.states(Scope.Root, start).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } diff --git a/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/CompositeViews.scala b/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/CompositeViews.scala index 49dd0ca8c6..96a5ee691a 100644 --- a/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/CompositeViews.scala +++ b/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/CompositeViews.scala @@ -36,7 +36,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import ch.epfl.bluebrain.nexus.delta.sourcing.model._ import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.{Predicate, ScopedEntityDefinition, ScopedEventLog, StateMachine, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Scope, ScopedEntityDefinition, ScopedEventLog, StateMachine, Transactors} import io.circe.Json import monix.bio.{IO, Task, UIO} @@ -400,7 +400,7 @@ final class CompositeViews private ( params: CompositeViewSearchParams, ordering: Ordering[ViewResource] ): UIO[UnscoredSearchResults[ViewResource]] = { - val predicate = params.project.fold[Predicate](Predicate.Root)(ref => Predicate.Project(ref)) + val predicate = params.project.fold[Scope](Scope.Root)(ref => Scope.Project(ref)) SearchResults( log.currentStates(predicate, identity(_)).evalMapFilter[Task, ViewResource] { state => fetchContext.cacheOnReads @@ -422,19 +422,19 @@ final class CompositeViews private ( * Return all existing views for the given project in a finite stream */ def currentViews(project: ProjectRef): ElemStream[CompositeViewDef] = - log.currentStates(Predicate.Project(project)).map(toCompositeViewDef) + log.currentStates(Scope.Project(project)).map(toCompositeViewDef) /** * Return all existing indexing views in a finite stream */ def currentViews: 
ElemStream[CompositeViewDef] = - log.currentStates(Predicate.Root).map(toCompositeViewDef) + log.currentStates(Scope.Root).map(toCompositeViewDef) /** * Return the indexing views in a non-ending stream */ def views(start: Offset): ElemStream[CompositeViewDef] = - log.states(Predicate.Root, start).map(toCompositeViewDef) + log.states(Scope.Root, start).map(toCompositeViewDef) private def toCompositeViewDef(envelope: Envelope[CompositeViewState]) = envelope.toElem { v => Some(v.project) }.map { v => diff --git a/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/projections/CompositeProjections.scala b/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/projections/CompositeProjections.scala index cd7575a140..7d4a0397f2 100644 --- a/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/projections/CompositeProjections.scala +++ b/delta/plugins/composite-views/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/compositeviews/projections/CompositeProjections.scala @@ -142,7 +142,7 @@ object CompositeProjections { Projection.persist( progress, compositeProgressStore.save(view, rev, branch, _), - failedElemLogStore.saveFailedElems(metadata, _) + failedElemLogStore.save(metadata, _) )(batch) ) diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchViews.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchViews.scala index 00525a7ac2..5acacf131f 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchViews.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchViews.scala @@ -325,7 +325,7 @@ final class ElasticSearchViews private ( * Return the existing indexing views in a project in a finite stream */ def currentIndexingViews(project: ProjectRef): ElemStream[IndexingViewDef] = - log.currentStates(Predicate.Project(project)).evalMapFilter { envelope => + log.currentStates(Scope.Project(project)).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } @@ -333,7 +333,7 @@ final class ElasticSearchViews private ( * Return all existing indexing views in a finite stream */ def currentIndexingViews: ElemStream[IndexingViewDef] = - log.currentStates(Predicate.Root).evalMapFilter { envelope => + log.currentStates(Scope.Root).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } @@ -341,7 +341,7 @@ final class ElasticSearchViews private ( * Return the indexing views in a non-ending stream */ def indexingViews(start: Offset): ElemStream[IndexingViewDef] = - log.states(Predicate.Root, start).evalMapFilter { envelope => + log.states(Scope.Root, start).evalMapFilter { envelope => Task.pure(toIndexViewDef(envelope)) } diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/EventMetricsProjection.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/EventMetricsProjection.scala index 55249fcff1..c7e8965b9b 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/EventMetricsProjection.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/EventMetricsProjection.scala @@ -14,7 +14,7 @@ import 
ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Operation.Sink import ch.epfl.bluebrain.nexus.delta.sourcing.stream._ import ch.epfl.bluebrain.nexus.delta.sourcing.stream.pipes.AsJson -import ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Predicate, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Scope, Transactors} import monix.bio.Task trait EventMetricsProjection @@ -73,8 +73,7 @@ object EventMetricsProjection { MultiDecoder(metricEncoders.map { encoder => encoder.entityType -> encoder.toMetric }.toMap) // define how to get metrics from a given offset - val metrics = (offset: Offset) => - EventStreaming.fetchScoped(Predicate.root, allEntityTypes, offset, queryConfig, xas) + val metrics = (offset: Offset) => EventStreaming.fetchScoped(Scope.root, allEntityTypes, offset, queryConfig, xas) val index = eventMetricsIndex(indexPrefix) diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultSearchRequest.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultSearchRequest.scala index 46b9aeaff9..3a407a583d 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultSearchRequest.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultSearchRequest.scala @@ -8,7 +8,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SortList import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{ApiMappings, ProjectBase} -import ch.epfl.bluebrain.nexus.delta.sourcing.Predicate +import ch.epfl.bluebrain.nexus.delta.sourcing.Scope import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, ResourceRef} import monix.bio.IO @@ -33,9 +33,9 @@ sealed trait DefaultSearchRequest extends Product with Serializable { def sort: SortList /** - * If the search applies to the project/org/root level + * If the search applies to the project/org/root scope */ - def predicate: Predicate + def scope: Scope } @@ -46,7 +46,7 @@ object DefaultSearchRequest { */ case class ProjectSearch(ref: ProjectRef, params: ResourcesSearchParams, pagination: Pagination, sort: SortList) extends DefaultSearchRequest { - override def predicate: Predicate = Predicate.Project(ref) + override def scope: Scope = Scope(ref) } object ProjectSearch { @@ -82,7 +82,7 @@ object DefaultSearchRequest { */ case class OrgSearch(label: Label, params: ResourcesSearchParams, pagination: Pagination, sort: SortList) extends DefaultSearchRequest { - override def predicate: Predicate = Predicate.Org(label) + override def scope: Scope = Scope.Org(label) } object OrgSearch { @@ -103,7 +103,7 @@ object DefaultSearchRequest { */ case class RootSearch(params: ResourcesSearchParams, pagination: Pagination, sort: SortList) extends DefaultSearchRequest { - override def predicate: Predicate = Predicate.Root + override def scope: Scope = Scope.Root } object RootSearch { diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuery.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuery.scala index 89c07424a1..2a977d3ab7 100644 --- 
a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuery.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuery.scala @@ -11,7 +11,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{AggregationResult, SearchResults} import ch.epfl.bluebrain.nexus.delta.sdk.views.View.IndexingView -import ch.epfl.bluebrain.nexus.delta.sourcing.{Predicate, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Scope, Transactors} import io.circe.JsonObject import monix.bio.{IO, UIO} @@ -58,14 +58,14 @@ object DefaultViewsQuery { } def apply[Result, Aggregate]( - fetchViews: Predicate => UIO[List[IndexingView]], + fetchViews: Scope => UIO[List[IndexingView]], aclCheck: AclCheck, listAction: (DefaultSearchRequest, Set[IndexingView]) => IO[ElasticSearchQueryError, Result], aggregateAction: (DefaultSearchRequest, Set[IndexingView]) => IO[ElasticSearchQueryError, Aggregate] ): DefaultViewsQuery[Result, Aggregate] = new DefaultViewsQuery[Result, Aggregate] { - private def filterViews(predicate: Predicate)(implicit caller: Caller) = - fetchViews(predicate) + private def filterViews(scope: Scope)(implicit caller: Caller) = + fetchViews(scope) .flatMap { allViews => aclCheck.mapFilter[IndexingView, IndexingView]( allViews, @@ -80,7 +80,7 @@ object DefaultViewsQuery { override def list( searchRequest: DefaultSearchRequest )(implicit caller: Caller): IO[ElasticSearchQueryError, Result] = - filterViews(searchRequest.predicate).flatMap { views => + filterViews(searchRequest.scope).flatMap { views => listAction(searchRequest, views) } @@ -90,7 +90,7 @@ object DefaultViewsQuery { override def aggregate( searchRequest: DefaultSearchRequest )(implicit caller: Caller): IO[ElasticSearchQueryError, Aggregate] = - filterViews(searchRequest.predicate).flatMap { views => + filterViews(searchRequest.scope).flatMap { views => aggregateAction(searchRequest, views) } diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStore.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStore.scala index 20d83c1052..b9f35ce374 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStore.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStore.scala @@ -5,8 +5,8 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.ElasticSearchViews import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{defaultViewId, permissions, ElasticSearchViewState} import ch.epfl.bluebrain.nexus.delta.sdk.views.View.IndexingView import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef -import ch.epfl.bluebrain.nexus.delta.sourcing.{Predicate, Transactors} import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag +import ch.epfl.bluebrain.nexus.delta.sourcing.{Scope, Transactors} import doobie._ import doobie.implicits._ import io.circe.{Decoder, Json} @@ -18,9 +18,9 @@ import monix.bio.{IO, UIO} trait DefaultViewsStore { /** - * Return views at the given predicate + * Return views at the given scope */ - def find(predicate: Predicate): UIO[List[IndexingView]] + def find(scope: Scope): 
UIO[List[IndexingView]] } object DefaultViewsStore { @@ -38,11 +38,11 @@ object DefaultViewsStore { def apply(prefix: String, xas: Transactors): DefaultViewsStore = { new DefaultViewsStore { implicit val stateDecoder: Decoder[ElasticSearchViewState] = ElasticSearchViewState.serializer.codec - def find(predicate: Predicate): UIO[List[IndexingView]] = + def find(scope: Scope): UIO[List[IndexingView]] = (fr"SELECT value FROM scoped_states" ++ Fragments.whereAndOpt( Some(fr"type = ${ElasticSearchViews.entityType}"), - predicate.asFragment, + scope.asFragment, Some(fr"tag = ${Tag.Latest.value}"), Some(fr"id = $defaultViewId"), Some(fr"deprecated = false") diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuerySuite.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuerySuite.scala index cbc64dc787..88b859f521 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuerySuite.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsQuerySuite.scala @@ -10,7 +10,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SortList import ch.epfl.bluebrain.nexus.delta.sdk.views.View.IndexingView import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef -import ch.epfl.bluebrain.nexus.delta.sourcing.Predicate +import ch.epfl.bluebrain.nexus.delta.sourcing.Scope import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Group, User} import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} import ch.epfl.bluebrain.nexus.testkit.bio.BioSuite @@ -45,13 +45,13 @@ class DefaultViewsQuerySuite extends BioSuite { (charlie.subject, AclAddress.Project(project1), Set(permissions.read)) ) - private def fetchViews(predicate: Predicate) = UIO.pure { + private def fetchViews(predicate: Scope) = UIO.pure { val viewRefs = predicate match { - case Predicate.Root => List(defaultView, defaultView2, defaultView3) - case Predicate.Org(`org`) => List(defaultView, defaultView2) - case Predicate.Org(`org2`) => List(defaultView3) - case Predicate.Org(_) => List.empty - case Predicate.Project(project) => List(ViewRef(project, defaultViewId)) + case Scope.Root => List(defaultView, defaultView2, defaultView3) + case Scope.Org(`org`) => List(defaultView, defaultView2) + case Scope.Org(`org2`) => List(defaultView3) + case Scope.Org(_) => List.empty + case Scope.Project(project) => List(ViewRef(project, defaultViewId)) } viewRefs.map { ref => IndexingView(ref, "index", permissions.read) diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStoreSuite.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStoreSuite.scala index 6bd8351d3e..f55e217c12 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStoreSuite.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/query/DefaultViewsStoreSuite.scala @@ -6,7 +6,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.{ElasticSearchViewGen import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import 
ch.epfl.bluebrain.nexus.delta.sdk.views.View.IndexingView import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef -import ch.epfl.bluebrain.nexus.delta.sourcing.Predicate +import ch.epfl.bluebrain.nexus.delta.sourcing.Scope import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef, Tag} import ch.epfl.bluebrain.nexus.delta.sourcing.state.ScopedStateStoreFixture import ch.epfl.bluebrain.nexus.testkit.bio.{BioSuite, ResourceFixture} @@ -75,7 +75,7 @@ class DefaultViewsStoreSuite extends BioSuite { private lazy val viewStore = defaultViewsStore() - private def findDefaultRefs(predicate: Predicate) = + private def findDefaultRefs(predicate: Scope) = viewStore.find(predicate).map(_.map(_.ref)) test("Construct indexing view correctly") { @@ -89,14 +89,14 @@ class DefaultViewsStoreSuite extends BioSuite { } test(s"Get non-deprecated default views in '$project1'") { - findDefaultRefs(Predicate.Project(project1)).assert(List(defaultView1)) + findDefaultRefs(Scope.Project(project1)).assert(List(defaultView1)) } test(s"Get non-deprecated default views in '$org'") { - findDefaultRefs(Predicate.Org(org)).assert(List(defaultView1, defaultView2)) + findDefaultRefs(Scope.Org(org)).assert(List(defaultView1, defaultView2)) } test(s"Get non-deprecated in all orgs") { - findDefaultRefs(Predicate.Root).assert(List(defaultView1, defaultView2, defaultView3)) + findDefaultRefs(Scope.Root).assert(List(defaultView1, defaultView2, defaultView3)) } } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala index c0e3f2c1f4..771f2b2221 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/Files.scala @@ -467,7 +467,7 @@ final class Files( .bimap(WrappedStorageRejection, _.value) ) stream <- log - .states(Predicate.root, offset) + .states(Scope.root, offset) .map { envelope => envelope.value match { case f diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala index 400dabeb3e..b4f29e5717 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala @@ -31,7 +31,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.ScopedEntityDefinition.Tagger import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ElemStream, EntityType, ProjectRef, ResourceRef} -import ch.epfl.bluebrain.nexus.delta.sourcing.{Predicate, ScopedEntityDefinition, ScopedEventLog, StateMachine, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Scope, ScopedEntityDefinition, ScopedEventLog, StateMachine, Transactors} import com.typesafe.scalalogging.Logger import fs2.Stream import io.circe.Json @@ -288,7 +288,7 @@ final class Storages private ( private def fetchDefaults(project: ProjectRef): IO[StorageFetchRejection, Stream[Task, StorageResource]] = fetchContext.onRead(project).map { pc => - log.currentStates(Predicate.Project(project), 
_.toResource(pc.apiMappings, pc.base)).filter(_.value.default) + log.currentStates(Scope.Project(project), _.toResource(pc.apiMappings, pc.base)).filter(_.value.default) } /** @@ -309,7 +309,7 @@ final class Storages private ( * Return the existing storages in a project in a finite stream */ def currentStorages(project: ProjectRef): ElemStream[StorageState] = - log.currentStates(Predicate.Project(project)).map { + log.currentStates(Scope.Project(project)).map { _.toElem { s => Some(s.project) } } diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/projects/ProjectsImpl.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/projects/ProjectsImpl.scala index 1e344858d1..015f290332 100644 --- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/projects/ProjectsImpl.scala +++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/projects/ProjectsImpl.scala @@ -95,16 +95,16 @@ final class ProjectsImpl private ( ): UIO[SearchResults.UnscoredSearchResults[ProjectResource]] = SearchResults( log - .currentStates(params.organization.fold(Predicate.root)(Predicate.Org), _.toResource(defaultApiMappings)) + .currentStates(params.organization.fold(Scope.root)(Scope.Org), _.toResource(defaultApiMappings)) .evalFilter(params.matches), pagination, ordering ).span("listProjects") override def currentRefs: Stream[Task, ProjectRef] = - log.currentStates(Predicate.root).map(_.value.project) + log.currentStates(Scope.root).map(_.value.project) - override def states(offset: Offset): ElemStream[ProjectState] = log.states(Predicate.root, offset).map { + override def states(offset: Offset): ElemStream[ProjectState] = log.states(Scope.root, offset).map { _.toElem { p => Some(p.project) } } diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resolvers/ResolversImpl.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resolvers/ResolversImpl.scala index f87eec739d..e0beff4103 100644 --- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resolvers/ResolversImpl.scala +++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/resolvers/ResolversImpl.scala @@ -153,9 +153,9 @@ final class ResolversImpl private ( params: ResolverSearchParams, ordering: Ordering[ResolverResource] ): UIO[UnscoredSearchResults[ResolverResource]] = { - val predicate = params.project.fold[Predicate](Predicate.Root)(ref => Predicate.Project(ref)) + val scope = params.project.fold[Scope](Scope.Root)(ref => Scope.Project(ref)) SearchResults( - log.currentStates(predicate, identity(_)).evalMapFilter[Task, ResolverResource] { state => + log.currentStates(scope, identity(_)).evalMapFilter[Task, ResolverResource] { state => fetchContext.cacheOnReads .onRead(state.project) .redeemWith( diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/sse/SseEventLog.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/sse/SseEventLog.scala index 1ed9ecfdde..8e849ceb12 100644 --- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/sse/SseEventLog.scala +++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/sse/SseEventLog.scala @@ -13,7 +13,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.event.EventStreaming import ch.epfl.bluebrain.nexus.delta.sourcing.model._ import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset.{At, Start} -import ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Predicate, Transactors} +import 
ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Scope, Transactors} import com.typesafe.scalalogging.Logger import fs2.Stream import io.circe.syntax.EncoderOps @@ -161,8 +161,7 @@ object SseEventLog { private def fetchUuids(ref: ProjectRef) = cache.getOrElseUpdate(ref, fetchProject(ref)).attempt.map(_.toOption) - private def stream(predicate: Predicate, selector: Option[Label], offset: Offset) - : Stream[Task, ServerSentEvent] = { + private def stream(scope: Scope, selector: Option[Label], offset: Offset): Stream[Task, ServerSentEvent] = { Stream .fromEither[Task]( selector @@ -174,7 +173,7 @@ object SseEventLog { .flatMap { entityTypes => EventStreaming .fetchAll( - predicate, + scope, entityTypes, offset, config.query, @@ -184,25 +183,25 @@ object SseEventLog { } } - override def stream(offset: Offset): Stream[Task, ServerSentEvent] = stream(Predicate.root, None, offset) + override def stream(offset: Offset): Stream[Task, ServerSentEvent] = stream(Scope.root, None, offset) override def streamBy(selector: Label, offset: Offset): Stream[Task, ServerSentEvent] = - stream(Predicate.root, Some(selector), offset) + stream(Scope.root, Some(selector), offset) override def stream(org: Label, offset: Offset): IO[OrganizationRejection, Stream[Task, ServerSentEvent]] = - fetchOrg(org).as(stream(Predicate.Org(org), None, offset)) + fetchOrg(org).as(stream(Scope.Org(org), None, offset)) override def streamBy(selector: Label, org: Label, offset: Offset) : IO[OrganizationRejection, Stream[Task, ServerSentEvent]] = - fetchOrg(org).as(stream(Predicate.Org(org), Some(selector), offset)) + fetchOrg(org).as(stream(Scope.Org(org), Some(selector), offset)) override def stream(project: ProjectRef, offset: Offset) : IO[ProjectRejection, Stream[Task, ServerSentEvent]] = - fetchProject(project).as(stream(Predicate.Project(project), None, offset)) + fetchProject(project).as(stream(Scope.Project(project), None, offset)) override def streamBy(selector: Label, project: ProjectRef, offset: Offset) : IO[ProjectRejection, Stream[Task, ServerSentEvent]] = - fetchProject(project).as(stream(Predicate.Project(project), Some(selector), offset)) + fetchProject(project).as(stream(Scope.Project(project), Some(selector), offset)) } } .tapEval { sseLog => diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/views/ViewsStore.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/views/ViewsStore.scala index 4baa62f2f9..553b935260 100644 --- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/views/ViewsStore.scala +++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/views/ViewsStore.scala @@ -7,7 +7,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegmentRef import ch.epfl.bluebrain.nexus.delta.sdk.views.View.{AggregateView, IndexingView} import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef, Tag} -import ch.epfl.bluebrain.nexus.delta.sourcing.{EntityDependencyStore, Predicate, Serializer, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{EntityDependencyStore, Scope, Serializer, Transactors} import com.typesafe.scalalogging.Logger import doobie._ import doobie.implicits._ @@ -28,11 +28,11 @@ trait ViewsStore[Rejection] { /** * Fetch default views and combine them in an aggregate view - * @param predicate + * @param scope * to get all default view from the system / a given organization / a given project * @return */ - def fetchDefaultViews(predicate: Predicate): UIO[AggregateView] + 
def fetchDefaultViews(scope: Scope): UIO[AggregateView] } @@ -74,11 +74,11 @@ object ViewsStore { } } yield singleOrMultiple - override def fetchDefaultViews(predicate: Predicate): UIO[AggregateView] = { + override def fetchDefaultViews(scope: Scope): UIO[AggregateView] = { (fr"SELECT value FROM scoped_states" ++ Fragments.whereAndOpt( Some(fr"type = $entityType"), - predicate.asFragment, + scope.asFragment, Some(fr"tag = ${Tag.Latest.value}"), Some(fr"id = $defaultViewId"), Some(fr"deprecated = false") diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/FragmentEncoder.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/FragmentEncoder.scala new file mode 100644 index 0000000000..97035587ca --- /dev/null +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/FragmentEncoder.scala @@ -0,0 +1,21 @@ +package ch.epfl.bluebrain.nexus.delta.sourcing + +import doobie.util.fragment.Fragment + +/** + * A type class that provides a conversion from a value of type `A` to a doobie [[Fragment]]. + */ +trait FragmentEncoder[A] { + + def apply(value: A): Option[Fragment] + +} + +object FragmentEncoder { + + /** + * Construct an instance from a function + */ + def instance[A](f: A => Option[Fragment]): FragmentEncoder[A] = (value: A) => f(value) + +} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Predicate.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Predicate.scala deleted file mode 100644 index ac665e5a43..0000000000 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Predicate.scala +++ /dev/null @@ -1,39 +0,0 @@ -package ch.epfl.bluebrain.nexus.delta.sourcing - -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} -import doobie.implicits._ -import doobie.util.fragment.Fragment - -/** - * Allows to filter results when querying the database for scoped entities - */ -sealed trait Predicate extends Product with Serializable { - def asFragment: Option[Fragment] -} - -object Predicate { - - val root: Predicate = Root - - /** - * Get all results for any org and any project - */ - final case object Root extends Predicate { - override def asFragment: Option[Fragment] = None - } - - /** - * Get all results within the given org - */ - final case class Org(label: Label) extends Predicate { - override def asFragment: Option[Fragment] = Some(fr"org = $label") - } - - /** - * Get all results within the given project - */ - final case class Project(ref: ProjectRef) extends Predicate { - override def asFragment: Option[Fragment] = Some(fr"org = ${ref.organization} and project = ${ref.project}") - } - -} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Scope.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Scope.scala new file mode 100644 index 0000000000..e16e7618dd --- /dev/null +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/Scope.scala @@ -0,0 +1,38 @@ +package ch.epfl.bluebrain.nexus.delta.sourcing + +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} +import doobie.implicits._ + +/** + * Allows to filter results when querying the database for scoped entities + */ +sealed trait Scope extends Product with Serializable + +object Scope { + + val root: Scope = Root + + def apply(project: ProjectRef): Scope = Project(project) + + /** + * Get all results for any org and any project + */ 
+ final case object Root extends Scope + + /** + * Get all results within the given org + */ + final case class Org(label: Label) extends Scope + + /** + * Get all results within the given project + */ + final case class Project(ref: ProjectRef) extends Scope + + implicit val scopeFragmentEncoder: FragmentEncoder[Scope] = FragmentEncoder.instance { + case Root => None + case Org(label) => Some(fr"org = $label") + case Project(ref) => Some(fr"org = ${ref.organization} and project = ${ref.project}") + } + +} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLog.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLog.scala index 41e41c6853..bfcfbf4d8a 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLog.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLog.scala @@ -114,66 +114,66 @@ trait ScopedEventLog[Id, S <: ScopedState, Command, E <: ScopedEvent, Rejection] /** * Allow to stream all current events within [[Envelope]] s - * @param predicate + * @param scope * to filter returned events * @param offset * offset to start from */ - def currentEvents(predicate: Predicate, offset: Offset): EnvelopeStream[E] + def currentEvents(scope: Scope, offset: Offset): EnvelopeStream[E] /** * Allow to stream all current events within [[Envelope]] s - * @param predicate + * @param scope * to filter returned events * @param offset * offset to start from */ - def events(predicate: Predicate, offset: Offset): EnvelopeStream[E] + def events(scope: Scope, offset: Offset): EnvelopeStream[E] /** * Allow to stream all latest states within [[Envelope]] s without applying transformation - * @param predicate + * @param scope * to filter returned states * @param offset * offset to start from */ - def currentStates(predicate: Predicate, offset: Offset): EnvelopeStream[S] + def currentStates(scope: Scope, offset: Offset): EnvelopeStream[S] /** * Allow to stream all latest states from the beginning within [[Envelope]] s without applying transformation - * @param predicate + * @param scope * to filter returned states */ - def currentStates(predicate: Predicate): EnvelopeStream[S] = currentStates(predicate, Offset.Start) + def currentStates(scope: Scope): EnvelopeStream[S] = currentStates(scope, Offset.Start) /** * Allow to stream all current states from the provided offset - * @param predicate + * @param scope * to filter returned states * @param offset * offset to start from * @param f * the function to apply on each state */ - def currentStates[T](predicate: Predicate, offset: Offset, f: S => T): Stream[Task, T] + def currentStates[T](scope: Scope, offset: Offset, f: S => T): Stream[Task, T] /** * Allow to stream all current states from the beginning - * @param predicate + * @param scope * to filter returned states * @param f * the function to apply on each state */ - def currentStates[T](predicate: Predicate, f: S => T): Stream[Task, T] = currentStates(predicate, Offset.Start, f) + def currentStates[T](scope: Scope, f: S => T): Stream[Task, T] = currentStates(scope, Offset.Start, f) /** * Stream the state changes continuously from the provided offset. 
- * @param predicate + * @param scope * to filter returned states * @param offset * the start offset */ - def states(predicate: Predicate, offset: Offset): EnvelopeStream[S] + def states(scope: Scope, offset: Offset): EnvelopeStream[S] } object ScopedEventLog { @@ -313,22 +313,22 @@ object ScopedEventLog { stateMachine.evaluate(state, command, maxDuration) } - override def currentEvents(predicate: Predicate, offset: Offset): EnvelopeStream[E] = - eventStore.currentEvents(predicate, offset) + override def currentEvents(scope: Scope, offset: Offset): EnvelopeStream[E] = + eventStore.currentEvents(scope, offset) - override def events(predicate: Predicate, offset: Offset): EnvelopeStream[E] = - eventStore.events(predicate, offset) + override def events(scope: Scope, offset: Offset): EnvelopeStream[E] = + eventStore.events(scope, offset) - override def currentStates(predicate: Predicate, offset: Offset): EnvelopeStream[S] = - stateStore.currentStates(predicate, offset) + override def currentStates(scope: Scope, offset: Offset): EnvelopeStream[S] = + stateStore.currentStates(scope, offset) - override def currentStates[T](predicate: Predicate, offset: Offset, f: S => T): Stream[Task, T] = - currentStates(predicate, offset).map { s => + override def currentStates[T](scope: Scope, offset: Offset, f: S => T): Stream[Task, T] = + currentStates(scope, offset).map { s => f(s.value) } - override def states(predicate: Predicate, offset: Offset): EnvelopeStream[S] = - stateStore.states(predicate, offset) + override def states(scope: Scope, offset: Offset): EnvelopeStream[S] = + stateStore.states(scope, offset) } } diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreaming.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreaming.scala index 2b8e26c5b5..a77743e9b1 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreaming.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreaming.scala @@ -1,12 +1,12 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.event import cats.data.NonEmptyList -import ch.epfl.bluebrain.nexus.delta.sourcing.Predicate.Root +import ch.epfl.bluebrain.nexus.delta.sourcing.Scope.Root import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Envelope, EnvelopeStream} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ -import ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Predicate, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{MultiDecoder, Scope, Transactors} import doobie.implicits._ import doobie.{Fragment, Fragments} import io.circe.Json @@ -14,7 +14,7 @@ import io.circe.Json object EventStreaming { def fetchAll[A]( - predicate: Predicate, + scope: Scope, types: List[EntityType], offset: Offset, config: QueryConfig, @@ -25,12 +25,12 @@ object EventStreaming { Envelope.streamA( offset, offset => - predicate match { + scope match { case Root => - sql"""(${globalEvents(typeIn, offset, config)}) UNION ALL (${scopedEvents(typeIn, predicate, offset, config)}) + sql"""(${globalEvents(typeIn, offset, config)}) UNION ALL (${scopedEvents(typeIn, scope, offset, config)}) |ORDER BY ordering |LIMIT ${config.batchSize}""".stripMargin.query[Envelope[Json]] - case _ => scopedEvents(typeIn, predicate, offset, config).query[Envelope[Json]] + case _ => 
scopedEvents(typeIn, scope, offset, config).query[Envelope[Json]] }, xas, config @@ -38,7 +38,7 @@ object EventStreaming { } def fetchScoped[A]( - predicate: Predicate, + scope: Scope, types: List[EntityType], offset: Offset, config: QueryConfig, @@ -48,7 +48,7 @@ object EventStreaming { Envelope.streamA( offset, - offset => scopedEvents(typeIn, predicate, offset, config).query[Envelope[Json]], + offset => scopedEvents(typeIn, scope, offset, config).query[Envelope[Json]], xas, config ) @@ -60,9 +60,9 @@ object EventStreaming { |ORDER BY ordering |LIMIT ${cfg.batchSize}""".stripMargin - private def scopedEvents(typeIn: Option[Fragment], predicate: Predicate, o: Offset, cfg: QueryConfig) = + private def scopedEvents(typeIn: Option[Fragment], scope: Scope, o: Offset, cfg: QueryConfig) = fr"""SELECT type, id, value, rev, instant, ordering FROM public.scoped_events - |${Fragments.whereAndOpt(typeIn, predicate.asFragment, o.asFragment)} + |${Fragments.whereAndOpt(typeIn, scope.asFragment, o.asFragment)} |ORDER BY ordering |LIMIT ${cfg.batchSize}""".stripMargin diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/GlobalEventStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/GlobalEventStore.scala index adb1d6bbc5..402eafebb9 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/GlobalEventStore.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/GlobalEventStore.scala @@ -4,7 +4,7 @@ import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.sourcing.{Serializer, Transactors} import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.event.Event.GlobalEvent -import ch.epfl.bluebrain.nexus.delta.sourcing.implicits.IriInstances +import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Envelope, EnvelopeStream} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.{RefreshStrategy, StreamingQuery} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStore.scala index 2206d0e8b9..bacad6308d 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStore.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStore.scala @@ -3,11 +3,11 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.event import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.event.Event.ScopedEvent -import ch.epfl.bluebrain.nexus.delta.sourcing.implicits.IriInstances +import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Envelope, EnvelopeStream, ProjectRef} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.{RefreshStrategy, StreamingQuery} -import ch.epfl.bluebrain.nexus.delta.sourcing.{Execute, PartitionInit, Predicate, Serializer, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Execute, PartitionInit, Scope, Serializer, Transactors} import doobie._ import doobie.implicits._ import doobie.postgres.implicits._ @@ -54,21 +54,21 @@ 
trait ScopedEventStore[Id, E <: ScopedEvent] { /** * Allow to stream all current events within [[Envelope]] s - * @param predicate + * @param scope * to filter returned events * @param offset * offset to start from */ - def currentEvents(predicate: Predicate, offset: Offset): EnvelopeStream[E] + def currentEvents(scope: Scope, offset: Offset): EnvelopeStream[E] /** * Allow to stream all current events within [[Envelope]] s - * @param predicate + * @param scope * to filter returned events * @param offset * offset to start from */ - def events(predicate: Predicate, offset: Offset): EnvelopeStream[E] + def events(scope: Scope, offset: Offset): EnvelopeStream[E] } @@ -129,14 +129,14 @@ object ScopedEventStore { } private def events( - predicate: Predicate, + scope: Scope, offset: Offset, strategy: RefreshStrategy ): Stream[Task, Envelope[E]] = StreamingQuery[Envelope[E]]( offset, offset => sql"""SELECT type, id, value, rev, instant, ordering FROM public.scoped_events - |${Fragments.whereAndOpt(Some(fr"type = $tpe"), predicate.asFragment, offset.asFragment)} + |${Fragments.whereAndOpt(Some(fr"type = $tpe"), scope.asFragment, offset.asFragment)} |ORDER BY ordering |LIMIT ${config.batchSize}""".stripMargin.query[Envelope[E]], _.offset, @@ -144,11 +144,11 @@ object ScopedEventStore { xas ) - override def currentEvents(predicate: Predicate, offset: Offset): Stream[Task, Envelope[E]] = - events(predicate, offset, RefreshStrategy.Stop) + override def currentEvents(scope: Scope, offset: Offset): Stream[Task, Envelope[E]] = + events(scope, offset, RefreshStrategy.Stop) - override def events(predicate: Predicate, offset: Offset): Stream[Task, Envelope[E]] = - events(predicate, offset, config.refreshStrategy) + override def events(scope: Scope, offset: Offset): Stream[Task, Envelope[E]] = + events(scope, offset, config.refreshStrategy) } } diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/TimeRangeInstances.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/TimeRangeInstances.scala new file mode 100644 index 0000000000..0a48413007 --- /dev/null +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/TimeRangeInstances.scala @@ -0,0 +1,22 @@ +package ch.epfl.bluebrain.nexus.delta.sourcing.implicits + +import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange +import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange._ +import ch.epfl.bluebrain.nexus.delta.sourcing.FragmentEncoder +import doobie.implicits._ +import doobie.util.fragment.Fragment +import doobie.postgres.implicits._ + +trait TimeRangeInstances { + + def createTimeRangeFragmentEncoder(columnName: String): FragmentEncoder[TimeRange] = { + val column = Fragment.const(columnName) + FragmentEncoder.instance { + case Anytime => None + case After(value) => Some(fr"$column >= $value") + case Before(value) => Some(fr"$column <= $value") + case Between(start, end) => Some(fr"$column >= $start and $column <= $end") + } + } + +} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/package.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/package.scala index 91d441f21f..437f1b7efe 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/package.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/implicits/package.scala @@ -1,3 +1,11 @@ package 
ch.epfl.bluebrain.nexus.delta.sourcing -package object implicits extends InstantInstances with IriInstances with CirceInstances with DurationInstances +import ch.epfl.bluebrain.nexus.delta.sourcing.syntax.DoobieSyntax + +package object implicits + extends InstantInstances + with IriInstances + with CirceInstances + with DurationInstances + with TimeRangeInstances + with DoobieSyntax diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala new file mode 100644 index 0000000000..3c31706f23 --- /dev/null +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala @@ -0,0 +1,94 @@ +package ch.epfl.bluebrain.nexus.delta.sourcing.model + +import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder +import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ +import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData +import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionMetadata +import doobie._ +import doobie.postgres.implicits._ +import io.circe.Encoder +import io.circe.generic.semiauto.deriveEncoder + +import java.time.Instant + +/** + * The row of the failed_elem_log table + */ +final case class FailedElemLogRow( + ordering: Offset, + projectionMetadata: ProjectionMetadata, + failedElemData: FailedElemData, + instant: Instant +) + +object FailedElemLogRow { + private type Row = + ( + Offset, + String, + String, + Option[ProjectRef], + Option[Iri], + EntityType, + Offset, + Iri, + Option[ProjectRef], + Int, + String, + String, + String, + Instant + ) + + /** + * Helper case class to structure FailedElemLogRow + */ + final case class FailedElemData( + id: Iri, + project: Option[ProjectRef], + entityType: EntityType, + offset: Offset, + rev: Int, + errorType: String, + message: String, + stackTrace: String + ) + + implicit val failedElemDataEncoder: Encoder.AsObject[FailedElemData] = + deriveEncoder[FailedElemData] + .mapJsonObject(_.remove("stackTrace")) + .mapJsonObject(_.remove("entityType")) + implicit val failedElemDataJsonLdEncoder: JsonLdEncoder[FailedElemData] = + JsonLdEncoder.computeFromCirce(ContextValue(contexts.error)) + + implicit val failedElemLogRow: Read[FailedElemLogRow] = { + Read[Row].map { + case ( + ordering, + name, + module, + project, + resourceId, + entityType, + elemOffset, + elemId, + elemProject, + revision, + errorType, + message, + stackTrace, + instant + ) => + FailedElemLogRow( + ordering, + ProjectionMetadata(module, name, project, resourceId), + FailedElemData(elemId, elemProject, entityType, elemOffset, revision, errorType, message, stackTrace), + instant + ) + } + } +} diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/offset/Offset.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/offset/Offset.scala index 2c7269dbdf..594fd70245 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/offset/Offset.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/offset/Offset.scala @@ -5,9 +5,9 @@ import 
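Note that `failedElemDataEncoder` above deliberately strips `stackTrace` and `entityType` before a failure is exposed through the API. A hedged sketch of the resulting JSON for an illustrative element; all values are made up and the exact offset encoding is an assumption:

import io.circe.syntax._

val elem = FailedElemData(
  id         = nxv + "id",
  project    = Some(ProjectRef.unsafe("org", "proj")),
  entityType = EntityType("Test"),
  offset     = Offset.at(42L),
  rev        = 1,
  errorType  = "java.lang.RuntimeException",
  message    = "boom",
  stackTrace = "..."
)

elem.asJson
// {"id": ..., "project": "org/proj", "offset": {"@type": "At", "value": 42},
//  "rev": 1, "errorType": "java.lang.RuntimeException", "message": "boom"}
// stackTrace and entityType are removed by the mapJsonObject calls above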
ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder +import ch.epfl.bluebrain.nexus.delta.sourcing.FragmentEncoder import doobie._ import doobie.implicits._ -import doobie.util.fragment.Fragment import io.circe.Codec import io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.deriveConfiguredCodec @@ -18,8 +18,6 @@ sealed trait Offset extends Product with Serializable { def value: Long - def asFragment: Option[Fragment] - def ordering: Long = this match { case Offset.Start => 0L case Offset.At(value) => value @@ -32,10 +30,7 @@ object Offset { * To fetch all rows from the beginning */ final case object Start extends Offset { - override val value: Long = 0L - - override def asFragment: Option[Fragment] = None } def from(value: Long): Offset = if (value > 0L) Offset.at(value) else Offset.Start @@ -43,9 +38,7 @@ object Offset { /** * To fetch rows from the given offset */ - final case class At(value: Long) extends Offset { - override def asFragment: Option[Fragment] = Some(fr"ordering > $value") - } + final case class At(value: Long) extends Offset val start: Offset = Start @@ -68,6 +61,11 @@ object Offset { implicit final val offsetGet: Get[Offset] = Get[Long].map(from) implicit final val offsetPut: Put[Offset] = Put[Long].contramap(_.value) + implicit val offsetFragmentEncoder: FragmentEncoder[Offset] = FragmentEncoder.instance { + case Start => None + case At(value) => Some(fr"ordering > $value") + } + implicit val offsetJsonLdEncoder: JsonLdEncoder[Offset] = JsonLdEncoder.computeFromCirce(ContextValue(contexts.offset)) diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala index 8bf844b2bb..632956120a 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala @@ -3,17 +3,18 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.projections import cats.effect.Clock import cats.implicits._ import ch.epfl.bluebrain.nexus.delta.kernel.Logger +import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination +import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange import ch.epfl.bluebrain.nexus.delta.kernel.utils.IOUtils import ch.epfl.bluebrain.nexus.delta.kernel.utils.ThrowableUtils._ import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.sourcing.Transactors import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionStore.FailedElemLogRow import ch.epfl.bluebrain.nexus.delta.sourcing.stream.{ProjectionMetadata, ProjectionStore} +import ch.epfl.bluebrain.nexus.delta.sourcing.{FragmentEncoder, Transactors} import doobie._ import doobie.implicits._ 
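With this change `Offset` no longer carries `asFragment` itself; the behaviour moves into the `FragmentEncoder` instance above, and call sites keep the same syntax through the `DoobieSyntax` enrichment added later in this patch. A sketch of the behaviour, assuming those definitions are in scope:

import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._

Offset.Start.asFragment   // None: streaming from the start adds no constraint
Offset.at(42L).asFragment // Some(ordering > 42), as a parameterized fragment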
import doobie.postgres.implicits._ @@ -27,6 +28,11 @@ import java.time.Instant */ trait FailedElemLogStore { + /** + * Returns the total number of elems + */ + def count: UIO[Long] + /** * Saves a list of failed elems * @@ -35,7 +41,7 @@ trait FailedElemLogStore { * @param failures * the FailedElem to save */ - def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] + def save(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] /** * Saves one failed elem @@ -53,7 +59,7 @@ trait FailedElemLogStore { * @param offset * failed elem offset */ - def failedElemEntries( + def stream( projectionProject: ProjectRef, projectionId: Iri, offset: Offset @@ -68,11 +74,30 @@ trait FailedElemLogStore { * failed elem offset * @return */ - def failedElemEntries( + def stream( projectionName: String, offset: Offset ): Stream[Task, FailedElemLogRow] + /** + * Return a list of errors for the given projection ordered by instant + * @param project + * the project of the projection + * @param projectionId + * its identifier + * @param pagination + * the pagination to apply + * @param timeRange + * the time range to restrict on + * @return + */ + def list( + project: ProjectRef, + projectionId: Iri, + pagination: FromPagination, + timeRange: TimeRange + ): UIO[List[FailedElemLogRow]] + } object FailedElemLogStore { @@ -82,7 +107,16 @@ object FailedElemLogStore { def apply(xas: Transactors, config: QueryConfig)(implicit clock: Clock[UIO]): FailedElemLogStore = new FailedElemLogStore { - override def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] = { + implicit val timeRangeFragmentEncoder: FragmentEncoder[TimeRange] = createTimeRangeFragmentEncoder("instant") + + override def count: UIO[Long] = + sql"SELECT count(ordering) FROM public.failed_elem_logs" + .query[Long] + .unique + .transact(xas.read) + .hideErrors + + override def save(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] = { val log = logger.debug(s"[${metadata.name}] Saving ${failures.length} failed elems.") val save = IOUtils.instant.flatMap { instant => failures.traverse(elem => saveFailedElem(metadata, elem, instant)).transact(xas.write).void.hideErrors @@ -127,7 +161,7 @@ object FailedElemLogStore { | $instant | )""".stripMargin.update.run.void - override def failedElemEntries( + override def stream( projectionProject: ProjectRef, projectionId: Iri, offset: Offset @@ -141,7 +175,7 @@ object FailedElemLogStore { .streamWithChunkSize(config.batchSize) .transact(xas.read) - override def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] = + override def stream(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] = sql"""SELECT * from public.failed_elem_logs |WHERE projection_name = $projectionName |AND ordering > $offset @@ -149,6 +183,27 @@ object FailedElemLogStore { .query[FailedElemLogRow] .streamWithChunkSize(config.batchSize) .transact(xas.read) + + override def list( + project: ProjectRef, + projectionId: Iri, + pagination: FromPagination, + timeRange: TimeRange + ): UIO[List[FailedElemLogRow]] = { + val where = Fragments.whereAndOpt( + Some(fr"projection_project = $project"), + Some(fr"projection_id = $projectionId"), + timeRange.asFragment + ) + sql"""SELECT * from public.failed_elem_logs + |$where + |ORDER BY ordering ASC + |LIMIT ${pagination.size} OFFSET ${pagination.from}""".stripMargin + .query[FailedElemLogRow] + .to[List] + .transact(xas.read) + .hideErrors + } } } diff 
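The new `list` operation combines the mandatory project/projection filters with the optional time-range fragment and pushes pagination down to SQL. A hedged usage sketch, assuming a store built as above; the projection name and dates are illustrative:

import java.time.Instant

val project      = ProjectRef.unsafe("org", "proj")
val projectionId = nxv + "myProjection"

// First page of up to 20 errors raised during 2023 for this projection
val page: UIO[List[FailedElemLogRow]] =
  store.list(
    project,
    projectionId,
    FromPagination(0, 20),
    TimeRange.Between(
      Instant.parse("2023-01-01T00:00:00Z"),
      Instant.parse("2024-01-01T00:00:00Z")
    )
  )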
--git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala index ec2da0f119..cbd7b36187 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala @@ -8,11 +8,10 @@ import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.{JsonLdApi, JsonLdJavaApi} import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution import ch.epfl.bluebrain.nexus.delta.sourcing.Transactors import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionMetadata -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionStore.FailedElemLogRow import fs2.Stream import io.circe.Printer import monix.bio.{Task, UIO} @@ -84,16 +83,16 @@ object ProjectionErrors { val store = FailedElemLogStore(xas, config) override def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] = - store.saveFailedElems(metadata, failures) + store.save(metadata, failures) override def failedElemEntries( projectionProject: ProjectRef, projectionId: Iri, offset: Offset - ): Stream[Task, FailedElemLogRow] = store.failedElemEntries(projectionProject, projectionId, offset) + ): Stream[Task, FailedElemLogRow] = store.stream(projectionProject, projectionId, offset) override def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] = - store.failedElemEntries(projectionName, offset) + store.stream(projectionName, offset) override def failedElemSses(projectionProject: ProjectRef, projectionId: Iri, offset: Offset)(implicit rcr: RemoteContextResolution diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/query/StreamingQuery.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/query/StreamingQuery.scala index 77d3406556..a5e698ce47 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/query/StreamingQuery.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/query/StreamingQuery.scala @@ -2,8 +2,7 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.query import cats.effect.ExitCase import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.sourcing.Predicate.Project -import ch.epfl.bluebrain.nexus.delta.sourcing.Transactors +import ch.epfl.bluebrain.nexus.delta.sourcing.{Scope, Transactors} import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Label, ProjectRef, Tag} @@ -41,7 +40,7 @@ object StreamingQuery { * the transactors */ def remaining(project: ProjectRef, tag: Tag, start: Offset, xas: Transactors): UIO[Option[RemainingElems]] = { - val where = Fragments.whereAndOpt(Project(project).asFragment, Some(fr"tag = $tag"), start.asFragment) + val where = Fragments.whereAndOpt(Scope(project).asFragment, 
Some(fr"tag = $tag"), start.asFragment) sql"""SELECT count(ordering), max(instant) |FROM public.scoped_states |$where @@ -81,7 +80,7 @@ object StreamingQuery { xas: Transactors ): Stream[Task, Elem[Unit]] = { def query(offset: Offset): Query0[Elem[Unit]] = { - val where = Fragments.whereAndOpt(Project(project).asFragment, Some(fr"tag = $tag"), offset.asFragment) + val where = Fragments.whereAndOpt(Scope(project).asFragment, Some(fr"tag = $tag"), offset.asFragment) sql"""((SELECT 'newState', type, id, org, project, instant, ordering, rev |FROM public.scoped_states |$where @@ -137,7 +136,7 @@ object StreamingQuery { decodeValue: (EntityType, Json) => Task[A] ): Stream[Task, Elem[A]] = { def query(offset: Offset): Query0[Elem[Json]] = { - val where = Fragments.whereAndOpt(Project(project).asFragment, Some(fr"tag = $tag"), offset.asFragment) + val where = Fragments.whereAndOpt(Scope(project).asFragment, Some(fr"tag = $tag"), offset.asFragment) sql"""((SELECT 'newState', type, id, org, project, value, instant, ordering, rev |FROM public.scoped_states |$where diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStore.scala index 933ad772fc..f49b62a7f5 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStore.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStore.scala @@ -6,12 +6,13 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.implicits.IriInstances import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.Latest import ch.epfl.bluebrain.nexus.delta.sourcing.model._ +import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.{RefreshStrategy, StreamingQuery} import ch.epfl.bluebrain.nexus.delta.sourcing.state.ScopedStateStore.StateNotFound import ch.epfl.bluebrain.nexus.delta.sourcing.state.ScopedStateStore.StateNotFound.{TagNotFound, UnknownState} import ch.epfl.bluebrain.nexus.delta.sourcing.state.State.ScopedState -import ch.epfl.bluebrain.nexus.delta.sourcing.{Execute, PartitionInit, Predicate, Serializer, Transactors} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Execute, PartitionInit, Scope, Serializer, Transactors} import doobie._ import doobie.implicits._ import doobie.postgres.implicits._ @@ -19,7 +20,7 @@ import io.circe.Decoder import monix.bio.IO /** - * Allows to save/fetch [[ScopeState]] from the database + * Allows to save/fetch [[ScopedState]] from the database */ trait ScopedStateStore[Id, S <: ScopedState] { @@ -72,48 +73,48 @@ trait ScopedStateStore[Id, S <: ScopedState] { * Fetches latest states from the given type from the beginning. * * The stream is completed when it reaches the end. - * @param predicate + * @param scope * to filter returned states */ - def currentStates(predicate: Predicate): EnvelopeStream[S] = - currentStates(predicate, Offset.Start) + def currentStates(scope: Scope): EnvelopeStream[S] = + currentStates(scope, Offset.Start) /** * Fetches states from the given type with the given tag from the beginning. * * The stream is completed when it reaches the end. 
- * @param predicate + * @param scope * to filter returned states * @param tag * only states with this tag will be selected */ - def currentStates(predicate: Predicate, tag: Tag): EnvelopeStream[S] = - currentStates(predicate, tag, Offset.Start) + def currentStates(scope: Scope, tag: Tag): EnvelopeStream[S] = + currentStates(scope, tag, Offset.Start) /** * Fetches latest states from the given type from the provided offset. * * The stream is completed when it reaches the end. - * @param predicate + * @param scope * to filter returned states * @param offset * the offset */ - def currentStates(predicate: Predicate, offset: Offset): EnvelopeStream[S] = - currentStates(predicate, Latest, offset) + def currentStates(scope: Scope, offset: Offset): EnvelopeStream[S] = + currentStates(scope, Latest, offset) /** * Fetches states from the given type with the given tag from the provided offset. * * The stream is completed when it reaches the end. - * @param predicate + * @param scope * to filter returned states * @param tag * only states with this tag will be selected * @param offset * the offset */ - def currentStates(predicate: Predicate, tag: Tag, offset: Offset): EnvelopeStream[S] + def currentStates(scope: Scope, tag: Tag, offset: Offset): EnvelopeStream[S] /** * Fetches latest states from the given type from the beginning @@ -121,11 +122,11 @@ trait ScopedStateStore[Id, S <: ScopedState] { * The stream is not completed when it reaches the end of the existing events, but it continues to push new events * when new events are persisted. * - * @param predicate + * @param scope * to filter returned states */ - def states(predicate: Predicate): EnvelopeStream[S] = - states(predicate, Latest, Offset.Start) + def states(scope: Scope): EnvelopeStream[S] = + states(scope, Latest, Offset.Start) /** * Fetches states from the given type with the given tag from the beginning @@ -133,12 +134,12 @@ trait ScopedStateStore[Id, S <: ScopedState] { * The stream is not completed when it reaches the end of the existing events, but it continues to push new events * when new states are persisted. * - * @param predicate + * @param scope * to filter returned states * @param tag * only states with this tag will be selected */ - def states(predicate: Predicate, tag: Tag): EnvelopeStream[S] = states(predicate, tag, Offset.Start) + def states(scope: Scope, tag: Tag): EnvelopeStream[S] = states(scope, tag, Offset.Start) /** * Fetches latest states from the given type from the provided offset @@ -146,13 +147,13 @@ trait ScopedStateStore[Id, S <: ScopedState] { * The stream is not completed when it reaches the end of the existing events, but it continues to push new events * when new events are persisted. * - * @param predicate + * @param scope * to filter returned states * @param offset * the offset */ - def states(predicate: Predicate, offset: Offset): EnvelopeStream[S] = - states(predicate, Latest, offset) + def states(scope: Scope, offset: Offset): EnvelopeStream[S] = + states(scope, Latest, offset) /** * Fetches states from the given type with the given tag from the provided offset @@ -160,14 +161,14 @@ trait ScopedStateStore[Id, S <: ScopedState] { * The stream is not completed when it reaches the end of the existing events, but it continues to push new events * when new states are persisted. 
* - * @param predicate + * @param scope * to filter returned states * @param tag * only states with this tag will be selected * @param offset * the offset */ - def states(predicate: Predicate, tag: Tag, offset: Offset): EnvelopeStream[S] + def states(scope: Scope, tag: Tag, offset: Offset): EnvelopeStream[S] } @@ -274,7 +275,7 @@ object ScopedStateStore { } private def states( - predicate: Predicate, + scope: Scope, tag: Tag, offset: Offset, strategy: RefreshStrategy @@ -284,7 +285,7 @@ object ScopedStateStore { offset => // format: off sql"""SELECT type, id, value, rev, instant, ordering FROM public.scoped_states - |${Fragments.whereAndOpt(Some(fr"type = $tpe"), predicate.asFragment, Some(fr"tag = $tag"), offset.asFragment)} + |${Fragments.whereAndOpt(Some(fr"type = $tpe"), scope.asFragment, Some(fr"tag = $tag"), offset.asFragment)} |ORDER BY ordering |LIMIT ${config.batchSize}""".stripMargin.query[Envelope[S]], _.offset, @@ -292,11 +293,11 @@ object ScopedStateStore { xas ) - override def currentStates(predicate: Predicate, tag: Tag, offset: Offset): EnvelopeStream[S] = - states(predicate, tag, offset, RefreshStrategy.Stop) + override def currentStates(scope: Scope, tag: Tag, offset: Offset): EnvelopeStream[S] = + states(scope, tag, offset, RefreshStrategy.Stop) - override def states(predicate: Predicate, tag: Tag, offset: Offset): EnvelopeStream[S] = - states(predicate, tag, offset, config.refreshStrategy) + override def states(scope: Scope, tag: Tag, offset: Offset): EnvelopeStream[S] = + states(scope, tag, offset, config.refreshStrategy) } } diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/stream/ProjectionStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/stream/ProjectionStore.scala index 439ee869d1..68d5075dbb 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/stream/ProjectionStore.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/stream/ProjectionStore.scala @@ -3,22 +3,16 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.stream import cats.effect.Clock import ch.epfl.bluebrain.nexus.delta.kernel.utils.IOUtils import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder import ch.epfl.bluebrain.nexus.delta.sourcing.Transactors import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._ -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionStore.FailedElemLogRow.FailedElemData import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionStore.ProjectionProgressRow import doobie._ import doobie.implicits._ import doobie.postgres.implicits._ import fs2.Stream -import io.circe.Encoder -import io.circe.generic.semiauto.deriveEncoder import monix.bio.{Task, UIO} import java.time.Instant @@ -141,82 +135,4 @@ object ProjectionStore { } } - /** - * The row of the failed_elem_log table - */ - final case class FailedElemLogRow( - ordering: Offset, - projectionMetadata: ProjectionMetadata, - failedElemData: FailedElemData, - instant: Instant - ) - - object FailedElemLogRow { - 
private type Row = - ( - Offset, - String, - String, - Option[ProjectRef], - Option[Iri], - EntityType, - Offset, - Iri, - Option[ProjectRef], - Int, - String, - String, - String, - Instant - ) - - /** - * Helper case class to structure FailedElemLogRow - */ - final case class FailedElemData( - id: Iri, - project: Option[ProjectRef], - entityType: EntityType, - offset: Offset, - rev: Int, - errorType: String, - message: String, - stackTrace: String - ) - - implicit val failedElemDataEncoder: Encoder.AsObject[FailedElemData] = - deriveEncoder[FailedElemData] - .mapJsonObject(_.remove("stackTrace")) - .mapJsonObject(_.remove("entityType")) - implicit val failedElemDataJsonLdEncoder: JsonLdEncoder[FailedElemData] = - JsonLdEncoder.computeFromCirce(ContextValue(contexts.error)) - - implicit val failedElemLogRow: Read[FailedElemLogRow] = { - Read[Row].map { - case ( - ordering, - name, - module, - project, - resourceId, - entityType, - elemOffset, - elemId, - elemProject, - revision, - errorType, - message, - stackTrace, - instant - ) => - FailedElemLogRow( - ordering, - ProjectionMetadata(module, name, project, resourceId), - FailedElemData(elemId, elemProject, entityType, elemOffset, revision, errorType, message, stackTrace), - instant - ) - } - } - } - } diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/syntax/DoobieSyntax.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/syntax/DoobieSyntax.scala new file mode 100644 index 0000000000..f1c7b465d9 --- /dev/null +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/syntax/DoobieSyntax.scala @@ -0,0 +1,20 @@ +package ch.epfl.bluebrain.nexus.delta.sourcing.syntax + +import ch.epfl.bluebrain.nexus.delta.sourcing.FragmentEncoder +import ch.epfl.bluebrain.nexus.delta.sourcing.syntax.DoobieSyntax.FragmentEncoderOps +import doobie.util.fragment.Fragment + +/** + * This package provides syntax via enrichment classes for Doobie + */ +trait DoobieSyntax { + + implicit final def fragmentEncoderOps[A](value: A): FragmentEncoderOps[A] = new FragmentEncoderOps[A](value) + +} + +object DoobieSyntax { + implicit class FragmentEncoderOps[A](private val value: A) extends AnyVal { + def asFragment(implicit encoder: FragmentEncoder[A]): Option[Fragment] = encoder(value) + } +} diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLogSuite.scala b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLogSuite.scala index 5664f3d6a2..568059b14e 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLogSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/ScopedEventLogSuite.scala @@ -204,7 +204,7 @@ class ScopedEventLogSuite extends BioSuite with Doobie.Fixture { test("Stream continuously the current states") { for { queue <- Queue.unbounded[Task, Envelope[PullRequestState]] - _ <- eventLog.states(Predicate.root, Offset.Start).through(queue.enqueue).compile.drain.timeout(500.millis) + _ <- eventLog.states(Scope.root, Offset.Start).through(queue.enqueue).compile.drain.timeout(500.millis) elems <- queue.tryDequeueChunk1(Int.MaxValue).map(opt => opt.map(_.toList).getOrElse(Nil)) _ = elems.assertSize(2) } yield () diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreamingSuite.scala 
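`DoobieSyntax` above is a standard value-class enrichment: any type with a `FragmentEncoder` in implicit scope picks up `.asFragment`. A hedged sketch with an illustrative type that is not part of the patch:

import doobie.implicits._
import ch.epfl.bluebrain.nexus.delta.sourcing.implicits._

// Hypothetical filter type, for illustration only
final case class MinRev(value: Int)

implicit val minRevEncoder: FragmentEncoder[MinRev] =
  FragmentEncoder.instance(r => Some(fr"rev >= ${r.value}"))

MinRev(2).asFragment // Some(rev >= 2), resolved via FragmentEncoderOps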
b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreamingSuite.scala index 5fdd1313d7..51274c660e 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreamingSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/EventStreamingSuite.scala @@ -13,7 +13,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Anonymous import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.RefreshStrategy -import ch.epfl.bluebrain.nexus.delta.sourcing.{Arithmetic, MultiDecoder, Predicate, PullRequest} +import ch.epfl.bluebrain.nexus.delta.sourcing.{Arithmetic, MultiDecoder, PullRequest, Scope} import ch.epfl.bluebrain.nexus.testkit.bio.BioSuite import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.Doobie import doobie.implicits._ @@ -82,7 +82,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get events of all types from the start") { EventStreaming .fetchAll( - Predicate.root, + Scope.root, List.empty, Offset.Start, queryConfig, @@ -102,7 +102,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get events of all types from offset 2") { EventStreaming .fetchAll( - Predicate.root, + Scope.root, List.empty, Offset.at(2L), queryConfig, @@ -120,7 +120,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get PR events from offset 2") { EventStreaming .fetchAll( - Predicate.root, + Scope.root, List(PullRequest.entityType), Offset.at(2L), queryConfig, @@ -137,7 +137,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get events from project 1 from offset 1") { EventStreaming .fetchAll( - Predicate.Project(project1), + Scope.Project(project1), List.empty, Offset.at(1L), queryConfig, @@ -153,7 +153,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get events from org 1 from offset 1") { EventStreaming .fetchAll( - Predicate.Org(project1.organization), + Scope.Org(project1.organization), List.empty, Offset.at(1L), queryConfig, @@ -170,7 +170,7 @@ class EventStreamingSuite extends BioSuite with Doobie.Fixture with Doobie.Asser test("Get all scoped events from offset 1") { EventStreaming .fetchScoped( - Predicate.Root, + Scope.Root, List.empty, Offset.at(1L), queryConfig, diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStoreSuite.scala b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStoreSuite.scala index b4597337f1..a3545b8d52 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStoreSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/event/ScopedEventStoreSuite.scala @@ -10,7 +10,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, User} import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Envelope, Label, ProjectRef} import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.RefreshStrategy -import ch.epfl.bluebrain.nexus.delta.sourcing.{Predicate, PullRequest} +import ch.epfl.bluebrain.nexus.delta.sourcing.{PullRequest, Scope} import ch.epfl.bluebrain.nexus.testkit.bio.BioSuite import 
ch.epfl.bluebrain.nexus.delta.sourcing.postgres.Doobie import doobie.implicits._ @@ -90,28 +90,28 @@ class ScopedEventStoreSuite extends BioSuite with Doobie.Fixture with Doobie.Ass test("Fetch all current events from the beginning") { store - .currentEvents(Predicate.Root, Offset.Start) + .currentEvents(Scope.Root, Offset.Start) .assert(envelope1, envelope2, envelope3, envelope4, envelope5, envelope6) } test("Fetch current events for `org` from offset 2") { - store.currentEvents(Predicate.Org(Label.unsafe("org")), Offset.at(2L)).assert(envelope3, envelope4, envelope5) + store.currentEvents(Scope.Org(Label.unsafe("org")), Offset.at(2L)).assert(envelope3, envelope4, envelope5) } test("Fetch current events for `proj1` from the beginning") { - store.currentEvents(Predicate.Project(project1), Offset.Start).assert(envelope1, envelope2, envelope3, envelope4) + store.currentEvents(Scope.Project(project1), Offset.Start).assert(envelope1, envelope2, envelope3, envelope4) } test("Fetch all events from the beginning") { - store.events(Predicate.Root, Offset.Start).assert(envelope1, envelope2, envelope3, envelope4, envelope5, envelope6) + store.events(Scope.Root, Offset.Start).assert(envelope1, envelope2, envelope3, envelope4, envelope5, envelope6) } test(s"Fetch current events for `${project1.organization}` from offset 2") { - store.events(Predicate.Org(project1.organization), Offset.at(2L)).assert(envelope3, envelope4, envelope5) + store.events(Scope.Org(project1.organization), Offset.at(2L)).assert(envelope3, envelope4, envelope5) } test(s"Fetch current events for `$project1` from the beginning") { - store.events(Predicate.Project(project1), Offset.Start).assert(envelope1, envelope2, envelope3, envelope4) + store.events(Scope.Project(project1), Offset.Start).assert(envelope1, envelope2, envelope3, envelope4) } } diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala index ad332d9c83..651a98660e 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala @@ -1,7 +1,10 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.projections +import cats.syntax.all._ +import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination +import ch.epfl.bluebrain.nexus.delta.kernel.search.{Pagination, TimeRange} +import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.rdf.syntax._ import ch.epfl.bluebrain.nexus.delta.sourcing.PurgeElemFailures import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, ProjectRef} @@ -10,138 +13,161 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.Doobie import ch.epfl.bluebrain.nexus.delta.sourcing.query.RefreshStrategy import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionMetadata -import ch.epfl.bluebrain.nexus.testkit.IOFixedClock +import ch.epfl.bluebrain.nexus.testkit.MutableClock import ch.epfl.bluebrain.nexus.testkit.bio.BioSuite -import munit.AnyFixture +import munit.{AnyFixture, Location} import java.time.Instant import 
scala.concurrent.duration.{DurationInt, FiniteDuration} -class FailedElemLogStoreSuite extends BioSuite with IOFixedClock with Doobie.Fixture with Doobie.Assertions { +class FailedElemLogStoreSuite extends BioSuite with MutableClock.Fixture with Doobie.Fixture with Doobie.Assertions { - override def munitFixtures: Seq[AnyFixture[_]] = List(doobie) + override def munitFixtures: Seq[AnyFixture[_]] = List(doobie, clock) private lazy val xas = doobie() + implicit private lazy val mutableClock: MutableClock = clock() + private lazy val store = FailedElemLogStore(xas, QueryConfig(10, RefreshStrategy.Stop)) - private val name = "offset" - private val project = ProjectRef.unsafe("org", "proj") - private val resource = iri"https://resource" - private val metadata = ProjectionMetadata("test", name, Some(project), Some(resource)) + private def createMetadata(project: ProjectRef, id: Iri) = + ProjectionMetadata("test", s"$project|$id", Some(project), Some(id)) + + private val project1 = ProjectRef.unsafe("org", "proj") + private val projection11 = nxv + "projection11" + private val metadata11 = createMetadata(project1, projection11) + private val projection12 = nxv + "projection12" + private val metadata12 = createMetadata(project1, projection12) + + private val project2 = ProjectRef.unsafe("org", "proj2") + private val metadata21 = createMetadata(project2, projection12) private val id = nxv + "id" private val error = new RuntimeException("boom") private val rev = 1 - private val fail1 = FailedElem(EntityType("ACL"), id, Some(project), Instant.EPOCH, Offset.At(42L), error, rev) - private val fail2 = FailedElem(EntityType("Schema"), id, Some(project), Instant.EPOCH, Offset.At(42L), error, rev) - test("Return no failed elem entries by name") { - for { - entries <- store.failedElemEntries(name, Offset.start).compile.toList - _ = entries.assertEmpty() - } yield () - } + private val entityType = EntityType("Test") + private def createFailedElem(project: ProjectRef, offset: Long) = + FailedElem(entityType, id, Some(project), Instant.EPOCH.plusSeconds(offset), Offset.at(offset), error, rev) - test("Return no failed elem entries by (project, id)") { + private val fail1 = createFailedElem(project1, 1L) + private val fail2 = createFailedElem(project1, 2L) + private val fail3 = createFailedElem(project1, 3L) + private val fail4 = createFailedElem(project1, 4L) + private val fail5 = createFailedElem(project2, 5L) + + private def assertSave(metadata: ProjectionMetadata, failed: FailedElem) = for { - entries <- store.failedElemEntries(project, resource, Offset.start).compile.toList - _ = entries.assertEmpty() + _ <- mutableClock.set(failed.instant) + _ <- store.save(metadata, List(failed)) } yield () - } - test("Insert empty list of failed elem") { + private def assertStream(metadata: ProjectionMetadata, offset: Offset, expected: List[FailedElem])(implicit + loc: Location + ) = { + val expectedOffsets = expected.map(_.offset) for { - _ <- store.saveFailedElems(metadata, List.empty) - entries <- store.failedElemEntries(name, Offset.start).compile.toList - _ = entries.assertEmpty() + _ <- store.stream(metadata.name, offset).map(_.failedElemData.offset).assert(expectedOffsets) + _ <- (metadata.project, metadata.resourceId).traverseN { case (project, resourceId) => + store.stream(project, resourceId, offset).map(_.failedElemData.offset).assert(expectedOffsets) + } } yield () } - test("Return no failed elem entries by name") { - for { - entries <- store.failedElemEntries(name, Offset.start).compile.toList - _ =
entries.assertEmpty() - } yield () + private def assertList( + project: ProjectRef, + projectionId: Iri, + pagination: FromPagination, + timeRange: TimeRange, + expected: List[FailedElem] + )(implicit loc: Location) = { + val expectedOffsets = expected.map(_.offset) + store + .list(project, projectionId, pagination, timeRange) + .map(_.map(_.failedElemData.offset)) + .assert(expectedOffsets) } - test("Return no failed elem entries by (project, id)") { + test("Insert empty list of failures") { for { - entries <- store.failedElemEntries(project, resource, Offset.start).compile.toList - _ = entries.assertEmpty() + _ <- store.save(metadata11, List.empty) + _ <- assertStream(metadata11, Offset.Start, List.empty) } yield () } - test("Insert empty list of failed elem") { + test("Insert several failures") { for { - _ <- store.saveFailedElems(metadata, List.empty) - entries <- store.failedElemEntries(name, Offset.start).compile.toList - _ = entries.assertEmpty() + _ <- assertSave(metadata11, fail1) + _ <- assertSave(metadata12, fail2) + _ <- assertSave(metadata12, fail3) + _ <- assertSave(metadata12, fail4) + _ <- assertSave(metadata21, fail5) } yield () } - test("Insert failed elem") { + test(s"Get stream of failures for ${metadata11.name}") { for { - _ <- store.saveFailedElems(metadata, List(fail1)) - entries <- store.failedElemEntries(name, Offset.start).compile.toList + entries <- store.stream(metadata11.name, Offset.start).compile.toList r = entries.assertOneElem - _ = assertEquals(r.projectionMetadata, metadata) + _ = assertEquals(r.projectionMetadata, metadata11) _ = assertEquals(r.ordering, Offset.At(1L)) - _ = assertEquals(r.instant, Instant.EPOCH) + _ = assertEquals(r.instant, fail1.instant) elem = r.failedElemData - _ = assertEquals(elem.offset, Offset.At(42L)) + _ = assertEquals(elem.offset, Offset.At(1L)) _ = assertEquals(elem.errorType, "java.lang.RuntimeException") _ = assertEquals(elem.id, id) - _ = assertEquals(elem.entityType, EntityType("ACL")) + _ = assertEquals(elem.entityType, entityType) _ = assertEquals(elem.rev, rev) - _ = assertEquals(elem.project, Some(project)) + _ = assertEquals(elem.project, Some(project1)) } yield () } - test("Insert several failed elem") { - for { - _ <- store.saveFailedElems(metadata, List(fail1, fail2)) - entries <- store.failedElemEntries(name, Offset.start).compile.toList - _ = entries.assertSize(3) - } yield () + test(s"Get stream of failures for ${metadata12.name}") { + assertStream(metadata12, Offset.start, List(fail2, fail3, fail4)) } - test("Return failed elem entries by (project, id)") { - for { - entries <- store.failedElemEntries(project, resource, Offset.start).compile.toList - _ = entries.assertSize(3) - } yield () + test("Get an empty stream for an unknown projection") { + val unknownMetadata = createMetadata(ProjectRef.unsafe("xxx", "xxx"), nxv + "xxx") + assertStream(unknownMetadata, Offset.start, List.empty) } - test("Return empty if no failed elem is found by name") { - for { - entries <- store.failedElemEntries("other", Offset.start).compile.toList - _ = entries.assertEmpty() - } yield () + test(s"List all failures for ${metadata12.name}") { + assertList(project1, projection12, Pagination.OnePage, TimeRange.Anytime, List(fail2, fail3, fail4)) } - test("Return empty if not found by (project, id)") { - for { - entries <- store.failedElemEntries(project, iri"https://example.com", Offset.start).compile.toList - _ = entries.assertEmpty() - } yield () + test(s"Paginate to list 'fail3' for ${metadata12.name}") { + assertList(project1, 
projection12, FromPagination(1, 1), TimeRange.Anytime, List(fail3)) + } + + test(s"Paginate to list 'fail3' and 'fail4' for ${metadata12.name}") { + assertList(project1, projection12, FromPagination(1, 2), TimeRange.Anytime, List(fail3, fail4)) + } + + test(s"List failures before ${fail3.instant} for ${metadata12.name}") { + assertList(project1, projection12, Pagination.OnePage, TimeRange.Before(fail3.instant), List(fail2, fail3)) + } + + private val between = TimeRange.Between(fail2.instant.plusMillis(1L), fail3.instant.plusMillis(1L)) + test(s"List failures between ${between.start} and ${between.end} for ${metadata12.name}") { + assertList(project1, projection12, Pagination.OnePage, between, List(fail3)) } - test("Purge failed elements after predefined ttl") { - val failedElemTtl = 14.days + test("Purge failures after predefined ttl") { + val failedElemTtl = 14.days + val purgeElemFailures = new PurgeElemFailures(xas, failedElemTtl) - lazy val purgeElemFailures: FiniteDuration => PurgeElemFailures = timeTravel => - new PurgeElemFailures(xas, failedElemTtl)( - IOFixedClock.ioClock(Instant.EPOCH.plusMillis(timeTravel.toMillis)) - ) + def timeTravel(duration: FiniteDuration) = mutableClock.set(Instant.EPOCH.plusMillis(duration.toMillis)) for { - _ <- purgeElemFailures(failedElemTtl - 500.millis)() - entries <- store.failedElemEntries(project, resource, Offset.start).compile.toList - _ = entries.assertSize(3) // no elements are deleted after 13 days - _ <- purgeElemFailures(failedElemTtl + 500.millis)() - entries2 <- store.failedElemEntries(project, resource, Offset.start).compile.toList - _ = entries2.assertEmpty() // all elements were deleted after 14 days + _ <- store.count.assert(5L) + _ <- timeTravel(failedElemTtl - 500.millis) + _ <- purgeElemFailures() + // no elements are deleted after 13 days + _ <- store.count.assert(5L) + _ <- timeTravel(failedElemTtl + 10.seconds) + _ <- purgeElemFailures() + // all elements were deleted after 14 days + _ <- store.count.assert(0L) } yield () } diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStoreSuite.scala b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStoreSuite.scala index 0ce01a5b67..2092d5fd50 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStoreSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/state/ScopedStateStoreSuite.scala @@ -13,7 +13,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Envelope, Label, ProjectRef import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.query.RefreshStrategy import ch.epfl.bluebrain.nexus.delta.sourcing.state.ScopedStateStore.StateNotFound.{TagNotFound, UnknownState} -import ch.epfl.bluebrain.nexus.delta.sourcing.{EntityCheck, Predicate, PullRequest} +import ch.epfl.bluebrain.nexus.delta.sourcing.{EntityCheck, PullRequest, Scope} import ch.epfl.bluebrain.nexus.testkit.bio.BioSuite import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.Doobie import doobie.implicits._ @@ -84,35 +84,35 @@ class ScopedStateStoreSuite extends BioSuite with Doobie.Fixture with Doobie.Ass } test("Fetch all current latest states from the beginning") { - store.currentStates(Predicate.Root).assert(envelope1, envelope2, envelope3, envelope4) + store.currentStates(Scope.Root).assert(envelope1, envelope2, envelope3, envelope4) } test("Fetch all latest states from the beginning") { 
- store.states(Predicate.Root).assert(envelope1, envelope2, envelope3, envelope4) + store.states(Scope.Root).assert(envelope1, envelope2, envelope3, envelope4) } test(s"Fetch current states for ${project1.organization} from the beginning") { - store.currentStates(Predicate.Org(project1.organization)).assert(envelope1, envelope2, envelope3) + store.currentStates(Scope.Org(project1.organization)).assert(envelope1, envelope2, envelope3) } test(s"Fetch states for ${project1.organization} from the beginning") { - store.states(Predicate.Org(project1.organization)).assert(envelope1, envelope2, envelope3) + store.states(Scope.Org(project1.organization)).assert(envelope1, envelope2, envelope3) } test(s"Fetch current states for $project1 from offset 2") { - store.currentStates(Predicate.Project(project1), Offset.at(1L)).assert(envelope2) + store.currentStates(Scope.Project(project1), Offset.at(1L)).assert(envelope2) } test(s"Fetch states for $project1 from offset 2") { - store.states(Predicate.Project(project1), Offset.at(1L)).assert(envelope2) + store.states(Scope.Project(project1), Offset.at(1L)).assert(envelope2) } test(s"Fetch all current states from the beginning for tag `$customTag`") { - store.currentStates(Predicate.Root, customTag).assert(envelope1Tagged, envelope3Tagged) + store.currentStates(Scope.Root, customTag).assert(envelope1Tagged, envelope3Tagged) } test(s"Fetch all states from the beginning for tag `$customTag`") { - store.states(Predicate.Root, customTag).assert(envelope1Tagged, envelope3Tagged) + store.states(Scope.Root, customTag).assert(envelope1Tagged, envelope3Tagged) } test("Update state 1 successfully") { @@ -124,7 +124,7 @@ class ScopedStateStoreSuite extends BioSuite with Doobie.Fixture with Doobie.Ass } test("Fetch all current latest states from the beginning") { - store.currentStates(Predicate.Root).assert(envelope2, envelope3, envelope4, envelopeUpdated1) + store.currentStates(Scope.Root).assert(envelope2, envelope3, envelope4, envelopeUpdated1) } test("Delete tagged state 3 successfully") { @@ -136,7 +136,7 @@ class ScopedStateStoreSuite extends BioSuite with Doobie.Fixture with Doobie.Ass } test(s"Fetch all states from the beginning for tag `$customTag` after deletion of `state3`") { - store.states(Predicate.Root, customTag).assert(envelope1Tagged) + store.states(Scope.Root, customTag).assert(envelope1Tagged) } test("Check that the given ids does exist") { diff --git a/delta/testkit/src/main/scala/ch/epfl/bluebrain/nexus/testkit/MutableClock.scala b/delta/testkit/src/main/scala/ch/epfl/bluebrain/nexus/testkit/MutableClock.scala new file mode 100644 index 0000000000..77418f34d1 --- /dev/null +++ b/delta/testkit/src/main/scala/ch/epfl/bluebrain/nexus/testkit/MutableClock.scala @@ -0,0 +1,29 @@ +package ch.epfl.bluebrain.nexus.testkit + +import cats.effect.concurrent.Ref +import cats.effect.{Clock, Resource} +import ch.epfl.bluebrain.nexus.testkit.bio.ResourceFixture.TaskFixture +import ch.epfl.bluebrain.nexus.testkit.bio.{BioSuite, ResourceFixture} +import monix.bio.{Task, UIO} + +import java.time.Instant +import scala.concurrent.duration.TimeUnit + +final class MutableClock(value: Ref[Task, Instant]) extends Clock[UIO] { + + def set(instant: Instant): UIO[Unit] = value.set(instant).hideErrors + override def realTime(unit: TimeUnit): UIO[Long] = value.get.map(_.toEpochMilli).hideErrors + + override def monotonic(unit: TimeUnit): UIO[Long] = value.get.map(_.toEpochMilli).hideErrors +} +object MutableClock { + private def suiteLocalFixture: TaskFixture[MutableClock] = { 
+ val clock = Ref.of[Task, Instant](Instant.EPOCH).map(new MutableClock(_)) + ResourceFixture.suiteLocal("clock", Resource.eval(clock)) + } + + trait Fixture { + self: BioSuite => + val clock: ResourceFixture.TaskFixture[MutableClock] = suiteLocalFixture + } +} From d5fe84863265f7eb1524fea419612bb1246050f8 Mon Sep 17 00:00:00 2001 From: Simon Date: Thu, 27 Jul 2023 15:31:40 +0200 Subject: [PATCH 4/6] Add endpoint to list indexing errors for Elasticsearch (#4109) * Add endpoint to list indexing errors for Elasticsearch * Address feedback + add missing test * Change test names --------- Co-authored-by: Simon Dumas --- .../ElasticSearchPluginModule.scala | 59 ++++- .../indexing/IndexingViewDef.scala | 23 +- .../routes/ElasticSearchAllRoutes.scala | 25 ++ .../routes/ElasticSearchIndexingRoutes.scala | 191 ++++++++++++++ .../routes/ElasticSearchQueryRoutes.scala | 27 +- .../routes/ElasticSearchViewsRoutes.scala | 236 +++++------------- .../elasticsearch/routes/package.scala | 12 + .../routes/list-indexing-errors.json | 31 +++ .../src/test/resources/routes/offset.json | 4 - .../src/test/resources/routes/statistics.json | 11 - .../ElasticSearchIndexingRoutesSpec.scala | 222 ++++++++++++++++ .../routes/ElasticSearchQueryRoutesSpec.scala | 91 +------ .../ElasticSearchViewsRoutesBaseSpec.scala | 85 +++++++ .../routes/ElasticSearchViewsRoutesSpec.scala | 226 ++--------------- .../delta/sdk/directives/UriDirectives.scala | 2 +- .../sourcing/model/FailedElemLogRow.scala | 2 + .../projections/FailedElemLogStore.scala | 38 ++- .../projections/ProjectionErrors.scala | 62 +++-- .../sourcing/projections/Projections.scala | 18 +- .../projections/FailedElemLogStoreSuite.scala | 36 ++- 20 files changed, 844 insertions(+), 557 deletions(-) create mode 100644 delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala create mode 100644 delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala create mode 100644 delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/package.scala create mode 100644 delta/plugins/elasticsearch/src/test/resources/routes/list-indexing-errors.json delete mode 100644 delta/plugins/elasticsearch/src/test/resources/routes/offset.json delete mode 100644 delta/plugins/elasticsearch/src/test/resources/routes/statistics.json create mode 100644 delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala create mode 100644 delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala index 6c76bcbcfd..9781e8cead 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala @@ -10,7 +10,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.indexing.ElasticSearc import 
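`MutableClock` gives suites deterministic control over `Clock[UIO]`, replacing the fixed-instant clock used before. A hedged sketch of a suite advancing time, following the fixture pattern above; the suite name and assertion values are illustrative:

import java.time.Instant
import java.util.concurrent.TimeUnit.MILLISECONDS
import munit.AnyFixture

class MyClockSuite extends BioSuite with MutableClock.Fixture {
  override def munitFixtures: Seq[AnyFixture[_]] = List(clock)

  test("advance the clock") {
    val mutable = clock()
    for {
      _   <- mutable.set(Instant.EPOCH.plusSeconds(60))
      // realTime ignores the requested unit and always reports epoch millis,
      // per the implementation above
      now <- mutable.realTime(MILLISECONDS)
      _    = assertEquals(now, 60000L)
    } yield ()
  }
}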
ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection.ProjectContextRejection import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{contexts, defaultElasticsearchMapping, defaultElasticsearchSettings, schema => viewsSchemaId, ElasticSearchView, ElasticSearchViewEvent} import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.query.{DefaultViewsQuery, ElasticSearchQueryError} -import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.{ElasticSearchQueryRoutes, ElasticSearchViewsRoutes} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.{ElasticSearchAllRoutes, ElasticSearchIndexingRoutes, ElasticSearchQueryRoutes, ElasticSearchViewsRoutes} import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue.ContextObject @@ -170,8 +170,6 @@ class ElasticSearchPluginModule(priority: Int) extends ModuleDef { identities: Identities, aclCheck: AclCheck, views: ElasticSearchViews, - projections: Projections, - projectionErrors: ProjectionErrors, schemeDirectives: DeltaSchemeDirectives, indexingAction: IndexingAction @Id("aggregate"), viewsQuery: ElasticSearchViewsQuery, @@ -187,8 +185,6 @@ class ElasticSearchPluginModule(priority: Int) extends ModuleDef { aclCheck, views, viewsQuery, - projections, - projectionErrors, schemeDirectives, indexingAction(_, _, _)(shift, cr) )( @@ -231,6 +227,36 @@ class ElasticSearchPluginModule(priority: Int) extends ModuleDef { ) } + make[ElasticSearchIndexingRoutes].from { + ( + identities: Identities, + aclCheck: AclCheck, + views: ElasticSearchViews, + projections: Projections, + projectionErrors: ProjectionErrors, + schemeDirectives: DeltaSchemeDirectives, + baseUri: BaseUri, + s: Scheduler, + cr: RemoteContextResolution @Id("aggregate"), + esConfig: ElasticSearchViewsConfig, + ordering: JsonKeyOrdering + ) => + new ElasticSearchIndexingRoutes( + identities, + aclCheck, + views.fetchIndexingView(_, _), + projections, + projectionErrors, + schemeDirectives + )( + baseUri, + esConfig.pagination, + s, + cr, + ordering + ) + } + make[ElasticSearchScopeInitialization] .from { (views: ElasticSearchViews, serviceAccount: ServiceAccount, config: ElasticSearchViewsConfig) => new ElasticSearchScopeInitialization(views, serviceAccount, config.defaults) @@ -292,11 +318,24 @@ class ElasticSearchPluginModule(priority: Int) extends ModuleDef { many[ApiMappings].add(ElasticSearchViews.mappings) - many[PriorityRoute].add { (route: ElasticSearchViewsRoutes) => - PriorityRoute(priority, route.routes, requiresStrictEntity = true) - } - many[PriorityRoute].add { (route: ElasticSearchQueryRoutes) => - PriorityRoute(priority, route.routes, requiresStrictEntity = true) + many[PriorityRoute].add { + ( + es: ElasticSearchViewsRoutes, + query: ElasticSearchQueryRoutes, + indexing: ElasticSearchIndexingRoutes, + schemeDirectives: DeltaSchemeDirectives, + baseUri: BaseUri + ) => + PriorityRoute( + priority, + ElasticSearchAllRoutes( + schemeDirectives, + es.routes, + query.routes, + indexing.routes + )(baseUri), + requiresStrictEntity = true + ) } many[ServiceDependency].add { new ElasticSearchServiceDependency(_) } diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/indexing/IndexingViewDef.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/indexing/IndexingViewDef.scala index 5cfba4af6d..6471ace817 
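Registering one merged `PriorityRoute` instead of one per route class keeps the base-URI prefix and the `views`-to-schema URI rewriting in a single place, `ElasticSearchAllRoutes`. A hedged sketch of the composition it performs; the route value names are illustrative and an implicit `BaseUri` is assumed in scope:

// concat tries each sub-route in declaration order under the shared prefix
val merged: Route =
  ElasticSearchAllRoutes(
    schemeDirectives,
    viewsRoutes,    // CRUD on elasticsearch views
    queryRoutes,    // _search endpoints
    indexingRoutes  // statistics / failures / offset, added by this patch
  )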
100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/indexing/IndexingViewDef.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/indexing/IndexingViewDef.scala @@ -50,7 +50,16 @@ object IndexingViewDef { context: Option[ContextObject], indexingRev: Int, rev: Int - ) extends IndexingViewDef + ) extends IndexingViewDef { + + def projectionMetadata: ProjectionMetadata = + ProjectionMetadata( + ElasticSearchViews.entityType.value, + projection, + Some(ref.project), + Some(ref.viewId) + ) + } /** * Deprecated view to be cleaned up and removed from the supervisor @@ -106,14 +115,6 @@ object IndexingViewDef { stream: Offset => ElemStream[GraphResource], sink: Sink )(implicit cr: RemoteContextResolution): Task[CompiledProjection] = { - val project = v.ref.project - val id = v.ref.viewId - val metadata = ProjectionMetadata( - ElasticSearchViews.entityType.value, - v.projection, - Some(project), - Some(id) - ) val mergedContext = v.context.fold(defaultContext) { defaultContext.merge(_) } val postPipes: Operation = new GraphResourceToDocument(mergedContext, false) @@ -122,7 +123,7 @@ object IndexingViewDef { pipes <- v.pipeChain.traverse(compilePipeChain) chain = pipes.fold(NonEmptyChain.one(postPipes))(NonEmptyChain(_, postPipes)) projection <- CompiledProjection.compile( - metadata, + v.projectionMetadata, ExecutionStrategy.PersistentSingleNode, Source(stream), chain, @@ -131,7 +132,7 @@ object IndexingViewDef { } yield projection Task.fromEither(compiled).tapError { e => - Task.delay(logger.error(s"View '$project/$id' could not be compiled.", e)) + Task.delay(logger.error(s"View '${v.ref}' could not be compiled.", e)) } } } diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala new file mode 100644 index 0000000000..de30508f92 --- /dev/null +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala @@ -0,0 +1,25 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes + +import akka.http.scaladsl.server.Directives.concat +import akka.http.scaladsl.server.Route +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model._ +import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives +import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri + +class ElasticSearchAllRoutes(schemeDirectives: DeltaSchemeDirectives, underlying: Route*)(implicit baseUri: BaseUri) + extends ElasticSearchViewsDirectives { + + import schemeDirectives._ + + def routes: Route = + (baseUriPrefix(baseUri.prefix) & replaceUri("views", schema.iri)) { + concat(underlying: _*) + } + +} + +object ElasticSearchAllRoutes { + + def apply(schemeDirectives: DeltaSchemeDirectives, routes: Route*)(implicit baseUri: BaseUri): Route = + new ElasticSearchAllRoutes(schemeDirectives, routes: _*).routes +} diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala new file mode 100644 index 0000000000..60fc140f87 --- /dev/null +++ 
b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala
@@ -0,0 +1,191 @@
+package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes
+
+import akka.http.scaladsl.server.Directives._
+import akka.http.scaladsl.server._
+import cats.syntax.all._
+import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination
+import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange
+import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.indexing.IndexingViewDef.ActiveViewDef
+import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection._
+import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model._
+import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.permissions.{read => Read, write => Write}
+import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.ElasticSearchIndexingRoutes.FetchIndexingView
+import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary
+import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords
+import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution}
+import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder
+import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering
+import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck
+import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceUnmarshalling
+import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaDirectives._
+import ch.epfl.bluebrain.nexus.delta.sdk.directives.{AuthDirectives, DeltaSchemeDirectives}
+import ch.epfl.bluebrain.nexus.delta.sdk.identities.Identities
+import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
+import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfMarshalling
+import ch.epfl.bluebrain.nexus.delta.sdk.model._
+import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults.searchResultsJsonLdEncoder
+import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{PaginationConfig, SearchResults}
+import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef
+import ch.epfl.bluebrain.nexus.delta.sourcing.ProgressStatistics
+import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData
+import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef}
+import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset
+import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections}
+import io.circe.Encoder
+import io.circe.generic.semiauto.deriveEncoder
+import io.circe.syntax._
+import monix.bio.IO
+import monix.execution.Scheduler
+
+/**
+  * The elasticsearch indexing routes, exposing the statistics, failures and offset endpoints of indexing views
+  *
+  * @param identities
+  *   the identity module
+  * @param aclCheck
+  *   to check acls
+  * @param fetch
+  *   how to fetch an active elasticsearch indexing view
+  * @param projections
+  *   the projections module
+  * @param projectionErrors
+  *   the projection errors module
+  * @param schemeDirectives
+  *   directives related to orgs and projects
+  */
+final class ElasticSearchIndexingRoutes(
+    identities: Identities,
+    aclCheck: AclCheck,
+    fetch: FetchIndexingView,
+    projections: Projections,
+    projectionErrors: ProjectionErrors,
+    schemeDirectives: DeltaSchemeDirectives
+)(implicit
+    baseUri: BaseUri,
+    paginationConfig: PaginationConfig,
+    s: Scheduler,
+    cr: RemoteContextResolution,
+    ordering: JsonKeyOrdering
+) extends AuthDirectives(identities, aclCheck)
+    with CirceUnmarshalling
+    with RdfMarshalling {
+
+  import schemeDirectives._
+
+  implicit private val viewStatisticEncoder: 
Encoder.AsObject[ProgressStatistics] =
+    deriveEncoder[ProgressStatistics].mapJsonObject(_.add(keywords.tpe, "ViewStatistics".asJson))
+
+  implicit private val viewStatisticJsonLdEncoder: JsonLdEncoder[ProgressStatistics] =
+    JsonLdEncoder.computeFromCirce(ContextValue(Vocabulary.contexts.statistics))
+
+  def routes: Route =
+    pathPrefix("views") {
+      extractCaller { implicit caller =>
+        resolveProjectRef.apply { ref =>
+          concat(
+            idSegment { id =>
+              concat(
+                // Fetch statistics for an elasticsearch view
+                (pathPrefix("statistics") & get & pathEndOrSingleSlash) {
+                  authorizeFor(ref, Read).apply {
+                    emit(
+                      fetch(id, ref)
+                        .flatMap(v => projections.statistics(ref, v.resourceTag, v.projection))
+                        .rejectWhen(decodingFailedOrViewNotFound)
+                    )
+                  }
+                },
+                // Fetch elasticsearch view indexing failures
+                (pathPrefix("failures") & get) {
+                  authorizeFor(ref, Write).apply {
+                    concat(
+                      (pathPrefix("sse") & lastEventId) { offset =>
+                        emit(
+                          fetch(id, ref)
+                            .map { view =>
+                              projectionErrors.failedElemSses(view.ref.project, view.ref.viewId, offset)
+                            }
+                        )
+                      },
+                      (fromPaginated & timeRange("instant") & extractUri & pathEndOrSingleSlash) {
+                        (pagination, timeRange, uri) =>
+                          implicit val searchJsonLdEncoder: JsonLdEncoder[SearchResults[FailedElemData]] =
+                            searchResultsJsonLdEncoder(FailedElemLogRow.context, pagination, uri)
+                          emit(
+                            fetch(id, ref)
+                              .flatMap { view =>
+                                listErrors(view.ref, pagination, timeRange)
+                              }
+                          )
+                      }
+                    )
+                  }
+                },
+                // Manage an elasticsearch view offset
+                (pathPrefix("offset") & pathEndOrSingleSlash) {
+                  concat(
+                    // Fetch an elasticsearch view offset
+                    (get & authorizeFor(ref, Read)) {
+                      emit(
+                        fetch(id, ref)
+                          .flatMap(v => projections.offset(v.projection))
+                          .rejectWhen(decodingFailedOrViewNotFound)
+                      )
+                    },
+                    // Remove an elasticsearch view offset (restart the view)
+                    (delete & authorizeFor(ref, Write)) {
+                      emit(
+                        fetch(id, ref)
+                          .flatMap { v => projections.scheduleRestart(v.projection) }
+                          .as(Offset.start)
+                          .rejectWhen(decodingFailedOrViewNotFound)
+                      )
+                    }
+                  )
+                }
+              )
+            }
+          )
+        }
+      }
+    }
+
+  private def listErrors(ref: ViewRef, pagination: FromPagination, timeRange: TimeRange) = {
+    for {
+      results <- projectionErrors.list(ref.project, ref.viewId, pagination, timeRange)
+      count   <- projectionErrors.count(ref.project, ref.viewId, timeRange)
+    } yield SearchResults(count, results.map { _.failedElemData })
+  }.widen[SearchResults[FailedElemData]]
+}
+
+object ElasticSearchIndexingRoutes {
+
+  type FetchIndexingView = (IdSegment, ProjectRef) => IO[ElasticSearchViewRejection, ActiveViewDef]
+
+  /**
+    * @return
+    *   the [[Route]] for the indexing endpoints of elasticsearch views
+    */
+  def apply(
+      identities: Identities,
+      aclCheck: AclCheck,
+      fetch: FetchIndexingView,
+      projections: Projections,
+      projectionErrors: ProjectionErrors,
+      schemeDirectives: DeltaSchemeDirectives
+  )(implicit
+      baseUri: BaseUri,
+      paginationConfig: PaginationConfig,
+      s: Scheduler,
+      cr: RemoteContextResolution,
+      ordering: JsonKeyOrdering
+  ): Route =
+    new ElasticSearchIndexingRoutes(
+      identities,
+      aclCheck,
+      fetch,
+      projections,
+      projectionErrors,
+      schemeDirectives
+    ).routes
+}
diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutes.scala
index 2692e74f6b..92321872e2 100644
--- 
a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutes.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutes.scala @@ -20,7 +20,6 @@ import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{AggregationResult, Pagina import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContext import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label import io.circe.JsonObject -import kamon.instrumentation.akka.http.TracingDirectives.operationName import monix.bio.IO import monix.execution.Scheduler @@ -40,13 +39,9 @@ class ElasticSearchQueryRoutes( ) extends AuthDirectives(identities, aclCheck) with ElasticSearchViewsDirectives { - import baseUri.prefixSegment import schemeDirectives._ - def routes: Route = - (baseUriPrefix(baseUri.prefix) & replaceUri("views", schema.iri)) { - concat(genericResourcesRoutes, resourcesListings) - } + def routes: Route = concat(genericResourcesRoutes, resourcesListings) private val genericResourcesRoutes: Route = pathPrefix("resources") { @@ -55,13 +50,13 @@ class ElasticSearchQueryRoutes( (searchParametersAndSortList & paginated) { (params, sort, page) => concat( // List/aggregate all resources - (pathEndOrSingleSlash & operationName(s"$prefixSegment/resources")) { + pathEndOrSingleSlash { concat( aggregated { _ => aggregate(RootSearch(params)) }, list(RootSearch(params, page, sort)) ) }, - (label & pathEndOrSingleSlash & operationName(s"$prefixSegment/resources")) { org => + (label & pathEndOrSingleSlash) { org => concat( aggregated { _ => aggregate(OrgSearch(org, params)) }, list(OrgSearch(org, params, page, sort)) @@ -74,7 +69,7 @@ class ElasticSearchQueryRoutes( (searchParametersInProject & paginated) { (params, sort, page) => concat( // List/aggregate all resources inside a project - (pathEndOrSingleSlash & operationName(s"$prefixSegment/resources/{org}/{project}")) { + pathEndOrSingleSlash { concat( aggregated { _ => aggregate(ProjectSearch(ref, params)) }, list(ProjectSearch(ref, params, page, sort)) @@ -82,7 +77,7 @@ class ElasticSearchQueryRoutes( }, idSegment { schema => // List/aggregate all resources inside a project filtering by its schema type - (pathEndOrSingleSlash & operationName(s"$prefixSegment/resources/{org}/{project}/{schema}")) { + pathEndOrSingleSlash { underscoreToOption(schema) match { case None => concat( @@ -113,12 +108,12 @@ class ElasticSearchQueryRoutes( (searchParametersAndSortList & paginated) { (params, sort, page) => concat( // List all resources of type resourceSegment - (pathEndOrSingleSlash & operationName(s"$prefixSegment/$resourceSegment")) { + pathEndOrSingleSlash { val request = DefaultSearchRequest.RootSearch(params, page, sort, resourceSchema)(fetchContext) list(request) }, // List all resources of type resourceSegment inside an organization - (label & pathEndOrSingleSlash & operationName(s"$prefixSegment/$resourceSegment/{org}")) { org => + (label & pathEndOrSingleSlash) { org => val request = DefaultSearchRequest.OrgSearch(org, params, page, sort, resourceSchema)(fetchContext) list(request) } @@ -128,11 +123,9 @@ class ElasticSearchQueryRoutes( projectContext(ref) { implicit pc => // List all resources of type resourceSegment inside a project (searchParametersInProject & paginated & pathEndOrSingleSlash) { (params, sort, page) => - operationName(s"$prefixSegment/$resourceSegment/{org}/{project}") { - val request = - DefaultSearchRequest.ProjectSearch(ref, params, page, 
sort, resourceSchema)(fetchContext) - list(request) - } + val request = + DefaultSearchRequest.ProjectSearch(ref, params, page, sort, resourceSchema)(fetchContext) + list(request) } } } diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutes.scala index 063fae0d16..ab71f1fc9d 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutes.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutes.scala @@ -7,10 +7,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchVi import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model._ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.permissions.{read => Read, write => Write} import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.{ElasticSearchViews, ElasticSearchViewsQuery} -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering import ch.epfl.bluebrain.nexus.delta.sdk._ import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck @@ -23,13 +20,7 @@ import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfMarshalling import ch.epfl.bluebrain.nexus.delta.sdk.model._ import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag -import ch.epfl.bluebrain.nexus.delta.sourcing.ProgressStatistics -import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} -import io.circe.generic.semiauto.deriveEncoder -import io.circe.syntax._ -import io.circe.{Encoder, Json, JsonObject} -import kamon.instrumentation.akka.http.TracingDirectives.operationName +import io.circe.{Json, JsonObject} import monix.execution.Scheduler /** @@ -43,10 +34,6 @@ import monix.execution.Scheduler * the elasticsearch views operations bundle * @param viewsQuery * the elasticsearch views query operations bundle - * @param projections - * the projections module - * @param projectionErrors - * the projection errors module * @param schemeDirectives * directives related to orgs and projects * @param index @@ -57,8 +44,6 @@ final class ElasticSearchViewsRoutes( aclCheck: AclCheck, views: ElasticSearchViews, viewsQuery: ElasticSearchViewsQuery, - projections: Projections, - projectionErrors: ProjectionErrors, schemeDirectives: DeltaSchemeDirectives, index: IndexingAction.Execute[ElasticSearchView] )(implicit @@ -72,26 +57,14 @@ final class ElasticSearchViewsRoutes( with ElasticSearchViewsDirectives with RdfMarshalling { - import baseUri.prefixSegment import schemeDirectives._ - implicit private val viewStatisticEncoder: Encoder.AsObject[ProgressStatistics] = - deriveEncoder[ProgressStatistics].mapJsonObject(_.add(keywords.tpe, "ViewStatistics".asJson)) - - implicit private val viewStatisticJsonLdEncoder: JsonLdEncoder[ProgressStatistics] = - 
JsonLdEncoder.computeFromCirce(ContextValue(Vocabulary.contexts.statistics)) - def routes: Route = - (baseUriPrefix(baseUri.prefix) & replaceUri("views", schema.iri)) { - viewsRoutes - } - - private val viewsRoutes: Route = pathPrefix("views") { extractCaller { implicit caller => resolveProjectRef.apply { ref => concat( - (pathEndOrSingleSlash & operationName(s"$prefixSegment/views/{org}/{project}")) { + pathEndOrSingleSlash { // Create an elasticsearch view without id segment (post & pathEndOrSingleSlash & noParameter("rev") & entity(as[Json]) & indexingMode) { (source, mode) => authorizeFor(ref, Write).apply { @@ -109,156 +82,91 @@ final class ElasticSearchViewsRoutes( (idSegment & indexingMode) { (id, mode) => concat( pathEndOrSingleSlash { - operationName(s"$prefixSegment/views/{org}/{project}/{id}") { - concat( - // Create or update an elasticsearch view - put { - authorizeFor(ref, Write).apply { - (parameter("rev".as[Int].?) & pathEndOrSingleSlash & entity(as[Json])) { - case (None, source) => - // Create an elasticsearch view with id segment - emit( - Created, - views - .create(id, ref, source) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - case (Some(rev), source) => - // Update a view - emit( - views - .update(id, ref, rev, source) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - } - } - }, - // Deprecate an elasticsearch view - (delete & parameter("rev".as[Int])) { rev => - authorizeFor(ref, Write).apply { - emit( - views - .deprecate(id, ref, rev) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) + concat( + // Create or update an elasticsearch view + put { + authorizeFor(ref, Write).apply { + (parameter("rev".as[Int].?) 
& pathEndOrSingleSlash & entity(as[Json])) { + case (None, source) => + // Create an elasticsearch view with id segment + emit( + Created, + views + .create(id, ref, source) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) + case (Some(rev), source) => + // Update a view + emit( + views + .update(id, ref, rev, source) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) } - }, - // Fetch an elasticsearch view - (get & idSegmentRef(id)) { id => - emitOrFusionRedirect( - ref, - id, - authorizeFor(ref, Read).apply { - emit(views.fetch(id, ref).rejectOn[ViewNotFound]) - } - ) } - ) - } - }, - // Fetch an elasticsearch view statistics - (pathPrefix("statistics") & get & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/statistics") { - authorizeFor(ref, Read).apply { - emit( - views - .fetchIndexingView(id, ref) - .flatMap(v => - projections.statistics(ref, v.resourceTag, ElasticSearchViews.projectionName(v)) - ) - .rejectWhen(decodingFailedOrViewNotFound) - ) - } - } - }, - // Fetch elastic search view indexing failures - lastEventId { offset => - (pathPrefix("failures") & get & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/failures") { + }, + // Deprecate an elasticsearch view + (delete & parameter("rev".as[Int])) { rev => authorizeFor(ref, Write).apply { emit( views - .fetch(id, ref) - .map { view => - projectionErrors.failedElemSses(view.value.project, view.value.id, offset) - } - ) - } - } - } - }, - // Manage an elasticsearch view offset - (pathPrefix("offset") & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/offset") { - concat( - // Fetch an elasticsearch view offset - (get & authorizeFor(ref, Read)) { - emit( - views - .fetchIndexingView(id, ref) - .flatMap(v => projections.offset(ElasticSearchViews.projectionName(v))) - .rejectWhen(decodingFailedOrViewNotFound) - ) - }, - // Remove an elasticsearch view offset (restart the view) - (delete & authorizeFor(ref, Write)) { - emit( - views - .fetchIndexingView(id, ref) - .flatMap { v => projections.scheduleRestart(ElasticSearchViews.projectionName(v)) } - .as(Offset.start) + .deprecate(id, ref, rev) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) .rejectWhen(decodingFailedOrViewNotFound) ) } - ) - } + }, + // Fetch an elasticsearch view + (get & idSegmentRef(id)) { id => + emitOrFusionRedirect( + ref, + id, + authorizeFor(ref, Read).apply { + emit(views.fetch(id, ref).rejectOn[ViewNotFound]) + } + ) + } + ) }, // Query an elasticsearch view (pathPrefix("_search") & post & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/_search") { - (extractQueryParams & entity(as[JsonObject])) { (qp, query) => - emit(viewsQuery.query(id, ref, query, qp)) - } + (extractQueryParams & entity(as[JsonObject])) { (qp, query) => + emit(viewsQuery.query(id, ref, query, qp)) } }, // Fetch an elasticsearch view original source (pathPrefix("source") & get & pathEndOrSingleSlash & idSegmentRef(id)) { id => - operationName(s"$prefixSegment/views/{org}/{project}/{id}/source") { - authorizeFor(ref, Read).apply { - emit(views.fetch(id, ref).map(_.value.source).rejectOn[ViewNotFound]) - } + authorizeFor(ref, Read).apply { + emit(views.fetch(id, ref).map(_.value.source).rejectOn[ViewNotFound]) } }, (pathPrefix("tags") & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/tags") { - 
concat( - // Fetch an elasticsearch view tags - (get & idSegmentRef(id) & authorizeFor(ref, Read)) { id => - emit(views.fetch(id, ref).map(_.value.tags).rejectOn[ViewNotFound]) - }, - // Tag an elasticsearch view - (post & parameter("rev".as[Int])) { rev => - authorizeFor(ref, Write).apply { - entity(as[Tag]) { case Tag(tagRev, tag) => - emit( - Created, - views - .tag(id, ref, tag, tagRev, rev) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - } + concat( + // Fetch an elasticsearch view tags + (get & idSegmentRef(id) & authorizeFor(ref, Read)) { id => + emit(views.fetch(id, ref).map(_.value.tags).rejectOn[ViewNotFound]) + }, + // Tag an elasticsearch view + (post & parameter("rev".as[Int])) { rev => + authorizeFor(ref, Write).apply { + entity(as[Tag]) { case Tag(tagRev, tag) => + emit( + Created, + views + .tag(id, ref, tag, tagRev, rev) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) } } - ) - } + } + ) } ) } @@ -266,10 +174,6 @@ final class ElasticSearchViewsRoutes( } } } - - private val decodingFailedOrViewNotFound: PartialFunction[ElasticSearchViewRejection, Boolean] = { - case _: DecodingFailed | _: ViewNotFound | _: InvalidJsonLdFormat => true - } } object ElasticSearchViewsRoutes { @@ -283,8 +187,6 @@ object ElasticSearchViewsRoutes { aclCheck: AclCheck, views: ElasticSearchViews, viewsQuery: ElasticSearchViewsQuery, - projections: Projections, - projectionErrors: ProjectionErrors, schemeDirectives: DeltaSchemeDirectives, index: IndexingAction.Execute[ElasticSearchView] )(implicit @@ -299,8 +201,6 @@ object ElasticSearchViewsRoutes { aclCheck, views, viewsQuery, - projections, - projectionErrors, schemeDirectives, index ).routes diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/package.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/package.scala new file mode 100644 index 0000000000..37f0133571 --- /dev/null +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/package.scala @@ -0,0 +1,12 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch + +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection.{DecodingFailed, InvalidJsonLdFormat, ViewNotFound} + +package object routes { + + val decodingFailedOrViewNotFound: PartialFunction[ElasticSearchViewRejection, Boolean] = { + case _: DecodingFailed | _: ViewNotFound | _: InvalidJsonLdFormat => true + } + +} diff --git a/delta/plugins/elasticsearch/src/test/resources/routes/list-indexing-errors.json b/delta/plugins/elasticsearch/src/test/resources/routes/list-indexing-errors.json new file mode 100644 index 0000000000..5a9cb102e4 --- /dev/null +++ b/delta/plugins/elasticsearch/src/test/resources/routes/list-indexing-errors.json @@ -0,0 +1,31 @@ +{ + "@context": [ + "https://bluebrain.github.io/nexus/contexts/metadata.json", + "https://bluebrain.github.io/nexus/contexts/search.json", + "https://bluebrain.github.io/nexus/contexts/error.json" + ], + "_total": 2, + "_results": [ + { + "errorType": "java.lang.Exception", + "id": "https://bluebrain.github.io/nexus/vocabulary/myid", + "message": "boom", + "offset": { + "@type": "At", + "value": 42 + }, + "project": "myorg/myproject", + "_rev": 1 + }, + { + "errorType": 
"java.lang.Exception", + "id": "https://bluebrain.github.io/nexus/vocabulary/myid", + "message": "boom", + "offset": { + "@type": "At", + "value": 42 + }, + "_rev": 1 + } + ] +} \ No newline at end of file diff --git a/delta/plugins/elasticsearch/src/test/resources/routes/offset.json b/delta/plugins/elasticsearch/src/test/resources/routes/offset.json deleted file mode 100644 index 06fc9e9503..0000000000 --- a/delta/plugins/elasticsearch/src/test/resources/routes/offset.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "@context" : "https://bluebrain.github.io/nexus/contexts/offset.json", - "@type" : "Start" -} diff --git a/delta/plugins/elasticsearch/src/test/resources/routes/statistics.json b/delta/plugins/elasticsearch/src/test/resources/routes/statistics.json deleted file mode 100644 index 755065891f..0000000000 --- a/delta/plugins/elasticsearch/src/test/resources/routes/statistics.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "@context": "https://bluebrain.github.io/nexus/contexts/statistics.json", - "@type": "ViewStatistics", - "discardedEvents": 0, - "evaluatedEvents": 0, - "failedEvents": 0, - "lastEventDateTime": "{{projectLatestInstant}}", - "processedEvents": 0, - "remainingEvents": 3, - "totalEvents": 3 -} diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala new file mode 100644 index 0000000000..02a2a2edfc --- /dev/null +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala @@ -0,0 +1,222 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes + +import akka.http.scaladsl.model.headers.`Last-Event-ID` +import akka.http.scaladsl.model.{MediaTypes, StatusCodes} +import akka.http.scaladsl.server.Route +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.client.IndexLabel +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.indexing.IndexingViewDef.ActiveViewDef +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection.{InvalidResourceId, ViewNotFound} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{permissions => esPermissions, ElasticSearchViewRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.ElasticSearchIndexingRoutes.FetchIndexingView +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv +import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress +import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment.{IriSegment, StringSegment} +import ch.epfl.bluebrain.nexus.delta.sdk.projects.{FetchContext, FetchContextDummy} +import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.EntityType +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Anonymous +import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset +import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionProgress +import io.circe.JsonObject +import monix.bio.IO + +import java.time.Instant 
+import scala.concurrent.duration._ + +class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { + + private lazy val projections = Projections(xas, queryConfig, 1.hour) + private lazy val projectionErrors = ProjectionErrors(xas, queryConfig) + + implicit private val fetchContextRejection: FetchContext[ElasticSearchViewRejection] = + FetchContextDummy[ElasticSearchViewRejection]( + Map(project.value.ref -> project.value.context), + ElasticSearchViewRejection.ProjectContextRejection + ) + + private val groupDirectives = + DeltaSchemeDirectives( + fetchContextRejection, + ioFromMap(uuid -> projectRef.organization), + ioFromMap(uuid -> projectRef) + ) + + private val myId = nxv + "myid" + private val indexingView = ActiveViewDef( + ViewRef(projectRef, myId), + "projection", + None, + None, + IndexLabel.unsafe("index"), + JsonObject.empty, + JsonObject.empty, + None, + 1, + 1 + ) + private val progress = ProjectionProgress(Offset.at(15L), Instant.EPOCH, 9000L, 400L, 30L) + + private def fetchView: FetchIndexingView = + (id: IdSegment, ref) => + id match { + case IriSegment(`myId`) => IO.pure(indexingView) + case IriSegment(id) => IO.raiseError(ViewNotFound(id, ref)) + case StringSegment("myid") => IO.pure(indexingView) + case StringSegment(id) => IO.raiseError(InvalidResourceId(id)) + } + + private lazy val routes = + Route.seal( + ElasticSearchAllRoutes( + groupDirectives, + ElasticSearchIndexingRoutes( + identities, + aclCheck, + fetchView, + projections, + projectionErrors, + groupDirectives + ) + ) + ) + + override def beforeAll(): Unit = { + super.beforeAll() + val error = new Exception("boom") + val rev = 1 + val fail1 = FailedElem(EntityType("ACL"), myId, Some(projectRef), Instant.EPOCH, Offset.At(42L), error, rev) + val fail2 = FailedElem(EntityType("Schema"), myId, None, Instant.EPOCH, Offset.At(42L), error, rev) + val save = for { + _ <- projections.save(indexingView.projectionMetadata, progress) + _ <- projectionErrors.saveFailedElems(indexingView.projectionMetadata, List(fail1, fail2)) + } yield () + save.accepted + } + + private val viewEndpoint = "/v1/views/myorg/myproject/myid" + + "fail to fetch statistics and offset from view without resources/read permission" in { + val endpoints = List( + s"$viewEndpoint/statistics", + s"$viewEndpoint/offset" + ) + forAll(endpoints) { endpoint => + Get(endpoint) ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + } + + "fetch statistics from view" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.read)).accepted + + val expectedResponse = + json""" + { + "@context": "https://bluebrain.github.io/nexus/contexts/statistics.json", + "@type": "ViewStatistics", + "delayInSeconds" : 0, + "discardedEvents": 400, + "evaluatedEvents": 8570, + "failedEvents": 30, + "lastEventDateTime": "${Instant.EPOCH}", + "lastProcessedEventDateTime": "${Instant.EPOCH}", + "processedEvents": 9000, + "remainingEvents": 0, + "totalEvents": 9000 + }""" + + Get(s"$viewEndpoint/statistics") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson shouldEqual expectedResponse + } + } + + "fetch offset from view" in { + val expectedResponse = + json"""{ + "@context" : "https://bluebrain.github.io/nexus/contexts/offset.json", + "@type" : "At", + "value" : 15 + }""" + Get(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson 
shouldEqual expectedResponse + } + } + + "fail to restart offset from view without views/write permission" in { + aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + + Delete(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + + "restart offset from view" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 0 + Delete(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson shouldEqual json"""{"@context": "${Vocabulary.contexts.offset}", "@type": "Start"}""" + projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 1 + } + } + + "return no failures without write permission" in { + aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + + val endpoints = List( + s"$viewEndpoint/failures", + s"$viewEndpoint/failures/sse" + ) + forAll(endpoints) { endpoint => + Get(endpoint) ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + } + + "return no elasticsearch projection failures without write permission" in { + aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + + Get(s"$viewEndpoint/failures/sse") ~> routes ~> check { + response.status shouldBe StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + + "return all failures as SSE when no LastEventID is provided" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + Get(s"$viewEndpoint/failures/sse") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + mediaType shouldBe MediaTypes.`text/event-stream` + chunksStream.asString(2).strip shouldEqual contentOf("/routes/sse/indexing-failures-1-2.txt") + } + } + + "return failures as SSE only from the given LastEventID" in { + Get(s"$viewEndpoint/failures/sse") ~> `Last-Event-ID`("1") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + mediaType shouldBe MediaTypes.`text/event-stream` + chunksStream.asString(3).strip shouldEqual contentOf("/routes/sse/indexing-failure-2.txt") + } + } + + "return failures as a listing" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted + Get(s"$viewEndpoint/failures") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + response.asJson shouldEqual jsonContentOf("/routes/list-indexing-errors.json") + } + } + +} diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala index afcf7b83b8..f80d09523b 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala @@ -1,11 +1,8 @@ package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes -import akka.actor.typed.ActorSystem -import 
akka.actor.typed.scaladsl.adapter._ import akka.http.scaladsl.model.StatusCodes -import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route} +import akka.http.scaladsl.server.Route import ch.epfl.bluebrain.nexus.delta.kernel.utils.UrlUtils -import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.Fixtures import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.contexts.{aggregations, searchMetadata} import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{permissions => esPermissions, schema => elasticSearchSchema} import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.query.ElasticSearchQueryError @@ -13,83 +10,17 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.query.ElasticSearchQu import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.DummyDefaultViewsQuery._ import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts.search import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.{contexts, nxv} -import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering -import ch.epfl.bluebrain.nexus.delta.sdk.ConfigFixtures -import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclSimpleCheck import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress -import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceMarshalling import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives -import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen -import ch.epfl.bluebrain.nexus.delta.sdk.identities.IdentitiesDummy -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ -import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{RdfExceptionHandler, RdfRejectionHandler} import ch.epfl.bluebrain.nexus.delta.sdk.model._ -import ch.epfl.bluebrain.nexus.delta.sdk.model.search.PaginationConfig -import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings import ch.epfl.bluebrain.nexus.delta.sdk.projects.{FetchContext, FetchContextDummy} -import ch.epfl.bluebrain.nexus.delta.sdk.utils.RouteHelpers -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, User} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Anonymous import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label -import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture -import ch.epfl.bluebrain.nexus.testkit._ import io.circe.syntax._ import io.circe.{Json, JsonObject} -import monix.execution.Scheduler -import org.scalatest.matchers.should.Matchers -import org.scalatest.{CancelAfterFailure, Inspectors, OptionValues} - -import java.util.UUID - -class ElasticSearchQueryRoutesSpec - extends RouteHelpers - with DoobieScalaTestFixture - with Matchers - with CirceLiteral - with CirceEq - with IOFixedClock - with IOValues - with OptionValues - with TestMatchers - with Inspectors - with CancelAfterFailure - with ConfigFixtures - with TestHelpers - with CirceMarshalling - with Fixtures { - - implicit val typedSystem: ActorSystem[Nothing] = system.toTyped - - private val uuid = UUID.randomUUID() - - implicit private val ordering: JsonKeyOrdering = - JsonKeyOrdering.default(topKeys = - List("@context", "@id", "@type", "reason", "details", "sourceId", "projectionId", "_total", "_results") - ) - - implicit private val baseUri: BaseUri = BaseUri("http://localhost", Label.unsafe("v1")) - implicit private val paginationConfig: PaginationConfig = PaginationConfig(5, 10, 5) - implicit private val s: Scheduler = Scheduler.global - implicit private val 
rejectionHandler: RejectionHandler = RdfRejectionHandler.apply - implicit private val exceptionHandler: ExceptionHandler = RdfExceptionHandler.apply - - private val realm: Label = Label.unsafe("wonderland") - private val alice: User = User("alice", realm) - private val caller = Caller(alice, Set(alice, Anonymous, Authenticated(realm), Group("group", realm))) - - private val identities = IdentitiesDummy(caller) - - private val project = ProjectGen.resourceFor( - ProjectGen.project( - "myorg", - "myproject", - uuid = uuid, - orgUuid = uuid, - mappings = ApiMappings("view" -> elasticSearchSchema.iri) - ) - ) - private val projectRef = project.value.ref +class ElasticSearchQueryRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { private val myId2 = nxv + "myid2" private val myId2Encoded = UrlUtils.encode(myId2.toString) @@ -102,7 +33,6 @@ class ElasticSearchQueryRoutesSpec private val resourceToSchemaMapping = ResourceToSchemaMappings(Label.unsafe("views") -> elasticSearchSchema.iri) - private val aclCheck = AclSimpleCheck().accepted private val groupDirectives = DeltaSchemeDirectives( fetchContextError, @@ -114,13 +44,16 @@ class ElasticSearchQueryRoutesSpec private lazy val routes = Route.seal( - new ElasticSearchQueryRoutes( - identities, - aclCheck, - resourceToSchemaMapping, + ElasticSearchAllRoutes( groupDirectives, - defaultViewsQuery - ).routes + new ElasticSearchQueryRoutes( + identities, + aclCheck, + resourceToSchemaMapping, + groupDirectives, + defaultViewsQuery + ).routes + ) ) "list at project level" in { diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala new file mode 100644 index 0000000000..126550d223 --- /dev/null +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala @@ -0,0 +1,85 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes + +import akka.actor.typed.ActorSystem +import akka.http.scaladsl.model.headers.OAuth2BearerToken +import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.Fixtures +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{schema => elasticSearchSchema} +import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering +import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclSimpleCheck +import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceMarshalling +import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen +import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller +import ch.epfl.bluebrain.nexus.delta.sdk.identities.{Identities, IdentitiesDummy} +import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{RdfExceptionHandler, RdfRejectionHandler} +import ch.epfl.bluebrain.nexus.delta.sdk.model._ +import ch.epfl.bluebrain.nexus.delta.sdk.model.search.PaginationConfig +import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings +import ch.epfl.bluebrain.nexus.delta.sdk.utils.RouteHelpers +import ch.epfl.bluebrain.nexus.delta.sdk.{ConfigFixtures, ProjectResource} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, User} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture 
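+// Note: this base spec gathers the fixtures (identities, ACL check, base URI, pagination,
+// JSON key ordering) that the individual elasticsearch route specs previously duplicated.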
+import ch.epfl.bluebrain.nexus.testkit._ +import monix.execution.Scheduler +import org.scalatest.matchers.should.Matchers +import org.scalatest.{CancelAfterFailure, Inspectors, OptionValues} + +import java.util.UUID + +class ElasticSearchViewsRoutesBaseSpec + extends RouteHelpers + with DoobieScalaTestFixture + with Matchers + with CirceLiteral + with CirceEq + with IOFixedClock + with IOValues + with OptionValues + with TestMatchers + with Inspectors + with CancelAfterFailure + with ConfigFixtures + with TestHelpers + with CirceMarshalling + with Fixtures { + + import akka.actor.typed.scaladsl.adapter._ + + val uuid: UUID = UUID.randomUUID() + + implicit val typedSystem: ActorSystem[Nothing] = system.toTyped + + implicit val ordering: JsonKeyOrdering = + JsonKeyOrdering.default(topKeys = + List("@context", "@id", "@type", "reason", "details", "sourceId", "projectionId", "_total", "_results") + ) + + implicit val baseUri: BaseUri = BaseUri("http://localhost", Label.unsafe("v1")) + implicit val s: Scheduler = Scheduler.global + implicit val paginationConfig: PaginationConfig = PaginationConfig(5, 10, 5) + implicit val rejectionHandler: RejectionHandler = RdfRejectionHandler.apply + implicit val exceptionHandler: ExceptionHandler = RdfExceptionHandler.apply + + val aclCheck: AclSimpleCheck = AclSimpleCheck().accepted + + val realm: Label = Label.unsafe("wonderland") + val alice: User = User("alice", realm) + + val caller: Caller = Caller(alice, Set(alice, Anonymous, Authenticated(realm), Group("group", realm))) + + val identities: Identities = IdentitiesDummy(caller) + + val asAlice = addCredentials(OAuth2BearerToken("alice")) + + val project: ProjectResource = ProjectGen.resourceFor( + ProjectGen.project( + "myorg", + "myproject", + uuid = uuid, + orgUuid = uuid, + mappings = ApiMappings("view" -> elasticSearchSchema.iri) + ) + ) + val projectRef: ProjectRef = project.value.ref +} diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala index e513cfac2a..b0adb4f276 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala @@ -1,107 +1,33 @@ package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes -import akka.actor.typed.ActorSystem import akka.http.scaladsl.model.MediaTypes.`text/html` -import akka.http.scaladsl.model.headers.{`Last-Event-ID`, Accept, Location, OAuth2BearerToken} -import akka.http.scaladsl.model.{MediaTypes, StatusCodes, Uri} -import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route} +import akka.http.scaladsl.model.headers.{Accept, Location} +import akka.http.scaladsl.model.{StatusCodes, Uri} +import akka.http.scaladsl.server.Route import ch.epfl.bluebrain.nexus.delta.kernel.utils.{UUIDF, UrlUtils} -import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{permissions => esPermissions, schema => elasticSearchSchema, ElasticSearchViewRejection} -import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.{ElasticSearchViews, Fixtures, ValidateElasticSearchView} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{permissions => esPermissions, 
ElasticSearchViewRejection} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.{ElasticSearchViews, ValidateElasticSearchView} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.{contexts, nxv} import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords -import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering -import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclSimpleCheck +import ch.epfl.bluebrain.nexus.delta.sdk.IndexingAction import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress -import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceMarshalling import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig -import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen -import ch.epfl.bluebrain.nexus.delta.sdk.identities.IdentitiesDummy -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ -import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{RdfExceptionHandler, RdfRejectionHandler} -import ch.epfl.bluebrain.nexus.delta.sdk.model._ import ch.epfl.bluebrain.nexus.delta.sdk.permissions.Permissions.events -import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings import ch.epfl.bluebrain.nexus.delta.sdk.projects.{FetchContext, FetchContextDummy} import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution -import ch.epfl.bluebrain.nexus.delta.sdk.utils.RouteHelpers -import ch.epfl.bluebrain.nexus.delta.sdk.{ConfigFixtures, IndexingAction} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, Subject, User} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Label} -import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.model.ProjectionRestart -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.{FailedElem, SuccessElem} -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.{PipeChain, ProjectionMetadata} -import ch.epfl.bluebrain.nexus.testkit._ +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Subject} +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.PipeChain import io.circe.Json import monix.bio.{IO, UIO} -import monix.execution.Scheduler -import org.scalatest.matchers.should.Matchers -import org.scalatest.{CancelAfterFailure, Inspectors, OptionValues} - -import java.time.Instant -import java.util.UUID -import scala.concurrent.duration._ - -class ElasticSearchViewsRoutesSpec - extends RouteHelpers - with DoobieScalaTestFixture - with Matchers - with CirceLiteral - with CirceEq - with IOFixedClock - with IOValues - with OptionValues - with TestMatchers - with Inspectors - with CancelAfterFailure - with ConfigFixtures - with TestHelpers - with CirceMarshalling - with Fixtures { - - import akka.actor.typed.scaladsl.adapter._ - implicit val typedSystem: ActorSystem[Nothing] = system.toTyped - - private val uuid = UUID.randomUUID() - implicit private val uuidF: UUIDF = UUIDF.fixed(uuid) - - implicit private val ordering: JsonKeyOrdering = - JsonKeyOrdering.default(topKeys = - List("@context", "@id", "@type", "reason", "details", "sourceId", "projectionId", 
"_total", "_results") - ) - - implicit private val baseUri: BaseUri = BaseUri("http://localhost", Label.unsafe("v1")) - implicit private val s: Scheduler = Scheduler.global - implicit private val rejectionHandler: RejectionHandler = RdfRejectionHandler.apply - implicit private val exceptionHandler: ExceptionHandler = RdfExceptionHandler.apply - implicit private val f: FusionConfig = fusionConfig - private val realm: Label = Label.unsafe("wonderland") - private val alice: User = User("alice", realm) +class ElasticSearchViewsRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { - private val caller = Caller(alice, Set(alice, Anonymous, Authenticated(realm), Group("group", realm))) - - private val identities = IdentitiesDummy(caller) - - private val asAlice = addCredentials(OAuth2BearerToken("alice")) + implicit private val uuidF: UUIDF = UUIDF.fixed(uuid) - private val project = ProjectGen.resourceFor( - ProjectGen.project( - "myorg", - "myproject", - uuid = uuid, - orgUuid = uuid, - mappings = ApiMappings("view" -> elasticSearchSchema.iri) - ) - ) - private val projectRef = project.value.ref + implicit private val f: FusionConfig = fusionConfig private val myId = nxv + "myid" private val myIdEncoded = UrlUtils.encode(myId.toString) @@ -125,7 +51,6 @@ class ElasticSearchViewsRoutesSpec ElasticSearchViewRejection.ProjectContextRejection ) - private val aclCheck = AclSimpleCheck().accepted private val groupDirectives = DeltaSchemeDirectives( fetchContextRejection, @@ -151,20 +76,18 @@ class ElasticSearchViewsRoutesSpec private lazy val viewsQuery = new DummyElasticSearchViewsQuery(views) - private lazy val projections = Projections(xas, queryConfig, 1.hour) - private lazy val projectionErrors = ProjectionErrors(xas, queryConfig) - private lazy val routes = Route.seal( - ElasticSearchViewsRoutes( - identities, - aclCheck, - views, - viewsQuery, - projections, - projectionErrors, + ElasticSearchAllRoutes( groupDirectives, - IndexingAction.noop + ElasticSearchViewsRoutes( + identities, + aclCheck, + views, + viewsQuery, + groupDirectives, + IndexingAction.noop + ) ) ) @@ -419,72 +342,6 @@ class ElasticSearchViewsRoutesSpec } } - "fail to fetch statistics and offset from view without resources/read permission" in { - aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.read)).accepted - - val endpoints = List( - "/v1/views/myorg/myproject/myid2/statistics", - "/v1/views/myorg/myproject/myid2/offset" - ) - forAll(endpoints) { endpoint => - Get(endpoint) ~> routes ~> check { - response.status shouldEqual StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - } - - "fetch statistics from view" in { - aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.read)).accepted - Get("/v1/views/myorg/myproject/myid2/statistics") ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual jsonContentOf( - "/routes/statistics.json", - "projectLatestInstant" -> Instant.EPOCH, - "viewLatestInstant" -> Instant.EPOCH - ) - } - } - - "fetch offset from view" in { - Get("/v1/views/myorg/myproject/myid2/offset") ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual jsonContentOf("/routes/offset.json") - } - } - - "fail to restart offset from view without views/write permission" in { - aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted - - Delete("/v1/views/myorg/myproject/myid2/offset") ~> routes ~> check { - 
response.status shouldEqual StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - - "restart offset from view" in { - aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted - projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 0 - Delete("/v1/views/myorg/myproject/myid2/offset") ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual json"""{"@context": "${Vocabulary.contexts.offset}", "@type": "Start"}""" - projections.restarts(Offset.start).compile.lastOrError.accepted shouldEqual SuccessElem( - ProjectionRestart.entityType, - ProjectionRestart.restartId(Offset.at(1L)), - None, - Instant.EPOCH, - Offset.at(1L), - ProjectionRestart( - // view has be created and then only tagged, thus the indexing revision is 1 - "elasticsearch-myorg/myproject-https://bluebrain.github.io/nexus/vocabulary/myid2-1", - Instant.EPOCH, - Anonymous - ), - 1 - ) - } - } - "run query" in { val query = json"""{"query": { "match_all": {} } }""" Post("/v1/views/myorg/myproject/myid2/_search?from=0&size=5&q1=v1&q=something", query) ~> routes ~> check { @@ -502,49 +359,6 @@ class ElasticSearchViewsRoutesSpec ) } } - - "return no elasticsearch projection failures without write permission" in { - aclCheck.subtract(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted - - Get("/v1/views/myorg/myproject/myid/failures") ~> routes ~> check { - response.status shouldBe StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - - "not return any failures if there aren't any" in { - aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted - - Get("/v1/views/myorg/myproject/myid/failures") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(2).strip shouldBe "" - } - } - - "return all available failures when no LastEventID is provided" in { - val metadata = ProjectionMetadata("testModule", "testName", Some(projectRef), Some(myId)) - val error = new Exception("boom") - val rev = 1 - val fail1 = FailedElem(EntityType("ACL"), myId, Some(projectRef), Instant.EPOCH, Offset.At(42L), error, rev) - val fail2 = FailedElem(EntityType("Schema"), myId, None, Instant.EPOCH, Offset.At(42L), error, rev) - projectionErrors.saveFailedElems(metadata, List(fail1, fail2)).accepted - - Get("/v1/views/myorg/myproject/myid/failures") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(2).strip shouldEqual contentOf("/routes/sse/indexing-failures-1-2.txt") - } - } - - "return failures only from the given LastEventID" in { - Get("/v1/views/myorg/myproject/myid/failures") ~> `Last-Event-ID`("1") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(3).strip shouldEqual contentOf("/routes/sse/indexing-failure-2.txt") - } - } - } private def elasticSearchViewMetadata( diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/directives/UriDirectives.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/directives/UriDirectives.scala index 6412e88475..9720a10450 100644 --- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/directives/UriDirectives.scala +++ 
b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/directives/UriDirectives.scala
@@ -161,7 +161,7 @@ trait UriDirectives extends QueryParamsUnmarshalling {
       )
   }
 
-  private def timeRange(paramName: String): Directive1[TimeRange] = parameter(paramName.as[String].?).flatMap {
+  def timeRange(paramName: String): Directive1[TimeRange] = parameter(paramName.as[String].?).flatMap {
     case None        => provide(TimeRange.default)
     case Some(value) =>
       TimeRange.parse(value) match {
diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala
index 3c31706f23..4cc8da541e 100644
--- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala
+++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/model/FailedElemLogRow.scala
@@ -58,6 +58,8 @@ object FailedElemLogRow {
       stackTrace: String
   )
 
+  val context: ContextValue = ContextValue(contexts.error)
+
   implicit val failedElemDataEncoder: Encoder.AsObject[FailedElemData] =
     deriveEncoder[FailedElemData]
       .mapJsonObject(_.remove("stackTrace"))
diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala
index 632956120a..d99894b29b 100644
--- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala
+++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStore.scala
@@ -80,7 +80,19 @@ trait FailedElemLogStore {
   ): Stream[Task, FailedElemLogRow]
 
   /**
-   * Return a list of errors for the given projection ordered by instant
+   * Return the total number of errors for the given projection within the given time window
+   *
+   * @param project
+   *   the project of the projection
+   * @param projectionId
+   *   its identifier
+   * @param timeRange
+   *   the time range to restrict on
+   */
+  def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long]
+
+  /**
+   * Return a list of errors for the given projection within a time window, ordered by instant
    * @param project
    *   the project of the projection
    * @param projectionId
    *   its identifier
@@ -184,26 +197,33 @@
         .streamWithChunkSize(config.batchSize)
         .transact(xas.read)
 
+      override def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long] =
+        sql"SELECT count(ordering) from public.failed_elem_logs ${whereClause(project, projectionId, timeRange)}"
+          .query[Long]
+          .unique
+          .transact(xas.read)
+          .hideErrors
+
       override def list(
           project: ProjectRef,
           projectionId: Iri,
          pagination: FromPagination,
           timeRange: TimeRange
-      ): UIO[List[FailedElemLogRow]] = {
-        val where = Fragments.whereAndOpt(
-          Some(fr"projection_project = $project"),
-          Some(fr"projection_id = $projectionId"),
-          timeRange.asFragment
-        )
+      ): UIO[List[FailedElemLogRow]] =
        sql"""SELECT * from public.failed_elem_logs
-             |$where
+             |${whereClause(project, projectionId, timeRange)}
              |ORDER BY ordering ASC
              |LIMIT ${pagination.size} OFFSET ${pagination.from}""".stripMargin
           .query[FailedElemLogRow]
           .to[List]
           .transact(xas.read)
           .hideErrors
-      }
+
+      private def whereClause(project: ProjectRef, projectionId: Iri, timeRange: TimeRange) = Fragments.whereAndOpt(
+        Some(fr"projection_project = $project"),
+        Some(fr"projection_id = 
$projectionId"),
+      timeRange.asFragment
+    )
   }
 }
diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
index cbd7b36187..1a54b77b3b 100644
--- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
+++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
@@ -2,6 +2,8 @@ package ch.epfl.bluebrain.nexus.delta.sourcing.projections
 
 import akka.http.scaladsl.model.sse.ServerSentEvent
 import cats.effect.Clock
+import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination
+import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
 import ch.epfl.bluebrain.nexus.delta.rdf.implicits._
 import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.{JsonLdApi, JsonLdJavaApi}
@@ -28,23 +30,6 @@ trait ProjectionErrors {
    */
   def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit]
 
-  /**
-   * Get available failed elem entries for a given projection (provided by project and id), starting from a failed elem
-   * offset.
-   *
-   * @param projectionProject
-   *   the project the projection belongs to
-   * @param projectionId
-   *   IRI of the projection
-   * @param offset
-   *   failed elem offset
-   */
-  def failedElemEntries(
-      projectionProject: ProjectRef,
-      projectionId: Iri,
-      offset: Offset
-  ): Stream[Task, FailedElemLogRow]
-
   /**
    * Get available failed elem entries for a given projection by projection name, starting from a failed elem offset.
    *
@@ -71,6 +56,37 @@ trait ProjectionErrors {
       rcr: RemoteContextResolution
   ): Stream[Task, ServerSentEvent]
 
+  /**
+   * Return the total number of errors for the given projection within the given time window
+   *
+   * @param project
+   *   the project of the projection
+   * @param projectionId
+   *   its identifier
+   * @param timeRange
+   *   the time range to restrict on
+   */
+  def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long]
+
+  /**
+   * Return a list of errors for the given projection within the given time window, ordered by instant
+   *
+   * @param project
+   *   the project of the projection
+   * @param projectionId
+   *   its identifier
+   * @param pagination
+   *   the pagination to apply
+   * @param timeRange
+   *   the time range to restrict on
+   */
+  def list(
+      project: ProjectRef,
+      projectionId: Iri,
+      pagination: FromPagination,
+      timeRange: TimeRange
+  ): UIO[List[FailedElemLogRow]]
+
 }
 
 object ProjectionErrors {
@@ -85,7 +101,7 @@ object ProjectionErrors {
     override def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] =
       store.save(metadata, failures)
 
-    override def failedElemEntries(
+    private def failedElemEntries(
        projectionProject: ProjectRef,
        projectionId: Iri,
        offset: Offset
@@ -106,6 +122,16 @@ object ProjectionErrors {
         )
       }
     }
+
+    override def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long] =
+      store.count(project, projectionId, timeRange)
+
+    override def list(
+        project: ProjectRef,
+        projectionId: Iri,
+        pagination: FromPagination,
+        timeRange: TimeRange
+    ): UIO[List[FailedElemLogRow]] = store.list(project, projectionId, pagination, timeRange)
   }
 }
diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/Projections.scala 
b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/Projections.scala index a9d8ba7752..21456239e4 100644 --- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/Projections.scala +++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/Projections.scala @@ -8,7 +8,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ElemStream, ProjectRef, Tag import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.projections.model.ProjectionRestart import ch.epfl.bluebrain.nexus.delta.sourcing.query.StreamingQuery -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.{ProjectionMetadata, ProjectionProgress, ProjectionStore, RemainingElems} +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.{ProjectionMetadata, ProjectionProgress, ProjectionStore} import ch.epfl.bluebrain.nexus.delta.sourcing.{ProgressStatistics, Transactors} import monix.bio.UIO @@ -88,17 +88,6 @@ trait Projections { * the projection id for which the statistics are computed */ def statistics(project: ProjectRef, tag: Option[Tag], projectionId: String): UIO[ProgressStatistics] - - /** - * Retrieves the progress of the provided ''projectionId'' and uses the provided ''remaining elems'' to compute its - * statistics. - * - * @param projectionId - * the projection id for which the statistics are computed - * @param remaining - * a description of the remaining elements to stream - */ - def statistics(projectionId: String, remaining: Option[RemainingElems]): UIO[ProgressStatistics] } object Projections { @@ -132,14 +121,11 @@ object Projections { projectionRestartStore.deleteExpired(now.minusMillis(restartTtl.toMillis)) } - def statistics(project: ProjectRef, tag: Option[Tag], projectionId: String): UIO[ProgressStatistics] = + override def statistics(project: ProjectRef, tag: Option[Tag], projectionId: String): UIO[ProgressStatistics] = for { current <- progress(projectionId) remaining <- StreamingQuery.remaining(project, tag.getOrElse(Tag.latest), current.fold(Offset.start)(_.offset), xas) } yield ProgressStatistics(current, remaining) - - def statistics(projectionId: String, remaining: Option[RemainingElems]): UIO[ProgressStatistics] = - progress(projectionId).map(ProgressStatistics(_, remaining)) } } diff --git a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala index 651a98660e..9759d81a8f 100644 --- a/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala +++ b/delta/sourcing-psql/src/test/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/FailedElemLogStoreSuite.scala @@ -122,7 +122,7 @@ class FailedElemLogStoreSuite extends BioSuite with MutableClock.Fixture with Do } yield () } - test(s"Get stream of failures for ${metadata12.name}") { + test(s"Get a stream of all failures") { assertStream(metadata12, Offset.start, List(fail2, fail3, fail4)) } @@ -131,27 +131,49 @@ class FailedElemLogStoreSuite extends BioSuite with MutableClock.Fixture with Do assertStream(unknownMetadata, Offset.start, List.empty) } - test(s"List all failures for ${metadata12.name}") { + test(s"List all failures") { assertList(project1, projection12, Pagination.OnePage, TimeRange.Anytime, List(fail2, fail3, fail4)) } - test(s"Paginate to list 'fail3' for 
${metadata12.name}") { + test(s"Count all failures") { + store.count(project1, projection12, TimeRange.Anytime).assert(3L) + } + + test(s"Paginate failures to get one result") { assertList(project1, projection12, FromPagination(1, 1), TimeRange.Anytime, List(fail3)) } - test(s"Paginate to list 'fail3' and 'fail4' for ${metadata12.name}") { + test(s"Paginate failures to get the last results ") { assertList(project1, projection12, FromPagination(1, 2), TimeRange.Anytime, List(fail3, fail4)) } - test(s"List failures before ${fail3.instant} for ${metadata12.name}") { - assertList(project1, projection12, Pagination.OnePage, TimeRange.Before(fail3.instant), List(fail2, fail3)) + private val after = TimeRange.After(fail3.instant) + test(s"List failures after a given time") { + assertList(project1, projection12, Pagination.OnePage, after, List(fail3, fail4)) + } + + test(s"Count failures after a given time") { + store.count(project1, projection12, after).assert(2L) + } + + private val before = TimeRange.Before(fail3.instant) + test(s"List failures before a given time") { + assertList(project1, projection12, Pagination.OnePage, before, List(fail2, fail3)) + } + + test(s"Count failures before a given time") { + store.count(project1, projection12, before).assert(2L) } private val between = TimeRange.Between(fail2.instant.plusMillis(1L), fail3.instant.plusMillis(1L)) - test(s"List failures between ${between.start} and ${between.end} for ${metadata12.name}") { + test(s"List failures within the time window") { assertList(project1, projection12, Pagination.OnePage, between, List(fail3)) } + test(s"Count failures within the time window") { + store.count(project1, projection12, between).assert(1L) + } + test("Purge failures after predefined ttl") { val failedElemTtl = 14.days val purgeElemFailures = new PurgeElemFailures(xas, failedElemTtl) From ad6b3ec98ecefec1e640bbe68e537567f71ec3b5 Mon Sep 17 00:00:00 2001 From: Simon Date: Thu, 27 Jul 2023 17:55:29 +0200 Subject: [PATCH 5/6] Add endpoint to list indexing errors for Blazegraph (#4111) Co-authored-by: Simon Dumas --- .../blazegraph/BlazegraphPluginModule.scala | 54 ++- .../blazegraph/indexing/IndexingViewDef.scala | 22 +- .../BlazegraphViewsIndexingRoutes.scala | 163 +++++++++ .../routes/BlazegraphViewsRoutes.scala | 323 ++++++------------ .../routes/BlazegraphViewsRoutesHandler.scala | 19 ++ .../plugins/blazegraph/routes/package.scala | 12 + .../resources/indexing-view-source-2.json | 9 - .../routes/list-indexing-errors.json | 31 ++ .../resources/routes/responses/offset.json | 4 - .../routes/responses/statistics.json | 12 - .../routes/BlazegraphViewRoutesFixtures.scala | 67 +++- .../BlazegraphViewsIndexingRoutesSpec.scala | 206 +++++++++++ .../routes/BlazegraphViewsRoutesSpec.scala | 212 ++---------- .../ElasticSearchPluginModule.scala | 4 +- .../routes/ElasticSearchIndexingRoutes.scala | 15 +- ... 
=> ElasticSearchViewsRoutesHandler.scala} | 22 +- .../ElasticSearchIndexingRoutesSpec.scala | 30 +- .../routes/ElasticSearchQueryRoutesSpec.scala | 4 +- ...=> ElasticSearchViewsRoutesFixtures.scala} | 2 +- .../routes/ElasticSearchViewsRoutesSpec.scala | 4 +- .../nexus/delta/sdk/implicits/package.scala | 3 +- .../sdk/syntax/ProjectionErrorsSyntax.scala | 86 +++++ .../nexus/delta/sdk/syntax/package.scala | 1 + .../projections/ProjectionErrors.scala | 104 +++--- 24 files changed, 843 insertions(+), 566 deletions(-) create mode 100644 delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutes.scala create mode 100644 delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesHandler.scala create mode 100644 delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/package.scala delete mode 100644 delta/plugins/blazegraph/src/test/resources/indexing-view-source-2.json create mode 100644 delta/plugins/blazegraph/src/test/resources/routes/list-indexing-errors.json delete mode 100644 delta/plugins/blazegraph/src/test/resources/routes/responses/offset.json delete mode 100644 delta/plugins/blazegraph/src/test/resources/routes/responses/statistics.json create mode 100644 delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutesSpec.scala rename delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/{ElasticSearchAllRoutes.scala => ElasticSearchViewsRoutesHandler.scala} (52%) rename delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/{ElasticSearchViewsRoutesBaseSpec.scala => ElasticSearchViewsRoutesFixtures.scala} (98%) create mode 100644 delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/ProjectionErrorsSyntax.scala diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphPluginModule.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphPluginModule.scala index 4739b75647..15b85899d7 100644 --- a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphPluginModule.scala +++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/BlazegraphPluginModule.scala @@ -8,7 +8,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.config.BlazegraphViewsCo import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.indexing.BlazegraphCoordinator import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection.ProjectContextRejection import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.{contexts, schema => viewsSchemaId, BlazegraphView, BlazegraphViewEvent} -import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes.BlazegraphViewsRoutes +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes.{BlazegraphViewsIndexingRoutes, BlazegraphViewsRoutes, BlazegraphViewsRoutesHandler} import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.slowqueries.{BlazegraphSlowQueryDeleter, BlazegraphSlowQueryLogger, BlazegraphSlowQueryStore} import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} @@ -178,8 +178,6 @@ class BlazegraphPluginModule(priority: Int) 
extends ModuleDef { identities: Identities, aclCheck: AclCheck, views: BlazegraphViews, - projections: Projections, - projectionErrors: ProjectionErrors, viewsQuery: BlazegraphViewsQuery, schemeDirectives: DeltaSchemeDirectives, indexingAction: IndexingAction @Id("aggregate"), @@ -196,8 +194,6 @@ class BlazegraphPluginModule(priority: Int) extends ModuleDef { viewsQuery, identities, aclCheck, - projections, - projectionErrors, schemeDirectives, indexingAction(_, _, _)(shift, cr) )( @@ -210,6 +206,36 @@ class BlazegraphPluginModule(priority: Int) extends ModuleDef { ) } + make[BlazegraphViewsIndexingRoutes].from { + ( + identities: Identities, + aclCheck: AclCheck, + views: BlazegraphViews, + projections: Projections, + projectionErrors: ProjectionErrors, + schemeDirectives: DeltaSchemeDirectives, + baseUri: BaseUri, + cfg: BlazegraphViewsConfig, + s: Scheduler, + cr: RemoteContextResolution @Id("aggregate"), + ordering: JsonKeyOrdering + ) => + new BlazegraphViewsIndexingRoutes( + views.fetchIndexingView(_, _), + identities, + aclCheck, + projections, + projectionErrors, + schemeDirectives + )( + baseUri, + s, + cr, + ordering, + cfg.pagination + ) + } + make[BlazegraphScopeInitialization].from { (views: BlazegraphViews, serviceAccount: ServiceAccount, config: BlazegraphViewsConfig) => new BlazegraphScopeInitialization(views, serviceAccount, config.defaults) @@ -240,8 +266,22 @@ class BlazegraphPluginModule(priority: Int) extends ModuleDef { many[ApiMappings].add(BlazegraphViews.mappings) - many[PriorityRoute].add { (route: BlazegraphViewsRoutes) => - PriorityRoute(priority, route.routes, requiresStrictEntity = true) + many[PriorityRoute].add { + ( + bg: BlazegraphViewsRoutes, + indexing: BlazegraphViewsIndexingRoutes, + schemeDirectives: DeltaSchemeDirectives, + baseUri: BaseUri + ) => + PriorityRoute( + priority, + BlazegraphViewsRoutesHandler( + schemeDirectives, + bg.routes, + indexing.routes + )(baseUri), + requiresStrictEntity = true + ) } many[ServiceDependency].add { (client: BlazegraphClient @Id("blazegraph-indexing-client")) => diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/indexing/IndexingViewDef.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/indexing/IndexingViewDef.scala index 5d7f0d35f9..cabc0b7baa 100644 --- a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/indexing/IndexingViewDef.scala +++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/indexing/IndexingViewDef.scala @@ -39,7 +39,15 @@ object IndexingViewDef { namespace: String, indexingRev: Int, rev: Int - ) extends IndexingViewDef + ) extends IndexingViewDef { + def projectionMetadata: ProjectionMetadata = + ProjectionMetadata( + BlazegraphViews.entityType.value, + projection, + Some(ref.project), + Some(ref.viewId) + ) + } /** * Deprecated view to be cleaned up and removed from the supervisor @@ -89,14 +97,6 @@ object IndexingViewDef { stream: Offset => ElemStream[GraphResource], sink: Sink ): Task[CompiledProjection] = { - val project = v.ref.project - val id = v.ref.viewId - val metadata = ProjectionMetadata( - BlazegraphViews.entityType.value, - v.projection, - Some(project), - Some(id) - ) val postPipes: Operation = GraphResourceToNTriples @@ -104,7 +104,7 @@ object IndexingViewDef { pipes <- v.pipeChain.traverse(compilePipeChain) chain = pipes.fold(NonEmptyChain.one(postPipes))(NonEmptyChain(_, postPipes)) projection <- 
CompiledProjection.compile( - metadata, + v.projectionMetadata, ExecutionStrategy.PersistentSingleNode, Source(stream), chain, @@ -113,7 +113,7 @@ object IndexingViewDef { } yield projection Task.fromEither(compiled).tapError { e => - Task.delay(logger.error(s"View '$project/$id' could not be compiled.", e)) + Task.delay(logger.error(s"View '${v.ref}' could not be compiled.", e)) } } } diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutes.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutes.scala new file mode 100644 index 0000000000..54b980b2f0 --- /dev/null +++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutes.scala @@ -0,0 +1,163 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes + +import akka.http.scaladsl.server.Directives._ +import akka.http.scaladsl.server.Route +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.indexing.IndexingViewDef.ActiveViewDef +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection._ +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model._ +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.permissions.{write => Write} +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes.BlazegraphViewsIndexingRoutes.FetchIndexingView +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} +import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder +import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering +import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck +import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceUnmarshalling +import ch.epfl.bluebrain.nexus.delta.sdk.directives.{AuthDirectives, DeltaDirectives, DeltaSchemeDirectives} +import ch.epfl.bluebrain.nexus.delta.sdk.identities.Identities +import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ +import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfMarshalling +import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults.searchResultsJsonLdEncoder +import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{PaginationConfig, SearchResults} +import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, IdSegment} +import ch.epfl.bluebrain.nexus.delta.sourcing.ProgressStatistics +import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef} +import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset +import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} +import io.circe.Encoder +import io.circe.generic.semiauto.deriveEncoder +import io.circe.syntax._ +import monix.bio.IO +import monix.execution.Scheduler +class BlazegraphViewsIndexingRoutes( + fetch: FetchIndexingView, + identities: Identities, + aclCheck: AclCheck, + projections: Projections, + projectionErrors: ProjectionErrors, + schemeDirectives: DeltaSchemeDirectives +)(implicit + baseUri: BaseUri, + s: Scheduler, + cr: RemoteContextResolution, + ordering: JsonKeyOrdering, + pc: PaginationConfig +) extends AuthDirectives(identities, aclCheck) + with CirceUnmarshalling + with DeltaDirectives + with 
RdfMarshalling
+    with BlazegraphViewsDirectives {
+
+  import schemeDirectives._
+
+  implicit private val viewStatisticEncoder: Encoder.AsObject[ProgressStatistics] =
+    deriveEncoder[ProgressStatistics].mapJsonObject(_.add(keywords.tpe, "ViewStatistics".asJson))
+
+  implicit private val viewStatisticJsonLdEncoder: JsonLdEncoder[ProgressStatistics] =
+    JsonLdEncoder.computeFromCirce(ContextValue(contexts.statistics))
+
+  def routes: Route =
+    pathPrefix("views") {
+      extractCaller { implicit caller =>
+        resolveProjectRef.apply { implicit ref =>
+          idSegment { id =>
+            concat(
+              // Fetch blazegraph view statistics
+              (pathPrefix("statistics") & get & pathEndOrSingleSlash) {
+                authorizeFor(ref, permissions.read).apply {
+                  emit(
+                    fetch(id, ref)
+                      .flatMap(v => projections.statistics(ref, v.resourceTag, v.projection))
+                      .rejectOn[ViewNotFound]
+                  )
+                }
+              },
+              // Fetch blazegraph view indexing failures
+              (pathPrefix("failures") & get) {
+                authorizeFor(ref, Write).apply {
+                  concat(
+                    (pathPrefix("sse") & lastEventId) { offset =>
+                      emit(
+                        fetch(id, ref)
+                          .map { view =>
+                            projectionErrors.sses(view.ref.project, view.ref.viewId, offset)
+                          }
+                      )
+                    },
+                    (fromPaginated & timeRange("instant") & extractUri & pathEndOrSingleSlash) {
+                      (pagination, timeRange, uri) =>
+                        implicit val searchJsonLdEncoder: JsonLdEncoder[SearchResults[FailedElemData]] =
+                          searchResultsJsonLdEncoder(FailedElemLogRow.context, pagination, uri)
+                        emit(
+                          fetch(id, ref)
+                            .flatMap { view =>
+                              projectionErrors.search(view.ref, pagination, timeRange)
+                            }
+                        )
+                    }
+                  )
+                }
+              },
+              // Manage a blazegraph view offset
+              (pathPrefix("offset") & pathEndOrSingleSlash) {
+                concat(
+                  // Fetch a blazegraph view offset
+                  (get & authorizeFor(ref, permissions.read)) {
+                    emit(
+                      fetch(id, ref)
+                        .flatMap(v => projections.offset(v.projection))
+                        .rejectOn[ViewNotFound]
+                    )
+                  },
+                  // Remove a blazegraph view offset (restart the view)
+                  (delete & authorizeFor(ref, Write)) {
+                    emit(
+                      fetch(id, ref)
+                        .flatMap { r => projections.scheduleRestart(r.projection) }
+                        .as(Offset.start)
+                        .rejectOn[ViewNotFound]
+                    )
+                  }
+                )
+              }
+            )
+          }
+        }
+      }
+    }
+}
+
+object BlazegraphViewsIndexingRoutes {
+
+  type FetchIndexingView = (IdSegment, ProjectRef) => IO[BlazegraphViewRejection, ActiveViewDef]
+
+  /**
+   * @return
+   *   the [[Route]] for BlazegraphViews
+   */
+  def apply(
+      fetch: FetchIndexingView,
+      identities: Identities,
+      aclCheck: AclCheck,
+      projections: Projections,
+      projectionErrors: ProjectionErrors,
+      schemeDirectives: DeltaSchemeDirectives
+  )(implicit
+      baseUri: BaseUri,
+      s: Scheduler,
+      cr: RemoteContextResolution,
+      ordering: JsonKeyOrdering,
+      pc: PaginationConfig
+  ): Route = {
+    new BlazegraphViewsIndexingRoutes(
+      fetch,
+      identities,
+      aclCheck,
+      projections,
+      projectionErrors,
+      schemeDirectives
+    ).routes
+  }
+}
diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutes.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutes.scala
index 290a1453ba..dea871edd3 100644
--- a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutes.scala
+++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutes.scala
@@ -8,10 +8,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewReje
 import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model._
 import 
ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.permissions.{read => Read, write => Write} import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.{BlazegraphViews, BlazegraphViewsQuery} -import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary -import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.contexts -import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder import ch.epfl.bluebrain.nexus.delta.rdf.query.SparqlQuery @@ -29,15 +26,8 @@ import ch.epfl.bluebrain.nexus.delta.sdk.model.routes.Tag import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults._ import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{PaginationConfig, SearchResults} import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, IdSegment} -import ch.epfl.bluebrain.nexus.delta.sourcing.ProgressStatistics import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef -import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} -import io.circe.generic.semiauto.deriveEncoder -import io.circe.syntax._ -import io.circe.{Encoder, Json} -import kamon.instrumentation.akka.http.TracingDirectives.operationName -import monix.bio.UIO +import io.circe.Json import monix.execution.Scheduler /** @@ -49,10 +39,6 @@ import monix.execution.Scheduler * the identity module * @param aclCheck * to check the acls - * @param projections - * the projections module - * @param projectionErrors - * the projection errors module * @param schemeDirectives * directives related to orgs and projects * @param index @@ -63,8 +49,6 @@ class BlazegraphViewsRoutes( viewsQuery: BlazegraphViewsQuery, identities: Identities, aclCheck: AclCheck, - projections: Projections, - projectionErrors: ProjectionErrors, schemeDirectives: DeltaSchemeDirectives, index: IndexingAction.Execute[BlazegraphView] )(implicit @@ -80,214 +64,142 @@ class BlazegraphViewsRoutes( with RdfMarshalling with BlazegraphViewsDirectives { - import baseUri.prefixSegment import schemeDirectives._ - implicit private val viewStatisticEncoder: Encoder.AsObject[ProgressStatistics] = - deriveEncoder[ProgressStatistics].mapJsonObject(_.add(keywords.tpe, "ViewStatistics".asJson)) - - implicit private val viewStatisticJsonLdEncoder: JsonLdEncoder[ProgressStatistics] = - JsonLdEncoder.computeFromCirce(ContextValue(contexts.statistics)) - def routes: Route = - (baseUriPrefix(baseUri.prefix) & replaceUri("views", schema.iri)) { - concat( - pathPrefix("views") { - extractCaller { implicit caller => - resolveProjectRef.apply { implicit ref => - // Create a view without id segment - concat( - (post & entity(as[Json]) & noParameter("rev") & pathEndOrSingleSlash & indexingMode) { (source, mode) => - operationName(s"$prefixSegment/views/{org}/{project}") { - authorizeFor(ref, Write).apply { - emit( - Created, - views - .create(ref, source) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - } - } - }, - (idSegment & indexingMode) { (id, mode) => - concat( - (pathEndOrSingleSlash & operationName(s"$prefixSegment/views/{org}/{project}/{id}")) { - concat( - put { - authorizeFor(ref, Write).apply { - (parameter("rev".as[Int].?) 
& pathEndOrSingleSlash & entity(as[Json])) { - case (None, source) => - // Create a view with id segment - emit( - Created, - views - .create(id, ref, source) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - case (Some(rev), source) => - // Update a view - emit( - views - .update(id, ref, rev, source) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectWhen(decodingFailedOrViewNotFound) - ) - } - } - }, - (delete & parameter("rev".as[Int])) { rev => - // Deprecate a view - authorizeFor(ref, Write).apply { - emit( - views - .deprecate(id, ref, rev) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectOn[ViewNotFound] - ) + concat( + pathPrefix("views") { + extractCaller { implicit caller => + resolveProjectRef.apply { implicit ref => + // Create a view without id segment + concat( + (post & entity(as[Json]) & noParameter("rev") & pathEndOrSingleSlash & indexingMode) { (source, mode) => + authorizeFor(ref, Write).apply { + emit( + Created, + views + .create(ref, source) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) + } + }, + (idSegment & indexingMode) { (id, mode) => + concat( + pathEndOrSingleSlash { + concat( + put { + authorizeFor(ref, Write).apply { + (parameter("rev".as[Int].?) & pathEndOrSingleSlash & entity(as[Json])) { + case (None, source) => + // Create a view with id segment + emit( + Created, + views + .create(id, ref, source) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) + case (Some(rev), source) => + // Update a view + emit( + views + .update(id, ref, rev, source) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) + .rejectWhen(decodingFailedOrViewNotFound) + ) } - }, - // Fetch a view - (get & idSegmentRef(id)) { id => - emitOrFusionRedirect( - ref, - id, - authorizeFor(ref, Read).apply { - emit(views.fetch(id, ref).rejectOn[ViewNotFound]) - } - ) } - ) - }, - // Query a blazegraph view - (pathPrefix("sparql") & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/sparql") { - concat( - // Query - ((get & parameter("query".as[SparqlQuery])) | (post & entity(as[SparqlQuery]))) { query => - queryResponseType.apply { responseType => - emit(viewsQuery.query(id, ref, query, responseType).rejectOn[ViewNotFound]) - } - } - ) - } - }, - // Fetch a blazegraph view statistics - (pathPrefix("statistics") & get & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/statistics") { - authorizeFor(ref, permissions.read).apply { + }, + (delete & parameter("rev".as[Int])) { rev => + // Deprecate a view + authorizeFor(ref, Write).apply { emit( views - .fetchIndexingView(id, ref) - .flatMap(v => projections.statistics(ref, v.resourceTag, v.projection)) + .deprecate(id, ref, rev) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) .rejectOn[ViewNotFound] ) } - } - }, - // Fetch blazegraph view indexing failures - lastEventId { offset => - (pathPrefix("failures") & get & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/failures") { - authorizeFor(ref, Write).apply { - emit( - views - .fetch(id, ref) - .map { view => - projectionErrors.failedElemSses(view.value.project, view.value.id, offset) - } - ) + }, + // Fetch a view + (get & idSegmentRef(id)) { id => + emitOrFusionRedirect( + ref, + id, + authorizeFor(ref, Read).apply { + emit(views.fetch(id, ref).rejectOn[ViewNotFound]) } + ) + } + ) + }, + 
// Query a blazegraph view + (pathPrefix("sparql") & pathEndOrSingleSlash) { + concat( + // Query + ((get & parameter("query".as[SparqlQuery])) | (post & entity(as[SparqlQuery]))) { query => + queryResponseType.apply { responseType => + emit(viewsQuery.query(id, ref, query, responseType).rejectOn[ViewNotFound]) } } - }, - // Manage an blazegraph view offset - (pathPrefix("offset") & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/offset") { - concat( - // Fetch a blazegraph view offset - (get & authorizeFor(ref, permissions.read)) { - emit( - views - .fetchIndexingView(id, ref) - .flatMap(v => projections.offset(v.projection)) - .rejectOn[ViewNotFound] - ) - }, - // Remove an blazegraph view offset (restart the view) - (delete & authorizeFor(ref, Write)) { + ) + }, + (pathPrefix("tags") & pathEndOrSingleSlash) { + concat( + // Fetch tags for a view + (get & idSegmentRef(id) & authorizeFor(ref, Read)) { id => + emit(views.fetch(id, ref).map(_.value.tags).rejectOn[ViewNotFound]) + }, + // Tag a view + (post & parameter("rev".as[Int])) { rev => + authorizeFor(ref, Write).apply { + entity(as[Tag]) { case Tag(tagRev, tag) => emit( + Created, views - .fetchIndexingView(id, ref) - .flatMap { r => projections.scheduleRestart(r.projection) } - .as(Offset.start) + .tag(id, ref, tag, tagRev, rev) + .tapEval(index(ref, _, mode)) + .mapValue(_.metadata) .rejectOn[ViewNotFound] ) } - ) - } - }, - (pathPrefix("tags") & pathEndOrSingleSlash) { - operationName(s"$prefixSegment/views/{org}/{project}/{id}/tags") { - concat( - // Fetch tags for a view - (get & idSegmentRef(id) & authorizeFor(ref, Read)) { id => - emit(views.fetch(id, ref).map(_.value.tags).rejectOn[ViewNotFound]) - }, - // Tag a view - (post & parameter("rev".as[Int])) { rev => - authorizeFor(ref, Write).apply { - entity(as[Tag]) { case Tag(tagRev, tag) => - emit( - Created, - views - .tag(id, ref, tag, tagRev, rev) - .tapEval(index(ref, _, mode)) - .mapValue(_.metadata) - .rejectOn[ViewNotFound] - ) - } - } - } - ) - } - }, - // Fetch a view original source - (pathPrefix("source") & get & pathEndOrSingleSlash & idSegmentRef(id)) { id => - operationName(s"$prefixSegment/views/{org}/{project}/{id}/source") { - authorizeFor(ref, Read).apply { - emit(views.fetch(id, ref).map(_.value.source).rejectOn[ViewNotFound]) } } - }, - //Incoming/outgoing links for views - incomingOutgoing(id, ref) - ) - } - ) - } - } - }, - //Handle all other incoming and outgoing links - pathPrefix(Segment) { segment => - extractCaller { implicit caller => - resolveProjectRef.apply { ref => - // if we are on the path /resources/{org}/{proj}/ we need to consume the {schema} segment before consuming the {id} - consumeIdSegmentIf(segment == "resources") { - idSegment { id => + ) + }, + // Fetch a view original source + (pathPrefix("source") & get & pathEndOrSingleSlash & idSegmentRef(id)) { id => + authorizeFor(ref, Read).apply { + emit(views.fetch(id, ref).map(_.value.source).rejectOn[ViewNotFound]) + } + }, + //Incoming/outgoing links for views incomingOutgoing(id, ref) - } + ) + } + ) + } + } + }, + //Handle all other incoming and outgoing links + pathPrefix(Segment) { segment => + extractCaller { implicit caller => + resolveProjectRef.apply { ref => + // if we are on the path /resources/{org}/{proj}/ we need to consume the {schema} segment before consuming the {id} + consumeIdSegmentIf(segment == "resources") { + idSegment { id => + incomingOutgoing(id, ref) } } } } - ) - } + } + ) private def consumeIdSegmentIf(condition: Boolean): 
Directive0 =
     if (condition) idSegment.flatMap(_ => pass)
@@ -314,17 +226,10 @@ class BlazegraphViewsRoutes(
         }
       }
     )
-
-  private val decodingFailedOrViewNotFound: PartialFunction[BlazegraphViewRejection, Boolean] = {
-    case _: DecodingFailed | _: ViewNotFound | _: InvalidJsonLdFormat => true
-  }
-
 }
 
 object BlazegraphViewsRoutes {
 
-  type RestartView = (Iri, ProjectRef) => UIO[Unit]
-
   /**
    * @return
    *   the [[Route]] for BlazegraphViews
    */
@@ -334,8 +239,6 @@ object BlazegraphViewsRoutes {
      viewsQuery: BlazegraphViewsQuery,
      identities: Identities,
      aclCheck: AclCheck,
-      projections: Projections,
-      projectionErrors: ProjectionErrors,
      schemeDirectives: DeltaSchemeDirectives,
      index: IndexingAction.Execute[BlazegraphView]
   )(implicit
@@ -351,8 +254,6 @@ object BlazegraphViewsRoutes {
      viewsQuery,
      identities,
      aclCheck,
-      projections,
-      projectionErrors: ProjectionErrors,
      schemeDirectives,
      index
    ).routes
diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesHandler.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesHandler.scala
new file mode 100644
index 0000000000..bb6c0bcc5a
--- /dev/null
+++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesHandler.scala
@@ -0,0 +1,19 @@
+package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes
+
+import akka.http.scaladsl.server.Directives.concat
+import akka.http.scaladsl.server.Route
+import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model._
+import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives
+import ch.epfl.bluebrain.nexus.delta.sdk.directives.UriDirectives.baseUriPrefix
+import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri
+
+/**
+ * Transforms the incoming request to consume the baseUri prefix and rewrite the generic resource endpoint
+ */
+object BlazegraphViewsRoutesHandler {
+
+  def apply(schemeDirectives: DeltaSchemeDirectives, routes: Route*)(implicit baseUri: BaseUri): Route =
+    (baseUriPrefix(baseUri.prefix) & schemeDirectives.replaceUri("views", schema.iri)) {
+      concat(routes: _*)
+    }
+}
diff --git a/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/package.scala b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/package.scala
new file mode 100644
index 0000000000..4ce0b3fd2d
--- /dev/null
+++ b/delta/plugins/blazegraph/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/package.scala
@@ -0,0 +1,12 @@
+package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph
+
+import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection
+import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection.{DecodingFailed, InvalidJsonLdFormat, ViewNotFound}
+
+package object routes {
+
+  val decodingFailedOrViewNotFound: PartialFunction[BlazegraphViewRejection, Boolean] = {
+    case _: DecodingFailed | _: ViewNotFound | _: InvalidJsonLdFormat => true
+  }
+
+}
diff --git a/delta/plugins/blazegraph/src/test/resources/indexing-view-source-2.json b/delta/plugins/blazegraph/src/test/resources/indexing-view-source-2.json
deleted file mode 100644
index b2db52d14c..0000000000
--- a/delta/plugins/blazegraph/src/test/resources/indexing-view-source-2.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "resourceSchemas": [],
-  "resourceTypes": [],
-  "includeMetadata": false,
- 
"includeDeprecated": false, - "permission": "views/query", - "@type": "SparqlView", - "@id": "https://bluebrain.github.io/nexus/vocabulary/indexing-view-2" -} \ No newline at end of file diff --git a/delta/plugins/blazegraph/src/test/resources/routes/list-indexing-errors.json b/delta/plugins/blazegraph/src/test/resources/routes/list-indexing-errors.json new file mode 100644 index 0000000000..8c2880cfbe --- /dev/null +++ b/delta/plugins/blazegraph/src/test/resources/routes/list-indexing-errors.json @@ -0,0 +1,31 @@ +{ + "@context": [ + "https://bluebrain.github.io/nexus/contexts/metadata.json", + "https://bluebrain.github.io/nexus/contexts/search.json", + "https://bluebrain.github.io/nexus/contexts/error.json" + ], + "_total": 2, + "_results": [ + { + "errorType": "java.lang.Exception", + "id": "https://bluebrain.github.io/nexus/vocabulary/myid", + "message": "boom", + "offset": { + "@type": "At", + "value": 42 + }, + "project": "org/proj", + "_rev": 1 + }, + { + "errorType": "java.lang.Exception", + "id": "https://bluebrain.github.io/nexus/vocabulary/myid", + "message": "boom", + "offset": { + "@type": "At", + "value": 42 + }, + "_rev": 1 + } + ] +} \ No newline at end of file diff --git a/delta/plugins/blazegraph/src/test/resources/routes/responses/offset.json b/delta/plugins/blazegraph/src/test/resources/routes/responses/offset.json deleted file mode 100644 index 06fc9e9503..0000000000 --- a/delta/plugins/blazegraph/src/test/resources/routes/responses/offset.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "@context" : "https://bluebrain.github.io/nexus/contexts/offset.json", - "@type" : "Start" -} diff --git a/delta/plugins/blazegraph/src/test/resources/routes/responses/statistics.json b/delta/plugins/blazegraph/src/test/resources/routes/responses/statistics.json deleted file mode 100644 index 6270fab0b5..0000000000 --- a/delta/plugins/blazegraph/src/test/resources/routes/responses/statistics.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "@context" : "https://bluebrain.github.io/nexus/contexts/statistics.json", - "@type": "ViewStatistics", - "discardedEvents" : 0, - "evaluatedEvents" : 0, - "failedEvents" : 0, - "lastEventDateTime" : "{{projectLatestInstant}}", - "processedEvents" : 0, - "remainingEvents" : 2, - "totalEvents" : 2 -} - diff --git a/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewRoutesFixtures.scala b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewRoutesFixtures.scala index a8b29cf0ec..ce94ca3d43 100644 --- a/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewRoutesFixtures.scala +++ b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewRoutesFixtures.scala @@ -1,23 +1,82 @@ package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes +import akka.actor.typed.ActorSystem +import akka.http.scaladsl.model.headers.OAuth2BearerToken +import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler} +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.Fixtures import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.SparqlLink.{SparqlExternalLink, SparqlResourceLink} import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.{schema, SparqlLink} import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv +import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering +import ch.epfl.bluebrain.nexus.delta.sdk.ConfigFixtures +import 
ch.epfl.bluebrain.nexus.delta.sdk.acls.AclSimpleCheck import ch.epfl.bluebrain.nexus.delta.sdk.generators.ProjectGen +import ch.epfl.bluebrain.nexus.delta.sdk.identities.IdentitiesDummy +import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller +import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{RdfExceptionHandler, RdfRejectionHandler} import ch.epfl.bluebrain.nexus.delta.sdk.model.search.ResultEntry.UnscoredResultEntry -import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults +import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{PaginationConfig, SearchResults} import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults.UnscoredSearchResults -import ch.epfl.bluebrain.nexus.delta.sdk.model.{ResourceF, ResourceUris} +import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, ResourceF, ResourceUris} import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.ApiMappings import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ +import ch.epfl.bluebrain.nexus.delta.sdk.utils.RouteHelpers import ch.epfl.bluebrain.nexus.delta.sourcing.model.ResourceRef import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity -import ch.epfl.bluebrain.nexus.testkit.{EitherValuable, TestHelpers} +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Authenticated, Group, User} +import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture +import ch.epfl.bluebrain.nexus.testkit.{CirceEq, CirceLiteral, EitherValuable, IOFixedClock, IOValues, TestHelpers, TestMatchers} +import monix.execution.Scheduler +import org.scalatest.matchers.should.Matchers +import org.scalatest.{BeforeAndAfterAll, CancelAfterFailure, Inspectors, OptionValues} import java.time.Instant +import java.util.UUID -trait BlazegraphViewRoutesFixtures extends TestHelpers with EitherValuable { +trait BlazegraphViewRoutesFixtures + extends RouteHelpers + with DoobieScalaTestFixture + with Matchers + with CirceLiteral + with CirceEq + with IOFixedClock + with IOValues + with OptionValues + with TestMatchers + with Inspectors + with CancelAfterFailure + with ConfigFixtures + with BeforeAndAfterAll + with TestHelpers + with Fixtures + with EitherValuable { + + import akka.actor.typed.scaladsl.adapter._ + + implicit val typedSystem: ActorSystem[Nothing] = system.toTyped + implicit val sc: Scheduler = Scheduler.global + implicit val baseUri: BaseUri = BaseUri("http://localhost", Label.unsafe("v1")) + + implicit val ordering: JsonKeyOrdering = + JsonKeyOrdering.default(topKeys = + List("@context", "@id", "@type", "reason", "details", "sourceId", "projectionId", "_total", "_results") + ) + implicit val rejectionHandler: RejectionHandler = RdfRejectionHandler.apply + implicit val exceptionHandler: ExceptionHandler = RdfExceptionHandler.apply + + implicit val paginationConfig: PaginationConfig = pagination + + val uuid = UUID.randomUUID() + + val aclCheck = AclSimpleCheck().accepted + + val realm = Label.unsafe("myrealm") + val bob = User("Bob", realm) + implicit val caller: Caller = Caller(bob, Set(bob, Group("mygroup", realm), Authenticated(realm))) + + val identities = IdentitiesDummy(caller) + val asBob = addCredentials(OAuth2BearerToken("Bob")) val org = Label.unsafe("org") val orgDeprecated = Label.unsafe("org-deprecated") diff --git a/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutesSpec.scala 
b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutesSpec.scala new file mode 100644 index 0000000000..aaad5f4903 --- /dev/null +++ b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsIndexingRoutesSpec.scala @@ -0,0 +1,206 @@ +package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes + +import akka.http.scaladsl.model.headers.`Last-Event-ID` +import akka.http.scaladsl.model.{MediaTypes, StatusCodes} +import akka.http.scaladsl.server.Route +import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress +import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset +import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model._ +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection.{InvalidResourceId, ProjectContextRejection, ViewNotFound} +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes.BlazegraphViewsIndexingRoutes.FetchIndexingView +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary +import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.indexing.IndexingViewDef.ActiveViewDef +import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment +import ch.epfl.bluebrain.nexus.delta.sdk.model.IdSegment.{IriSegment, StringSegment} +import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContextDummy +import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.EntityType +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Anonymous +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem +import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionProgress +import monix.bio.IO + +import java.time.Instant +import scala.concurrent.duration._ + +class BlazegraphViewsIndexingRoutesSpec extends BlazegraphViewRoutesFixtures { + + private lazy val projections = Projections(xas, queryConfig, 1.hour) + private lazy val projectionErrors = ProjectionErrors(xas, queryConfig) + + private val fetchContext = FetchContextDummy[BlazegraphViewRejection]( + Map(project.ref -> project.context), + Set(deprecatedProject.ref), + ProjectContextRejection + ) + + private val groupDirectives = + DeltaSchemeDirectives( + fetchContext, + ioFromMap(uuid -> projectRef.organization), + ioFromMap(uuid -> projectRef) + ) + + private val myId = nxv + "myid" + private val indexingView = ActiveViewDef( + ViewRef(projectRef, myId), + "projection", + None, + None, + "namespace", + 1, + 1 + ) + private val progress = ProjectionProgress(Offset.at(15L), Instant.EPOCH, 9000L, 400L, 30L) + + private def fetchView: FetchIndexingView = + (id: IdSegment, ref) => + id match { + case IriSegment(`myId`) => IO.pure(indexingView) + case IriSegment(id) => IO.raiseError(ViewNotFound(id, ref)) + case StringSegment("myid") => IO.pure(indexingView) + case StringSegment(id) => IO.raiseError(InvalidResourceId(id)) + } + + private lazy val routes = + Route.seal( + BlazegraphViewsIndexingRoutes( + fetchView, + identities, + aclCheck, + projections, + projectionErrors, + groupDirectives + ) + ) + + override def beforeAll(): Unit = { + super.beforeAll() + val error = new Exception("boom") + val rev = 1 + val fail1 = FailedElem(EntityType("ACL"), myId, Some(projectRef), Instant.EPOCH, 
Offset.At(42L), error, rev) + val fail2 = FailedElem(EntityType("Schema"), myId, None, Instant.EPOCH, Offset.At(42L), error, rev) + val save = for { + _ <- projections.save(indexingView.projectionMetadata, progress) + _ <- projectionErrors.saveFailedElems(indexingView.projectionMetadata, List(fail1, fail2)) + } yield () + save.accepted + } + + private val viewEndpoint = "/views/myorg/myproject/myid" + + "fail to fetch statistics and offset from view without resources/read permission" in { + val endpoints = List( + s"$viewEndpoint/statistics", + s"$viewEndpoint/offset" + ) + forAll(endpoints) { endpoint => + Get(endpoint) ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("routes/errors/authorization-failed.json") + } + } + } + + "fetch statistics from view" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.read)).accepted + + val expectedResponse = + json""" + { + "@context": "https://bluebrain.github.io/nexus/contexts/statistics.json", + "@type": "ViewStatistics", + "delayInSeconds" : 0, + "discardedEvents": 400, + "evaluatedEvents": 8570, + "failedEvents": 30, + "lastEventDateTime": "${Instant.EPOCH}", + "lastProcessedEventDateTime": "${Instant.EPOCH}", + "processedEvents": 9000, + "remainingEvents": 0, + "totalEvents": 9000 + }""" + + Get(s"$viewEndpoint/statistics") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson shouldEqual expectedResponse + } + } + + "fetch offset from view" in { + val expectedResponse = + json"""{ + "@context" : "https://bluebrain.github.io/nexus/contexts/offset.json", + "@type" : "At", + "value" : 15 + }""" + + Get(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson shouldEqual expectedResponse + } + } + + "fail to restart offset from view without resources/write permission" in { + + Delete(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + + "restart offset from view" in { + + aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted + projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 0 + Delete(s"$viewEndpoint/offset") ~> routes ~> check { + response.status shouldEqual StatusCodes.OK + response.asJson shouldEqual json"""{"@context": "${Vocabulary.contexts.offset}", "@type": "Start"}""" + projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 1 + } + } + + "return no blazegraph projection failures without write permission" in { + aclCheck.subtract(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted + + val endpoints = List( + s"$viewEndpoint/failures", + s"$viewEndpoint/failures/sse" + ) + forAll(endpoints) { endpoint => + Get(endpoint) ~> routes ~> check { + response.status shouldEqual StatusCodes.Forbidden + response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") + } + } + } + + "return all failures as SSE when no LastEventID is provided" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted + Get(s"$viewEndpoint/failures/sse") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + mediaType shouldBe MediaTypes.`text/event-stream` + chunksStream.asString(2).strip shouldEqual contentOf("/routes/sse/indexing-failures-1-2.txt") + } + } + + "return failures as SSE only from the given LastEventID" in 
{ + Get(s"$viewEndpoint/failures/sse") ~> `Last-Event-ID`("1") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + mediaType shouldBe MediaTypes.`text/event-stream` + chunksStream.asString(3).strip shouldEqual contentOf("/routes/sse/indexing-failure-2.txt") + } + } + + "return failures as a listing" in { + aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted + Get(s"$viewEndpoint/failures") ~> routes ~> check { + response.status shouldBe StatusCodes.OK + response.asJson shouldEqual jsonContentOf("/routes/list-indexing-errors.json") + } + } + +} diff --git a/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesSpec.scala b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesSpec.scala index 8fb2906f16..54d5e17a83 100644 --- a/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesSpec.scala +++ b/delta/plugins/blazegraph/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/blazegraph/routes/BlazegraphViewsRoutesSpec.scala @@ -1,91 +1,40 @@ package ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.routes -import akka.actor.typed.ActorSystem import akka.http.scaladsl.model.MediaTypes.`text/html` import akka.http.scaladsl.model.headers._ -import akka.http.scaladsl.model.{HttpEntity, MediaTypes, StatusCodes, Uri} -import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route} +import akka.http.scaladsl.model.{HttpEntity, StatusCodes, Uri} +import akka.http.scaladsl.server.Route import akka.util.ByteString import ch.epfl.bluebrain.nexus.delta.kernel.utils.{UUIDF, UrlUtils} +import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.BlazegraphViews import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.client.{SparqlQueryClientDummy, SparqlResults} import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model.BlazegraphViewRejection.ProjectContextRejection import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.model._ -import ch.epfl.bluebrain.nexus.delta.plugins.blazegraph.{BlazegraphViews, Fixtures} import ch.epfl.bluebrain.nexus.delta.rdf.RdfMediaTypes._ import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.rdf.query.SparqlQuery import ch.epfl.bluebrain.nexus.delta.rdf.query.SparqlQuery.SparqlConstructQuery -import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering -import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclSimpleCheck +import ch.epfl.bluebrain.nexus.delta.sdk.IndexingAction import ch.epfl.bluebrain.nexus.delta.sdk.acls.model.AclAddress import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig -import ch.epfl.bluebrain.nexus.delta.sdk.identities.IdentitiesDummy -import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.Caller import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ -import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.{RdfExceptionHandler, RdfRejectionHandler} -import ch.epfl.bluebrain.nexus.delta.sdk.model._ -import ch.epfl.bluebrain.nexus.delta.sdk.model.search.PaginationConfig import ch.epfl.bluebrain.nexus.delta.sdk.permissions.Permissions.events import ch.epfl.bluebrain.nexus.delta.sdk.projects.FetchContextDummy import ch.epfl.bluebrain.nexus.delta.sdk.resolvers.ResolverContextResolution -import ch.epfl.bluebrain.nexus.delta.sdk.utils.RouteHelpers -import 
ch.epfl.bluebrain.nexus.delta.sdk.{ConfigFixtures, IndexingAction} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.{Anonymous, Authenticated, Group, User} -import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Label} -import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset -import ch.epfl.bluebrain.nexus.delta.sourcing.postgres.DoobieScalaTestFixture -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.{ProjectionErrors, Projections} -import ch.epfl.bluebrain.nexus.delta.sourcing.projections.model.ProjectionRestart -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.{FailedElem, SuccessElem} -import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionMetadata -import ch.epfl.bluebrain.nexus.testkit._ +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Anonymous import io.circe.Json import io.circe.syntax._ import monix.bio.UIO -import monix.execution.Scheduler -import org.scalatest._ -import org.scalatest.matchers.should.Matchers - -import java.time.Instant -import java.util.UUID -import scala.concurrent.duration._ - -class BlazegraphViewsRoutesSpec - extends RouteHelpers - with DoobieScalaTestFixture - with Matchers - with CirceLiteral - with CirceEq - with IOFixedClock - with IOValues - with OptionValues - with TestMatchers - with Inspectors - with CancelAfterFailure - with ConfigFixtures - with BeforeAndAfterAll - with TestHelpers - with Fixtures - with BlazegraphViewRoutesFixtures { - - import akka.actor.typed.scaladsl.adapter._ - implicit private val typedSystem: ActorSystem[Nothing] = system.toTyped - - private val prefix = "prefix" - private val uuid = UUID.randomUUID() - implicit private val uuidF: UUIDF = UUIDF.fixed(uuid) - implicit private val sc: Scheduler = Scheduler.global - private val realm = Label.unsafe("myrealm") - private val bob = User("Bob", realm) - implicit private val caller: Caller = Caller(bob, Set(bob, Group("mygroup", realm), Authenticated(realm))) - implicit private val baseUri: BaseUri = BaseUri("http://localhost", Label.unsafe("v1")) - private val identities = IdentitiesDummy(caller) - private val asBob = addCredentials(OAuth2BearerToken("Bob")) + +class BlazegraphViewsRoutesSpec extends BlazegraphViewRoutesFixtures { + + private val prefix = "prefix" + + implicit private val uuidF: UUIDF = UUIDF.fixed(uuid) private val indexingSource = jsonContentOf("indexing-view-source.json") - private val indexingSource2 = jsonContentOf("indexing-view-source-2.json") private val aggregateSource = jsonContentOf("aggregate-view-source.json") private val updatedIndexingSource = indexingSource.mapObject(_.add("resourceTag", Json.fromString("v1.5"))) @@ -98,15 +47,7 @@ class BlazegraphViewsRoutesSpec ProjectContextRejection ) - implicit private val ordering: JsonKeyOrdering = - JsonKeyOrdering.default(topKeys = - List("@context", "@id", "@type", "reason", "details", "sourceId", "projectionId", "_total", "_results") - ) - implicit val rejectionHandler: RejectionHandler = RdfRejectionHandler.apply - implicit val exceptionHandler: ExceptionHandler = RdfExceptionHandler.apply - - implicit val paginationConfig: PaginationConfig = pagination - implicit private val f: FusionConfig = fusionConfig + implicit private val f: FusionConfig = fusionConfig private val selectQuery = SparqlQuery("SELECT * {?s ?p ?o}") private val constructQuery = SparqlConstructQuery("CONSTRUCT {?s ?p ?o} WHERE {?s ?p ?o}").rightValue @@ -121,9 +62,6 @@ class BlazegraphViewsRoutesSpec xas ).accepted - private lazy val projections = Projections(xas, 
queryConfig, 1.hour) - private lazy val projectionErrors = ProjectionErrors(xas, queryConfig) - lazy val viewsQuery = new BlazegraphViewsQueryDummy( projectRef, new SparqlQueryClientDummy(), @@ -131,19 +69,19 @@ class BlazegraphViewsRoutesSpec Map("resource-incoming-outgoing" -> linksResults) ) - private val aclCheck = AclSimpleCheck().accepted private val groupDirectives = DeltaSchemeDirectives(fetchContext, _ => UIO.none, _ => UIO.none) private lazy val routes = Route.seal( - BlazegraphViewsRoutes( - views, - viewsQuery, - identities, - aclCheck, - projections, - projectionErrors, + BlazegraphViewsRoutesHandler( groupDirectives, - IndexingAction.noop + BlazegraphViewsRoutes( + views, + viewsQuery, + identities, + aclCheck, + groupDirectives, + IndexingAction.noop + ) ) ) @@ -203,39 +141,6 @@ class BlazegraphViewsRoutesSpec } } - "fail to fetch statistics and offset from view without resources/read permission" in { - - val endpoints = List( - "/v1/views/org/proj/indexing-view/statistics", - "/v1/views/org/proj/indexing-view/offset" - ) - forAll(endpoints) { endpoint => - Get(endpoint) ~> routes ~> check { - response.status shouldEqual StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("routes/errors/authorization-failed.json") - } - } - } - - "fetch statistics from view" in { - - Get("/v1/views/org/proj/indexing-view/statistics") ~> asBob ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual jsonContentOf( - "routes/responses/statistics.json", - "projectLatestInstant" -> Instant.EPOCH, - "viewLatestInstant" -> Instant.EPOCH - ) - } - } - - "fetch offset from view" in { - Get("/v1/views/org/proj/indexing-view/offset") ~> asBob ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual jsonContentOf("routes/responses/offset.json") - } - } - "reject creation of a view which already exits" in { Put("/v1/views/org/proj/aggregate-view", aggregateSource.toEntity) ~> asBob ~> routes ~> check { response.status shouldEqual StatusCodes.Conflict @@ -451,81 +356,6 @@ class BlazegraphViewsRoutesSpec } } } - "fail to restart offset from view without resources/write permission" in { - - Delete("/v1/views/org/proj/indexing-view/offset") ~> routes ~> check { - response.status shouldEqual StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - - "restart offset from view" in { - // Creating a new view, as indexing-view is deprecated and cannot be restarted - Post("/v1/views/org/proj", indexingSource2.toEntity) ~> asBob ~> routes - - aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted - projections.restarts(Offset.start).compile.toList.accepted.size shouldEqual 0 - Delete("/v1/views/org/proj/indexing-view-2/offset") ~> routes ~> check { - response.status shouldEqual StatusCodes.OK - response.asJson shouldEqual json"""{"@context": "${Vocabulary.contexts.offset}", "@type": "Start"}""" - projections.restarts(Offset.start).compile.lastOrError.accepted shouldEqual SuccessElem( - ProjectionRestart.entityType, - ProjectionRestart.restartId(Offset.at(1L)), - None, - Instant.EPOCH, - Offset.at(1L), - ProjectionRestart( - "blazegraph-org/proj-https://bluebrain.github.io/nexus/vocabulary/indexing-view-2-1", - Instant.EPOCH, - Anonymous - ), - 1 - ) - } - } - - "return no blazegraph projection failures without write permission" in { - aclCheck.subtract(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted - - 
Get("/v1/views/org/proj/indexing-view/failures") ~> routes ~> check { - response.status shouldBe StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - - "not return any failures if there aren't any" in { - aclCheck.append(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted - - Get("/v1/views/org/proj/indexing-view/failures") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(2).strip shouldBe "" - } - } - - "return all available failures when no LastEventID is provided" in { - val metadata = ProjectionMetadata("testModule", "testName", Some(projectRef), Some(indexingViewId)) - val error = new Exception("boom") - val rev = 1 - val fail1 = - FailedElem(EntityType("ACL"), nxv + "myid", Some(projectRef), Instant.EPOCH, Offset.At(42L), error, rev) - val fail2 = FailedElem(EntityType("Schema"), nxv + "myid", None, Instant.EPOCH, Offset.At(42L), error, rev) - projectionErrors.saveFailedElems(metadata, List(fail1, fail2)).accepted - - Get("/v1/views/org/proj/indexing-view/failures") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(2).strip shouldEqual contentOf("/routes/sse/indexing-failures-1-2.txt") - } - } - - "return failures only from the given LastEventID" in { - Get("/v1/views/org/proj/indexing-view/failures") ~> `Last-Event-ID`("1") ~> routes ~> check { - mediaType shouldBe MediaTypes.`text/event-stream` - response.status shouldBe StatusCodes.OK - chunksStream.asString(3).strip shouldEqual contentOf("/routes/sse/indexing-failure-2.txt") - } - } "redirect to fusion for the latest version if the Accept header is set to text/html" in { Get("/v1/views/org/proj/indexing-view") ~> Accept(`text/html`) ~> routes ~> check { diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala index 9781e8cead..6c3abc9cba 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/ElasticSearchPluginModule.scala @@ -10,7 +10,7 @@ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.indexing.ElasticSearc import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection.ProjectContextRejection import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.{contexts, defaultElasticsearchMapping, defaultElasticsearchSettings, schema => viewsSchemaId, ElasticSearchView, ElasticSearchViewEvent} import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.query.{DefaultViewsQuery, ElasticSearchQueryError} -import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.{ElasticSearchAllRoutes, ElasticSearchIndexingRoutes, ElasticSearchQueryRoutes, ElasticSearchViewsRoutes} +import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes.{ElasticSearchIndexingRoutes, ElasticSearchQueryRoutes, ElasticSearchViewsRoutes, ElasticSearchViewsRoutesHandler} import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue.ContextObject @@ 
-328,7 +328,7 @@ class ElasticSearchPluginModule(priority: Int) extends ModuleDef { ) => PriorityRoute( priority, - ElasticSearchAllRoutes( + ElasticSearchViewsRoutesHandler( schemeDirectives, es.routes, query.routes, diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala index 60fc140f87..8a2cfc9369 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutes.scala @@ -2,9 +2,6 @@ package ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.routes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server._ -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination -import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.indexing.IndexingViewDef.ActiveViewDef import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model.ElasticSearchViewRejection._ import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model._ @@ -25,7 +22,6 @@ import ch.epfl.bluebrain.nexus.delta.sdk.marshalling.RdfMarshalling import ch.epfl.bluebrain.nexus.delta.sdk.model._ import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults.searchResultsJsonLdEncoder import ch.epfl.bluebrain.nexus.delta.sdk.model.search.{PaginationConfig, SearchResults} -import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef import ch.epfl.bluebrain.nexus.delta.sourcing.ProgressStatistics import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef} @@ -103,7 +99,7 @@ final class ElasticSearchIndexingRoutes( emit( fetch(id, ref) .map { view => - projectionErrors.failedElemSses(view.ref.project, view.ref.viewId, offset) + projectionErrors.sses(view.ref.project, view.ref.viewId, offset) } ) }, @@ -114,7 +110,7 @@ final class ElasticSearchIndexingRoutes( emit( fetch(id, ref) .flatMap { view => - listErrors(view.ref, pagination, timeRange) + projectionErrors.search(view.ref, pagination, timeRange) } ) } @@ -149,13 +145,6 @@ final class ElasticSearchIndexingRoutes( } } } - - private def listErrors(ref: ViewRef, pagination: FromPagination, timeRange: TimeRange) = { - for { - results <- projectionErrors.list(ref.project, ref.viewId, pagination, timeRange) - count <- projectionErrors.count(ref.project, ref.viewId, timeRange) - } yield SearchResults(count, results.map { _.failedElemData }) - }.widen[SearchResults[FailedElemData]] } object ElasticSearchIndexingRoutes { diff --git a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesHandler.scala similarity index 52% rename from delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala rename to delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesHandler.scala index 
de30508f92..79ede8bf7d 100644 --- a/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchAllRoutes.scala +++ b/delta/plugins/elasticsearch/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesHandler.scala @@ -4,22 +4,16 @@ import akka.http.scaladsl.server.Directives.concat import akka.http.scaladsl.server.Route import ch.epfl.bluebrain.nexus.delta.plugins.elasticsearch.model._ import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives +import ch.epfl.bluebrain.nexus.delta.sdk.directives.UriDirectives.baseUriPrefix import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri -class ElasticSearchAllRoutes(schemeDirectives: DeltaSchemeDirectives, underlying: Route*)(implicit baseUri: BaseUri) - extends ElasticSearchViewsDirectives { - - import schemeDirectives._ - - def routes: Route = - (baseUriPrefix(baseUri.prefix) & replaceUri("views", schema.iri)) { - concat(underlying: _*) - } - -} - -object ElasticSearchAllRoutes { +/** + * Transforms the incoming request to consume the baseUri prefix and rewrite the generic resource endpoint + */ +object ElasticSearchViewsRoutesHandler extends { def apply(schemeDirectives: DeltaSchemeDirectives, routes: Route*)(implicit baseUri: BaseUri): Route = - new ElasticSearchAllRoutes(schemeDirectives, routes: _*).routes + (baseUriPrefix(baseUri.prefix) & schemeDirectives.replaceUri("views", schema.iri)) { + concat(routes: _*) + } } diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala index 02a2a2edfc..8d0c30b494 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchIndexingRoutesSpec.scala @@ -28,7 +28,7 @@ import monix.bio.IO import java.time.Instant import scala.concurrent.duration._ -class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { +class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesFixtures { private lazy val projections = Projections(xas, queryConfig, 1.hour) private lazy val projectionErrors = ProjectionErrors(xas, queryConfig) @@ -72,16 +72,13 @@ class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { private lazy val routes = Route.seal( - ElasticSearchAllRoutes( - groupDirectives, - ElasticSearchIndexingRoutes( - identities, - aclCheck, - fetchView, - projections, - projectionErrors, - groupDirectives - ) + ElasticSearchIndexingRoutes( + identities, + aclCheck, + fetchView, + projections, + projectionErrors, + groupDirectives ) ) @@ -98,7 +95,7 @@ class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { save.accepted } - private val viewEndpoint = "/v1/views/myorg/myproject/myid" + private val viewEndpoint = "/views/myorg/myproject/myid" "fail to fetch statistics and offset from view without resources/read permission" in { val endpoints = List( @@ -185,15 +182,6 @@ class ElasticSearchIndexingRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { } } - "return no elasticsearch projection failures without write permission" in { - aclCheck.subtract(AclAddress.Root, Anonymous -> 
Set(esPermissions.write)).accepted - - Get(s"$viewEndpoint/failures/sse") ~> routes ~> check { - response.status shouldBe StatusCodes.Forbidden - response.asJson shouldEqual jsonContentOf("/routes/errors/authorization-failed.json") - } - } - "return all failures as SSE when no LastEventID is provided" in { aclCheck.append(AclAddress.Root, Anonymous -> Set(esPermissions.write)).accepted Get(s"$viewEndpoint/failures/sse") ~> routes ~> check { diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala index f80d09523b..f08ec74d21 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchQueryRoutesSpec.scala @@ -20,7 +20,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label import io.circe.syntax._ import io.circe.{Json, JsonObject} -class ElasticSearchQueryRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { +class ElasticSearchQueryRoutesSpec extends ElasticSearchViewsRoutesFixtures { private val myId2 = nxv + "myid2" private val myId2Encoded = UrlUtils.encode(myId2.toString) @@ -44,7 +44,7 @@ class ElasticSearchQueryRoutesSpec extends ElasticSearchViewsRoutesBaseSpec { private lazy val routes = Route.seal( - ElasticSearchAllRoutes( + ElasticSearchViewsRoutesHandler( groupDirectives, new ElasticSearchQueryRoutes( identities, diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesFixtures.scala similarity index 98% rename from delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala rename to delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesFixtures.scala index 126550d223..bb74d1626d 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesBaseSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesFixtures.scala @@ -27,7 +27,7 @@ import org.scalatest.{CancelAfterFailure, Inspectors, OptionValues} import java.util.UUID -class ElasticSearchViewsRoutesBaseSpec +class ElasticSearchViewsRoutesFixtures extends RouteHelpers with DoobieScalaTestFixture with Matchers diff --git a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala index b0adb4f276..7a90149c1b 100644 --- a/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala +++ b/delta/plugins/elasticsearch/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/elasticsearch/routes/ElasticSearchViewsRoutesSpec.scala @@ -23,7 +23,7 @@ import 
ch.epfl.bluebrain.nexus.delta.sourcing.stream.PipeChain
 import io.circe.Json
 import monix.bio.{IO, UIO}
 
-class ElasticSearchViewsRoutesSpec extends ElasticSearchViewsRoutesBaseSpec {
+class ElasticSearchViewsRoutesSpec extends ElasticSearchViewsRoutesFixtures {
 
   implicit private val uuidF: UUIDF = UUIDF.fixed(uuid)
 
@@ -78,7 +78,7 @@ class ElasticSearchViewsRoutesSpec extends ElasticSearchViewsRoutesBaseSpec {
 
   private lazy val routes =
     Route.seal(
-      ElasticSearchAllRoutes(
+      ElasticSearchViewsRoutesHandler(
         groupDirectives,
         ElasticSearchViewsRoutes(
           identities,
diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/implicits/package.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/implicits/package.scala
index b7fc026e6f..56e5567933 100644
--- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/implicits/package.scala
+++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/implicits/package.scala
@@ -4,7 +4,7 @@ import ch.epfl.bluebrain.nexus.delta.kernel.syntax.{ClassTagSyntax, IOSyntax, In
 import ch.epfl.bluebrain.nexus.delta.rdf.instances.{SecretInstances, TripleInstances, UriInstances}
 import ch.epfl.bluebrain.nexus.delta.rdf.syntax.{IriSyntax, IterableSyntax, JsonLdEncoderSyntax, JsonSyntax, PathSyntax, UriSyntax}
 import ch.epfl.bluebrain.nexus.delta.sdk.instances.{CredentialsInstances, IdentityInstances, IriInstances, ProjectRefInstances}
-import ch.epfl.bluebrain.nexus.delta.sdk.syntax.{HttpRequestSyntax, HttpResponseFieldsSyntax, IORejectSyntax, IriEncodingSyntax}
+import ch.epfl.bluebrain.nexus.delta.sdk.syntax.{HttpRequestSyntax, HttpResponseFieldsSyntax, IORejectSyntax, IriEncodingSyntax, ProjectionErrorsSyntax}
 
 /**
  * Aggregate instances and syntax from rdf plus the current sdk instances and syntax to avoid importing multiple
@@ -32,3 +32,4 @@ package object implicits
     with IOSyntax
     with ClassTagSyntax
     with InstantSyntax
+    with ProjectionErrorsSyntax
diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/ProjectionErrorsSyntax.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/ProjectionErrorsSyntax.scala
new file mode 100644
index 0000000000..fd4cd7772b
--- /dev/null
+++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/ProjectionErrorsSyntax.scala
@@ -0,0 +1,87 @@
+package ch.epfl.bluebrain.nexus.delta.sdk.syntax
+
+import akka.http.scaladsl.model.sse.ServerSentEvent
+import cats.syntax.all._
+import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination
+import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange
+import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
+import ch.epfl.bluebrain.nexus.delta.rdf.implicits._
+import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.{JsonLdApi, JsonLdJavaApi}
+import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution
+import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults
+import ch.epfl.bluebrain.nexus.delta.sdk.syntax.ProjectionErrorsSyntax.ProjectionErrorsOps
+import ch.epfl.bluebrain.nexus.delta.sdk.views.ViewRef
+import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData
+import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef
+import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset
+import ch.epfl.bluebrain.nexus.delta.sourcing.projections.ProjectionErrors
+import io.circe.Printer
+import monix.bio.{Task, UIO}
+
+/**
+  * Allows extending the methods from [[ProjectionErrors]] by adding higher-level methods
+  */
+trait ProjectionErrorsSyntax {
+
+  implicit def projectionErrorsOps(projectionErrors: ProjectionErrors): ProjectionErrorsOps = new ProjectionErrorsOps(
+    projectionErrors
+  )
+}
+
+object ProjectionErrorsSyntax {
+
+  implicit private val api: JsonLdApi = JsonLdJavaApi.lenient
+  private val defaultPrinter: Printer = Printer(dropNullValues = true, indent = "")
+
+  final class ProjectionErrorsOps(val projectionErrors: ProjectionErrors) extends AnyVal {
+
+    /**
+      * Get available failed elem entries for a given projection (provided by project and id), starting from a failed
+      * elem offset, as a stream of Server Sent Events
+      *
+      * @param projectionProject
+      *   the project the projection belongs to
+      * @param projectionId
+      *   IRI of the projection
+      * @param offset
+      *   failed elem offset
+      */
+    def sses(projectionProject: ProjectRef, projectionId: Iri, offset: Offset)(implicit
+        rcr: RemoteContextResolution
+    ): fs2.Stream[Task, ServerSentEvent] =
+      projectionErrors.failedElemEntries(projectionProject, projectionId, offset).evalMap { felem =>
+        felem.failedElemData.toCompactedJsonLd.map { compactJson =>
+          ServerSentEvent(
+            defaultPrinter.print(compactJson.json),
+            "IndexingFailure",
+            felem.ordering.value.toString
+          )
+        }
+      }
+
+    /**
+      * Returns the search results for the given view on a time window, ordered by instant
+      *
+      * @param view
+      *   the view reference
+      * @param pagination
+      *   the pagination to apply
+      * @param timeRange
+      *   the time range to restrict on
+      */
+    def search(view: ViewRef, pagination: FromPagination, timeRange: TimeRange): UIO[SearchResults[FailedElemData]] = {
+      for {
+        results <- projectionErrors.list(view.project, view.viewId, pagination, timeRange)
+        count   <- projectionErrors.count(view.project, view.viewId, timeRange)
+      } yield SearchResults(
+        count,
+        results.map {
+          _.failedElemData
+        }
+      )
+    }.widen[SearchResults[FailedElemData]]
+
+  }
+
+}
diff --git a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/package.scala b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/package.scala
index c41be294b1..f9ceadbbb5 100644
--- a/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/package.scala
+++ b/delta/sdk/src/main/scala/ch/epfl/bluebrain/nexus/delta/sdk/syntax/package.scala
@@ -22,3 +22,4 @@ package object syntax
     with IOSyntax
     with InstantSyntax
     with NonEmptySetSyntax
+    with ProjectionErrorsSyntax
diff --git a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
index 1a54b77b3b..3818cf0462 100644
--- a/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
+++ b/delta/sourcing-psql/src/main/scala/ch/epfl/bluebrain/nexus/delta/sourcing/projections/ProjectionErrors.scala
@@ -1,13 +1,9 @@
 package ch.epfl.bluebrain.nexus.delta.sourcing.projections
 
-import akka.http.scaladsl.model.sse.ServerSentEvent
 import cats.effect.Clock
 import ch.epfl.bluebrain.nexus.delta.kernel.search.Pagination.FromPagination
 import ch.epfl.bluebrain.nexus.delta.kernel.search.TimeRange
 import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri
-import ch.epfl.bluebrain.nexus.delta.rdf.implicits._
-import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.{JsonLdApi, JsonLdJavaApi}
-import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.RemoteContextResolution
 import ch.epfl.bluebrain.nexus.delta.sourcing.Transactors
 import ch.epfl.bluebrain.nexus.delta.sourcing.config.QueryConfig
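A minimal usage sketch (illustrative only, not part of the patch): with the sdk implicits in scope, callers such as ElasticSearchIndexingRoutes above reach the new extension methods directly on a `projectionErrors: ProjectionErrors` instance; `view: ViewRef`, `pagination: FromPagination`, `timeRange: TimeRange` and an implicit `RemoteContextResolution` are assumed to be available.

import akka.http.scaladsl.model.sse.ServerSentEvent
import ch.epfl.bluebrain.nexus.delta.sdk.implicits._
import ch.epfl.bluebrain.nexus.delta.sdk.model.search.SearchResults
import ch.epfl.bluebrain.nexus.delta.sourcing.model.FailedElemLogRow.FailedElemData
import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset
import monix.bio.{Task, UIO}

// Stream the indexing failures of a view as server-sent events, from the start of the log
val failureSses: fs2.Stream[Task, ServerSentEvent] =
  projectionErrors.sses(view.project, view.viewId, Offset.start)

// List the same failures as a paginated search result restricted to a time window
val failurePage: UIO[SearchResults[FailedElemData]] =
  projectionErrors.search(view, pagination, timeRange)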
import ch.epfl.bluebrain.nexus.delta.sourcing.model.{FailedElemLogRow, ProjectRef} @@ -15,7 +11,6 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.offset.Offset import ch.epfl.bluebrain.nexus.delta.sourcing.stream.Elem.FailedElem import ch.epfl.bluebrain.nexus.delta.sourcing.stream.ProjectionMetadata import fs2.Stream -import io.circe.Printer import monix.bio.{Task, UIO} trait ProjectionErrors { @@ -31,30 +26,32 @@ trait ProjectionErrors { def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] /** - * Get available failed elem entries for a given projection by projection name, starting from a failed elem offset. + * Get available failed elem entries for a given projection (provided by project and id), starting from a failed elem + * offset. * - * @param projectionName - * the name of the projection + * @param projectionProject + * the project the projection belongs to + * @param projectionId + * IRI of the projection * @param offset * failed elem offset - * @return */ - def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] + def failedElemEntries( + projectionProject: ProjectRef, + projectionId: Iri, + offset: Offset + ): Stream[Task, FailedElemLogRow] /** - * Get available failed elem entries for a given projection (provided by project and id), starting from a failed elem - * offset as a stream of Server Sent Events + * Get available failed elem entries for a given projection by projection name, starting from a failed elem offset. * - * @param projectionProject - * the project the projection belongs to - * @param projectionId - * IRI of the projection + * @param projectionName + * the name of the projection * @param offset * failed elem offset + * @return */ - def failedElemSses(projectionProject: ProjectRef, projectionId: Iri, offset: Offset)(implicit - rcr: RemoteContextResolution - ): Stream[Task, ServerSentEvent] + def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] /** * Return the total of errors for the given projection on a time window ordered by instant @@ -91,47 +88,32 @@ trait ProjectionErrors { object ProjectionErrors { - implicit private val api: JsonLdApi = JsonLdJavaApi.lenient - private val defaultPrinter: Printer = Printer(dropNullValues = true, indent = "") - - def apply(xas: Transactors, config: QueryConfig)(implicit clock: Clock[UIO]) = new ProjectionErrors { - - val store = FailedElemLogStore(xas, config) - - override def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] = - store.save(metadata, failures) - - private def failedElemEntries( - projectionProject: ProjectRef, - projectionId: Iri, - offset: Offset - ): Stream[Task, FailedElemLogRow] = store.stream(projectionProject, projectionId, offset) - - override def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] = - store.stream(projectionName, offset) - - override def failedElemSses(projectionProject: ProjectRef, projectionId: Iri, offset: Offset)(implicit - rcr: RemoteContextResolution - ): Stream[Task, ServerSentEvent] = - failedElemEntries(projectionProject, projectionId, offset).evalMap { felem => - felem.failedElemData.toCompactedJsonLd.map { compactJson => - ServerSentEvent( - defaultPrinter.print(compactJson.json), - "IndexingFailure", - felem.ordering.value.toString - ) - } - } - - override def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long] = - store.count(project, projectionId, 
timeRange) - - override def list( - project: ProjectRef, - projectionId: Iri, - pagination: FromPagination, - timeRange: TimeRange - ): UIO[List[FailedElemLogRow]] = store.list(project, projectionId, pagination, timeRange) - } + def apply(xas: Transactors, config: QueryConfig)(implicit clock: Clock[UIO]): ProjectionErrors = + new ProjectionErrors { + + private val store = FailedElemLogStore(xas, config) + + override def saveFailedElems(metadata: ProjectionMetadata, failures: List[FailedElem]): UIO[Unit] = + store.save(metadata, failures) + + override def failedElemEntries( + projectionProject: ProjectRef, + projectionId: Iri, + offset: Offset + ): Stream[Task, FailedElemLogRow] = store.stream(projectionProject, projectionId, offset) + + override def failedElemEntries(projectionName: String, offset: Offset): Stream[Task, FailedElemLogRow] = + store.stream(projectionName, offset) + + override def count(project: ProjectRef, projectionId: Iri, timeRange: TimeRange): UIO[Long] = + store.count(project, projectionId, timeRange) + + override def list( + project: ProjectRef, + projectionId: Iri, + pagination: FromPagination, + timeRange: TimeRange + ): UIO[List[FailedElemLogRow]] = store.list(project, projectionId, pagination, timeRange) + } } From 7b9a9c8b0ae3af47ac026a3a38c29eb1db57e1d0 Mon Sep 17 00:00:00 2001 From: Daniel Bell Date: Fri, 28 Jul 2023 11:14:24 +0200 Subject: [PATCH 6/6] Remove the ability to define credentials at the storage level (#4110) * Remove the ability to define credentials at the storage level * Remove the ability to define credentials at the storage level * fix plugins unit tests * add to release notes --- .../plugins/storage/StoragePluginModule.scala | 13 ++-- .../plugins/storage/storages/Storages.scala | 42 +++++-------- .../storage/storages/model/Storage.scala | 59 ++---------------- .../storages/model/StorageCommand.scala | 7 +-- .../storage/storages/model/StorageEvent.scala | 35 ++++------- .../storages/model/StorageFields.scala | 11 +--- .../storage/storages/model/StorageState.scala | 17 ++---- .../storage/storages/model/StorageValue.scala | 60 +++++-------------- .../storages/routes/StoragesRoutes.scala | 12 ++-- .../database/remote-storage-created.json | 2 - .../database/remote-storage-updated.json | 2 - .../storages/database/s3-storage-created.json | 4 -- .../storages/database/s3-storage-updated.json | 4 -- .../resources/storages/remote-storage.json | 1 - .../test/resources/storages/s3-storage.json | 2 - .../storages/sse/remote-storage-created.json | 1 - .../storages/sse/remote-storage-updated.json | 1 - .../storages/sse/s3-storage-created.json | 2 - .../storages/sse/s3-storage-updated.json | 2 - .../storages/storage-remote-state.json | 2 - .../resources/storages/storage-s3-state.json | 4 -- .../StorageScopeInitializationSpec.scala | 1 - .../plugins/storage/files/FilesSpec.scala | 6 +- .../files/routes/FilesRoutesSpec.scala | 7 +-- .../storage/storages/StorageFixtures.scala | 16 +++-- .../plugins/storage/storages/StorageGen.scala | 5 +- .../storage/storages/StoragesSpec.scala | 13 ++-- .../storage/storages/StoragesStmSpec.scala | 45 +++++++------- .../storages/model/StorageFieldsSpec.scala | 8 +-- .../model/StorageSerializationSuite.scala | 16 ++--- .../storage/storages/model/StorageSpec.scala | 10 ++-- .../disk/DiskStorageSaveFileSpec.scala | 3 +- .../remote/RemoteDiskStorageAccessSpec.scala | 1 - .../remote/RemoteStorageLinkFileSpec.scala | 4 +- .../RemoteStorageSaveAndFetchFileSpec.scala | 4 +- .../operations/s3/S3StorageAccessSpec.scala | 8 --- 
.../operations/s3/S3StorageLinkFileSpec.scala | 5 +- .../s3/S3StorageSaveAndFetchFileSpec.scala | 22 ++----- .../storages/routes/StoragesRoutesSpec.scala | 28 ++++----- .../paradox/docs/delta/api/storages-api.md | 6 -- .../docs/releases/v1.9-release-notes.md | 4 ++ tests/docker/config/delta-postgres.conf | 3 + .../resources/kg/storages/remote-disk.json | 1 - tests/src/test/resources/kg/storages/s3.json | 4 +- .../nexus/tests/kg/RemoteStorageSpec.scala | 9 +-- .../nexus/tests/kg/S3StorageSpec.scala | 12 +--- 46 files changed, 157 insertions(+), 367 deletions(-) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala index 84f13c9059..bcdbb69c5e 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StoragePluginModule.scala @@ -24,7 +24,6 @@ import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteCon import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering import ch.epfl.bluebrain.nexus.delta.sdk._ import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.deletion.ProjectDeletionTask import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaSchemeDirectives import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig @@ -68,7 +67,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef { fetchContext: FetchContext[ContextRejection], contextResolution: ResolverContextResolution, permissions: Permissions, - crypto: Crypto, xas: Transactors, cfg: StoragePluginConfig, serviceAccount: ServiceAccount, @@ -86,7 +84,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef { contextResolution, permissions.fetchPermissionSet, StorageAccess.apply(_, _), - crypto, xas, cfg.storages, serviceAccount @@ -112,7 +109,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef { make[StoragesRoutes].from { ( - crypto: Crypto, identities: Identities, aclCheck: AclCheck, storages: Storages, @@ -136,7 +132,6 @@ class StoragePluginModule(priority: Int) extends ModuleDef { indexingAction(_, _, _)(shift, cr) )( baseUri, - crypto, s, cr, ordering, @@ -145,8 +140,8 @@ class StoragePluginModule(priority: Int) extends ModuleDef { } } - make[Storage.Shift].from { (storages: Storages, base: BaseUri, crypto: Crypto) => - Storage.shift(storages)(base, crypto) + make[Storage.Shift].from { (storages: Storages, base: BaseUri) => + Storage.shift(storages)(base) } many[ResourceShift[_, _, _]].ref[Storage.Shift] @@ -264,11 +259,11 @@ class StoragePluginModule(priority: Int) extends ModuleDef { many[ApiMappings].add(Storages.mappings + Files.mappings) - many[SseEncoder[_]].add { (crypto: Crypto, base: BaseUri) => StorageEvent.sseEncoder(crypto)(base) } + many[SseEncoder[_]].add { (base: BaseUri) => StorageEvent.sseEncoder(base) } many[SseEncoder[_]].add { (base: BaseUri, config: StorageTypeConfig) => FileEvent.sseEncoder(base, config) } many[ScopedEventMetricEncoder[_]].add { FileEvent.fileEventMetricEncoder } - many[ScopedEventMetricEncoder[_]].add { (crypto: Crypto) => StorageEvent.storageEventMetricEncoder(crypto) } + many[ScopedEventMetricEncoder[_]].add { () => StorageEvent.storageEventMetricEncoder } many[PriorityRoute].add { (storagesRoutes: StoragesRoutes) => 
PriorityRoute(priority, storagesRoutes.routes, requiresStrictEntity = true) diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala index b4f29e5717..8829b57e79 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/Storages.scala @@ -4,7 +4,7 @@ import cats.effect.Clock import cats.syntax.all._ import ch.epfl.bluebrain.nexus.delta.kernel.kamon.KamonMetricComponent import ch.epfl.bluebrain.nexus.delta.kernel.utils.{IOUtils, UUIDF} -import ch.epfl.bluebrain.nexus.delta.kernel.{Mapper, Secret} +import ch.epfl.bluebrain.nexus.delta.kernel.Mapper import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.Storages._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageCommand._ @@ -16,7 +16,6 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.schemas.{storage = import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.api.JsonLdApi import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.{ContextValue, RemoteContextResolution} -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.{Caller, ServiceAccount} import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.ExpandIri @@ -63,11 +62,11 @@ final class Storages private ( */ def create( projectRef: ProjectRef, - source: Secret[Json] + source: Json )(implicit caller: Caller): IO[StorageRejection, StorageResource] = { for { pc <- fetchContext.onCreate(projectRef) - (iri, storageFields) <- sourceDecoder(projectRef, pc, source.value) + (iri, storageFields) <- sourceDecoder(projectRef, pc, source) res <- eval(CreateStorage(iri, projectRef, storageFields, source, caller.subject), pc) _ <- unsetPreviousDefaultIfRequired(projectRef, res) } yield res @@ -86,12 +85,12 @@ final class Storages private ( def create( id: IdSegment, projectRef: ProjectRef, - source: Secret[Json] + source: Json )(implicit caller: Caller): IO[StorageRejection, StorageResource] = { for { pc <- fetchContext.onCreate(projectRef) iri <- expandIri(id, pc) - storageFields <- sourceDecoder(projectRef, pc, iri, source.value) + storageFields <- sourceDecoder(projectRef, pc, iri, source) res <- eval(CreateStorage(iri, projectRef, storageFields, source, caller.subject), pc) _ <- unsetPreviousDefaultIfRequired(projectRef, res) } yield res @@ -137,7 +136,7 @@ final class Storages private ( id: IdSegment, projectRef: ProjectRef, rev: Int, - source: Secret[Json] + source: Json )(implicit caller: Caller): IO[StorageRejection, StorageResource] = update(id, projectRef, rev, source, unsetPreviousDefault = true) @@ -145,13 +144,13 @@ final class Storages private ( id: IdSegment, projectRef: ProjectRef, rev: Int, - source: Secret[Json], + source: Json, unsetPreviousDefault: Boolean )(implicit caller: Caller): IO[StorageRejection, StorageResource] = { for { pc <- fetchContext.onModify(projectRef) iri <- expandIri(id, pc) - storageFields <- sourceDecoder(projectRef, pc, iri, source.value) + storageFields <- sourceDecoder(projectRef, pc, iri, source) res <- eval(UpdateStorage(iri, projectRef, 
storageFields, source, rev, caller.subject), pc) _ <- IO.when(unsetPreviousDefault)(unsetPreviousDefaultIfRequired(projectRef, res)) } yield res @@ -322,7 +321,7 @@ final class Storages private ( resources .evalTap { storage => val source = - storage.value.source.map(_.replace("default" -> true, false).replace("default" -> "true", false)) + storage.value.source.replace("default" -> true, false).replace("default" -> "true", false) val io = update(storage.id, project, storage.rev, source, unsetPreviousDefault = false)( serviceAccount.caller ) @@ -407,8 +406,7 @@ object Storages { private[storages] def evaluate( access: StorageAccess, fetchPermissions: UIO[Set[Permission]], - config: StorageTypeConfig, - crypto: Crypto + config: StorageTypeConfig )( state: Option[StorageState], cmd: StorageCommand @@ -430,17 +428,9 @@ object Storages { config.amazon.as(StorageType.S3Storage) ++ config.remoteDisk.as(StorageType.RemoteDiskStorage) - def verifyCrypto(value: StorageValue) = - value.secrets.toList - .foldM(()) { case (_, Secret(value)) => - crypto.encrypt(value).flatMap(crypto.decrypt).toEither.void - } - .leftMap(t => InvalidEncryptionSecrets(value.tpe, t.getMessage)) - def validateAndReturnValue(id: Iri, fields: StorageFields): IO[StorageRejection, StorageValue] = for { value <- IO.fromOption(fields.toValue(config), InvalidStorageType(id, fields.tpe, allowedStorageTypes)) - _ <- IO.fromEither(verifyCrypto(value)) _ <- validatePermissions(fields) _ <- access(id, value) _ <- verifyAllowedDiskVolume(id, value) @@ -513,16 +503,15 @@ object Storages { def definition( config: StorageTypeConfig, access: StorageAccess, - fetchPermissions: UIO[Set[Permission]], - crypto: Crypto + fetchPermissions: UIO[Set[Permission]] )(implicit clock: Clock[UIO] ): ScopedEntityDefinition[Iri, StorageState, StorageCommand, StorageEvent, StorageRejection] = ScopedEntityDefinition( entityType, - StateMachine(None, evaluate(access, fetchPermissions, config, crypto), next), - StorageEvent.serializer(crypto), - StorageState.serializer(crypto), + StateMachine(None, evaluate(access, fetchPermissions, config), next), + StorageEvent.serializer, + StorageState.serializer, Tagger[StorageEvent]( { case r: StorageTagAdded => Some(r.tag -> r.targetRev) @@ -548,7 +537,6 @@ object Storages { contextResolution: ResolverContextResolution, fetchPermissions: UIO[Set[Permission]], access: StorageAccess, - crypto: Crypto, xas: Transactors, config: StoragesConfig, serviceAccount: ServiceAccount @@ -565,7 +553,7 @@ object Storages { } .map { sourceDecoder => new Storages( - ScopedEventLog(definition(config.storageTypeConfig, access, fetchPermissions, crypto), config.eventLog, xas), + ScopedEventLog(definition(config.storageTypeConfig, access, fetchPermissions), config.eventLog, xas), fetchContext, sourceDecoder, serviceAccount diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala index d687e78d91..5a4ca546d3 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/Storage.scala @@ -1,8 +1,6 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model import akka.actor.ActorSystem -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.Metadata import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} @@ -15,18 +13,14 @@ import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.encoder.JsonLdEncoder -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.http.HttpClient import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.JsonLdContent import ch.epfl.bluebrain.nexus.delta.sdk.model.{BaseUri, IdSegmentRef, Tags} import ch.epfl.bluebrain.nexus.delta.sdk.{OrderingFields, ResourceShift} import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef -import com.typesafe.scalalogging.Logger import io.circe.syntax._ import io.circe.{Encoder, Json, JsonObject} -import scala.util.{Failure, Success, Try} - sealed trait Storage extends Product with Serializable { /** @@ -51,7 +45,7 @@ sealed trait Storage extends Product with Serializable { * @return * the original json document provided at creation or update */ - def source: Secret[Json] + def source: Json /** * @return @@ -76,8 +70,6 @@ sealed trait Storage extends Product with Serializable { object Storage { - private val logger: Logger = Logger[Storage] - /** * A storage that stores and fetches files from a local volume */ @@ -86,7 +78,7 @@ object Storage { project: ProjectRef, value: DiskStorageValue, tags: Tags, - source: Secret[Json] + source: Json ) extends Storage { override val default: Boolean = value.default override val storageValue: StorageValue = value @@ -107,7 +99,7 @@ object Storage { project: ProjectRef, value: S3StorageValue, tags: Tags, - source: Secret[Json] + source: Json ) extends Storage { override val default: Boolean = value.default @@ -132,7 +124,7 @@ object Storage { project: ProjectRef, value: RemoteDiskStorageValue, tags: Tags, - source: Secret[Json] + source: Json ) extends Storage { override val default: Boolean = value.default override val storageValue: StorageValue = value @@ -162,45 +154,6 @@ object Storage { */ final case class Metadata(algorithm: DigestAlgorithm) - private val secretFields = List("credentials", "accessKey", "secretKey") - - private val secretFieldsDefaultEncrypted = - secretFields.foldLeft(Json.obj())((json, field) => json deepMerge Json.obj(field -> "SECRET".asJson)) - - private def getOptionalKeyValue(key: String, json: Json) = - json.hcursor.get[Option[String]](key).getOrElse(None).map(key -> _) - - /** - * Encrypts the secretFields of the passed ''json'' using the provided ''crypto''. 
If that fails, it encrypts the - * secretFields with the value 'SECRET' while logging the error - */ - def encryptSourceUnsafe(json: Secret[Json], crypto: Crypto): Json = - encryptSource(json, crypto) match { - case Failure(exception) => - logger.error(s"Could not encrypt the storage sensitive keys due to ", exception) - json.value deepMerge secretFieldsDefaultEncrypted - case Success(encrypted) => encrypted - } - - /** - * Attempts to encrypt the secretFields of the passed ''json'' using the provided ''crypto'' - */ - def encryptSource(json: Secret[Json], crypto: Crypto): Try[Json] = - secretFields.flatMap(getOptionalKeyValue(_, json.value)).foldM(json.value) { case (acc, (key, value)) => - crypto.encrypt(value).map(encrypted => acc deepMerge Json.obj(key -> encrypted.asJson)) - } - - /** - * Attempts to decrypt the secretFields of the passed ''json'' using the provided ''crypto'' - */ - def decryptSource(json: Json, crypto: Crypto): Try[Secret[Json]] = - secretFields - .flatMap(getOptionalKeyValue(_, json)) - .foldM(json) { case (acc, (key, value)) => - crypto.decrypt(value).map(encrypted => acc deepMerge Json.obj(key -> encrypted.asJson)) - } - .map(Secret.apply) - implicit private[storages] val storageEncoder: Encoder.AsObject[Storage] = Encoder.encodeJsonObject.contramapObject { s => s.storageValue.asJsonObject.add(keywords.tpe, s.tpe.types.asJson) @@ -221,12 +174,12 @@ object Storage { type Shift = ResourceShift[StorageState, Storage, Metadata] - def shift(storages: Storages)(implicit baseUri: BaseUri, crypto: Crypto): Shift = + def shift(storages: Storages)(implicit baseUri: BaseUri): Shift = ResourceShift.withMetadata[StorageState, Storage, Metadata]( Storages.entityType, (ref, project) => storages.fetch(IdSegmentRef(ref), project), (context, state) => state.toResource(context.apiMappings, context.base), - value => JsonLdContent(value, Storage.encryptSourceUnsafe(value.value.source, crypto), Some(value.value.metadata)) + value => JsonLdContent(value, value.value.source, Some(value.value.metadata)) ) } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageCommand.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageCommand.scala index f8e1d733b2..d0d3fca0c8 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageCommand.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageCommand.scala @@ -1,10 +1,9 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Subject -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef +import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag import io.circe.Json /** @@ -57,7 +56,7 @@ object StorageCommand { id: Iri, project: ProjectRef, fields: StorageFields, - source: Secret[Json], + source: Json, subject: Subject ) extends StorageCommand { override def rev: Int = 0 @@ -83,7 +82,7 @@ object StorageCommand { id: Iri, project: ProjectRef, fields: StorageFields, - source: Secret[Json], + source: Json, rev: Int, subject: Subject ) extends StorageCommand diff --git 
a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageEvent.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageEvent.scala index d3b8f11a6e..b43128c537 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageEvent.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageEvent.scala @@ -1,14 +1,11 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{contexts, schemas, Storages} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.ContextValue import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.instances._ import ch.epfl.bluebrain.nexus.delta.sdk.jsonld.IriEncoder import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri @@ -23,7 +20,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{EntityType, Label, ProjectR import io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.{deriveConfiguredCodec, deriveConfiguredEncoder} import io.circe.syntax._ -import io.circe.{Codec, Decoder, Encoder, Json, JsonObject} +import io.circe._ import java.time.Instant import scala.annotation.nowarn @@ -72,7 +69,7 @@ object StorageEvent { id: Iri, project: ProjectRef, value: StorageValue, - source: Secret[Json], + source: Json, rev: Int, instant: Instant, subject: Subject @@ -100,7 +97,7 @@ object StorageEvent { id: Iri, project: ProjectRef, value: StorageValue, - source: Secret[Json], + source: Json, rev: Int, instant: Instant, subject: Subject @@ -165,24 +162,18 @@ object StorageEvent { ) extends StorageEvent @nowarn("cat=unused") - def serializer(crypto: Crypto): Serializer[Iri, StorageEvent] = { + def serializer: Serializer[Iri, StorageEvent] = { import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Database._ implicit val configuration: Configuration = Serializer.circeConfiguration - implicit val jsonSecretEncryptEncoder: Encoder[Secret[Json]] = - Encoder.encodeJson.contramap(Storage.encryptSourceUnsafe(_, crypto)) - - implicit val jsonSecretDecryptDecoder: Decoder[Secret[Json]] = - Decoder.decodeJson.emap(Storage.decryptSource(_, crypto).toEither.leftMap(_.getMessage)) - - implicit val storageValueCodec: Codec.AsObject[StorageValue] = StorageValue.databaseCodec(crypto) + implicit val storageValueCodec: Codec.AsObject[StorageValue] = StorageValue.databaseCodec implicit val coder: Codec.AsObject[StorageEvent] = deriveConfiguredCodec[StorageEvent] Serializer.dropNulls() } - def storageEventMetricEncoder(crypto: Crypto): ScopedEventMetricEncoder[StorageEvent] = + def storageEventMetricEncoder: ScopedEventMetricEncoder[StorageEvent] = new ScopedEventMetricEncoder[StorageEvent] { - override def databaseDecoder: Decoder[StorageEvent] = serializer(crypto).codec + override def databaseDecoder: Decoder[StorageEvent] = serializer.codec override def entityType: EntityType = Storages.entityType @@ -201,8 +192,8 @@ object StorageEvent { ) } - def sseEncoder(crypto: Crypto)(implicit base: BaseUri): 
SseEncoder[StorageEvent] = new SseEncoder[StorageEvent] { - override val databaseDecoder: Decoder[StorageEvent] = serializer(crypto).codec + def sseEncoder(implicit base: BaseUri): SseEncoder[StorageEvent] = new SseEncoder[StorageEvent] { + override val databaseDecoder: Decoder[StorageEvent] = serializer.codec override def entityType: EntityType = Storages.entityType @@ -224,11 +215,9 @@ object StorageEvent { case other => other }) - implicit val subjectEncoder: Encoder[Subject] = IriEncoder.jsonEncoder[Subject] - implicit val storageValueEncoder: Encoder[StorageValue] = Encoder.instance[StorageValue](_ => Json.Null) - implicit val jsonSecretEncryptEncoder: Encoder[Secret[Json]] = - Encoder.encodeJson.contramap(Storage.encryptSourceUnsafe(_, crypto)) - implicit val projectRefEncoder: Encoder[ProjectRef] = IriEncoder.jsonEncoder[ProjectRef] + implicit val subjectEncoder: Encoder[Subject] = IriEncoder.jsonEncoder[Subject] + implicit val storageValueEncoder: Encoder[StorageValue] = Encoder.instance[StorageValue](_ => Json.Null) + implicit val projectRefEncoder: Encoder[ProjectRef] = IriEncoder.jsonEncoder[ProjectRef] Encoder.encodeJsonObject.contramapObject { event => deriveConfiguredEncoder[StorageEvent] diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala index 7a5a5133b9..1216cc2e0b 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFields.scala @@ -69,9 +69,8 @@ sealed trait StorageFields extends Product with Serializable { self => /** * Returns the decrypted Json representation of the storage fields with the passed @id */ - def toJson(iri: Iri): Secret[Json] = - Secret(self.asJsonObject.add(keywords.id, iri.asJson).asJson) - + def toJson(iri: Iri): Json = + self.asJsonObject.add(keywords.id, iri.asJson).asJson } @nowarn("cat=unused") @@ -154,8 +153,6 @@ object StorageFields { default: Boolean, bucket: String, endpoint: Option[Uri], - accessKey: Option[Secret[String]], - secretKey: Option[Secret[String]], region: Option[Region], readPermission: Option[Permission], writePermission: Option[Permission], @@ -174,8 +171,6 @@ object StorageFields { cfg.digestAlgorithm, bucket, endpoint.orElse(cfg.defaultEndpoint), - accessKey, - secretKey, region, readPermission.getOrElse(cfg.defaultReadPermission), writePermission.getOrElse(cfg.defaultWritePermission), @@ -207,7 +202,6 @@ object StorageFields { description: Option[String], default: Boolean, endpoint: Option[BaseUri], - credentials: Option[Secret[String]], folder: Label, readPermission: Option[Permission], writePermission: Option[Permission], @@ -226,7 +220,6 @@ object StorageFields { default, cfg.digestAlgorithm, endpoint = endpoint.getOrElse(cfg.defaultEndpoint), - credentials = credentials, folder, readPermission.getOrElse(cfg.defaultReadPermission), writePermission.getOrElse(cfg.defaultWritePermission), diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala index f5ec731d0f..676b10ec35 100644 --- 
a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageState.scala @@ -1,12 +1,9 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model -import cats.syntax.all._ -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.{DiskStorage, RemoteDiskStorage, S3Storage} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.{DiskStorageValue, RemoteDiskStorageValue, S3StorageValue} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.{schemas, StorageResource} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.model.{ResourceF, ResourceUris, Tags} import ch.epfl.bluebrain.nexus.delta.sdk.projects.model.{ApiMappings, ProjectBase} import ch.epfl.bluebrain.nexus.delta.sourcing.Serializer @@ -16,7 +13,7 @@ import ch.epfl.bluebrain.nexus.delta.sourcing.model.{ProjectRef, ResourceRef} import ch.epfl.bluebrain.nexus.delta.sourcing.state.State.ScopedState import io.circe.generic.extras.Configuration import io.circe.generic.extras.semiauto.deriveConfiguredCodec -import io.circe.{Codec, Decoder, Encoder, Json} +import io.circe.{Codec, Json} import java.time.Instant import scala.annotation.nowarn @@ -51,7 +48,7 @@ final case class StorageState( id: Iri, project: ProjectRef, value: StorageValue, - source: Secret[Json], + source: Json, tags: Tags, rev: Int, deprecated: Boolean, @@ -90,17 +87,11 @@ final case class StorageState( object StorageState { @nowarn("cat=unused") - implicit def serializer(implicit crypto: Crypto): Serializer[Iri, StorageState] = { + implicit def serializer: Serializer[Iri, StorageState] = { import ch.epfl.bluebrain.nexus.delta.sourcing.model.Identity.Database._ implicit val configuration: Configuration = Serializer.circeConfiguration - implicit val jsonSecretEncryptEncoder: Encoder[Secret[Json]] = - Encoder.encodeJson.contramap(Storage.encryptSourceUnsafe(_, crypto)) - - implicit val jsonSecretDecryptDecoder: Decoder[Secret[Json]] = - Decoder.decodeJson.emap(Storage.decryptSource(_, crypto).toEither.leftMap(_.getMessage)) - - implicit val storageValueCodec: Codec.AsObject[StorageValue] = StorageValue.databaseCodec(crypto) + implicit val storageValueCodec: Codec.AsObject[StorageValue] = StorageValue.databaseCodec implicit val codec: Codec.AsObject[StorageState] = deriveConfiguredCodec[StorageState] Serializer.dropNullsInjectType() } diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala index ddc3c68663..b8c5c9cd78 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageValue.scala @@ -3,11 +3,9 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model import akka.http.scaladsl.model.Uri import akka.stream.alpakka.s3 import akka.stream.alpakka.s3.{ApiVersion, MemoryBufferType} -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest import 
ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.StorageTypeConfig import ch.epfl.bluebrain.nexus.delta.rdf.jsonld.context.JsonLdContext.keywords -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.identities.model.AuthToken import ch.epfl.bluebrain.nexus.delta.sdk.implicits._ import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri @@ -71,12 +69,6 @@ sealed trait StorageValue extends Product with Serializable { */ def maxFileSize: Long - /** - * @return - * a set of secrets for the current storage - */ - def secrets: Set[Secret[String]] - /** * @return * the permission required in order to download a file to this storage @@ -110,8 +102,7 @@ object StorageValue { maxFileSize: Long ) extends StorageValue { - override val tpe: StorageType = StorageType.DiskStorage - override val secrets: Set[Secret[String]] = Set.empty + override val tpe: StorageType = StorageType.DiskStorage def rootDirectory(project: ProjectRef): Directory = new Directory(new File(volume.value.toFile, project.toString)) @@ -149,17 +140,14 @@ object StorageValue { algorithm: DigestAlgorithm, bucket: String, endpoint: Option[Uri], - accessKey: Option[Secret[String]], - secretKey: Option[Secret[String]], region: Option[Region], readPermission: Permission, writePermission: Permission, maxFileSize: Long ) extends StorageValue { - override val tpe: StorageType = StorageType.S3Storage - override val capacity: Option[Long] = None - override val secrets: Set[Secret[String]] = Set.empty ++ accessKey ++ secretKey + override val tpe: StorageType = StorageType.S3Storage + override val capacity: Option[Long] = None def address(bucket: String): Uri = endpoint match { @@ -173,18 +161,17 @@ object StorageValue { * these settings converted to an instance of [[akka.stream.alpakka.s3.S3Settings]] */ def alpakkaSettings(config: StorageTypeConfig): s3.S3Settings = { - val (accessKeyOrDefault, secretKeyOrDefault) = - config.amazon - .map { cfg => - accessKey.orElse(if (endpoint.forall(endpoint.contains)) cfg.defaultAccessKey else None) -> - secretKey.orElse(if (endpoint.forall(endpoint.contains)) cfg.defaultSecretKey else None) - } - .getOrElse(None -> None) - val credsProvider = (accessKeyOrDefault, secretKeyOrDefault) match { - case (Some(accessKey), Some(secretKey)) => + val keys = for { + cfg <- config.amazon + accessKey <- cfg.defaultAccessKey + secretKey <- cfg.defaultSecretKey + } yield accessKey -> secretKey + + val credsProvider = keys match { + case Some((accessKey, secretKey)) => StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKey.value, secretKey.value)) - case _ => + case _ => StaticCredentialsProvider.create(AnonymousCredentialsProvider.create().resolveCredentials()) } @@ -214,8 +201,6 @@ object StorageValue { algorithm: DigestAlgorithm, bucket: String, endpoint: Option[Uri], - accessKey: Option[Secret[String]], - secretKey: Option[Secret[String]], region: Option[Region], readPermission: Permission, writePermission: Permission, @@ -228,8 +213,6 @@ object StorageValue { algorithm, bucket, endpoint, - accessKey, - secretKey, region, readPermission, writePermission, @@ -249,16 +232,14 @@ object StorageValue { default: Boolean, algorithm: DigestAlgorithm, endpoint: BaseUri, - credentials: Option[Secret[String]], folder: Label, readPermission: Permission, writePermission: Permission, maxFileSize: Long ) extends StorageValue { - override val tpe: StorageType = StorageType.RemoteDiskStorage - override val capacity: Option[Long] = None - override val secrets: 
Set[Secret[String]] = Set.empty ++ credentials + override val tpe: StorageType = StorageType.RemoteDiskStorage + override val capacity: Option[Long] = None /** * Construct the auth token to query the remote storage @@ -266,7 +247,7 @@ object StorageValue { def authToken(config: StorageTypeConfig): Option[AuthToken] = config.remoteDisk .flatMap { cfg => - credentials.orElse(if (endpoint == cfg.defaultEndpoint) cfg.defaultCredentials else None) + if (endpoint == cfg.defaultEndpoint) cfg.defaultCredentials else None } .map(secret => AuthToken(secret.value)) @@ -282,7 +263,6 @@ object StorageValue { default: Boolean, algorithm: DigestAlgorithm, endpoint: BaseUri, - credentials: Option[Secret[String]], folder: Label, readPermission: Permission, writePermission: Permission, @@ -294,7 +274,6 @@ object StorageValue { default, algorithm, endpoint, - credentials, folder, readPermission, writePermission, @@ -314,19 +293,12 @@ object StorageValue { @SuppressWarnings(Array("TryGet")) @nowarn("cat=unused") - def databaseCodec(crypto: Crypto)(implicit configuration: Configuration): Codec.AsObject[StorageValue] = { + def databaseCodec(implicit configuration: Configuration): Codec.AsObject[StorageValue] = { implicit val pathEncoder: Encoder[Path] = Encoder.encodeString.contramap(_.toString) implicit val pathDecoder: Decoder[Path] = Decoder.decodeString.emapTry(str => Try(Path.of(str))) implicit val regionEncoder: Encoder[Region] = Encoder.encodeString.contramap(_.toString) implicit val regionDecoder: Decoder[Region] = Decoder.decodeString.map(Region.of) - implicit val stringSecretEncryptEncoder: Encoder[Secret[String]] = Encoder.encodeString.contramap { - case Secret(value) => crypto.encrypt(value).get - } - - implicit val stringSecretEncryptDecoder: Decoder[Secret[String]] = - Decoder.decodeString.emapTry(str => crypto.decrypt(str).map(Secret(_))) - implicit val digestCodec: Codec.AsObject[Digest] = deriveConfiguredCodec[Digest] deriveConfiguredCodec[StorageValue] diff --git a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala index c95d8c3306..15e275efd8 100644 --- a/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala +++ b/delta/plugins/storage/src/main/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/routes/StoragesRoutes.scala @@ -4,7 +4,6 @@ import akka.http.scaladsl.model.StatusCodes.Created import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server._ import cats.implicits._ -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection._ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{Storage, StorageRejection} @@ -14,7 +13,6 @@ import ch.epfl.bluebrain.nexus.delta.rdf.utils.JsonKeyOrdering import ch.epfl.bluebrain.nexus.delta.sdk.IndexingAction import ch.epfl.bluebrain.nexus.delta.sdk.acls.AclCheck import ch.epfl.bluebrain.nexus.delta.sdk.circe.CirceUnmarshalling -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.Crypto import ch.epfl.bluebrain.nexus.delta.sdk.directives.DeltaDirectives._ import ch.epfl.bluebrain.nexus.delta.sdk.directives.{AuthDirectives, DeltaSchemeDirectives} import ch.epfl.bluebrain.nexus.delta.sdk.fusion.FusionConfig @@ -50,7 +48,6 @@ 
final class StoragesRoutes( index: IndexingAction.Execute[Storage] )(implicit baseUri: BaseUri, - crypto: Crypto, s: Scheduler, cr: RemoteContextResolution, ordering: JsonKeyOrdering, @@ -74,7 +71,7 @@ final class StoragesRoutes( authorizeFor(ref, Write).apply { emit( Created, - storages.create(ref, Secret(source)).tapEval(index(ref, _, mode)).mapValue(_.metadata) + storages.create(ref, source).tapEval(index(ref, _, mode)).mapValue(_.metadata) ) } } @@ -93,7 +90,7 @@ final class StoragesRoutes( emit( Created, storages - .create(id, ref, Secret(source)) + .create(id, ref, source) .tapEval(index(ref, _, mode)) .mapValue(_.metadata) ) @@ -101,7 +98,7 @@ final class StoragesRoutes( // Update a storage emit( storages - .update(id, ref, rev, Secret(source)) + .update(id, ref, rev, source) .tapEval(index(ref, _, mode)) .mapValue(_.metadata) ) @@ -139,7 +136,7 @@ final class StoragesRoutes( authorizeFor(ref, Read).apply { val sourceIO = storages .fetch(id, ref) - .map(res => Storage.encryptSourceUnsafe(res.value.source, crypto)) + .map(res => res.value.source) emit(sourceIO.leftWiden[StorageRejection].rejectOn[StorageNotFound]) } } @@ -206,7 +203,6 @@ object StoragesRoutes { s: Scheduler, cr: RemoteContextResolution, ordering: JsonKeyOrdering, - crypto: Crypto, fusionConfig: FusionConfig ): Route = new StoragesRoutes(identities, aclCheck, storages, storagesStatistics, schemeDirectives, index).routes diff --git a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json index d78d4e34d8..1ea2b0e28a 100644 --- a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json +++ b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-created.json @@ -7,7 +7,6 @@ "description": "remoteDescription", "algorithm": "SHA-256", "endpoint" : "http://localhost", - "credentials" : "wqUglodAZzmNk3CEZeZgBA==", "folder" : "myfolder", "readPermission" : "remote/read", "writePermission" : "remote/write", @@ -15,7 +14,6 @@ "@type" : "RemoteDiskStorageValue" }, "source" : { - "credentials" : "wqUglodAZzmNk3CEZeZgBA==", "@type" : "RemoteDiskStorage", "name": "remoteName", "description": "remoteDescription", diff --git a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json index 92db6ab655..e819e56a02 100644 --- a/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json +++ b/delta/plugins/storage/src/test/resources/storages/database/remote-storage-updated.json @@ -7,7 +7,6 @@ "description": "remoteDescription", "algorithm": "SHA-256", "endpoint" : "http://localhost", - "credentials" : "wqUglodAZzmNk3CEZeZgBA==", "folder" : "myfolder2", "readPermission" : "remote/read", "writePermission" : "remote/write", @@ -15,7 +14,6 @@ "@type" : "RemoteDiskStorageValue" }, "source" : { - "credentials" : "wqUglodAZzmNk3CEZeZgBA==", "@type" : "RemoteDiskStorage", "name": "remoteName", "description": "remoteDescription", diff --git a/delta/plugins/storage/src/test/resources/storages/database/s3-storage-created.json b/delta/plugins/storage/src/test/resources/storages/database/s3-storage-created.json index 22f84c7c07..403c80e44c 100644 --- a/delta/plugins/storage/src/test/resources/storages/database/s3-storage-created.json +++ b/delta/plugins/storage/src/test/resources/storages/database/s3-storage-created.json @@ -8,8 +8,6 @@ 
"algorithm" : "SHA-256", "bucket" : "mybucket", "endpoint" : "http://localhost", - "accessKey" : "O2vOp16ah21Fk/B0OucyxQ==", - "secretKey" : "dqO3PR6kLOCFNBk9I6zsZQ==", "readPermission" : "s3/read", "region": "eu-west-1", "writePermission" : "s3/write", @@ -17,8 +15,6 @@ "@type" : "S3StorageValue" }, "source" : { - "secretKey" : "dqO3PR6kLOCFNBk9I6zsZQ==", - "accessKey" : "O2vOp16ah21Fk/B0OucyxQ==", "@type" : "S3Storage", "name": "s3name", "description": "s3description", diff --git a/delta/plugins/storage/src/test/resources/storages/database/s3-storage-updated.json b/delta/plugins/storage/src/test/resources/storages/database/s3-storage-updated.json index fc7701ad1c..2ae1897aa6 100644 --- a/delta/plugins/storage/src/test/resources/storages/database/s3-storage-updated.json +++ b/delta/plugins/storage/src/test/resources/storages/database/s3-storage-updated.json @@ -8,8 +8,6 @@ "algorithm" : "SHA-256", "bucket" : "mybucket2", "endpoint" : "http://localhost", - "accessKey" : "O2vOp16ah21Fk/B0OucyxQ==", - "secretKey" : "dqO3PR6kLOCFNBk9I6zsZQ==", "readPermission" : "s3/read", "region": "eu-west-1", "writePermission" : "s3/write", @@ -17,8 +15,6 @@ "@type" : "S3StorageValue" }, "source" : { - "secretKey" : "dqO3PR6kLOCFNBk9I6zsZQ==", - "accessKey" : "O2vOp16ah21Fk/B0OucyxQ==", "@type" : "S3Storage", "name": "s3name", "description": "s3description", diff --git a/delta/plugins/storage/src/test/resources/storages/remote-storage.json b/delta/plugins/storage/src/test/resources/storages/remote-storage.json index 79fc9b82b2..1dc7d23af4 100644 --- a/delta/plugins/storage/src/test/resources/storages/remote-storage.json +++ b/delta/plugins/storage/src/test/resources/storages/remote-storage.json @@ -7,7 +7,6 @@ "name": "remoteName", "description": "remoteDescription", "endpoint": "http://localhost", - "credentials": "authToken", "folder": "myfolder", "readPermission": "remote/read", "writePermission": "remote/write", diff --git a/delta/plugins/storage/src/test/resources/storages/s3-storage.json b/delta/plugins/storage/src/test/resources/storages/s3-storage.json index cb36da4632..c873bfbb6c 100644 --- a/delta/plugins/storage/src/test/resources/storages/s3-storage.json +++ b/delta/plugins/storage/src/test/resources/storages/s3-storage.json @@ -8,8 +8,6 @@ "description": "s3description", "bucket": "mybucket", "endpoint": "http://localhost", - "accessKey": "accessKey", - "secretKey": "secretKey", "readPermission": "s3/read", "region": "eu-west-1", "writePermission": "s3/write", diff --git a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json index 8b8277c938..4c2d63c0fb 100644 --- a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json +++ b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-created.json @@ -13,7 +13,6 @@ "@type": "RemoteDiskStorage", "name": "remoteName", "description": "remoteDescription", - "credentials": "wqUglodAZzmNk3CEZeZgBA==", "default": true, "endpoint": "http://localhost", "folder": "myfolder", diff --git a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json index 127abf86f4..7cc8f2aa38 100644 --- a/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json +++ b/delta/plugins/storage/src/test/resources/storages/sse/remote-storage-updated.json @@ -13,7 +13,6 @@ "@type": "RemoteDiskStorage", 
"name": "remoteName", "description": "remoteDescription", - "credentials": "wqUglodAZzmNk3CEZeZgBA==", "default": true, "endpoint": "http://localhost", "folder": "myfolder", diff --git a/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-created.json b/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-created.json index 711a005f27..350cf75ab8 100644 --- a/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-created.json +++ b/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-created.json @@ -13,14 +13,12 @@ "@type": "S3Storage", "name": "s3name", "description": "s3description", - "accessKey": "O2vOp16ah21Fk/B0OucyxQ==", "bucket": "mybucket", "default": true, "endpoint": "http://localhost", "maxFileSize": 51, "readPermission": "s3/read", "region": "eu-west-1", - "secretKey": "dqO3PR6kLOCFNBk9I6zsZQ==", "writePermission": "s3/write" }, "_storageId": "https://bluebrain.github.io/nexus/vocabulary/s3-storage", diff --git a/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-updated.json b/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-updated.json index dab62cc381..5d0ef9765f 100644 --- a/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-updated.json +++ b/delta/plugins/storage/src/test/resources/storages/sse/s3-storage-updated.json @@ -13,14 +13,12 @@ "@type": "S3Storage", "name": "s3name", "description": "s3description", - "accessKey": "O2vOp16ah21Fk/B0OucyxQ==", "bucket": "mybucket", "default": true, "endpoint": "http://localhost", "maxFileSize": 51, "readPermission": "s3/read", "region": "eu-west-1", - "secretKey": "dqO3PR6kLOCFNBk9I6zsZQ==", "writePermission": "s3/write" }, "_storageId": "https://bluebrain.github.io/nexus/vocabulary/s3-storage", diff --git a/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json b/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json index 6b0b13d03c..4658b59fb1 100644 --- a/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json +++ b/delta/plugins/storage/src/test/resources/storages/storage-remote-state.json @@ -13,7 +13,6 @@ "description": "remoteDescription", "algorithm": "SHA-256", "endpoint": "http://localhost", - "credentials": "wqUglodAZzmNk3CEZeZgBA==", "folder": "myfolder", "readPermission": "remote/read", "writePermission": "remote/write", @@ -21,7 +20,6 @@ "@type": "RemoteDiskStorageValue" }, "source": { - "credentials": "wqUglodAZzmNk3CEZeZgBA==", "@type": "RemoteDiskStorage", "name": "remoteName", "description": "remoteDescription", diff --git a/delta/plugins/storage/src/test/resources/storages/storage-s3-state.json b/delta/plugins/storage/src/test/resources/storages/storage-s3-state.json index 06c732b74d..7935114203 100644 --- a/delta/plugins/storage/src/test/resources/storages/storage-s3-state.json +++ b/delta/plugins/storage/src/test/resources/storages/storage-s3-state.json @@ -14,8 +14,6 @@ "algorithm": "SHA-256", "bucket": "mybucket", "endpoint": "http://localhost", - "accessKey": "O2vOp16ah21Fk/B0OucyxQ==", - "secretKey": "dqO3PR6kLOCFNBk9I6zsZQ==", "region": "eu-west-1", "readPermission": "s3/read", "writePermission": "s3/write", @@ -23,8 +21,6 @@ "@type": "S3StorageValue" }, "source": { - "secretKey": "dqO3PR6kLOCFNBk9I6zsZQ==", - "accessKey": "O2vOp16ah21Fk/B0OucyxQ==", "@type": "S3Storage", "name": "s3name", "description": "s3description", diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StorageScopeInitializationSpec.scala 
b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StorageScopeInitializationSpec.scala index 746d0f0616..15fe643553 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StorageScopeInitializationSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/StorageScopeInitializationSpec.scala @@ -58,7 +58,6 @@ class StorageScopeInitializationSpec new ResolverContextResolution(rcr, (_, _, _) => IO.raiseError(ResourceResolutionReport())), IO.pure(allowedPerms.toSet), (_, _) => IO.unit, - crypto, xas, StoragesConfig(eventLogConfig, pagination, config), serviceAccount diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala index 372cd6fb55..35515edb81 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/FilesSpec.scala @@ -5,7 +5,6 @@ import akka.actor.{typed, ActorSystem} import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.Uri import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.RemoteContextResolutionFixture import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.NotComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Storage @@ -107,7 +106,6 @@ class FilesSpec(docker: RemoteStorageDocker) new ResolverContextResolution(rcr, (_, _, _) => IO.raiseError(ResourceResolutionReport())), IO.pure(allowedPerms), (_, _) => IO.unit, - crypto, xas, StoragesConfig(eventLogConfig, pagination, cfg), ServiceAccount(User("nexus-sa", Label.unsafe("sa"))) @@ -125,12 +123,12 @@ class FilesSpec(docker: RemoteStorageDocker) "creating a file" should { "create storages for files" in { - val payload = diskFieldsJson.map(_ deepMerge json"""{"capacity": 320, "maxFileSize": 300, "volume": "$path"}""") + val payload = diskFieldsJson deepMerge json"""{"capacity": 320, "maxFileSize": 300, "volume": "$path"}""" storages.create(diskId, projectRef, payload).accepted val payload2 = json"""{"@type": "RemoteDiskStorage", "endpoint": "${docker.hostConfig.endpoint}", "folder": "${RemoteStorageDocker.BucketName}", "readPermission": "$otherRead", "writePermission": "$otherWrite", "maxFileSize": 300, "default": false}""" - storages.create(remoteId, projectRef, Secret(payload2)).accepted + storages.create(remoteId, projectRef, payload2).accepted } "succeed with the id passed" in { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala index 35bb8ef9c4..c581b46114 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/files/routes/FilesRoutesSpec.scala @@ -82,7 +82,6 @@ class FilesRoutesSpec extends BaseRouteSpec with CancelAfterFailure with Storage new ResolverContextResolution(rcr, (_, _, _) => IO.raiseError(ResourceResolutionReport())), 
IO.pure(allowedPerms.toSet), (_, _) => IO.unit, - crypto, xas, StoragesConfig(eventLogConfig, pagination, config), ServiceAccount(User("nexus-sa", Label.unsafe("sa"))) @@ -117,9 +116,9 @@ class FilesRoutesSpec extends BaseRouteSpec with CancelAfterFailure with Storage caller.subject -> Set(storagesPermissions.write) ) .accepted - storages.create(s3Id, projectRef, diskFieldsJson.map(_ deepMerge defaults deepMerge s3Perms)).accepted + storages.create(s3Id, projectRef, diskFieldsJson deepMerge defaults deepMerge s3Perms).accepted storages - .create(dId, projectRef, diskFieldsJson.map(_ deepMerge defaults deepMerge json"""{"capacity":5000}""")) + .create(dId, projectRef, diskFieldsJson deepMerge defaults deepMerge json"""{"capacity":5000}""") .accepted } @@ -189,7 +188,7 @@ class FilesRoutesSpec extends BaseRouteSpec with CancelAfterFailure with Storage "fail to update a file without disk/write permission" in { aclCheck.subtract(AclAddress.Root, Anonymous -> Set(diskWrite)).accepted - Put(s"/v1/files/org/proj/file1?rev=1", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put(s"/v1/files/org/proj/file1?rev=1", s3FieldsJson.toEntity) ~> routes ~> check { response.status shouldEqual StatusCodes.Forbidden response.asJson shouldEqual jsonContentOf("errors/authorization-failed.json") } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala index 65a79c5df7..e95661694d 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageFixtures.scala @@ -5,11 +5,11 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StoragesConfig.{Di import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageFields.{DiskStorageFields, RemoteDiskStorageFields, S3StorageFields} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{AbsolutePath, DigestAlgorithm} import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv -import ch.epfl.bluebrain.nexus.delta.sdk.crypto.{Crypto, EncryptionConfig} import ch.epfl.bluebrain.nexus.delta.sdk.model.BaseUri import ch.epfl.bluebrain.nexus.delta.sdk.permissions.model.Permission import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label +import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker import ch.epfl.bluebrain.nexus.testkit.{CirceLiteral, TestHelpers} import software.amazon.awssdk.regions.Region @@ -28,18 +28,16 @@ trait StorageFixtures extends TestHelpers with CirceLiteral { // format: off implicit val config: StorageTypeConfig = StorageTypeConfig( disk = DiskStorageConfig(diskVolume, Set(diskVolume,tmpVolume), DigestAlgorithm.default, permissions.read, permissions.write, showLocation = false, Some(5000), 50), - amazon = Some(S3StorageConfig(DigestAlgorithm.default, Some("localhost"), Some(Secret("accessKey")), Some(Secret("secretKey")), permissions.read, permissions.write, showLocation = false, 60)), + amazon = Some(S3StorageConfig(DigestAlgorithm.default, Some("localhost"), Some(Secret(MinioDocker.RootUser)), Some(Secret(MinioDocker.RootPassword)), permissions.read, permissions.write, showLocation = false, 60)), remoteDisk = Some(RemoteDiskStorageConfig(DigestAlgorithm.default, BaseUri("http://localhost", Label.unsafe("v1")), None, 
permissions.read, permissions.write, showLocation = false, 70, RetryStrategyConfig.AlwaysGiveUp)), ) - val crypto: Crypto = EncryptionConfig(Secret("changeme"), Secret("salt")).crypto - val diskFields = DiskStorageFields(Some("diskName"), Some("diskDescription"), default = true, Some(tmpVolume), Some(Permission.unsafe("disk/read")), Some(Permission.unsafe("disk/write")), Some(1000), Some(50)) val diskVal = diskFields.toValue(config).get val diskFieldsUpdate = DiskStorageFields(Some("diskName"), Some("diskDescription"), default = false, Some(tmpVolume), Some(Permission.unsafe("disk/read")), Some(Permission.unsafe("disk/write")), Some(2000), Some(40)) val diskValUpdate = diskFieldsUpdate.toValue(config).get - val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some("http://localhost"), Some(Secret("accessKey")), Some(Secret("secretKey")), Some(Region.EU_WEST_1), Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51)) + val s3Fields = S3StorageFields(Some("s3name"), Some("s3description"), default = true, "mybucket", Some("http://localhost"), Some(Region.EU_WEST_1), Some(Permission.unsafe("s3/read")), Some(Permission.unsafe("s3/write")), Some(51)) val s3Val = s3Fields.toValue(config).get - val remoteFields = RemoteDiskStorageFields(Some("remoteName"), Some("remoteDescription"), default = true, Some(BaseUri.withoutPrefix("http://localhost")), Some(Secret("authToken")), Label.unsafe("myfolder"), Some(Permission.unsafe("remote/read")), Some(Permission.unsafe("remote/write")), Some(52)) + val remoteFields = RemoteDiskStorageFields(Some("remoteName"), Some("remoteDescription"), default = true, Some(BaseUri.withoutPrefix("http://localhost")), Label.unsafe("myfolder"), Some(Permission.unsafe("remote/read")), Some(Permission.unsafe("remote/write")), Some(52)) val remoteVal = remoteFields.toValue(config).get // format: on @@ -56,9 +54,9 @@ trait StorageFixtures extends TestHelpers with CirceLiteral { val s3Json = jsonContentOf("storages/s3-storage.json") val remoteJson = jsonContentOf("storages/remote-storage.json") - val diskFieldsJson = Secret(diskJson.removeKeys("@id", "@context", "_algorithm")) - val s3FieldsJson = Secret(s3Json.removeKeys("@id", "@context", "_algorithm")) - val remoteFieldsJson = Secret(remoteJson.removeKeys("@id", "@context", "_algorithm")) + val diskFieldsJson = diskJson.removeKeys("@id", "@context", "_algorithm") + val s3FieldsJson = s3Json.removeKeys("@id", "@context", "_algorithm") + val remoteFieldsJson = remoteJson.removeKeys("@id", "@context", "_algorithm") } object StorageFixtures extends StorageFixtures diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageGen.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageGen.scala index 857aa94013..3a6b893233 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageGen.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StorageGen.scala @@ -1,6 +1,5 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.{StorageState, StorageValue} import ch.epfl.bluebrain.nexus.delta.rdf.IriOrBNode.Iri import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv @@ -18,7 +17,7 @@ object StorageGen { id: Iri, project: ProjectRef, value: 
StorageValue, - source: Secret[Json] = Secret(Json.obj()), + source: Json = Json.obj(), rev: Int = 1, deprecated: Boolean = false, tags: Tags = Tags.empty, @@ -44,7 +43,7 @@ object StorageGen { id: Iri, project: ProjectRef, value: StorageValue, - source: Secret[Json] = Secret(Json.obj()), + source: Json = Json.obj(), rev: Int = 1, deprecated: Boolean = false, tags: Tags = Tags.empty, diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala index 6d6ae90a5c..7356901899 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesSpec.scala @@ -68,7 +68,6 @@ class StoragesSpec new ResolverContextResolution(rcr, (_, _, _) => IO.raiseError(ResourceResolutionReport())), IO.pure(allowedPerms.toSet), (_, _) => IO.unit, - crypto, xas, StoragesConfig(eventLogConfig, pagination, config), serviceAccount @@ -77,13 +76,13 @@ class StoragesSpec "creating a storage" should { "succeed with the id present on the payload" in { - val payload = diskFieldsJson.map(_ deepMerge Json.obj(keywords.id -> dId.asJson)) + val payload = diskFieldsJson deepMerge Json.obj(keywords.id -> dId.asJson) storages.create(projectRef, payload).accepted shouldEqual resourceFor(dId, projectRef, diskVal, payload, createdBy = bob, updatedBy = bob) } "succeed with the id present on the payload and passed" in { - val payload = s3FieldsJson.map(_ deepMerge Json.obj(keywords.id -> s3Id.asJson)) + val payload = s3FieldsJson deepMerge Json.obj(keywords.id -> s3Id.asJson) storages.create("s3-storage", projectRef, payload).accepted shouldEqual resourceFor(s3Id, projectRef, s3Val, payload, createdBy = bob, updatedBy = bob) @@ -99,7 +98,7 @@ class StoragesSpec "reject with different ids on the payload and passed" in { val otherId = nxv + "other" - val payload = s3FieldsJson.map(_ deepMerge Json.obj(keywords.id -> s3Id.asJson)) + val payload = s3FieldsJson deepMerge Json.obj(keywords.id -> s3Id.asJson) storages.create(otherId, projectRef, payload).rejected shouldEqual UnexpectedStorageId(id = otherId, payloadId = s3Id) } @@ -122,7 +121,7 @@ class StoragesSpec "updating a storage" should { "succeed" in { - val payload = diskFieldsJson.map(_ deepMerge json"""{"default": false, "capacity": 2000, "maxFileSize": 40}""") + val payload = diskFieldsJson deepMerge json"""{"default": false, "capacity": 2000, "maxFileSize": 40}""" storages.update(dId, projectRef, 2, payload).accepted shouldEqual resourceFor(dId, projectRef, diskValUpdate, payload, rev = 3, createdBy = bob, updatedBy = bob) } @@ -176,7 +175,7 @@ class StoragesSpec "deprecating a storage" should { "succeed" in { - val payload = s3FieldsJson.map(_ deepMerge json"""{"@id": "$s3Id", "default": false}""") + val payload = s3FieldsJson deepMerge json"""{"@id": "$s3Id", "default": false}""" storages.deprecate(s3Id, projectRef, 2).accepted shouldEqual resourceFor( s3Id, @@ -210,7 +209,7 @@ class StoragesSpec } "allow tagging" in { - val payload = s3FieldsJson.map(_ deepMerge json"""{"@id": "$s3Id", "default": false}""") + val payload = s3FieldsJson deepMerge json"""{"@id": "$s3Id", "default": false}""" storages.tag(s3Id, projectRef, tag, tagRev = 3, 3).accepted shouldEqual resourceFor( s3Id, diff --git 
a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala index ab3b9204c3..65a84472f8 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/StoragesStmSpec.scala @@ -1,6 +1,5 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageType.{DiskStorage => DiskStorageType} import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageGen.storageState import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.Storages.{evaluate, next} @@ -56,7 +55,7 @@ class StoragesStmSpec private val perms = IO.pure(allowedPerms.toSet) - private val eval = evaluate(access, perms, config, crypto)(_, _) + private val eval = evaluate(access, perms, config)(_, _) "The Storages state machine" when { @@ -109,7 +108,7 @@ class StoragesStmSpec "reject with IncorrectRev" in { val state = storageState(dId, project, diskVal) val commands = List( - UpdateStorage(dId, project, diskFields, Secret(Json.obj()), 2, alice), + UpdateStorage(dId, project, diskFields, Json.obj(), 2, alice), TagStorage(dId, project, 1, UserTag.unsafe("tag"), 2, alice), DeprecateStorage(dId, project, 2, alice) ) @@ -136,7 +135,7 @@ class StoragesStmSpec rdId -> inaccessibleRemoteVal ) ) { case (id, value) => - val createCmd = CreateStorage(id, project, value, Secret(Json.obj()), bob) + val createCmd = CreateStorage(id, project, value, Json.obj(), bob) eval(None, createCmd).rejected shouldBe a[StorageNotAccessible] } @@ -147,7 +146,7 @@ class StoragesStmSpec remoteCurrent -> inaccessibleRemoteVal ) ) { case (state, value) => - val updateCmd = UpdateStorage(state.id, project, value, Secret(Json.obj()), 1, alice) + val updateCmd = UpdateStorage(state.id, project, value, Json.obj(), 1, alice) eval(Some(state), updateCmd).rejected shouldBe a[StorageNotAccessible] } } @@ -167,7 +166,7 @@ class StoragesStmSpec (rdId, exceededSizeRemoteVal, config.remoteDisk.value.defaultMaxFileSize) ) ) { case (id, value, maxFileSize) => - val createCmd = CreateStorage(id, project, value, Secret(Json.obj()), bob) + val createCmd = CreateStorage(id, project, value, Json.obj(), bob) eval(None, createCmd).rejected shouldEqual InvalidMaxFileSize(id, 100, maxFileSize) } @@ -178,20 +177,20 @@ class StoragesStmSpec (remoteCurrent, exceededSizeRemoteVal, config.remoteDisk.value.defaultMaxFileSize) ) ) { case (state, value, maxFileSize) => - val updateCmd = UpdateStorage(state.id, project, value, Secret(Json.obj()), 1, alice) + val updateCmd = UpdateStorage(state.id, project, value, Json.obj(), 1, alice) eval(Some(state), updateCmd).rejected shouldEqual InvalidMaxFileSize(state.id, 100, maxFileSize) } } "reject with ResourceAlreadyExists when storage already exists" in { val state = storageState(dId, project, diskVal) - eval(Some(state), CreateStorage(dId, project, diskFields, Secret(Json.obj()), bob)) + eval(Some(state), CreateStorage(dId, project, diskFields, Json.obj(), bob)) .rejectedWith[ResourceAlreadyExists] } "reject with StorageNotFound" in { val commands = List( - UpdateStorage(dId, project, diskFields, Secret(Json.obj()), 2, alice), + UpdateStorage(dId, project, diskFields, Json.obj(), 
2, alice), TagStorage(dId, project, 1, UserTag.unsafe("tag"), 2, alice), DeprecateStorage(dId, project, 2, alice) ) @@ -203,7 +202,7 @@ class StoragesStmSpec "reject with StorageIsDeprecated" in { val state = storageState(dId, project, diskVal, rev = 2, deprecated = true) val commands = List( - UpdateStorage(dId, project, diskFields, Secret(Json.obj()), 2, alice), + UpdateStorage(dId, project, diskFields, Json.obj(), 2, alice), DeprecateStorage(dId, project, 2, alice) ) forAll(commands) { cmd => @@ -222,12 +221,12 @@ class StoragesStmSpec val s3Current = storageState(s3Id, project, s3Val) val remoteCurrent = storageState(rdId, project, remoteVal) val list = List( - diskCurrent -> UpdateStorage(dId, project, s3Fields, Secret(Json.obj()), 1, alice), - diskCurrent -> UpdateStorage(dId, project, remoteFields, Secret(Json.obj()), 1, alice), - s3Current -> UpdateStorage(s3Id, project, diskFields, Secret(Json.obj()), 1, alice), - s3Current -> UpdateStorage(s3Id, project, remoteFields, Secret(Json.obj()), 1, alice), - remoteCurrent -> UpdateStorage(rdId, project, diskFields, Secret(Json.obj()), 1, alice), - remoteCurrent -> UpdateStorage(rdId, project, s3Fields, Secret(Json.obj()), 1, alice) + diskCurrent -> UpdateStorage(dId, project, s3Fields, Json.obj(), 1, alice), + diskCurrent -> UpdateStorage(dId, project, remoteFields, Json.obj(), 1, alice), + s3Current -> UpdateStorage(s3Id, project, diskFields, Json.obj(), 1, alice), + s3Current -> UpdateStorage(s3Id, project, remoteFields, Json.obj(), 1, alice), + remoteCurrent -> UpdateStorage(rdId, project, diskFields, Json.obj(), 1, alice), + remoteCurrent -> UpdateStorage(rdId, project, s3Fields, Json.obj(), 1, alice) ) forAll(list) { case (state, cmd) => eval(Some(state), cmd).rejectedWith[DifferentStorageType] @@ -241,8 +240,8 @@ class StoragesStmSpec val storageValue = s3Fields.copy(readPermission = Some(read), writePermission = Some(write)) val current = storageState(s3Id, project, s3Val) val list = List( - None -> CreateStorage(s3Id, project, storageValue, Secret(Json.obj()), bob), - Some(current) -> UpdateStorage(s3Id, project, storageValue, Secret(Json.obj()), 1, alice) + None -> CreateStorage(s3Id, project, storageValue, Json.obj(), bob), + Some(current) -> UpdateStorage(s3Id, project, storageValue, Json.obj(), 1, alice) ) forAll(list) { case (current, cmd) => eval(current, cmd).rejected shouldEqual PermissionsAreNotDefined(Set(read, write)) @@ -253,10 +252,10 @@ class StoragesStmSpec val s3Current = storageState(s3Id, project, s3Val) val remoteCurrent = storageState(rdId, project, remoteVal) val list = List( - None -> CreateStorage(s3Id, project, s3Fields, Secret(Json.obj()), bob), - None -> CreateStorage(s3Id, project, remoteFields, Secret(Json.obj()), bob), - Some(s3Current) -> UpdateStorage(s3Id, project, s3Fields, Secret(Json.obj()), 1, alice), - Some(remoteCurrent) -> UpdateStorage(rdId, project, remoteFields, Secret(Json.obj()), 1, alice) + None -> CreateStorage(s3Id, project, s3Fields, Json.obj(), bob), + None -> CreateStorage(s3Id, project, remoteFields, Json.obj(), bob), + Some(s3Current) -> UpdateStorage(s3Id, project, s3Fields, Json.obj(), 1, alice), + Some(remoteCurrent) -> UpdateStorage(rdId, project, remoteFields, Json.obj(), 1, alice) ) val diskVolume = AbsolutePath(Files.createTempDirectory("disk")).rightValue // format: off @@ -266,7 +265,7 @@ class StoragesStmSpec remoteDisk = None ) // format: on - val eval = evaluate(access, perms, config, crypto)(_, _) + val eval = evaluate(access, perms, config)(_, _) forAll(list) { 
case (current, cmd) => eval(current, cmd).rejectedWith[InvalidStorageType] } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala index 777ba6676f..00cc36490d 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageFieldsSpec.scala @@ -50,7 +50,7 @@ class StorageFieldsSpec } "dealing with S3 storages" should { - val json = s3FieldsJson.value.addContext(contexts.storages) + val json = s3FieldsJson.addContext(contexts.storages) "be created from Json-LD" in { sourceDecoder(pc, json).accepted._2 shouldEqual s3Fields @@ -70,12 +70,12 @@ class StorageFieldsSpec "region" ) sourceDecoder(pc, jsonNoDefaults).accepted._2 shouldEqual - S3StorageFields(None, None, default = true, "mybucket", None, None, None, None, None, None, None) + S3StorageFields(None, None, default = true, "mybucket", None, None, None, None, None) } } "dealing with remote storages" should { - val json = remoteFieldsJson.value.addContext(contexts.storages) + val json = remoteFieldsJson.addContext(contexts.storages) "be created from Json-LD" in { sourceDecoder(pc, json).accepted._2 shouldEqual remoteFields @@ -93,7 +93,7 @@ class StorageFieldsSpec "credentials" ) sourceDecoder(pc, jsonNoDefaults).accepted._2 shouldEqual - RemoteDiskStorageFields(None, None, default = true, None, None, Label.unsafe("myfolder"), None, None, None) + RemoteDiskStorageFields(None, None, default = true, None, Label.unsafe("myfolder"), None, None, None) } } } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala index 6c8602d4c5..3d745713bf 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSerializationSuite.scala @@ -46,26 +46,26 @@ class StorageSerializationSuite extends SerializationSuite with StorageFixtures (diskDeprecated, loadEvents("storages", "storage-deprecated.json"), Deprecated) ) - private val storageEventSerializer = StorageEvent.serializer(crypto) - private val storageSseEncoder = StorageEvent.sseEncoder(crypto) - private val storageEventMetricEncoder = StorageEvent.storageEventMetricEncoder(crypto) + private val storageEventSerializer = StorageEvent.serializer + private val storageSseEncoder = StorageEvent.sseEncoder + private val storageEventMetricEncoder = StorageEvent.storageEventMetricEncoder storagesMapping.foreach { case (event, (database, sse), action) => - test(s"Correctly serialize ${event.getClass.getName}") { + test(s"Correctly serialize ${event.getClass.getSimpleName} for ${event.tpe}") { assertEquals(storageEventSerializer.codec(event), database) } - test(s"Correctly deserialize ${event.getClass.getName}") { + test(s"Correctly deserialize ${event.getClass.getSimpleName} for ${event.tpe}") { assertEquals(storageEventSerializer.codec.decodeJson(database), Right(event)) } - test(s"Correctly serialize 
${event.getClass.getName} as an SSE") { + test(s"Correctly serialize ${event.getClass.getSimpleName} for ${event.tpe} as an SSE") { storageSseEncoder.toSse .decodeJson(database) .assertRight(SseData(ClassUtils.simpleName(event), Some(projectRef), sse)) } - test(s"Correctly encode ${event.getClass.getName} to metric") { + test(s"Correctly encode ${event.getClass.getSimpleName} for ${event.tpe} to metric") { storageEventMetricEncoder.toMetric.decodeJson(database).assertRight { ProjectScopedMetric( instant, @@ -102,7 +102,7 @@ class StorageSerializationSuite extends SerializationSuite with StorageFixtures ) -> v } - private val storageStateSerializer = StorageState.serializer(crypto) + private val storageStateSerializer = StorageState.serializer statesMapping.foreach { case (state, json) => test(s"Correctly serialize state ${state.value.tpe}") { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala index 0c24957ab4..384fbd430a 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/model/StorageSpec.scala @@ -1,15 +1,13 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.RemoteContextResolutionFixture import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage._ import ch.epfl.bluebrain.nexus.delta.rdf.Vocabulary.nxv import ch.epfl.bluebrain.nexus.delta.sdk.model.Tags import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ +import ch.epfl.bluebrain.nexus.delta.sourcing.model.{Label, ProjectRef} import ch.epfl.bluebrain.nexus.delta.sourcing.model.Tag.UserTag -import ch.epfl.bluebrain.nexus.delta.sourcing.model.Label -import ch.epfl.bluebrain.nexus.delta.sourcing.model.ProjectRef import ch.epfl.bluebrain.nexus.testkit.IOValues import org.scalatest.Inspectors import org.scalatest.matchers.should.Matchers @@ -27,10 +25,10 @@ class StorageSpec val project = ProjectRef(Label.unsafe("org"), Label.unsafe("project")) val tag = UserTag.unsafe("tag") val diskStorage = - DiskStorage(nxv + "disk", project, diskVal, Tags.empty, Secret(json"""{"disk": "value"}""")) - val s3Storage = S3Storage(nxv + "s3", project, s3Val, Tags(tag -> 1), Secret(json"""{"s3": "value"}""")) + DiskStorage(nxv + "disk", project, diskVal, Tags.empty, json"""{"disk": "value"}""") + val s3Storage = S3Storage(nxv + "s3", project, s3Val, Tags(tag -> 1), json"""{"s3": "value"}""") val remoteStorage = - RemoteDiskStorage(nxv + "remote", project, remoteVal, Tags.empty, Secret(json"""{"remote": "value"}""")) + RemoteDiskStorage(nxv + "remote", project, remoteVal, Tags.empty, json"""{"remote": "value"}""") "be compacted" in { forAll( diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala index 5142558d76..3ba8770ce3 100644 --- 
a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/disk/DiskStorageSaveFileSpec.scala @@ -4,7 +4,6 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.{HttpEntity, Uri} import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} @@ -47,7 +46,7 @@ class DiskStorageSaveFileSpec val iri = iri"http://localhost/disk" val project = ProjectRef.unsafe("org", "project") val value = DiskStorageValue(default = true, DigestAlgorithm.default, volume, read, write, Some(100), 10) - val storage = DiskStorage(iri, project, value, Tags.empty, Secret(Json.obj())) + val storage = DiskStorage(iri, project, value, Tags.empty, Json.obj()) val uuid = UUID.fromString("8049ba90-7cc6-4de5-93a1-802c04200dcc") val content = "file content" val entity = HttpEntity(content) diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageAccessSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageAccessSpec.scala index ea78742388..c3125b76e3 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageAccessSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteDiskStorageAccessSpec.scala @@ -47,7 +47,6 @@ class RemoteDiskStorageAccessSpec(docker: RemoteStorageDocker) default = true, DigestAlgorithm.default, BaseUri(docker.hostConfig.endpoint).rightValue, - None, Label.unsafe(RemoteStorageDocker.BucketName), read, write, diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala index eb169b1e88..5472fa202c 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageLinkFileSpec.scala @@ -4,7 +4,6 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.Uri import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.NotComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Storage import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} @@ -62,13 +61,12 @@ class RemoteStorageLinkFileSpec(docker: RemoteStorageDocker) default = true, DigestAlgorithm.default, BaseUri(docker.hostConfig.endpoint).rightValue, - None, 
Label.unsafe(RemoteStorageDocker.BucketName), read, write, 10 ) - storage = RemoteDiskStorage(iri, project, storageValue, Tags.empty, Secret(Json.obj())) + storage = RemoteDiskStorage(iri, project, storageValue, Tags.empty, Json.obj()) } "RemoteDiskStorage linking operations" should { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala index fd45ee5b28..03b5bbeeff 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/remote/RemoteStorageSaveAndFetchFileSpec.scala @@ -4,7 +4,6 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.{HttpEntity, Uri} import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription} @@ -66,13 +65,12 @@ class RemoteStorageSaveAndFetchFileSpec(docker: RemoteStorageDocker) default = true, DigestAlgorithm.default, BaseUri(docker.hostConfig.endpoint).rightValue, - None, Label.unsafe(RemoteStorageDocker.BucketName), read, write, 10 ) - storage = RemoteDiskStorage(iri, project, storageValue, Tags.empty, Secret(Json.obj())) + storage = RemoteDiskStorage(iri, project, storageValue, Tags.empty, Json.obj()) } "RemoteDiskStorage operations" should { diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala index b2ea022d62..e951818e5b 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageAccessSpec.scala @@ -2,7 +2,6 @@ package ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3 import akka.actor.ActorSystem import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.StorageFixtures import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgorithm import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageRejection.StorageNotAccessible @@ -11,7 +10,6 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.Mini import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.permissions.{read, write} import ch.epfl.bluebrain.nexus.delta.sdk.syntax._ import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker -import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker._ import ch.epfl.bluebrain.nexus.testkit.{IOValues, TestHelpers} import org.scalatest.concurrent.Eventually import org.scalatest.matchers.should.Matchers @@ -38,8 +36,6 @@ class 
S3StorageAccessSpec(docker: MinioDocker) algorithm = DigestAlgorithm.default, bucket = "bucket", endpoint = Some(docker.hostConfig.endpoint), - accessKey = Some(Secret(RootUser)), - secretKey = Some(Secret(RootPassword)), region = Some(Region.EU_CENTRAL_1), readPermission = read, writePermission = write, @@ -60,10 +56,6 @@ class S3StorageAccessSpec(docker: MinioDocker) access(iri, storage).accepted } - "fail on wrong credentials" in { - access(iri, storage.copy(secretKey = Some(Secret("other")))).rejectedWith[StorageNotAccessible] - } - "fail when bucket does not exist" in { access(iri, storage.copy(bucket = "other")).rejectedWith[StorageNotAccessible] } diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageLinkFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageLinkFileSpec.scala index afab764307..8f171971a7 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageLinkFileSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageLinkFileSpec.scala @@ -4,7 +4,6 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.{HttpEntity, Uri} import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client @@ -61,15 +60,13 @@ class S3StorageLinkFileSpec(docker: MinioDocker) algorithm = DigestAlgorithm.default, bucket = "bucket3", endpoint = Some(docker.hostConfig.endpoint), - accessKey = Some(Secret(RootUser)), - secretKey = Some(Secret(RootPassword)), region = Some(Region.EU_CENTRAL_1), readPermission = read, writePermission = write, maxFileSize = 20 ) createBucket(storageValue).hideErrors.accepted - storage = S3Storage(iri, project, storageValue, Tags.empty, Secret(Json.obj())) + storage = S3Storage(iri, project, storageValue, Tags.empty, Json.obj()) attributes = FileAttributes( uuid, s"http://bucket3.$VirtualHost:${docker.hostConfig.port}/org/project/8/0/4/9/b/a/9/0/myfile.txt", diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala index 09f899d76e..04076ab32a 100644 --- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala +++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala @@ -4,7 +4,6 @@ import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)` import akka.http.scaladsl.model.{HttpEntity, Uri} import akka.testkit.TestKit -import ch.epfl.bluebrain.nexus.delta.kernel.Secret import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client 
diff --git a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
index 09f899d76e..04076ab32a 100644
--- a/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
+++ b/delta/plugins/storage/src/test/scala/ch/epfl/bluebrain/nexus/delta/plugins/storage/storages/operations/s3/S3StorageSaveAndFetchFileSpec.scala
@@ -4,7 +4,6 @@ import akka.actor.ActorSystem
 import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)`
 import akka.http.scaladsl.model.{HttpEntity, Uri}
 import akka.testkit.TestKit
-import ch.epfl.bluebrain.nexus.delta.kernel.Secret
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.Digest.ComputedDigest
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.FileAttributes.FileAttributesOrigin.Client
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.files.model.{FileAttributes, FileDescription}
@@ -13,8 +12,8 @@ import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.DigestAlgori
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.Storage.S3Storage
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.model.StorageValue.S3StorageValue
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.AkkaSourceHelpers
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.{FileNotFound, UnexpectedFetchError}
-import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection.{ResourceAlreadyExists, UnexpectedSaveError}
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.FetchFileRejection.FileNotFound
+import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.StorageFileRejection.SaveFileRejection.ResourceAlreadyExists
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.operations.s3.MinioSpec._
 import ch.epfl.bluebrain.nexus.delta.plugins.storage.storages.permissions.{read, write}
 import ch.epfl.bluebrain.nexus.delta.sdk.model.Tags
@@ -25,9 +24,9 @@ import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker
 import ch.epfl.bluebrain.nexus.testkit.minio.MinioDocker._
 import io.circe.Json
 import monix.execution.Scheduler
+import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
 import org.scalatest.matchers.should.Matchers
 import org.scalatest.wordspec.AnyWordSpecLike
-import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}
 import software.amazon.awssdk.regions.Region
 
 import java.util.UUID
@@ -62,15 +61,13 @@ class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
     algorithm = DigestAlgorithm.default,
     bucket = "bucket2",
     endpoint = Some(docker.hostConfig.endpoint),
-    accessKey = Some(Secret(RootUser)),
-    secretKey = Some(Secret(RootPassword)),
     region = Some(Region.EU_CENTRAL_1),
     readPermission = read,
     writePermission = write,
     maxFileSize = 20
   )
   createBucket(storageValue).hideErrors.accepted
-  storage = S3Storage(iri, project, storageValue, Tags.empty, Secret(Json.obj()))
+  storage = S3Storage(iri, project, storageValue, Tags.empty, Json.obj())
   attributes = FileAttributes(
     uuid,
     s"http://bucket2.$VirtualHost:${docker.hostConfig.port}/org/project/8/0/4/9/b/a/9/0/myfile.txt",
@@ -92,12 +89,6 @@ class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
     val content = "file content"
     val entity  = HttpEntity(content)
 
-    "fail saving a file to a bucket on wrong credentials" in {
-      val description  = FileDescription(uuid, filename, Some(`text/plain(UTF-8)`))
-      val otherStorage = storage.copy(value = storage.value.copy(accessKey = Some(Secret("wrong"))))
-      otherStorage.saveFile.apply(description, entity).rejectedWith[UnexpectedSaveError]
-    }
-
     "save a file to a bucket" in {
       val description = FileDescription(uuid, filename, Some(`text/plain(UTF-8)`))
       storage.saveFile.apply(description, entity).accepted shouldEqual attributes
@@ -108,11 +99,6 @@ class S3StorageSaveAndFetchFileSpec(docker: MinioDocker)
       consume(sourceFetched) shouldEqual content
     }
 
-    "fail fetching a file to a bucket on wrong credentials" in {
-      val otherStorage = storage.copy(value = storage.value.copy(accessKey = Some(Secret("wrong"))))
-      otherStorage.fetchFile.apply(attributes).rejectedWith[UnexpectedFetchError]
-    }
-
     "fail fetching a file that does not exist" in {
       storage.fetchFile.apply(attributes.copy(path = Uri.Path("other.txt"))).rejectedWith[FileNotFound]
     }
Post("/v1/storages/myorg/myproject", payload.toEntity) ~> routes ~> check { status shouldEqual StatusCodes.Created response.asJson shouldEqual storageMetadata(projectRef, s3Id, StorageType.S3Storage) @@ -146,7 +142,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur "create a storage with an authenticated user and provided id" in { Put( "/v1/storages/myorg/myproject/remote-disk-storage", - remoteFieldsJson.value.toEntity + remoteFieldsJson.toEntity ) ~> asAlice ~> routes ~> check { status shouldEqual StatusCodes.Created response.asJson shouldEqual @@ -155,7 +151,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur } "reject the creation of a storage which already exists" in { - Put("/v1/storages/myorg/myproject/s3-storage", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put("/v1/storages/myorg/myproject/s3-storage", s3FieldsJson.toEntity) ~> routes ~> check { status shouldEqual StatusCodes.Conflict response.asJson shouldEqual jsonContentOf("/storages/errors/already-exists.json", "id" -> s3Id) } @@ -163,7 +159,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur "fail to update a storage without storages/write permission" in { aclCheck.subtract(AclAddress.Root, Anonymous -> Set(permissions.write)).accepted - Put(s"/v1/storages/myorg/myproject/s3-storage?rev=1", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put(s"/v1/storages/myorg/myproject/s3-storage?rev=1", s3FieldsJson.toEntity) ~> routes ~> check { response.status shouldEqual StatusCodes.Forbidden response.asJson shouldEqual jsonContentOf("errors/authorization-failed.json") } @@ -177,7 +173,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur ) forAll(endpoints.zipWithIndex) { case (endpoint, idx) => // the starting revision is 2 because this storage has been updated to default = false - Put(s"$endpoint?rev=${idx + 2}", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put(s"$endpoint?rev=${idx + 2}", s3FieldsJson.toEntity) ~> routes ~> check { status shouldEqual StatusCodes.OK response.asJson shouldEqual storageMetadata(projectRef, s3Id, StorageType.S3Storage, rev = idx + 3) } @@ -185,7 +181,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur } "reject the update of a non-existent storage" in { - Put("/v1/storages/myorg/myproject/myid10?rev=1", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put("/v1/storages/myorg/myproject/myid10?rev=1", s3FieldsJson.toEntity) ~> routes ~> check { status shouldEqual StatusCodes.NotFound response.asJson shouldEqual jsonContentOf("/storages/errors/not-found.json", "id" -> (nxv + "myid10"), "proj" -> "myorg/myproject") @@ -193,7 +189,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur } "reject the update of a storage at a non-existent revision" in { - Put("/v1/storages/myorg/myproject/s3-storage?rev=10", s3FieldsJson.value.toEntity) ~> routes ~> check { + Put("/v1/storages/myorg/myproject/s3-storage?rev=10", s3FieldsJson.toEntity) ~> routes ~> check { status shouldEqual StatusCodes.Conflict response.asJson shouldEqual jsonContentOf("/storages/errors/incorrect-rev.json", "provided" -> 10, "expected" -> 4) @@ -288,7 +284,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur } "fetch a storage original payload" in { - val expectedSource = remoteFieldsJson.map(_ deepMerge json"""{"default": false}""") + val expectedSource = remoteFieldsJson deepMerge 
json"""{"default": false}""" val endpoints = List( s"/v1/storages/$uuid/$uuid/remote-disk-storage/source", s"/v1/resources/$uuid/$uuid/_/remote-disk-storage/source", @@ -301,7 +297,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur forAll(endpoints) { endpoint => Get(endpoint) ~> routes ~> check { status shouldEqual StatusCodes.OK - response.asJson shouldEqual Storage.encryptSource(expectedSource, crypto).success.value + response.asJson shouldEqual expectedSource } } } @@ -316,7 +312,7 @@ class StoragesRoutesSpec extends BaseRouteSpec with TryValues with StorageFixtur forAll(List("rev=1", "tag=mytag")) { param => Get(s"$endpoint?$param") ~> routes ~> check { status shouldEqual StatusCodes.OK - response.asJson shouldEqual Storage.encryptSource(remoteFieldsJson, crypto).success.value + response.asJson shouldEqual remoteFieldsJson } } } diff --git a/docs/src/main/paradox/docs/delta/api/storages-api.md b/docs/src/main/paradox/docs/delta/api/storages-api.md index f0fbe2d47b..97623cc149 100644 --- a/docs/src/main/paradox/docs/delta/api/storages-api.md +++ b/docs/src/main/paradox/docs/delta/api/storages-api.md @@ -70,7 +70,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage "@type": "RemoteDiskStorage", "default": "{default}", "endpoint": "{endpoint}", - "credentials": "{credentials}", "folder": "{folder}", "readPermission": "{read_permission}", "writePermission": "{write_permission}", @@ -82,7 +81,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage - `{default}`: Boolean - the flag to decide whether this storage is going to become the default storage for the target project or not. - `{endpoint}`: Uri - the endpoint where the storage service is listening to requests. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-endpoint`. -- `{credentials}`: String - the service account access token to authenticate and authorize Nexus Delta client against the storage service. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-credentials`. - `{folder}`: String - the storage service bucket where files using this storage are going to be saved. - `{read_permission}`: String - the permission a client must have in order to fetch files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-read-permission` (`resources/read`). - `{write_permission}`: String - the permission a client must have in order to create files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-write-permission` (`files/write`). @@ -99,8 +97,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage "@type": "S3Storage", "default": "{default}", "endpoint": "{endpoint}", - "accessKey": "{access_key}", - "secretKey": "{secret_key}", "region": "{region}", "readPermission": "{read_permission}", "writePermission": "{write_permission}", @@ -112,8 +108,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage - `{default}`: Boolean - the flag to decide whether this storage is going to become the default storage for the target project or not. - `{endpoint}`: Uri - the Amazon S3 compatible service endpoint. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-endpoint`. 
diff --git a/docs/src/main/paradox/docs/delta/api/storages-api.md b/docs/src/main/paradox/docs/delta/api/storages-api.md
index f0fbe2d47b..97623cc149 100644
--- a/docs/src/main/paradox/docs/delta/api/storages-api.md
+++ b/docs/src/main/paradox/docs/delta/api/storages-api.md
@@ -70,7 +70,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage
   "@type": "RemoteDiskStorage",
   "default": "{default}",
   "endpoint": "{endpoint}",
-  "credentials": "{credentials}",
   "folder": "{folder}",
   "readPermission": "{read_permission}",
   "writePermission": "{write_permission}",
@@ -82,7 +81,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage
 
 - `{default}`: Boolean - the flag to decide whether this storage is going to become the default storage for the target project or not.
 - `{endpoint}`: Uri - the endpoint where the storage service is listening to requests. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-endpoint`.
-- `{credentials}`: String - the service account access token to authenticate and authorize Nexus Delta client against the storage service. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-credentials`.
 - `{folder}`: String - the storage service bucket where files using this storage are going to be saved.
 - `{read_permission}`: String - the permission a client must have in order to fetch files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-read-permission` (`resources/read`).
 - `{write_permission}`: String - the permission a client must have in order to create files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.remote-disk.default-write-permission` (`files/write`).
@@ -99,8 +97,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage
   "@type": "S3Storage",
   "default": "{default}",
   "endpoint": "{endpoint}",
-  "accessKey": "{access_key}",
-  "secretKey": "{secret_key}",
   "region": "{region}",
   "readPermission": "{read_permission}",
   "writePermission": "{write_permission}",
@@ -112,8 +108,6 @@ In order to be able to use this storage, the configuration flag `plugins.storage
 
 - `{default}`: Boolean - the flag to decide whether this storage is going to become the default storage for the target project or not.
 - `{endpoint}`: Uri - the Amazon S3 compatible service endpoint. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-endpoint`.
-- `{access_key}`: String - the Amazon S3 compatible access key. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-access-key`.
-- `{secret_key}`: String - the Amazon S3 compatible secret key. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-secret-key`.
 - `{region}`: String - the Amazon S3 compatible region. This field is optional, defaulting to the S3 default region configuration.
 - `{read_permission}`: String - the permission a client must have in order to fetch files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-read-permission` (`resources/read`).
 - `{write_permission}`: String - the permission a client must have in order to create files using this storage. This field is optional, defaulting to the configuration flag `plugins.storage.storages.amazon.default-write-permission` (`files/write`).
diff --git a/docs/src/main/paradox/docs/releases/v1.9-release-notes.md b/docs/src/main/paradox/docs/releases/v1.9-release-notes.md
index d36edf9847..6eaa93dfe6 100644
--- a/docs/src/main/paradox/docs/releases/v1.9-release-notes.md
+++ b/docs/src/main/paradox/docs/releases/v1.9-release-notes.md
@@ -26,6 +26,10 @@ Resources can be added to an archive using a `_self`.
 
 @ref:[More information](../delta/api/archives-api.md#payload)
 
+### Storages
+
+Storages can no longer be created with credentials that would then be stored by Delta: for S3 storages, this means `accessKey` and `secretKey`; for remote storages, `credentials`. These credentials should instead be defined in the Delta configuration.
+
 ## Nexus Fusion
 
 TODO
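To make the release note concrete: a storage creation payload now carries only connection settings and permissions, while the credentials move into Delta's own configuration, as the test configuration below also shows. Both snippets are illustrative sketches; every value is a placeholder, and the field and key names come from the updated API docs above and the `delta-postgres.conf` hunk below.

```json
{
  "@type": "S3Storage",
  "default": false,
  "bucket": "mybucket",
  "endpoint": "http://minio:9000",
  "region": "eu-central-1",
  "readPermission": "resources/read",
  "writePermission": "files/write"
}
```

```hocon
plugins.storage.storages {
  remote-disk {
    # token Delta uses to talk to the remote storage service
    default-credentials = "{service_account_token}"
  }
  amazon {
    # S3 keys now live only in Delta's configuration
    default-access-key = "{access_key}"
    default-secret-key = "{secret_key}"
  }
}
```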
diff --git a/tests/docker/config/delta-postgres.conf b/tests/docker/config/delta-postgres.conf
index 306393a786..b27dded274 100644
--- a/tests/docker/config/delta-postgres.conf
+++ b/tests/docker/config/delta-postgres.conf
@@ -95,10 +95,13 @@ plugins {
     remote-disk {
       enabled = true
       default-endpoint = "http://storage-service:8080/v1"
+      default-credentials = ""
     }
 
     amazon {
       enabled = true
+      default-access-key = "MY_ACCESS_KEY"
+      default-secret-key = "CHUTCHUT"
     }
   }
 }
diff --git a/tests/src/test/resources/kg/storages/remote-disk.json b/tests/src/test/resources/kg/storages/remote-disk.json
index 6a6fc9ca3a..7620daa2ba 100644
--- a/tests/src/test/resources/kg/storages/remote-disk.json
+++ b/tests/src/test/resources/kg/storages/remote-disk.json
@@ -2,7 +2,6 @@
   "@id": "https://bluebrain.github.io/nexus/vocabulary/{{id}}",
   "@type": "RemoteDiskStorage",
   "endpoint": "{{endpoint}}",
-  "credentials": "{{cred}}",
   "folder": "{{folder}}",
   "default": false,
   "readPermission": "{{read}}",
diff --git a/tests/src/test/resources/kg/storages/s3.json b/tests/src/test/resources/kg/storages/s3.json
index 8fa6ff2ac9..2a8b997437 100644
--- a/tests/src/test/resources/kg/storages/s3.json
+++ b/tests/src/test/resources/kg/storages/s3.json
@@ -3,7 +3,5 @@
   "@type": "S3Storage",
   "default": false,
   "bucket": "{{bucket}}",
-  "endpoint": "{{endpoint}}",
-  "accessKey": "{{accessKey}}",
-  "secretKey": "{{secretKey}}"
+  "endpoint": "{{endpoint}}"
 }
\ No newline at end of file
diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala
index fca845e76d..65e7f629af 100644
--- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala
+++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/RemoteStorageSpec.scala
@@ -3,7 +3,6 @@ package ch.epfl.bluebrain.nexus.tests.kg
 import akka.http.scaladsl.model.{ContentTypes, HttpCharsets, MediaTypes, StatusCodes}
 import akka.util.ByteString
 import ch.epfl.bluebrain.nexus.tests.HttpClient._
-import ch.epfl.bluebrain.nexus.tests.Identity
 import ch.epfl.bluebrain.nexus.tests.Identity.storages.Coyote
 import ch.epfl.bluebrain.nexus.tests.Optics.{filterKey, filterMetadataKeys, projections}
 import ch.epfl.bluebrain.nexus.tests.iam.types.Permission
@@ -12,11 +11,12 @@ import io.circe.generic.semiauto.deriveDecoder
 import io.circe.{Decoder, Json}
 import monix.bio.Task
 import org.scalactic.source.Position
-import org.scalatest.Assertion
+import org.scalatest.{Assertion, Ignore}
 
 import scala.annotation.nowarn
 import scala.sys.process._
 
+@Ignore
 class RemoteStorageSpec extends StorageSpec {
 
   override def storageName: String = "external"
@@ -44,13 +44,10 @@ class RemoteStorageSpec extends StorageSpec {
     ()
   }
 
-  private def serviceAccountToken = tokensMap.get(Identity.ServiceAccount).credentials.token()
-
   override def createStorages: Task[Assertion] = {
     val payload = jsonContentOf(
       "/kg/storages/remote-disk.json",
       "endpoint" -> externalEndpoint,
-      "cred" -> serviceAccountToken,
       "read" -> "resources/read",
       "write" -> "files/write",
       "folder" -> remoteFolder,
@@ -60,7 +57,6 @@ class RemoteStorageSpec extends StorageSpec {
     val payload2 = jsonContentOf(
       "/kg/storages/remote-disk.json",
       "endpoint" -> externalEndpoint,
-      "cred" -> serviceAccountToken,
       "read" -> s"$storageName/read",
       "write" -> s"$storageName/write",
       "folder" -> remoteFolder,
@@ -188,7 +184,6 @@ class RemoteStorageSpec extends StorageSpec {
     val payload = jsonContentOf(
       "/kg/storages/remote-disk.json",
       "endpoint" -> externalEndpoint,
-      "cred" -> serviceAccountToken,
       "read" -> "resources/read",
       "write" -> "files/write",
       "folder" -> "nexustest",
diff --git a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala
index 1927c7b005..a02615e0d7 100644
--- a/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala
+++ b/tests/src/test/scala/ch/epfl/bluebrain/nexus/tests/kg/S3StorageSpec.scala
@@ -71,18 +71,14 @@ class S3StorageSpec extends StorageSpec {
       "/kg/storages/s3.json",
       "storageId" -> s"https://bluebrain.github.io/nexus/vocabulary/$storageId",
       "bucket" -> bucket,
-      "endpoint" -> s3Endpoint,
-      "accessKey" -> s3Config.accessKey.get,
-      "secretKey" -> s3Config.secretKey.get
+      "endpoint" -> s3Endpoint
     )
 
     val payload2 = jsonContentOf(
       "/kg/storages/s3.json",
       "storageId" -> s"https://bluebrain.github.io/nexus/vocabulary/${storageId}2",
       "bucket" -> bucket,
-      "endpoint" -> s3Endpoint,
-      "accessKey" -> s3Config.accessKey.get,
-      "secretKey" -> s3Config.secretKey.get
+      "endpoint" -> s3Endpoint
     ) deepMerge Json.obj(
       "region" -> Json.fromString("eu-west-2"),
       "readPermission" -> Json.fromString(s"$storageName/read"),
@@ -140,9 +136,7 @@ class S3StorageSpec extends StorageSpec {
       "/kg/storages/s3.json",
       "storageId" -> s"https://bluebrain.github.io/nexus/vocabulary/missing",
      "bucket" -> "foobar",
-      "endpoint" -> s3Endpoint,
-      "accessKey" -> s3Config.accessKey.get,
-      "secretKey" -> s3Config.secretKey.get
+      "endpoint" -> s3Endpoint
     )
 
     deltaClient.post[Json](s"/storages/$fullId", payload, Coyote) { (json, response) =>