From 06297bb767a1da733ba416a6a00036993890da7a Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Mon, 20 Jun 2022 12:28:23 +0100 Subject: [PATCH] Adding Helm chart --- .../on-push-master-publish-chart.yml | 36 ++++++++++ README.md | 50 +++++++++----- charts/opa-exporter/.helmignore | 23 +++++++ charts/opa-exporter/Chart.yaml | 7 ++ charts/opa-exporter/README.md | 57 ++++++++++++++++ charts/opa-exporter/README.md.gotmpl | 34 ++++++++++ charts/opa-exporter/templates/_helpers.tpl | 62 +++++++++++++++++ .../opa-exporter/templates/clusterrole.yaml | 19 ++++++ .../templates/clusterrolebinding.yaml | 22 ++++++ charts/opa-exporter/templates/deployment.yaml | 48 +++++++++++++ charts/opa-exporter/templates/service.yaml | 13 ++++ .../templates/serviceaccount.yaml | 10 +++ .../templates/servicemonitor.yaml | 18 +++++ charts/opa-exporter/values.yaml | 68 +++++++++++++++++++ 14 files changed, 449 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/on-push-master-publish-chart.yml create mode 100644 charts/opa-exporter/.helmignore create mode 100644 charts/opa-exporter/Chart.yaml create mode 100644 charts/opa-exporter/README.md create mode 100644 charts/opa-exporter/README.md.gotmpl create mode 100644 charts/opa-exporter/templates/_helpers.tpl create mode 100644 charts/opa-exporter/templates/clusterrole.yaml create mode 100644 charts/opa-exporter/templates/clusterrolebinding.yaml create mode 100644 charts/opa-exporter/templates/deployment.yaml create mode 100644 charts/opa-exporter/templates/service.yaml create mode 100644 charts/opa-exporter/templates/serviceaccount.yaml create mode 100644 charts/opa-exporter/templates/servicemonitor.yaml create mode 100644 charts/opa-exporter/values.yaml diff --git a/.github/workflows/on-push-master-publish-chart.yml b/.github/workflows/on-push-master-publish-chart.yml new file mode 100644 index 0000000..52195f4 --- /dev/null +++ b/.github/workflows/on-push-master-publish-chart.yml @@ -0,0 +1,36 @@ +--- + +name: Publish Charts + +on: 
+ push: + branches: + - master + paths: + - charts/** + +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Install Helm + uses: azure/setup-helm@v1 + with: + version: v3.8.1 + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.4.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/README.md b/README.md index a7ac214..6d0eb9a 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ If you want to read more about enforcing policies in Kubernetes, check out [this ## Design -![System design](https://raw.githubusercontent.com/mcelep/opa-scorecard/master/system_logical.png) +![System design](https://raw.githubusercontent.com/mcelep/opa-scorecard/master/system_logical.png) The goal of the system we put together is to provide insights to developers and platform users insights about OPA constraints that their application might be violating in a given namespace. We use Grafana for creating an example dashboard. Grafana fetches data it needs for creating the dashboard from Prometheus. We've written a small Go program - depicted as 'Constraint Violation Prometheus Exporter' in the diagram above - to query the Kubernetes API for constraint violations and expose data in Prometheus format. Gatekeeper/OPA is used in [Audit](https://open-policy-agent.github.io/gatekeeper/website/docs/audit) mode for our setup, we don't leverage Gatekeeper's capability to deny K8S resources that don't fulfill policy expectations. @@ -48,7 +48,7 @@ Bear in mind that, you will need to create a OPA policies off of your production For our blog post, we will be using the open source project [gatekeeper-library](https://github.com/open-policy-agent/gatekeeper-library) which contains a good set of example constraints. 
Moreover, the project structure is quite helpful in the sense of providing an example of how you can manage OPA constraints for your company: Rego language which is used for creating OPA policies should be unit tested thoroughly and in [src folder](https://github.com/open-policy-agent/gatekeeper-library/tree/master/src/general), you can find pure rego files and unit tests. [The library folder](https://github.com/open-policy-agent/gatekeeper-library/tree/master/library/general) finally contains the Gatekeeper constraint templates that are created out of the rego files in the src folder. Additionally, there's an example constraint for each template together with some target data that would result in both positive and negative results for the constraint. Rego based policies can get quite complex, so in our view it's a must to have Rego unit tests which cover both **happy & unhappy** paths. We'd recommend to go ahead and fork this project and remove and add policies that represent your company's requirements by following the overall project structure. With this approach you achieve compliance as code that can be easily applied to various environments. As mentioned earlier, there might be certain constraints which you don't want to directly enforce (MUST vs NICE-TO-HAVE): e.g. on a dev cluster you might not want to enforce **>1 replicas**, or before enforcing a specific constraint you might want to give platform users enough time to take the necessary precautions (as opposed to blocking their changes immediately). You control this behavious using [`enforcementAction`](https://open-policy-agent.github.io/gatekeeper/website/docs/violations#dry-run-enforcement-action). By default, `enforcementAction` is set to `deny` which is what we would describe as a **MUST** condition. -In our example, we will install all constraints with the **NICE-TO-HAVE** condition using ```enforcementAction: dryrun``` property. 
This will make sure that we don't directly impact any workload running on K8S clusters (we could also use [`enforcementAction: warn`](https://open-policy-agent.github.io/gatekeeper/website/docs/violations#warn-enforcement-action) for this scenario). +In our example, we will install all constraints with the **NICE-TO-HAVE** condition using `enforcementAction: dryrun` property. This will make sure that we don't directly impact any workload running on K8S clusters (we could also use [`enforcementAction: warn`](https://open-policy-agent.github.io/gatekeeper/website/docs/violations#warn-enforcement-action) for this scenario). ### Prometheus Exporter @@ -65,7 +65,7 @@ opa_scorecard_constraint_violations{kind="K8sAllowedRepos",name="repo-is-openpol Labels are used to represent each constraint violation and we will be using these labels later in the Grafana dashboard. -The Prometheus exporter program listens on tcp port ```9141``` by default and exposes metrics on path ```/metrics```. It can run locally on your development box as long as you have a valid Kubernetes configuration in your home folder (i.e. if you can run kubectl and have the right permissions). When running on the cluster a ```incluster``` parameter is passed in so that it knows where to look up for the cluster credentials. Exporter program connects to Kubernetes API every 10 seconds to scrape data from Kubernetes API. +The Prometheus exporter program listens on tcp port `9141` by default and exposes metrics on path `/metrics`. It can run locally on your development box as long as you have a valid Kubernetes configuration in your home folder (i.e. if you can run kubectl and have the right permissions). When running on the cluster a `incluster` parameter is passed in so that it knows where to look up for the cluster credentials. Exporter program connects to Kubernetes API every 10 seconds to scrape data from Kubernetes API. 
We've used [this](https://medium.com/teamzerolabs/15-steps-to-write-an-application-prometheus-exporter-in-go-9746b4520e26) blog post as the base for the code. @@ -82,10 +82,9 @@ Let's go ahead and prepare our components so that we have a Grafana dashboard to - [Helm](https://helm.sh/): We will install Prometheus and Grafana using helm - Optional: [Docker](https://www.docker.com/products/docker-desktop): Docker is only optional as we already publish the required image on dockerhub. - ### 1) Git submodule update -Run ```git submodule update --init``` to download gatekeeper-library dependency. This command will download the [gatekeeper-library](https://github.com/open-policy-agent/gatekeeper-library) dependency into folder ```gatekeeper-library/library```. +Run `git submodule update --init` to download gatekeeper-library dependency. This command will download the [gatekeeper-library](https://github.com/open-policy-agent/gatekeeper-library) dependency into folder `gatekeeper-library/library`. ### 2) Install OPA/Gatekeeper @@ -100,7 +99,7 @@ We've used [Tanzu Mission Control(TMC)](https://tanzu.vmware.com/mission-control ### 3) Install Gatekeeper example constraints -The script [gatekeeper-library/apply_gatekeeper_constraints.sh](gatekeeper-library/apply_gatekeeper_constraints.sh) uses kustomize to create constraint templates and then applies them on your cluster. So make sure that k8s cli is configured with the right context. After that [Ytt](https://carvel.dev/ytt/) is used to inject ```spec.enforcementAction: dryrun``` in order to have an enforcement action of [dryrun](https://open-policy-agent.github.io/gatekeeper/website/docs/violations/#dry-run-enforcement-action). +The script [gatekeeper-library/apply_gatekeeper_constraints.sh](gatekeeper-library/apply_gatekeeper_constraints.sh) uses kustomize to create constraint templates and then applies them on your cluster. So make sure that k8s cli is configured with the right context. 
After that [Ytt](https://carvel.dev/ytt/) is used to inject `spec.enforcementAction: dryrun` in order to have an enforcement action of [dryrun](https://open-policy-agent.github.io/gatekeeper/website/docs/violations/#dry-run-enforcement-action). Run the script with the following command: @@ -110,11 +109,25 @@ cd gatekeeper-library && ./apply_gatekeeper_constraints.sh ### 4) Install Prometheus Exporter -In folder [exporter-go](exporter-go) there's the source code of the program that exports information about constraint violations in Prometheus data format. The same folder also includes a script called [build_docker.sh](exporter-go/build_docker.sh) which builds a container and pushes it to [mcelep/opa_scorecard_exporter](https://hub.docker.com/r/mcelep/opa_scorecard_exporter). Container image is already publicly available though, so the only thing you need to do is to apply the resources that are in folder [exporter-k8s-resources](exporter-k8s-resources). The target namespace we selected for deploying our K8S resources is ```opa-exporter```. The K8S resources we want to create have the following functionality: +#### Using Helm + +The exporter can be installed using Helm like this: + +```bash +helm repo add opa-exporter https://mcelep.github.io/opa-scorecard +helm repo update +helm install demo opa-exporter/opa-exporter +``` + +See the [Helm chart](https://github.com/mcelep/opa-scorecard/tree/master/charts/opa-exporter) for more details. + +#### Using kubectl + +In folder [exporter-go](exporter-go) there's the source code of the program that exports information about constraint violations in Prometheus data format. The same folder also includes a script called [build_docker.sh](exporter-go/build_docker.sh) which builds a container and pushes it to [mcelep/opa_scorecard_exporter](https://hub.docker.com/r/mcelep/opa_scorecard_exporter). 
Container image is already publicly available though, so the only thing you need to do is to apply the resources that are in folder [exporter-k8s-resources](exporter-k8s-resources). The target namespace we selected for deploying our K8S resources is `opa-exporter`. The K8S resources we want to create have the following functionality: -- ```clusterrole.yaml``` & ```clusterrolebinding.yaml``` -> These resources create a clusterrole to access all resources of group ```constraints.gatekeeper.sh``` and a binding for that clusterrole -- ```deployment.yaml``` -> A deployment that will run the container image ```mcelep/opa_scorecard_exporter``` -- ```service.yaml``` -> A service that has annotation ```prometheus.io/scrape-slow: "true"``` to make sure that this service gets picked up by Prometheus +- `clusterrole.yaml` & `clusterrolebinding.yaml` -> These resources create a clusterrole to access all resources of group `constraints.gatekeeper.sh` and a binding for that clusterrole +- `deployment.yaml` -> A deployment that will run the container image `mcelep/opa_scorecard_exporter` +- `service.yaml` -> A service that has annotation `prometheus.io/scrape-slow: "true"` to make sure that this service gets picked up by Prometheus To apply these K8S resources: @@ -126,9 +139,9 @@ kubectl create namespace opa-exporter && kubectl -n opa-exporter apply -f export For installing Prometheus & Grafana, we will use a helm chart called [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). Folder [kube-prometheus-stack](kube-prometheus-stack) includes the relevant files for this step. -Along with Prometheus and Grafana, we also want to install a custom Grafana Dashboard that will display useful metrics about constraint violations. 
File [kube-prometheus-stack/cm-custom-dashboard.yaml](kube-prometheus-stack/cm-custom-dashboard.yaml) contains the dashboard configuration that we want to install, note the label ```grafana_dashboard: "1"``` in this file. This label is used as a directive for Grafana to pick up the content of this ConfigurationMap as a dashboard source. The file [grafana-opa-dashboard.json](kube-prometheus-stack/grafana-opa-dashboard.json) is a raw JSON export from Grafana and we used the content of this file to embed it into the configmap under key ```opa-dashboard.json```. +Along with Prometheus and Grafana, we also want to install a custom Grafana Dashboard that will display useful metrics about constraint violations. File [kube-prometheus-stack/cm-custom-dashboard.yaml](kube-prometheus-stack/cm-custom-dashboard.yaml) contains the dashboard configuration that we want to install, note the label `grafana_dashboard: "1"` in this file. This label is used as a directive for Grafana to pick up the content of this ConfigurationMap as a dashboard source. The file [grafana-opa-dashboard.json](kube-prometheus-stack/grafana-opa-dashboard.json) is a raw JSON export from Grafana and we used the content of this file to embed it into the configmap under key `opa-dashboard.json`. -The install script [kube-prometheus-stack/install.sh](kube-prometheus-stack/install.sh) creates a ConfigMap from file [cm-custom-dashboard.yaml](kube-prometheus-stack/cm-custom-dashboard.yaml) and then it uses helm to install kube-prometheus-stack chart into the namespace ```prometheus```. +The install script [kube-prometheus-stack/install.sh](kube-prometheus-stack/install.sh) creates a ConfigMap from file [cm-custom-dashboard.yaml](kube-prometheus-stack/cm-custom-dashboard.yaml) and then it uses helm to install kube-prometheus-stack chart into the namespace `prometheus`. 
Run the following command to install Prometheus & Grafana: @@ -140,19 +153,20 @@ After a few moments, all Prometheus components and Grafana should be up and running. ### 6) Log on to Grafana -We haven't provided an ingress or a service of ```type: LoadBalancer``` for our Grafana installation so the easies way to access our Grafana dashboard is by using port-forwarding from kubectl. +We haven't provided an ingress or a service of `type: LoadBalancer` for our Grafana installation so the easiest way to access our Grafana dashboard is by using port-forwarding from kubectl. Execute the following command to start a port-forwarding session to Grafana: ```bash - kubectl -n prometheus port-forward $(kubectl -n prometheus get pod -l app.kubernetes.io/name=grafana -o name | cut -d/ -f2) 3000:3000 - ``` +kubectl -n prometheus port-forward $(kubectl -n prometheus get pod -l app.kubernetes.io/name=grafana -o name | cut -d/ -f2) 3000:3000 +``` + +You can now hit the following url: `http://localhost:3000` with your browser and you should see a welcome screen that looks like the screenshot below. - You can now hit the following url: ```http://localhost:3000``` with your browser and you should see a welcome screen that looks like the screenshot below. +![grafana_welcome](https://raw.githubusercontent.com/mcelep/opa-scorecard/master/grafana_welcome.png) - ![grafana_welcome](https://raw.githubusercontent.com/mcelep/opa-scorecard/master/grafana_welcome.png) +The default username/password for Grafana as of this writing is `admin / prom-operator`. If these credentials do not work out you can also discover them via the following commands: -The default username/password for Grafana as of this writing is ```admin / prom-operator```. 
If these credentials do not work out you can also discover them via the following commands: ```bash kubectl -n prometheus get secrets prometheus-grafana -o jsonpath='{.data.admin-user}' | base64 -d kubectl -n prometheus get secrets prometheus-grafana -o jsonpath='{.data.admin-password}' | base64 -d diff --git a/charts/opa-exporter/.helmignore b/charts/opa-exporter/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/charts/opa-exporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/opa-exporter/Chart.yaml b/charts/opa-exporter/Chart.yaml new file mode 100644 index 0000000..59b3c82 --- /dev/null +++ b/charts/opa-exporter/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: opa-exporter +description: Prometheus exporter for OPA Gatekeeper. +type: application +version: 0.1.0 +appVersion: 0.0.4 +home: https://github.com/mcelep/opa-scorecard diff --git a/charts/opa-exporter/README.md b/charts/opa-exporter/README.md new file mode 100644 index 0000000..873c131 --- /dev/null +++ b/charts/opa-exporter/README.md @@ -0,0 +1,57 @@ +# opa-exporter + +![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.0.4](https://img.shields.io/badge/AppVersion-0.0.4-informational?style=flat-square) + +Prometheus exporter for OPA Gatekeeper. 
+ +## Get the Helm repository + +```shell +helm repo add opa-exporter https://mcelep.github.io/opa-scorecard +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the chart + +To install the chart with the release name `my-release`: + +```shell +helm install my-release opa-exporter/opa-exporter +``` + +## Uninstalling the chart + +To uninstall the `my-release` release: + +```shell +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Pod affinity | +| image.pullSecrets | list | `[]` | List of image pull secrets | +| image.repository | string | `"mcelep/opa_scorecard_exporter"` | Image repository and name | +| image.tag | string | `""` | Overrides the image tag whose default is the chart `appVersion` | +| nodeSelector | object | `{}` | Pod node selector | +| podAnnotations | object | `{}` | Pod annotations | +| podSecurityContext | object | `{}` | Pod security context | +| rbac.create | bool | `true` | Whether to create Cluster Role and Cluster Role Binding | +| rbac.extraClusterRoleRules | list | `[]` | Extra ClusterRole rules | +| rbac.useExistingRole | string | `nil` | Use an existing ClusterRole/Role | +| replicaCount | int | `1` | Count of Pod replicas | +| resources | object | `{}` | Resources for the Agent container | +| securityContext | object | `{}` | Security context for the Agent container | +| service.port | int | `80` | Service port | +| service.type | string | `"ClusterIP"` | Service type | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Whether to create the Service Account used by the Pod | +| serviceAccount.name | string | `""` | If not set and `create` is `true`, a name is generated using 
the fullname template | +| serviceMonitor.enabled | bool | `true` | Whether to install `ServiceMonitor` or not | +| tolerations | list | `[]` | Pod tolerations | diff --git a/charts/opa-exporter/README.md.gotmpl b/charts/opa-exporter/README.md.gotmpl new file mode 100644 index 0000000..5c1a2c8 --- /dev/null +++ b/charts/opa-exporter/README.md.gotmpl @@ -0,0 +1,34 @@ +{{ template "chart.header" . }} + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +{{ template "chart.description" . }} + +## Get the Helm repository + +```shell +helm repo add opa-exporter https://mcelep.github.io/opa-scorecard +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the chart + +To install the chart with the release name `my-release`: + +```shell +helm install my-release opa-exporter/opa-exporter +``` + +## Uninstalling the chart + +To uninstall the `my-release` release: + +```shell +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +{{ template "chart.valuesSection" . }} diff --git a/charts/opa-exporter/templates/_helpers.tpl b/charts/opa-exporter/templates/_helpers.tpl new file mode 100644 index 0000000..7928965 --- /dev/null +++ b/charts/opa-exporter/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "this.name" -}} + {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "this.fullname" -}} + {{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} + {{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- if contains $name .Release.Name }} + {{- .Release.Name | trunc 63 | trimSuffix "-" }} + {{- else }} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} + {{- end }} + {{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "this.chart" -}} + {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "this.labels" -}} +helm.sh/chart: {{ template "this.chart" . }} +{{ template "this.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "this.selectorLabels" -}} +app.kubernetes.io/name: {{ template "this.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "this.serviceAccountName" -}} + {{- if .Values.serviceAccount.create }} + {{- default (include "this.fullname" .) .Values.serviceAccount.name }} + {{- else }} + {{- default "default" .Values.serviceAccount.name }} + {{- end }} +{{- end }} diff --git a/charts/opa-exporter/templates/clusterrole.yaml b/charts/opa-exporter/templates/clusterrole.yaml new file mode 100644 index 0000000..63eea30 --- /dev/null +++ b/charts/opa-exporter/templates/clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "this.fullname" . }} + labels: {{- include "this.labels" . 
| nindent 4 }} +rules: + - apiGroups: + - constraints.gatekeeper.sh + resources: + - "*" + verbs: + - get + - list + - watch + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- end -}} diff --git a/charts/opa-exporter/templates/clusterrolebinding.yaml b/charts/opa-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..726568e --- /dev/null +++ b/charts/opa-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "this.fullname" . }} + labels: {{- include "this.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "this.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + {{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "this.fullname" . }} + {{- else }} + name: {{ .Values.rbac.useExistingRole }} + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/charts/opa-exporter/templates/deployment.yaml b/charts/opa-exporter/templates/deployment.yaml new file mode 100644 index 0000000..7b8410f --- /dev/null +++ b/charts/opa-exporter/templates/deployment.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "this.fullname" . }} + labels: {{- include "this.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "this.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + labels: {{- include "this.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "this.serviceAccountName" . 
}} + securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 9141 + name: http + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/opa-exporter/templates/service.yaml b/charts/opa-exporter/templates/service.yaml new file mode 100644 index 0000000..34fbb37 --- /dev/null +++ b/charts/opa-exporter/templates/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "this.fullname" . }} + labels: {{- include "this.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: {{- include "this.selectorLabels" . | nindent 4 }} diff --git a/charts/opa-exporter/templates/serviceaccount.yaml b/charts/opa-exporter/templates/serviceaccount.yaml new file mode 100644 index 0000000..599ea9a --- /dev/null +++ b/charts/opa-exporter/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "this.serviceAccountName" . }} + labels: {{- include "this.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/opa-exporter/templates/servicemonitor.yaml b/charts/opa-exporter/templates/servicemonitor.yaml new file mode 100644 index 0000000..57efcbe --- /dev/null +++ b/charts/opa-exporter/templates/servicemonitor.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "this.fullname" . }} + labels: {{- include "this.labels" . | nindent 4 }} +spec: + endpoints: + - honorLabels: true + port: http + scheme: http + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "this.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/opa-exporter/values.yaml b/charts/opa-exporter/values.yaml new file mode 100644 index 0000000..23bebf6 --- /dev/null +++ b/charts/opa-exporter/values.yaml @@ -0,0 +1,68 @@ +# -- Count of Pod replicas +replicaCount: 1 + +# Image configuration for the Agent container +image: + # -- Image repository and name + repository: mcelep/opa_scorecard_exporter + # -- Overrides the image tag whose default is the chart `appVersion` + tag: "" + # -- List of image pull secrets + pullSecrets: [] + +# Creation of the Service Account used by the Pod +serviceAccount: + # -- Whether to create the Service Account used by the Pod + create: true + # -- If not set and `create` is `true`, a name is generated using the fullname template + name: "" + # -- Annotations to add to the service account + annotations: {} + +# -- Pod annotations +podAnnotations: {} + +# -- Pod security context +podSecurityContext: {} + +# -- Security context for the Agent container +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + # -- Service type + type: ClusterIP + # -- Service port + port: 80 + +serviceMonitor: + # -- Whether to install `ServiceMonitor` or not + enabled: true + +# -- 
Resources for the Agent container +resources: {} + +# -- Pod node selector +nodeSelector: {} + +# -- Pod tolerations +tolerations: [] + +# -- Pod affinity +affinity: {} + +rbac: + # -- Whether to create Cluster Role and Cluster Role Binding + create: true + # -- (string) Use an existing ClusterRole/Role + useExistingRole: + # -- Extra ClusterRole rules + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: []