diff --git a/charts/controller/templates/_helpers.tmpl b/charts/controller/templates/_helpers.tmpl deleted file mode 100644 index 422a5ae3f..000000000 --- a/charts/controller/templates/_helpers.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -{{/* -Set apiVersion based on .Capabilities.APIVersions -*/}} -{{- define "rbacAPIVersion" -}} -{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1beta1" -}} -rbac.authorization.k8s.io/v1beta1 -{{- else if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1alpha1" -}} -rbac.authorization.k8s.io/v1alpha1 -{{- else -}} -rbac.authorization.k8s.io/v1 -{{- end -}} -{{- end -}} diff --git a/charts/controller/templates/_helpers.tpl b/charts/controller/templates/_helpers.tpl new file mode 100644 index 000000000..21d2b32ae --- /dev/null +++ b/charts/controller/templates/_helpers.tpl @@ -0,0 +1,166 @@ +{{/* +Set apiVersion based on .Capabilities.APIVersions +*/}} +{{- define "rbacAPIVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1beta1" -}} +rbac.authorization.k8s.io/v1beta1 +{{- else if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1alpha1" -}} +rbac.authorization.k8s.io/v1alpha1 +{{- else -}} +rbac.authorization.k8s.io/v1 +{{- end -}} +{{- end -}} + + +{{/* Generate controller deployment envs */}} +{{- define "controller.envs" -}} +{{ $redisNodeCount := .Values.redis.replicas | int }} +env: +- name: REGISTRATION_MODE + value: {{ .Values.registration_mode }} +# NOTE(bacongobbler): use drycc/registry_proxy to work around Docker --insecure-registry requirements +- name: "DRYCC_REGISTRY_PROXY_HOST" + value: "127.0.0.1" +# Environmental variable value for $INGRESS_CLASS +- name: "DRYCC_INGRESS_CLASS" + value: "{{ .Values.global.ingress_class }}" +- name: "DRYCC_PLATFORM_DOMAIN" + value: "{{ .Values.global.platform_domain }}" +- name: "K8S_API_VERIFY_TLS" + value: "{{ .Values.k8s_api_verify_tls }}" +- name: "DRYCC_REGISTRY_PROXY_PORT" + value: "{{ .Values.global.registry_proxy_port }}" +- name: "APP_STORAGE" + value: "{{ .Values.global.storage}}" +- name: "DRYCC_REGISTRY_LOCATION" + value: "{{ .Values.global.registry_location }}" +- name: "DRYCC_REGISTRY_SECRET_PREFIX" + value: "{{ .Values.global.registry_secret_prefix }}" +- name: "IMAGE_PULL_POLICY" + value: "{{ .Values.app_image_pull_policy }}" +- name: "KUBERNETES_CLUSTER_DOMAIN" + value: "{{ .Values.global.cluster_domain }}" +{{- if (.Values.app_storage_class) }} +- name: "DRYCC_APP_STORAGE_CLASS" + value: "{{ .Values.app_storage_class }}" +{{- end }} +- name: "TZ" + value: {{ .Values.time_zone | default "UTC" | quote }} +{{- if (.Values.deploy_hook_urls) }} +- name: DRYCC_DEPLOY_HOOK_URLS + value: "{{ .Values.deploy_hook_urls }}" +- name: DRYCC_DEPLOY_HOOK_SECRET_KEY + valueFrom: + secretKeyRef: + name: deploy-hook-key + key: secret-key +{{- end }} +- name: DRYCC_SECRET_KEY + valueFrom: + secretKeyRef: + name: django-secret-key + key: secret-key +- name: DRYCC_BUILDER_KEY + valueFrom: + secretKeyRef: + name: builder-key-auth + key: builder-key +{{- if eq .Values.global.database_location "off-cluster" }} +- name: DRYCC_DATABASE_NAME + valueFrom: + secretKeyRef: + name: database-creds + key: name +- name: DRYCC_DATABASE_SERVICE_HOST + valueFrom: + secretKeyRef: + name: database-creds + key: host +- name: DRYCC_DATABASE_SERVICE_PORT + valueFrom: + secretKeyRef: + name: database-creds + key: port +{{- end }} +- name: DRYCC_DATABASE_USER + valueFrom: + secretKeyRef: + name: database-creds + key: user +- name: DRYCC_DATABASE_PASSWORD + valueFrom: + 
secretKeyRef: + name: database-creds + key: password +- name: WORKFLOW_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{ if eq .Values.global.redis_location "on-cluster"}} +- name: DRYCC_REDIS_ADDRS + value: "{{range $i := until $redisNodeCount}}drycc-redis-{{$i}}.drycc-redis.{{$.Release.Namespace}}.svc.{{$.Values.global.cluster_domain}}:{{$.Values.redis.port}}{{if lt (add 1 $i) $redisNodeCount}},{{end}}{{end}}" +{{- else if eq .Values.global.redis_location "off-cluster" }} +- name: DRYCC_REDIS_ADDRS + valueFrom: + secretKeyRef: + name: redis-creds + key: addrs +{{- end }} +- name: DRYCC_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-creds + key: password +{{- if eq .Values.global.influxdb_location "off-cluster" }} +- name: "INFLUXDB_URL" + valueFrom: + secretKeyRef: + name: influxdb-creds + key: url +{{- else }} +- name: "INFLUXDB_URL" + value: http://$(DRYCC_INFLUXDB_SERVICE_HOST):$(DRYCC_INFLUXDB_SERVICE_PORT_TRANSPORT) +{{- end }} +- name: "INFLUXDB_BUCKET" + valueFrom: + secretKeyRef: + name: influxdb-creds + key: bucket +- name: "INFLUXDB_ORG" + valueFrom: + secretKeyRef: + name: influxdb-creds + key: org +- name: "INFLUXDB_TOKEN" + valueFrom: + secretKeyRef: + name: influxdb-creds + key: token +{{- range $key, $value := .Values.environment }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end }} +{{- end }} + +{{/* Generate controller deployment limits */}} +{{- define "controller.limits" -}} +{{- if or (.Values.limits_cpu) (.Values.limits_memory) }} +resources: + limits: +{{- if (.Values.limits_cpu) }} + cpu: {{.Values.limits_cpu}} +{{- end }} +{{- if (.Values.limits_memory) }} + memory: {{.Values.limits_memory}} +{{- end }} +{{- end }} +{{- end }} + + +{{/* Generate controller deployment volumeMounts */}} +{{- define "controller.volumeMounts" -}} +volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true +{{- end }} diff --git a/charts/controller/templates/controller-celery.yaml b/charts/controller/templates/controller-celery.yaml new file mode 100644 index 000000000..d7044d762 --- /dev/null +++ b/charts/controller/templates/controller-celery.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: drycc-controller-celery + labels: + heritage: drycc + annotations: + component.drycc.cc/version: {{ .Values.image_tag }} +spec: + replicas: {{ .Values.celery_replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: drycc-controller-celery + template: + metadata: + labels: + app: drycc-controller-celery + spec: + serviceAccount: drycc-controller-celery + containers: + - name: drycc-controller-celery-high + image: {{.Values.image_registry}}{{.Values.org}}/controller:{{.Values.image_tag}} + command: + - /bin/bash + - -c + args: + - celery -A api worker -Q priority.high --autoscale=32,1 --loglevel=WARNING + imagePullPolicy: {{.Values.image_pull_policy}} + {{- include "controller.limits" . | indent 10 }} + {{- include "controller.envs" . | indent 10 }} + volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true + - name: drycc-controller-celery-middle + image: {{.Values.image_registry}}{{.Values.org}}/controller:{{.Values.image_tag}} + command: + - /bin/bash + - -c + args: + - celery -A api worker -Q priority.middle --autoscale=16,1 --loglevel=WARNING + {{- include "controller.limits" . | indent 10 }} + {{- include "controller.envs" . 
| indent 10 }} + volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true + imagePullPolicy: {{.Values.image_pull_policy}} + - name: drycc-controller-celery-low + image: {{.Values.image_registry}}{{.Values.org}}/controller:{{.Values.image_tag}} + command: + - /bin/bash + - -c + args: + - celery -A api worker -Q priority.low --autoscale=8,1 --loglevel=WARNING + {{- include "controller.limits" . | indent 10 }} + {{- include "controller.envs" . | indent 10 }} + volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true + imagePullPolicy: {{.Values.image_pull_policy}} + volumes: + - name: slugrunner-config + configMap: + name: slugrunner-config diff --git a/charts/controller/templates/controller-cronjob-daily.yaml b/charts/controller/templates/controller-cronjob-daily.yaml new file mode 100644 index 000000000..ac203606a --- /dev/null +++ b/charts/controller/templates/controller-cronjob-daily.yaml @@ -0,0 +1,48 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: drycc-controller-cronjob-daily + labels: + heritage: drycc + annotations: + component.drycc.cc/version: {{ .Values.image_tag }} +spec: + failedJobsHistoryLimit: 1 + schedule: "0 0 * * *" + successfulJobsHistoryLimit: 3 + jobTemplate: + spec: + template: + spec: + restartPolicy: OnFailure + containers: + - image: {{.Values.image_registry}}{{.Values.org}}/controller:{{.Values.image_tag}} + imagePullPolicy: {{.Values.image_pull_policy}} + name: drycc-controller-push-data-to-influxdb + command: + - /bin/bash + - -c + args: + - python /app/manage.py push_data_to_influxdb + {{- include "controller.envs" . | indent 12 }} + volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true + - image: {{.Values.image_registry}}{{.Values.org}}/controller:{{.Values.image_tag}} + imagePullPolicy: {{.Values.pull_policy}} + name: drycc-controller-load-db-state-to-k8s + command: + - /bin/bash + - -c + args: + - python -u /app/manage.py load_db_state_to_k8s + {{- include "controller.envs" . 
| indent 12 }} + volumeMounts: + - mountPath: /etc/slugrunner + name: slugrunner-config + readOnly: true + volumes: + - name: slugrunner-config + configMap: + name: slugrunner-config diff --git a/charts/controller/templates/controller-deployment.yaml b/charts/controller/templates/controller-deployment.yaml index 3819d238a..359f80ee7 100644 --- a/charts/controller/templates/controller-deployment.yaml +++ b/charts/controller/templates/controller-deployment.yaml @@ -1,4 +1,3 @@ -{{ $redisNodeCount := .Values.redis.replicas | int }} apiVersion: apps/v1 kind: Deployment metadata: @@ -8,7 +7,7 @@ metadata: annotations: component.drycc.cc/version: {{ .Values.image_tag }} spec: - replicas: 1 + replicas: {{ .Values.replicas }} strategy: rollingUpdate: maxSurge: 1 @@ -43,141 +42,8 @@ spec: ports: - containerPort: 8000 name: http -{{- if or (.Values.limits_cpu) (.Values.limits_memory) }} - resources: - limits: -{{- if (.Values.limits_cpu) }} - cpu: {{.Values.limits_cpu}} -{{- end }} -{{- if (.Values.limits_memory) }} - memory: {{.Values.limits_memory}} -{{- end }} -{{- end }} - env: - - name: REGISTRATION_MODE - value: {{ .Values.registration_mode }} - # NOTE(bacongobbler): use drycc/registry_proxy to work around Docker --insecure-registry requirements - - name: "DRYCC_REGISTRY_PROXY_HOST" - value: "127.0.0.1" - # Environmental variable value for $INGRESS_CLASS - - name: "DRYCC_INGRESS_CLASS" - value: "{{ .Values.global.ingress_class }}" - - name: "DRYCC_PLATFORM_DOMAIN" - value: "{{ .Values.global.platform_domain }}" - - name: "K8S_API_VERIFY_TLS" - value: "{{ .Values.k8s_api_verify_tls }}" - - name: "DRYCC_REGISTRY_PROXY_PORT" - value: "{{ .Values.global.registry_proxy_port }}" - - name: "APP_STORAGE" - value: "{{ .Values.global.storage}}" - - name: "DRYCC_REGISTRY_LOCATION" - value: "{{ .Values.global.registry_location }}" - - name: "DRYCC_REGISTRY_SECRET_PREFIX" - value: "{{ .Values.global.registry_secret_prefix }}" - - name: "IMAGE_PULL_POLICY" - value: "{{ .Values.app_image_pull_policy }}" - - name: "KUBERNETES_CLUSTER_DOMAIN" - value: "{{ .Values.global.cluster_domain }}" -{{- if (.Values.app_storage_class) }} - - name: "DRYCC_APP_STORAGE_CLASS" - value: "{{ .Values.app_storage_class }}" -{{- end }} - - name: "TZ" - value: {{ .Values.time_zone | default "UTC" | quote }} -{{- if (.Values.deploy_hook_urls) }} - - name: DRYCC_DEPLOY_HOOK_URLS - value: "{{ .Values.deploy_hook_urls }}" - - name: DRYCC_DEPLOY_HOOK_SECRET_KEY - valueFrom: - secretKeyRef: - name: deploy-hook-key - key: secret-key -{{- end }} - - name: DRYCC_SECRET_KEY - valueFrom: - secretKeyRef: - name: django-secret-key - key: secret-key - - name: DRYCC_BUILDER_KEY - valueFrom: - secretKeyRef: - name: builder-key-auth - key: builder-key -{{- if eq .Values.global.database_location "off-cluster" }} - - name: DRYCC_DATABASE_NAME - valueFrom: - secretKeyRef: - name: database-creds - key: name - - name: DRYCC_DATABASE_SERVICE_HOST - valueFrom: - secretKeyRef: - name: database-creds - key: host - - name: DRYCC_DATABASE_SERVICE_PORT - valueFrom: - secretKeyRef: - name: database-creds - key: port -{{- end }} - - name: DRYCC_DATABASE_USER - valueFrom: - secretKeyRef: - name: database-creds - key: user - - name: DRYCC_DATABASE_PASSWORD - valueFrom: - secretKeyRef: - name: database-creds - key: password - - name: WORKFLOW_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace -{{ if eq .Values.global.redis_location "on-cluster"}} - - name: DRYCC_REDIS_ADDRS - value: "{{range $i := until 
$redisNodeCount}}drycc-redis-{{$i}}.drycc-redis.{{$.Release.Namespace}}.svc.{{$.Values.global.cluster_domain}}:{{$.Values.redis.port}}{{if lt (add 1 $i) $redisNodeCount}},{{end}}{{end}}" -{{- else if eq .Values.global.redis_location "off-cluster" }} - - name: DRYCC_REDIS_ADDRS - valueFrom: - secretKeyRef: - name: redis-creds - key: addrs -{{- end }} - - name: DRYCC_REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-creds - key: password - {{- if eq .Values.global.influxdb_location "off-cluster" }} - - name: "INFLUXDB_URL" - valueFrom: - secretKeyRef: - name: influxdb-creds - key: url - {{- else }} - - name: "INFLUXDB_URL" - value: http://$(DRYCC_INFLUXDB_SERVICE_HOST):$(DRYCC_INFLUXDB_SERVICE_PORT_TRANSPORT) - {{- end }} - - name: "INFLUXDB_BUCKET" - valueFrom: - secretKeyRef: - name: influxdb-creds - key: bucket - - name: "INFLUXDB_ORG" - valueFrom: - secretKeyRef: - name: influxdb-creds - key: org - - name: "INFLUXDB_TOKEN" - valueFrom: - secretKeyRef: - name: influxdb-creds - key: token - {{- range $key, $value := .Values.environment }} - - name: {{ $key }} - value: {{ $value | quote }} - {{- end }} + {{- include "controller.limits" . | indent 10 }} + {{- include "controller.envs" . | indent 10 }} volumeMounts: - mountPath: /etc/slugrunner name: slugrunner-config diff --git a/charts/controller/values.yaml b/charts/controller/values.yaml index 819ba9ae6..0554c5b3a 100644 --- a/charts/controller/values.yaml +++ b/charts/controller/values.yaml @@ -18,6 +18,10 @@ k8s_api_verify_tls: "true" # Set storageClassName, It is used for application mount. app_storage_class: "" +# Set controller deployment replicas +replicas: 1 +# Set celery replicas +celery_replicas: 1 # Any custom controller environment variables # can be specified as key-value pairs under environment diff --git a/rootfs/api/management/commands/push_data_to_influxdb.py b/rootfs/api/management/commands/push_data_to_influxdb.py new file mode 100644 index 000000000..0f828997b --- /dev/null +++ b/rootfs/api/management/commands/push_data_to_influxdb.py @@ -0,0 +1,51 @@ +import json + +from django.utils import timezone +from django.core.management.base import BaseCommand +from django.conf import settings + +from influxdb_client import Point, WritePrecision +from influxdb_client.client.write_api import SYNCHRONOUS +from api.utils import get_influxdb_client, unit_to_byte + +from api.models import App + + +class Command(BaseCommand): + """Management command for push data to influxdb""" + + def handle(self, *args, **options): + print(f"push data to influxdb when {timezone.now()}") + client = get_influxdb_client() + write_api = client.write_api(write_options=SYNCHRONOUS) + + apps = App.objects.all() + records = [] + for app in apps: + config = app.config_set.latest() + limits = json.loads( + settings.KUBERNETES_NAMESPACE_DEFAULT_LIMIT_RANGES_SPEC) + limits_default = limits.get('limits')[0].get('default') + p = [Point("drycc_limit").tag("type", _).tag("namespace", config.app) + .field("cpu", int(config.cpu.get(type, limits_default.get('cpu'))[:-1])) # noqa + .field("memory", unit_to_byte(config.memory.get(type, limits_default.get('memory')))) # noqa + .time(timezone.now(), WritePrecision.MS) for _ in app.types] # noqa + records.extend(p) + + for resource in app.resource_set.all(): + p = Point("drycc_resource") \ + .tag("name", resource.name) \ + .tag("namespace", resource.app) \ + .field("plan", resource.plan) \ + .time(timezone.now(), WritePrecision.MS) + records.append(p) + + for volume in app.volume_set.all(): + p = 
Point("drycc_volume") \ + .tag("name", volume.name) \ + .tag("namespace", volume.app) \ + .field("size", unit_to_byte(volume.size)) \ + .time(timezone.now(), WritePrecision.MS) + records.append(p) + + write_api.write(bucket=settings.INFLUXDB_BUCKET, record=records) diff --git a/rootfs/api/models/__init__.py b/rootfs/api/models/__init__.py index af1e984b5..df9f27c0b 100644 --- a/rootfs/api/models/__init__.py +++ b/rootfs/api/models/__init__.py @@ -12,7 +12,7 @@ import urllib.parse import uuid import requests - +import json from datetime import timedelta from django.conf import settings from django.db import models @@ -26,10 +26,11 @@ from .. import __version__ as drycc_version from ..exceptions import DryccException, AlreadyExists, ServiceUnavailable, UnprocessableEntity # noqa - logger = logging.getLogger(__name__) session = None -resource_changed = Signal(providing_args=["resource_id"]) +config_changed = Signal(providing_args=["config"]) +resource_changed = Signal(providing_args=["resource"]) +volume_changed = Signal(providing_args=["volume"]) def get_session(): @@ -153,8 +154,8 @@ class Meta: from .volume import Volume # noqa from .resource import Resource # noqa -from ..tasks import retrieve_resource # noqa -from ..utils import dict_merge # noqa +from ..tasks import retrieve_resource, write_point # noqa +from ..utils import dict_merge, unit_to_byte # noqa # define update/delete callbacks for synchronizing # models with the configuration management backend @@ -264,11 +265,71 @@ def create_auth_token(sender, instance=None, created=False, **kwargs): @receiver(resource_changed) def resource_changed_handle(sender, **kwargs): + resource = kwargs.get('resource') + # retrieve_resource data = { "task_id": uuid.uuid4().hex, - "resource_id": kwargs.get("resource_id"), + "resource_id": resource.uuid, } retrieve_resource.apply_async( args=(data, ), eta=now() + timedelta(seconds=30) ) + # influxdb write point + data = { + "task_id": uuid.uuid4().hex, + "measurement": "drycc_resource", + "records": [{ + "tag": { + "name": resource.name, + "namespace": resource.app.id, + }, + "field": { + "plan": resource.plan + } + }] + } + write_point.apply_async(args=(data, )) + + +@receiver(config_changed) +def config_changed_handle(sender, **kwargs): + # influxdb write point + config = kwargs.get('config') + limits = json.loads(settings.KUBERNETES_NAMESPACE_DEFAULT_LIMIT_RANGES_SPEC) + limits_default = limits.get('limits')[0].get('default') + data = { + "task_id": uuid.uuid4().hex, + "measurement": "drycc_limit", + "records": [{ + "tag": { + "type": _, + "namespace": config.app.id, + }, + "field": { + "memory": unit_to_byte(config.memory.get(type, limits_default.get('memory'))), # noqa + "cpu": int(config.cpu.get(type, limits_default.get('cpu'))[:-1]) + } + } for _ in config.app.types] + } + write_point.apply_async(args=(data, )) + + +@receiver(volume_changed) +def volume_changed_handle(sender, **kwargs): + # influxdb write point + volume = kwargs.get('volume') + data = { + "task_id": uuid.uuid4().hex, + "measurement": "drycc_volume", + "records": [{ + "tag": { + "name": volume.name, + "namespace": volume.app.id, + }, + "field": { + "size": unit_to_byte(volume.size) + } + }] + } + write_point.apply_async(args=(data, )) diff --git a/rootfs/api/models/app.py b/rootfs/api/models/app.py index 60b5538c1..cac14048e 100644 --- a/rootfs/api/models/app.py +++ b/rootfs/api/models/app.py @@ -115,6 +115,10 @@ def _get_job_id(self, container_type): app = self.id return "{app}-{container_type}".format(**locals()) + @property + 
def types(self): + return list(self.procfile_structure.keys()) + def _get_command(self, container_type): """ Return the kubernetes "container arguments" to be sent off to the scheduler. diff --git a/rootfs/api/models/config.py b/rootfs/api/models/config.py index 4455e2cd5..002969d1e 100644 --- a/rootfs/api/models/config.py +++ b/rootfs/api/models/config.py @@ -5,7 +5,7 @@ import json from api.models.release import Release -from api.models import UuidAuditedModel +from api.models import UuidAuditedModel, config_changed from api.exceptions import DryccException, UnprocessableEntity @@ -186,7 +186,7 @@ def save(self, **kwargs): else: data[key] = value setattr(self, attr, data) - + config_changed.send(sender=Config, config=self) self.set_healthcheck(previous_config) self._migrate_legacy_healthcheck() self.set_registry() diff --git a/rootfs/api/models/resource.py b/rootfs/api/models/resource.py index 7c5de6c85..fa1fdc014 100644 --- a/rootfs/api/models/resource.py +++ b/rootfs/api/models/resource.py @@ -55,7 +55,7 @@ def attach(self, *args, **kwargs): self._scheduler.svcat.create_instance( self.app.id, self.name, **kwargs ) - resource_changed.send(sender=Resource, resource_id=str(self.uuid)) + resource_changed.send(sender=Resource, resource=self) except KubeException as e: msg = 'There was a problem creating the resource ' \ '{} for {}'.format(self.name, self.app_id) @@ -104,7 +104,7 @@ def bind(self, *args, **kwargs): try: self._scheduler.svcat.create_binding( self.app.id, self.name, **kwargs) - resource_changed.send(sender=Resource, resource_id=str(self.uuid)) + resource_changed.send(sender=Resource, resource=self) except KubeException as e: msg = 'There was a problem binding the resource ' \ '{} for {}'.format(self.name, self.app_id) @@ -142,7 +142,7 @@ def attach_update(self, *args, **kwargs): self._scheduler.svcat.put_instance( self.app.id, self.name, version, **kwargs ) - resource_changed.send(sender=Resource, resource_id=str(self.uuid)) + resource_changed.send(sender=Resource, resource=self) except KubeException as e: msg = 'There was a problem update the resource ' \ '{} for {}'.format(self.name, self.app_id) diff --git a/rootfs/api/models/volume.py b/rootfs/api/models/volume.py index 6dddb1625..a274adbf2 100644 --- a/rootfs/api/models/volume.py +++ b/rootfs/api/models/volume.py @@ -3,7 +3,7 @@ from django.conf import settings from jsonfield import JSONField from api.exceptions import DryccException, ServiceUnavailable, AlreadyExists -from api.models import UuidAuditedModel, validate_label +from api.models import UuidAuditedModel, validate_label, volume_changed from scheduler import KubeException logger = logging.getLogger(__name__) @@ -30,6 +30,7 @@ def save(self, *args, **kwargs): # Attach volume, updates k8s if self.created == self.updated: self.attach() + volume_changed.send(sender=Volume, volume=self) # Save to DB return super(Volume, self).save(*args, **kwargs) diff --git a/rootfs/api/tasks.py b/rootfs/api/tasks.py index 991d6fd4f..03e7047ea 100644 --- a/rootfs/api/tasks.py +++ b/rootfs/api/tasks.py @@ -1,13 +1,17 @@ # Create your tasks here import time import logging -from django.core import signals from datetime import timedelta + +from django.core import signals from django.utils.timezone import now +from django.conf import settings from celery import shared_task +from influxdb_client import Point, WritePrecision +from influxdb_client.client.write_api import SYNCHRONOUS -from .models.resource import Resource - +from api.models.resource import Resource +from api.utils import 
get_influxdb_client logger = logging.getLogger(__name__) @@ -33,3 +37,25 @@ def retrieve_resource(data): logger.info("retrieve task not found resource: {}".format(data['resource_id'])) # noqa finally: signals.request_finished.send(sender=data['task_id']) + + +@shared_task +def write_point(data): + signals.request_started.send(sender=data['task_id']) + try: + client = get_influxdb_client() + write_api = client.write_api(write_options=SYNCHRONOUS) + ps = [] + for r in data["records"]: + p = Point(data["measurement"]) + for k, v in r["tag"].items(): + p.tag(k, v) + for k, v in r["field"].items(): + p.field(k, v) + p.time(now(), WritePrecision.MS) + ps.append(p) + write_api.write(bucket=settings.INFLUXDB_BUCKET, record=ps) + except Exception as e: + logger.info("write influxdb point fail: {}".format(e)) + finally: + signals.request_finished.send(sender=data['task_id']) diff --git a/rootfs/api/utils.py b/rootfs/api/utils.py index b61b7f731..87f3f20d7 100644 --- a/rootfs/api/utils.py +++ b/rootfs/api/utils.py @@ -7,6 +7,7 @@ import logging import random import threading +import math from copy import deepcopy from django.conf import settings from influxdb_client import InfluxDBClient @@ -174,6 +175,31 @@ def get_influxdb_client(): return local.influxdb_client +def unit_to_byte(size): + """ + size: str + where unit in K, M, G, T convert to B + """ + if size[-2:-1].isalpha() and size[-1].isalpha(): + size = size[:-1] + if size[-1].isalpha(): + size = size.upper() + _ = float(size[:-1]) + if size[-1] == 'K': + _ *= math.pow(1024, 1) + elif size[-1] == 'M': + _ *= math.pow(1024, 2) + elif size[-1] == 'G': + _ *= math.pow(1024, 3) + elif size[-1] == 'G': + _ *= math.pow(1024, 3) + elif size[-1] == 'T': + _ *= math.pow(1024, 4) + elif size[-1] == 'P': + _ *= math.pow(1024, 5) + return round(_) + + if __name__ == "__main__": import doctest doctest.testmod() diff --git a/rootfs/api/views.py b/rootfs/api/views.py index 040992765..21d3b0533 100644 --- a/rootfs/api/views.py +++ b/rootfs/api/views.py @@ -1,6 +1,7 @@ """ RESTful view classes for presenting Drycc API objects. """ +import logging from copy import deepcopy from django.http import Http404, HttpResponse @@ -21,12 +22,10 @@ from api import authentication, models, permissions, serializers, viewsets from api.models import AlreadyExists, ServiceUnavailable, DryccException, \ UnprocessableEntity - -import logging - -from api.serializers import VolumeSerializer +from api.utils import get_influxdb_client logger = logging.getLogger(__name__) +client = get_influxdb_client() class ReadinessCheckView(View): @@ -276,7 +275,7 @@ def run(self, request, **kwargs): raise DryccException("command is a required field") volumes = request.data.get('volumes', None) if volumes: - VolumeSerializer().validate_path(volumes) + serializers.VolumeSerializer().validate_path(volumes) rc, output = app.run(self.request.user, request.data['command'], volumes) return Response({'exit_code': rc, 'output': str(output)}) @@ -724,7 +723,7 @@ def path(self, request, *args, **kwargs): raise DryccException("path is a required field") obj = self.get_object() container_types = [_ for _ in new_path.keys() - if _ not in obj.app.procfile_structure.keys() or + if _ not in obj.app.types or _ not in obj.app.structure.keys()] if container_types: raise DryccException("process type {} is not included in profile". 
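A quick reference for the two Python additions above, grounded only in what the patch itself shows: the sketch below approximates what the new unit_to_byte helper in rootfs/api/utils.py computes (it assumes the size string ends in a K/M/G/T/P unit, optionally followed by "i" or "B", and is a simplified stand-in rather than the exact code from the patch), and the data dict illustrates the payload shape the write_point task in rootfs/api/tasks.py consumes; the volume name and namespace are made up for the example.

import math

def unit_to_byte(size: str) -> int:
    """Convert a human-readable size such as '500K', '512Mi' or '2G' to bytes."""
    if size[-2:-1].isalpha() and size[-1].isalpha():
        size = size[:-1]  # drop the trailing 'i'/'B' of 'Mi', 'GB', ...
    value = float(size[:-1])
    exponent = {'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}[size[-1].upper()]
    return round(value * math.pow(1024, exponent))

assert unit_to_byte("512Mi") == 512 * 1024 ** 2
assert unit_to_byte("2G") == 2 * 1024 ** 3

# Payload shape expected by the write_point task (illustrative values only):
data = {
    "task_id": "0" * 32,  # uuid.uuid4().hex in the signal handlers
    "measurement": "drycc_volume",
    "records": [{
        "tag": {"name": "myvolume", "namespace": "myapp"},
        "field": {"size": unit_to_byte("2G")},
    }],
}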
diff --git a/rootfs/bin/boot b/rootfs/bin/boot index 6bb6abdab..a9c3a1e33 100755 --- a/rootfs/bin/boot +++ b/rootfs/bin/boot @@ -32,15 +32,6 @@ echo "" echo "Starting up Gunicorn" gunicorn -c /app/drycc/gunicorn/config.py api.wsgi & -echo "" -echo "Loading database information to Kubernetes in the background" -echo "Log of the run can be found in /app/data/logs/load_db_state_to_k8s.log" -# python -u avoids output buffering -nohup python -u /app/manage.py load_db_state_to_k8s > /app/data/logs/load_db_state_to_k8s.log & -# celery -nohup su-exec nobody celery -A api worker -Q priority.low --autoscale=8,1 --loglevel=WARNING > /app/data/logs/celery.log 2>&1 & -nohup su-exec nobody celery -A api worker -Q priority.middle --autoscale=16,1 --loglevel=WARNING > /app/data/logs/celery.log 2>&1 & -nohup su-exec nobody celery -A api worker -Q priority.high --autoscale=32,1 --loglevel=WARNING > /app/data/logs/celery.log 2>&1 & # smart shutdown on SIGTERM (SIGINT is handled by gunicorn) function on_exit() { GUNICORN_PID=$(cat /tmp/gunicorn.pid)
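With this change the boot script no longer launches the celery workers or load_db_state_to_k8s in the background: the workers run in the drycc-controller-celery Deployment and the periodic jobs in the daily CronJob added above. Those workers are what turn write_point payloads into InfluxDB points; below is a minimal sketch of that conversion, mirroring the loop in rootfs/api/tasks.py. It assumes the influxdb-client package is installed, and the record values are illustrative, not taken from a real app.

from datetime import datetime, timezone
from influxdb_client import Point, WritePrecision

record = {
    "tag": {"name": "myvolume", "namespace": "myapp"},
    "field": {"size": 2 * 1024 ** 3},
}
point = Point("drycc_volume")  # measurement name comes from the task payload
for k, v in record["tag"].items():
    point.tag(k, v)
for k, v in record["field"].items():
    point.field(k, v)
point.time(datetime.now(timezone.utc), WritePrecision.MS)
# In the task, the resulting points are passed to write_api.write(bucket=..., record=...)
print(point.to_line_protocol())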