From 14bbf97f25fe974f78c47883af27c6ca46185161 Mon Sep 17 00:00:00 2001
From: Pablo
Date: Thu, 6 Jun 2019 15:37:07 +0200
Subject: [PATCH] First wave: apis, worker, prometheus, alertmanager, nats, logstash, lmtp, filebeat, client, redis

---
 deploy-api.yaml | 11 -
 deploy-cache.yaml | 5 -
 deploy-lmtp.yaml | 7 -
 deploy-logstash.yaml | 7 -
 deploy-mq.yaml | 5 -
 deploy-prometheus.yaml | 6 -
 deploy-web-client.yaml | 9 -
 deploy-worker.yaml | 8 -
 roles/alertmanager/handlers/main.yml | 5 -
 roles/alertmanager/tasks/main.yml | 19 -
 .../templates/alertmanager.service.j2 | 10 -
 .../templates/alertmanager.yml.j2 | 19 -
 roles/apicommon/tasks/main.yml | 12 -
 roles/apicommon/templates/api.nginx.j2 | 74 --
 roles/apicommon/templates/filebeat.yml.j2 | 155 ----
 roles/apiv1/tasks/main.yml | 80 --
 roles/apiv1/templates/apiv1.ini.j2 | 98 ---
 roles/apiv1/templates/apiv1.uwsgi.j2 | 9 -
 .../templates/email-reset-password-link.yaml | 23 -
 roles/apiv2/tasks/main.yml | 45 --
 roles/apiv2/templates/caliopen-api.service.j2 | 10 -
 roles/apiv2/templates/caliopen-api.yaml.j2 | 73 --
 roles/cache/handlers/main.yml | 2 -
 roles/cache/tasks/main.yml | 20 -
 .../cache/templates/redis-exporter.service.j2 | 10 -
 roles/cache/templates/redis.conf.j2 | 761 ------------------
 roles/client/tasks/main.yml | 73 --
 .../client/templates/caliopen_client.nginx.j2 | 41 -
 roles/client/templates/filebeat.yml.j2 | 155 ----
 roles/client/templates/server.j2 | 3 -
 roles/client/templates/server.json.j2 | 20 -
 roles/client/templates/web-client.service.j2 | 9 -
 roles/filebeat/handlers/main.yml | 6 -
 roles/filebeat/tasks/main.yml | 9 -
 roles/lmtp/tasks/main.yml | 34 -
 roles/lmtp/templates/caliopen-lmtp.service.j2 | 10 -
 roles/lmtp/templates/caliopen-lmtp.yaml.j2 | 73 -
 roles/lmtp/templates/filebeat.yml.j2 | 123 ---
 roles/logstash/tasks/main.yml | 18 -
 roles/logstash/templates/jvm.options | 77 --
 roles/logstash/templates/logstash.conf | 64 --
 roles/logstash/templates/logstash.yml.j2 | 200 -----
 roles/nats/tasks/main.yml | 17 -
 roles/nats/templates/gnatsd.service.j2 | 10 -
 .../prometheus-nats-exporter.service.j2 | 10 -
 roles/prometheus/files/rule1 | 19 -
 roles/prometheus/handlers/main.yml | 8 -
 roles/prometheus/tasks/main.yml | 47 --
 .../elasticsearch-exporter.service.j2 | 10 -
 .../templates/prometheus.service.j2 | 10 -
 roles/prometheus/templates/prometheus.yml.j2 | 47 --
 roles/worker/tasks/main.yml | 108 ---
 .../templates/caliopen-worker.service.j2 | 9 -
 roles/worker/templates/filebeat.yml.j2 | 124 ---
 roles/worker/templates/idpoller.service.j2 | 9 -
 roles/worker/templates/idpoller.yaml.j2 | 23 -
 roles/worker/templates/imapworker.service.j2 | 9 -
 roles/worker/templates/imapworker.yaml.j2 | 54 --
 .../worker/templates/twitterworker.service.j2 | 9 -
 roles/worker/templates/twitterworker.yaml.j2 | 46 --
 60 files changed, 2967 deletions(-)
 delete mode 100644 deploy-api.yaml
 delete mode 100644 deploy-cache.yaml
 delete mode 100644 deploy-lmtp.yaml
 delete mode 100644 deploy-logstash.yaml
 delete mode 100644 deploy-mq.yaml
 delete mode 100644 deploy-prometheus.yaml
 delete mode 100644 deploy-web-client.yaml
 delete mode 100644 deploy-worker.yaml
 delete mode 100644 roles/alertmanager/handlers/main.yml
 delete mode 100644 roles/alertmanager/tasks/main.yml
 delete mode 100644 roles/alertmanager/templates/alertmanager.service.j2
 delete mode 100644 roles/alertmanager/templates/alertmanager.yml.j2
 delete mode 100644 roles/apicommon/tasks/main.yml
 delete mode 100644 roles/apicommon/templates/api.nginx.j2
 delete mode 100644 roles/apicommon/templates/filebeat.yml.j2
 delete mode 100644 roles/apiv1/tasks/main.yml
 delete mode 100644 roles/apiv1/templates/apiv1.ini.j2
 delete mode 100644 roles/apiv1/templates/apiv1.uwsgi.j2
 delete mode 100644 roles/apiv2/files/notifiers/templates/email-reset-password-link.yaml
 delete mode 100644 roles/apiv2/tasks/main.yml
 delete mode 100644 roles/apiv2/templates/caliopen-api.service.j2
 delete mode 100644 roles/apiv2/templates/caliopen-api.yaml.j2
 delete mode 100644 roles/cache/handlers/main.yml
 delete mode 100644 roles/cache/tasks/main.yml
 delete mode 100644 roles/cache/templates/redis-exporter.service.j2
 delete mode 100644 roles/cache/templates/redis.conf.j2
 delete mode 100644 roles/client/tasks/main.yml
 delete mode 100644 roles/client/templates/caliopen_client.nginx.j2
 delete mode 100644 roles/client/templates/filebeat.yml.j2
 delete mode 100644 roles/client/templates/server.j2
 delete mode 100644 roles/client/templates/server.json.j2
 delete mode 100644 roles/client/templates/web-client.service.j2
 delete mode 100644 roles/filebeat/handlers/main.yml
 delete mode 100644 roles/filebeat/tasks/main.yml
 delete mode 100644 roles/lmtp/tasks/main.yml
 delete mode 100644 roles/lmtp/templates/caliopen-lmtp.service.j2
 delete mode 100644 roles/lmtp/templates/caliopen-lmtp.yaml.j2
 delete mode 100644 roles/lmtp/templates/filebeat.yml.j2
 delete mode 100644 roles/logstash/tasks/main.yml
 delete mode 100644 roles/logstash/templates/jvm.options
 delete mode 100644 roles/logstash/templates/logstash.conf
 delete mode 100644 roles/logstash/templates/logstash.yml.j2
 delete mode 100644 roles/nats/tasks/main.yml
 delete mode 100644 roles/nats/templates/gnatsd.service.j2
 delete mode 100644 roles/nats/templates/prometheus-nats-exporter.service.j2
 delete mode 100644 roles/prometheus/files/rule1
 delete mode 100644 roles/prometheus/handlers/main.yml
 delete mode 100644 roles/prometheus/tasks/main.yml
 delete mode 100644 roles/prometheus/templates/elasticsearch-exporter.service.j2
 delete mode 100644 roles/prometheus/templates/prometheus.service.j2
 delete mode 100644 roles/prometheus/templates/prometheus.yml.j2
 delete mode 100644 roles/worker/tasks/main.yml
 delete mode 100644 roles/worker/templates/caliopen-worker.service.j2
 delete mode 100644 roles/worker/templates/filebeat.yml.j2
 delete mode 100644 roles/worker/templates/idpoller.service.j2
 delete mode 100644 roles/worker/templates/idpoller.yaml.j2
 delete mode 100644 roles/worker/templates/imapworker.service.j2
 delete mode 100644 roles/worker/templates/imapworker.yaml.j2
 delete mode 100644 roles/worker/templates/twitterworker.service.j2
 delete mode 100644 roles/worker/templates/twitterworker.yaml.j2

diff --git a/deploy-api.yaml b/deploy-api.yaml
deleted file mode 100644
index e8ceead..0000000
--- a/deploy-api.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-- hosts: api
-  roles:
-    - common
-    - iptables
-    - python-process
-    - nginx
-    - commonelk
-    - filebeat
-    - apiv1
-    - apiv2
-    - apicommon
diff --git a/deploy-cache.yaml b/deploy-cache.yaml
deleted file mode 100644
index 3617a1f..0000000
--- a/deploy-cache.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-- hosts: cache
-  roles:
-    - common
-    - iptables
-    - cache
diff --git a/deploy-lmtp.yaml b/deploy-lmtp.yaml
deleted file mode 100644
index d3dee64..0000000
--- a/deploy-lmtp.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-- hosts: lmtp
-  roles:
-    - common
-    - iptables
-    - commonelk
-    - filebeat
-    - lmtp
diff --git a/deploy-logstash.yaml b/deploy-logstash.yaml
deleted file mode 100644
index a6d34f3..0000000
--- a/deploy-logstash.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-- hosts: logstash
-  roles:
-    - common
-    - 
iptables - - java8 - - commonelk - - logstash diff --git a/deploy-mq.yaml b/deploy-mq.yaml deleted file mode 100644 index dc7c940..0000000 --- a/deploy-mq.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: mq - roles: - - common - - iptables - - nats diff --git a/deploy-prometheus.yaml b/deploy-prometheus.yaml deleted file mode 100644 index be8ad1b..0000000 --- a/deploy-prometheus.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: prometheus - roles: - - common - - iptables - - prometheus - - alertmanager diff --git a/deploy-web-client.yaml b/deploy-web-client.yaml deleted file mode 100644 index d236715..0000000 --- a/deploy-web-client.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: web_client - roles: - - common - - iptables - - nginx - - commonelk - - filebeat - - client - - maintenance diff --git a/deploy-worker.yaml b/deploy-worker.yaml deleted file mode 100644 index ee1dea5..0000000 --- a/deploy-worker.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: worker - roles: - - common - - iptables - - python-process - - commonelk - - filebeat - - worker diff --git a/roles/alertmanager/handlers/main.yml b/roles/alertmanager/handlers/main.yml deleted file mode 100644 index 45a614f..0000000 --- a/roles/alertmanager/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ -- name: start alertmanager - service: name=alertmanager state=started enabled=yes - -- name: restart alertmanager - service: name=alertmanager state=restarted diff --git a/roles/alertmanager/tasks/main.yml b/roles/alertmanager/tasks/main.yml deleted file mode 100644 index da09e2d..0000000 --- a/roles/alertmanager/tasks/main.yml +++ /dev/null @@ -1,19 +0,0 @@ -- name: install alertmanager - copy: - src: "{{ dist_directory }}/ext/alertmanager-{{ alertmanager_version }}.linux-amd64/alertmanager" - dest: /usr/local/sbin/alertmanager - mode: 0711 - -- name: install service for alertmanager - template: src=alertmanager.service.j2 dest=/etc/systemd/system/alertmanager.service - - -- name: create configuration directory - file: - path: /etc/prometheus - state: directory - -- name: configure alertmanager - template: src=alertmanager.yml.j2 dest=/etc/prometheus/alertmanager.yml - notify: restart alertmanager - diff --git a/roles/alertmanager/templates/alertmanager.service.j2 b/roles/alertmanager/templates/alertmanager.service.j2 deleted file mode 100644 index 5b6a303..0000000 --- a/roles/alertmanager/templates/alertmanager.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=AlertManager service - -[Service] -Restart=always -ExecStart=/usr/local/alertmanager-{{ alertmanager_version }}.linux-amd64/alertmanager -config.file=/etc/prometheus/alertmanager.yml -web.listen-address {{ facter_ipaddress_eth1 }}:9091 -ExecStop=pkill alertmanager - -[Install] -WantedBy=local.target diff --git a/roles/alertmanager/templates/alertmanager.yml.j2 b/roles/alertmanager/templates/alertmanager.yml.j2 deleted file mode 100644 index cc88d3b..0000000 --- a/roles/alertmanager/templates/alertmanager.yml.j2 +++ /dev/null @@ -1,19 +0,0 @@ -global: - smtp_from: 'alert@caliopen.org' - smtp_smarthost: 'localhost:25' - -route: - group_by: ['alertname', 'service'] - group_wait: 30s - group_interval: 5m - repeat_interval: 1h - receiver: team-ops - routes: - - match: - severity: critical - receiver: team-ops - -receivers: - - name: 'team-ops' - email_configs: - - to: 'ops@caliopen.org' diff --git a/roles/apicommon/tasks/main.yml b/roles/apicommon/tasks/main.yml deleted file mode 100644 index 36fdd5d..0000000 --- a/roles/apicommon/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ -- name: 
configure nginx - template: - src: api.nginx.j2 - dest: /etc/nginx/sites-enabled/api - notify: restart nginx - -- name: configure filebeat - template: - src: filebeat.yml.j2 - dest: /etc/filebeat/filebeat.yml - notify: restart filebeat - diff --git a/roles/apicommon/templates/api.nginx.j2 b/roles/apicommon/templates/api.nginx.j2 deleted file mode 100644 index d28be0e..0000000 --- a/roles/apicommon/templates/api.nginx.j2 +++ /dev/null @@ -1,74 +0,0 @@ -upstream uwsgi_apiv1 { - least_conn; - {% for host in groups['api'] %} - server {{ hostvars[host]['backend_ip'] }}:3001; - {% endfor %} -} - -upstream apiv2 { - least_conn; - {% for host in groups['api'] %} - server {{ hostvars[host]['backend_ip'] }}:6544; - {% endfor %} -} - -server { - listen 443 ssl; - listen [::]:443 ssl; - server_name api.{{ caliopen_domain_name }}; - - client_max_body_size 5m; - - ssl_certificate /etc/nginx/certs/{{ caliopen_domain_name }}.crt; - ssl_certificate_key /etc/nginx/certs/{{ caliopen_domain_name }}.key; - ssl_prefer_server_ciphers On; - ssl_protocols TLSv1.1 TLSv1.2; - ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS; - ssl_session_cache builtin:1000 shared:SSL:10m; - - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $remote_addr; - - location /api/v1 { - include uwsgi_params; - uwsgi_pass uwsgi_apiv1; - } - - location /api/v2 { - proxy_pass http://apiv2; - } -} - -server { - listen {{ facter_ipaddress_eth1 }}:80; - allow {{ facter_network_eth1 }}/24; - - server_name {{ ansible_hostname }}.{{ caliopen_domain_name }}; - - # rewrite ^(.*) https://$server_name$1 permanent; - - # XXX temporary situation until client use https - location /api/v1 { - include uwsgi_params; - uwsgi_pass uwsgi_apiv1; - } - - location /api/v2 { - proxy_pass http://apiv2; - } -} - - -server { - listen {{ facter_ipaddress_eth1 }}:9145; - allow {{ facter_network_eth1 }}/24; - deny all; - location /metrics { - content_by_lua ' - metric_connections:set(ngx.var.connections_reading, {"reading"}) - metric_connections:set(ngx.var.connections_waiting, {"waiting"}) - metric_connections:set(ngx.var.connections_writing, {"writing"}) - prometheus:collect() - '; - } -} diff --git a/roles/apicommon/templates/filebeat.yml.j2 b/roles/apicommon/templates/filebeat.yml.j2 deleted file mode 100644 index 9ff8855..0000000 --- a/roles/apicommon/templates/filebeat.yml.j2 +++ /dev/null @@ -1,155 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#========================== Modules configuration ============================ -filebeat.modules: - -#-------------------------------- Nginx Module ------------------------------- -- module: nginx - # Access logs - access: - enabled: true - - # Ingest Node pipeline to use. Options are `with_plugins` (default) - # and `no_plugins`. Use `no_plugins` if you don't have the geoip or - # the user agent Node ingest plugins installed. - #var.pipeline: with_plugins - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: /var/log/filebeat/nginx-access.log - - # Prospector configuration (advanced). 
Any prospector configuration option - # can be added under this section. - prospector: - fields: - type: "nginx-log" - exclude_lines: ['^192\.168\.1\.[0-9]+'] - - # Error logs - error: - enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: /var/log/filebeat/nginx-error.log - - # Prospector configuration (advanced). Any prospector configuration option - # can be added under this section. - #prospector: - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - paths: - #- /var/log/uwsgi/app/apiv1.log - - /var/log/user.log - - fields: - type: "syslog" - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -#output.elasticsearch: - # Array of hosts to connect to. - #hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. 
- #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -output.logstash: - # The Logstash hosts - hosts: [{% for host in groups['logstash']%}"{{ hostvars[host]['backend_ip'] }}:5044",{% endfor %}] - - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] diff --git a/roles/apiv1/tasks/main.yml b/roles/apiv1/tasks/main.yml deleted file mode 100644 index 06f5665..0000000 --- a/roles/apiv1/tasks/main.yml +++ /dev/null @@ -1,80 +0,0 @@ -- name: Install dependencies - action: apt pkg={{item}} state=installed - with_items: - - uwsgi - - uwsgi-plugin-python - - gcc - - python-dev - - libffi-dev - -- name: create geoip2 databases directory - file: - path: /var/lib/geoip2 - state: directory - tags: - - geoip2 - -- name: install geoip2 databases - unarchive: - src: http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.tar.gz - dest: /var/lib/geoip2/ - remote_src: yes - tags: - - geoip2 - -- name: install geoip2 databases in top directory - shell: mv /var/lib/geoip2/GeoLite2-Country_*/GeoLite2-Country.mmdb /var/lib/geoip2/GeoLite2-Country.mmdb - tags: - - geoip2 - -- name: configure uwsgi - template: - src: apiv1.uwsgi.j2 - dest: /etc/uwsgi/apps-enabled/apiv1.ini - -- name: copy caliopen packages - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/{{ item }}" - dest: /var/tmp - with_items: - - caliopen_storage-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pi-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pgp-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_main-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_api-{{ caliopen_version }}-py2.py3-none-any.whl - tags: - - deploy - -- name: install caliopen packages - shell: cd /var/tmp && pip install {{ item }} - with_items: - - caliopen_storage-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pi-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pgp-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_main-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_api-{{ caliopen_version }}-py2.py3-none-any.whl - tags: - - deploy - -- name: install some missing python dependencies - pip: - name: "{{ item }}" - with_items: - - strict-rfc3339 - - webcolors - - pastescript - -- name: install caliopen API configuration file - template: - src: apiv1.ini.j2 - dest: /etc/caliopen/apiv1.ini - tags: - - deploy - -- name: install swagger.json - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/swagger.json" - dest: /etc/caliopen/swagger.json - mode: 0644 - tags: - - deploy diff --git a/roles/apiv1/templates/apiv1.ini.j2 b/roles/apiv1/templates/apiv1.ini.j2 deleted file mode 100644 index 2bc441f..0000000 --- a/roles/apiv1/templates/apiv1.ini.j2 +++ /dev/null @@ -1,98 +0,0 @@ -### -# app configuration 
-# http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html -### - -[app:main] -use = egg:caliopen_api - -pyramid.reload_templates = true -pyramid.debug_authorization = false -pyramid.debug_notfound = false -pyramid.debug_routematch = false -pyramid.default_locale_name = en -pyramid.includes = - pyramid_kvs - cornice - caliopen_api.base - caliopen_api.message - caliopen_api.user - pyramid_swagger - -kvs.cache = {"kvs": "redis", - "ttl": 86400, - "kvs_kwargs": {"host": "{{ hostvars['cache1']['backend_ip'] }}"}, - "key_prefix": "tokens::", - "codec": "json"} - -# Caliopen related configuration -caliopen.config = - global:/etc/caliopen/caliopen.yaml - -caliopen_api.route_prefix = /api/v1 - -# Services to load -caliopen_api.services = caliopen_api.base - caliopen_api.user - caliopen_api.message - -# Swagger configuration -pyramid_swagger.schema_directory = /etc/caliopen -### -# wsgi server configuration -### - -[server:main] -use = egg:waitress#main -host = 0.0.0.0 -port = 6543 - -### -# logging configuration -# http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html -### - -[loggers] -keys = root, caliopen, waitress - -[handlers] -keys = console, syslog - -[formatters] -keys = generic, syslog - -[logger_root] -level = DEBUG -handlers = console, syslog - -[logger_caliopen] -level = DEBUG -handlers = console, syslog -qualname = caliopen -propagate = 0 - -[logger_waitress] -level = DEBUG -handlers = console, syslog -qualname = waitress -propagate = 0 - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[handler_syslog] -class=handlers.SysLogHandler -level=DEBUG -formatter=syslog -args=('/dev/log', handlers.SysLogHandler.LOG_USER) - - - -[formatter_generic] -format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s - -[formatter_syslog] -format = %(name)s[%(threadName)s]: %(message)s diff --git a/roles/apiv1/templates/apiv1.uwsgi.j2 b/roles/apiv1/templates/apiv1.uwsgi.j2 deleted file mode 100644 index e4f70af..0000000 --- a/roles/apiv1/templates/apiv1.uwsgi.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[uwsgi] -plugin = python -socket = {{ hostvars[ansible_hostname]['backend_ip'] }}:3001 -chdir = /usr/local/lib/python2.7/dist-packages/caliopen_api -paste = config:/etc/caliopen/apiv1.ini -paste-logger = true -workers = 8 -master = true -buffer-size = 32768 diff --git a/roles/apiv2/files/notifiers/templates/email-reset-password-link.yaml b/roles/apiv2/files/notifiers/templates/email-reset-password-link.yaml deleted file mode 100644 index 541c8ba..0000000 --- a/roles/apiv2/files/notifiers/templates/email-reset-password-link.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# django like formatting for string blocks -# fields available within template blocks : -# - user's given_name => given_name -# - user's family_name => family_name -# - instance domain name => domain -# - url of the reset password link => url - -subject: "Demande de réinitialisation de votre mot de passe" -body_plain: "\n -Bonjour {{ given_name }} {{ family_name }},\n -nous avons pris note d'une demande de réinitialisation de votre mot de passe sur {{ domain }}.\n -Pour le réinitialiser, veuillez cliquer sur ce lien :\n -\n -{{ url }}\n -\n -et suivre les indications à l'écran.\n -\n -Si vous n'êtes pas à l'origine de cette demande, vous pouvez ignorer ce mail.\n -\n -Cordialement,\n -L'équipe de Caliopen.\n -" diff --git a/roles/apiv2/tasks/main.yml b/roles/apiv2/tasks/main.yml deleted file mode 100644 index a11c6e5..0000000 --- 
a/roles/apiv2/tasks/main.yml +++ /dev/null @@ -1,45 +0,0 @@ -- name: create caliopen configuration directory - file: - path: /etc/caliopen/notifiers - state: directory - tags: - - deploy - -- name: api configuration file - template: src=caliopen-api.yaml.j2 dest=/etc/caliopen/caliopen-api.yaml - tags: - - deploy - -- name: install swagger.json - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/swagger.json" - dest: /etc/caliopen/swagger.json - mode: 0644 - tags: - - deploy - -- name: install notifiers templates - copy: - src: notifiers/templates - dest: /etc/caliopen/notifiers - tags: - - deploy - -- name: copy caliopen binary - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/{{ item }}" - dest: /usr/local/bin - mode: 0711 - with_items: - - caliopen_rest - tags: - - deploy - -- name: install caliopen api as a systemd service - template: src=caliopen-api.service.j2 dest=/etc/systemd/system/caliopen-api.service - -- name: enable caliopen api service - shell: systemctl enable caliopen-api.service - -- name: start caliopen api service - shell: systemctl start caliopen-api.service diff --git a/roles/apiv2/templates/caliopen-api.service.j2 b/roles/apiv2/templates/caliopen-api.service.j2 deleted file mode 100644 index 4a99e70..0000000 --- a/roles/apiv2/templates/caliopen-api.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Caliopen apiv2 service - -[Service] -Restart=always -ExecStart=/usr/local/bin/caliopen_rest serve -c /etc/caliopen/caliopen-api -ExecStop=pkill caliopen_api - -[Install] -WantedBy=local.target diff --git a/roles/apiv2/templates/caliopen-api.yaml.j2 b/roles/apiv2/templates/caliopen-api.yaml.j2 deleted file mode 100644 index df0636b..0000000 --- a/roles/apiv2/templates/caliopen-api.yaml.j2 +++ /dev/null @@ -1,73 +0,0 @@ -ApiConfig: - listen_interface: {{ hostvars[ansible_hostname]['backend_ip'] }} - listen_port: 6544 - hostname: https://api.{{ caliopen_domain_name }} - port: 443 - BackendConfig: - backend_name: cassandra - backend_settings: - hosts: -{% for host in groups['store'] %} - - {{ hostvars[host]['backend_ip'] }} -{% endfor %} - keyspace: caliopen - consistency_level: 1 - raw_size_limit: 1048576 # max size in bytes for objects in db. Use S3 interface if larger. 
- object_store: s3 - object_store_settings: - endpoint: {{ hostvars['object_store1']['backend_ip'] }}:9000 - access_key: {{ object_store_access_key }} - secret_key: {{ object_store_secret_key }} - location: eu-fr-paris - buckets: - raw_messages: caliopen-raw-messages # bucket name to put raw messages to - temporary_attachments: caliopen-tmp-attachments # bucket name to store draft attachments - use_vault: true - vault_settings: - url: https://vault.{{ caliopen_domain_name }}:8200 - username: {{ vault_api_username }} - password: {{ vault_api_password }} - IndexConfig: - index_name: elasticsearch - index_settings: - hosts: -{% for host in groups['store'] %} - - http://{{ hostvars[host]['backend_ip'] }}:9200 -{% endfor %} - NatsConfig: - url: nats://{{ hostvars['mq1']['backend_ip'] }}:4222 - outSMTP_topic: outboundSMTP # topic's name to post "send" draft order - outIMAP_topic: outboundIMAP # topic's name for "send" draft order via remote SMTP+IMAP - outTWITTER_topic: twitter_dm # topics's name for "send" draft order via TWITTER - contacts_topic: contactAction # topic's name to post messages regarding contacts' events - keys_topic: discoverKeyAction # topic-s name to post messages regarding public key discovery - - swaggerSpec: /etc/caliopen/swagger.json #absolute path or relative path to go.server bin - RedisConfig: - host: {{ hostvars['cache1']['backend_ip'] }}:6379 - password: "" #no password set - db: 0 #use default db - NotifierConfig: - base_url: https://{{ caliopen_domain_name }} - admin_username: admin - templates_path: "/etc/caliopen/notifiers/templates/" - Providers: # temporary supported providers list for remote identities before moving this data into store facility - - name: gmail - protocol: email - infos: - client_id: {{ gmail_oauth_client_id }} - client_secret: {{ gmail_oauth_client_secret }} - imapserver: imap.gmail.com:993 - smtpserver: smtp.gmail.com:587 - - name: twitter - protocol: twitter - infos: - consumer_key: {{ twitter_oauth_app_key }} - consumer_secret: {{ twitter_oauth_app_secret }} - -ProxyConfig: - listen_interface: 0.0.0.0 - port: 31415 - routes: - - /api/v2/: 127.0.0.1:6544 - - /: {{ hostvars['api1']['backend_ip'] }}:6543 # route "/" catch all other requests diff --git a/roles/cache/handlers/main.yml b/roles/cache/handlers/main.yml deleted file mode 100644 index 923ea3d..0000000 --- a/roles/cache/handlers/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -- name: restart redis - service: name=redis-server state=restarted diff --git a/roles/cache/tasks/main.yml b/roles/cache/tasks/main.yml deleted file mode 100644 index ad89bcc..0000000 --- a/roles/cache/tasks/main.yml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Install dependencies - action: apt pkg={{item}} state=installed - with_items: - - redis-server - -- name: configure redis server - template: src=redis.conf.j2 dest=/etc/redis/redis.conf - notify: restart redis - -- name: install redis_exporter - copy: - src: "{{ dist_directory }}/ext/redis_exporter" - dest: /usr/local/bin/redis_exporter - mode: 0711 - -- name: install redis_exporter service - template: src=redis-exporter.service.j2 dest=/etc/systemd/system/redis-exporter.service - -- name: enable redis_exporter service - service: name=redis-exporter state=started enabled=yes diff --git a/roles/cache/templates/redis-exporter.service.j2 b/roles/cache/templates/redis-exporter.service.j2 deleted file mode 100644 index 7f1d5da..0000000 --- a/roles/cache/templates/redis-exporter.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=redis prometheus exporter service 
- -[Service] -Restart=always -ExecStart=/usr/local/bin/redis_exporter -web.listen-address {{ facter_ipaddress_eth1 }}:9121 -redis.addr {{ facter_ipaddress_eth1 }}:6379 -ExecStop=pkill redis_exporter - -[Install] -WantedBy=local.target diff --git a/roles/cache/templates/redis.conf.j2 b/roles/cache/templates/redis.conf.j2 deleted file mode 100644 index 273d3c4..0000000 --- a/roles/cache/templates/redis.conf.j2 +++ /dev/null @@ -1,761 +0,0 @@ -# Redis configuration file example - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis server but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################ GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize yes - -# When running daemonized, Redis writes a pid file in /var/run/redis.pid by -# default. You can specify a custom pid file location here. -pidfile /var/run/redis/redis-server.pid - -# Accept connections on the specified port, default is 6379. -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# By default Redis listens for connections from all the network interfaces -# available on the server. It is possible to listen to just one or multiple -# interfaces using the "bind" configuration directive, followed by one or -# more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -bind {{ hostvars[inventory_hostname]['backend_ip'] }} - -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. 
-# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 60 seconds. -tcp-keepalive 0 - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile /var/log/redis/redis-server.log - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving at all commenting all the "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 
-# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir /var/lib/redis - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. 
-# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The biggest the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEES that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. 
-# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### LIMITS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -# Don't use more memory than the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU cache, or to set -# a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). 
-# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key accordingly to the LRU algorithm -# volatile-random -> remove a random key with an expire set -# allkeys-random -> remove a random key, any key -# volatile-ttl -> remove the key with the nearest expire time (minor TTL) -# noeviction -> don't expire at all, just return an error on write operations -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are not suitable keys for eviction. -# -# At the date of writing this commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy volatile-lru - -# LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. -# -# maxmemory-samples 3 - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead to wait for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log . Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. 
-# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceed the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). 
-# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enalbed at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. -latency-monitor-threshold 0 - -############################# Event notification ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# by zero or multiple characters. The empty string means that notifications -# are disabled at all. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. 
Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Similarly to hashes, small lists are also encoded in a special way in order -# to save a lot of space. The special representation is only used when -# you are under the following limits: -list-max-ziplist-entries 512 -list-max-ziplist-value 64 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happens to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# active rehashing the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply form time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). 
-# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform accordingly to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. 
-aof-rewrite-incremental-fsync yes - diff --git a/roles/client/tasks/main.yml b/roles/client/tasks/main.yml deleted file mode 100644 index 56107cf..0000000 --- a/roles/client/tasks/main.yml +++ /dev/null @@ -1,73 +0,0 @@ -- name: add node debian pgp key - apt_key: - id: 1655A0AB68576280 - url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key - state: present - -- name: add node debian repository - shell: echo "deb https://deb.nodesource.com/node_6.x jessie main" > /etc/apt/sources.list.d/nodesource.list - -- name: add yarn debian pgp key - apt_key: - id: 1646B01B86E50310 - url: https://dl.yarnpkg.com/debian/pubkey.gpg - state: present - -- name: add yarn debian repository - shell: echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list - -- name: update packages - apt: - update_cache: yes - upgrade: dist - -- name: install nodejs and yarn - apt: - name: "{{ item }}" - state: present - with_items: - - nodejs - - yarn - -- name: configure nginx - template: - src: caliopen_client.nginx.j2 - dest: /etc/nginx/sites-available/caliopen_client - -- name: activate client vhost - shell: ln -s /etc/nginx/sites-available/caliopen_client /etc/nginx/sites-enabled/caliopen_client - ignore_errors: true - -- name: copy caliopen packages - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/client" - dest: /opt/caliopen - tags: - - deploy - -- name: install node packages - shell: NODE_ENV=production yarn - args: - chdir: /opt/caliopen/client - tags: - - deploy - -- name: configure client - template: src=server.json.j2 dest=/opt/caliopen/client/config/server.json - tags: - - deploy - -- name: install launcher - template: src=server.j2 dest=/opt/caliopen/client/bin/server mode=0755 - -- name: install web client service - template: src=web-client.service.j2 dest=/etc/systemd/system/web-client.service - -- name: start web client service - service: name=web-client state=started enabled=yes - -- name: configure filebeat - template: - src: filebeat.yml.j2 - dest: /etc/filebeat/filebeat.yml - notify: restart filebeat diff --git a/roles/client/templates/caliopen_client.nginx.j2 b/roles/client/templates/caliopen_client.nginx.j2 deleted file mode 100644 index 8c25097..0000000 --- a/roles/client/templates/caliopen_client.nginx.j2 +++ /dev/null @@ -1,41 +0,0 @@ - -server { - listen 443 ssl; - listen [::]:443 ssl; - server_name {{ caliopen_domain_name }}; - - client_max_body_size 5m; - - ssl_certificate /etc/nginx/certs/{{ caliopen_domain_name }}.crt; - ssl_certificate_key /etc/nginx/certs/{{ caliopen_domain_name }}.key; - ssl_prefer_server_ciphers On; - ssl_protocols TLSv1.1 TLSv1.2; - ssl_ciphers ECDH+AESGCM:ECDH+AES256:ECDH+AES128:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS; - ssl_session_cache builtin:1000 shared:SSL:10m; - - location / { - proxy_pass http://localhost:4000/; - } -} - -server { - listen 80; - listen [::]:80; - server_name {{ caliopen_domain_name }}; - rewrite ^(.*) https://$server_name$1 permanent; -} - - -server { - listen {{ facter_ipaddress_eth1 }}:9145; - allow {{ facter_network_eth1 }}/24; - deny all; - location /metrics { - content_by_lua ' - metric_connections:set(ngx.var.connections_reading, {"reading"}) - metric_connections:set(ngx.var.connections_waiting, {"waiting"}) - metric_connections:set(ngx.var.connections_writing, {"writing"}) - prometheus:collect() - '; - } -} diff --git a/roles/client/templates/filebeat.yml.j2 b/roles/client/templates/filebeat.yml.j2 deleted file mode 100644 index 1009d60..0000000 --- a/roles/client/templates/filebeat.yml.j2 
+++ /dev/null @@ -1,155 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#========================== Modules configuration ============================ -filebeat.modules: - -#-------------------------------- Nginx Module ------------------------------- -- module: nginx - # Access logs - access: - enabled: true - - # Ingest Node pipeline to use. Options are `with_plugins` (default) - # and `no_plugins`. Use `no_plugins` if you don't have the geoip or - # the user agent Node ingest plugins installed. - #var.pipeline: with_plugins - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: /var/log/filebeat/nginx-access.log - - # Prospector configuration (advanced). Any prospector configuration option - # can be added under this section. - prospector: - fields: - type: "nginx-log" - exclude_lines: ['^192\.168\.1\.[0-9]+'] - exclude_files: ['access\.log.[1-9]+'] - - # Error logs - error: - enabled: true - - # Set custom paths for the log files. If left empty, - # Filebeat will choose the paths depending on your OS. - #var.paths: /var/log/filebeat/nginx-error.log - - # Prospector configuration (advanced). Any prospector configuration option - # can be added under this section. - #prospector: - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -#- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - #paths: - #- /var/log/user.log - - #fields: - #type: "syslog" - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. 
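A note on the client role tasks a little earlier: the NodeSource and Yarn repositories and the nginx vhost symlink are managed with raw shell commands, so those tasks rerun on every play and the symlink one needs ignore_errors. A minimal sketch of the same steps with stock Ansible modules, reusing the values from the original tasks:

    - name: add node debian repository
      apt_repository:
        repo: "deb https://deb.nodesource.com/node_6.x jessie main"
        filename: nodesource
        state: present

    - name: add yarn debian repository
      apt_repository:
        repo: "deb https://dl.yarnpkg.com/debian/ stable main"
        filename: yarn
        state: present

    - name: activate client vhost
      file:
        src: /etc/nginx/sites-available/caliopen_client
        dest: /etc/nginx/sites-enabled/caliopen_client
        state: link

Both modules report a change only when something is actually modified, so the ignore_errors workaround becomes unnecessary.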
- # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -#output.elasticsearch: - # Array of hosts to connect to. - #hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -output.logstash: - # The Logstash hosts - hosts: [{% for host in groups['logstash']%}"{{ hostvars[host]['backend_ip'] }}:5044",{% endfor %}] - - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". 
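The client, lmtp and worker filebeat templates in this patch all build the Logstash hosts list with an inline for-loop that leaves a trailing comma inside the flow sequence. Assuming every logstash host exposes backend_ip and the Ansible version in use provides the extract and to_json filters, the same list can be rendered in a single expression, for example:

    output.logstash:
      # renders e.g. ["10.0.0.5:5044", "10.0.0.6:5044"] (addresses illustrative)
      hosts: {{ groups['logstash'] | map('extract', hostvars, 'backend_ip') | map('regex_replace', '$', ':5044') | list | to_json }}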
-#logging.selectors: ["*"] diff --git a/roles/client/templates/server.j2 b/roles/client/templates/server.j2 deleted file mode 100644 index 2e96258..0000000 --- a/roles/client/templates/server.j2 +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node - -require('../dist/server'); diff --git a/roles/client/templates/server.json.j2 b/roles/client/templates/server.json.j2 deleted file mode 100644 index 2fbcd8b..0000000 --- a/roles/client/templates/server.json.j2 +++ /dev/null @@ -1,20 +0,0 @@ -{ - "protocol": "http", - "hostname": "localhost", - "port": 80, - "webServer": { - "port": 4000 - }, - "api": { - "protocol": "https", - "hostname": "api.{{ caliopen_domain_name }}", - "port": 443, - "checkCertificate": false - }, - "cookie": { - "secret": "{{ web_client_cookie_secret }}" - }, - "seal": { - "secret": "{{ web_client_seal_secret }}" - } -} diff --git a/roles/client/templates/web-client.service.j2 b/roles/client/templates/web-client.service.j2 deleted file mode 100644 index e4f388b..0000000 --- a/roles/client/templates/web-client.service.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Caliopen web client - -[Service] -WorkingDirectory=/opt/caliopen/client -ExecStart=/opt/caliopen/client/bin/server --config=config/server.json - -[Install] -WantedBy=multi-user.target diff --git a/roles/filebeat/handlers/main.yml b/roles/filebeat/handlers/main.yml deleted file mode 100644 index 69ac390..0000000 --- a/roles/filebeat/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: restart filebeat - service: name=filebeat state=restarted - -- name: start filebeat - service: name=filebeat state=started enabled=yes - diff --git a/roles/filebeat/tasks/main.yml b/roles/filebeat/tasks/main.yml deleted file mode 100644 index fa566cb..0000000 --- a/roles/filebeat/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ -- name: install filebeat - apt: - name: filebeat - state: present - -- name: start filebeat - systemd: - name: filebeat - state: started diff --git a/roles/lmtp/tasks/main.yml b/roles/lmtp/tasks/main.yml deleted file mode 100644 index fe49ce7..0000000 --- a/roles/lmtp/tasks/main.yml +++ /dev/null @@ -1,34 +0,0 @@ -- name: create caliopen configuration directory - file: - path: /etc/caliopen - state: directory - -- name: lmtp configuration file - template: src=caliopen-lmtp.yaml.j2 dest=/etc/caliopen/caliopen-lmtp.yaml - tags: - - deploy - -- name: copy caliopen binary - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/{{ item }}" - dest: /usr/local/bin - mode: 0711 - with_items: - - caliopen_lmtpd - tags: - - deploy - -- name: install caliopen lmtp as a systemd service - template: src=caliopen-lmtp.service.j2 dest=/etc/systemd/system/caliopen-lmtp.service - -- name: enable caliopen lmtp service - shell: systemctl enable caliopen-lmtp.service - -- name: start caliopen lmtp service - shell: systemctl start caliopen-lmtp.service - -- name: configure filebeat - template: - src: filebeat.yml.j2 - dest: /etc/filebeat/filebeat.yml - notify: restart filebeat diff --git a/roles/lmtp/templates/caliopen-lmtp.service.j2 b/roles/lmtp/templates/caliopen-lmtp.service.j2 deleted file mode 100644 index 3735218..0000000 --- a/roles/lmtp/templates/caliopen-lmtp.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Caliopen lmtp service - -[Service] -Restart=always -ExecStart=/usr/local/bin/caliopen_lmtpd serve -c /etc/caliopen/caliopen-lmtp -ExecStop=pkill caliopen_lmtpd - -[Install] -WantedBy=local.target diff --git a/roles/lmtp/templates/caliopen-lmtp.yaml.j2 
b/roles/lmtp/templates/caliopen-lmtp.yaml.j2 deleted file mode 100644 index 9d3a455..0000000 --- a/roles/lmtp/templates/caliopen-lmtp.yaml.j2 +++ /dev/null @@ -1,73 +0,0 @@ -## SMTP config ## -AppConfig: - allowed_hosts: - - localhost - - caliopen.local - - {{ caliopen_domain_name }} - primary_mail_host: {{ caliopen_domain_name }} - inbound_servers: # only one allowed for now - - is_enabled: true - host_name: localhost - max_size: 20971520 # max authorized size for emails in bytes - timeout: 180 - listen_interface: 0.0.0.0:2525 - start_tls_on: false - tls_always_on: false - max_clients: 1000 - #submit is the MTA to connect to for final delivery (postfix for example) - submit_address: mail1.local # XXX hardcoded, can't use list - submit_port: 587 - submit_user: - submit_password: - submit_workers: 2 # number of concurrent connexions to submit MTA - -## LDA (Email broker) config ## -LDAConfig: - broker_type: smtp # types are : smtp, imap, mailboxe, etc. - nats_url: nats://{{ hostvars['mq1']['backend_ip'] }}:4222 - nats_queue: SMTPqueue # NATS group queue for nats subscribers to share jobs - store_name: cassandra # backend to store raw emails and messages (inbound & outbound) - store_settings: - hosts: # many allowed -{% for host in groups['store'] %} - - {{ hostvars[host]['backend_ip'] }} -{% endfor %} - keyspace: caliopen - consistency_level: 1 - raw_size_limit: 1048576 # max size in bytes for objects in db. Use S3 interface if larger. - object_store: s3 - object_store_settings: - endpoint: {{ hostvars['object_store1']['backend_ip'] }}:9000 - access_key: {{ object_store_access_key }} - secret_key: {{ object_store_secret_key }} - location: eu-fr-paris - buckets: - raw_messages: caliopen-raw-messages # bucket name to put raw messages to - temporary_attachments: caliopen-tmp-attachments # bucket name to store draft attachments - use_vault: true - vault_settings: - url: https://vault.{{ caliopen_domain_name }}:8200 - username: {{ vault_lmtp_username }} - password: {{ vault_lmtp_password }} - index_name: elasticsearch # backend to index messages (inbound & outbound) - index_settings: - urls: # many allowed -{% for host in groups['store'] %} - - http://{{ hostvars[host]['backend_ip'] }}:9200 -{% endfor %} - - #inbound - in_topic: inboundSMTP # NATS topic to listen to - lda_workers_size: 2 # number of concurrent workers - log_received_mails: true - - # outbound - out_topic: outboundSMTP # NATS topic to listen to - nats_listeners: 2 # number of concurrent nats listeners - - # notifications - contacts_topic: contactAction - NotifierConfig: - base_url: https://{{ caliopen_domain_name }} - admin_username: admin - templates_path: "/etc/caliopen/notifiers/templates/" diff --git a/roles/lmtp/templates/filebeat.yml.j2 b/roles/lmtp/templates/filebeat.yml.j2 deleted file mode 100644 index 3340a29..0000000 --- a/roles/lmtp/templates/filebeat.yml.j2 +++ /dev/null @@ -1,123 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. 
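The lmtp tasks above enable and start the unit through shell calls to systemctl. The systemd module, already used by the filebeat role in this same patch, handles this idempotently and can reload unit files after the service template is installed; a sketch:

    - name: enable and start caliopen lmtp service
      systemd:
        name: caliopen-lmtp
        state: started
        enabled: yes
        daemon_reload: yes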
-# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#========================== Modules configuration ============================ -#filebeat.modules: - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - paths: - - /var/log/daemon.log - - fields: - type: "syslog" - app: "caliopen_lmtpd" - include_lines: ['.*caliopen_lmtpd.*'] - exclude_files: ['daemon\.log.[1-9]+'] - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -#output.elasticsearch: - # Array of hosts to connect to. - #hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -output.logstash: - # The Logstash hosts - hosts: [{% for host in groups['logstash']%}"{{ hostvars[host]['backend_ip'] }}:5044",{% endfor %}] - - - # Optional SSL. By default is off. 
- # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] diff --git a/roles/logstash/tasks/main.yml b/roles/logstash/tasks/main.yml deleted file mode 100644 index 243f55b..0000000 --- a/roles/logstash/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ -- name: install logstash - apt: - name: logstash - state: present - -- name: configure logstash - template: src=logstash.yml.j2 dest=/etc/logstash/conf.d/logstash.yml - -- name: set logstash pipeline - template: src=logstash.conf dest=/etc/logstash/conf.d/logstash.conf - -- name: configure logstash jvm - template: src=jvm.options dest=/etc/logstash/jvm.options - -- name: start logstash - systemd: - name: logstash - state: started diff --git a/roles/logstash/templates/jvm.options b/roles/logstash/templates/jvm.options deleted file mode 100644 index daae9d9..0000000 --- a/roles/logstash/templates/jvm.options +++ /dev/null @@ -1,77 +0,0 @@ -## JVM configuration - -# Xms represents the initial size of total heap space -# Xmx represents the maximum size of total heap space - --Xms1g --Xmx1g - -################################################################ -## Expert settings -################################################################ -## -## All settings below this section are considered -## expert settings. Don't tamper with them unless -## you understand what you are doing -## -################################################################ - -## GC configuration --XX:+UseParNewGC --XX:+UseConcMarkSweepGC --XX:CMSInitiatingOccupancyFraction=75 --XX:+UseCMSInitiatingOccupancyOnly - -## Locale -# Set the locale language -#-Duser.language=en - -# Set the locale country -#-Duser.country=US - -# Set the locale variant, if any -#-Duser.variant= - -## basic - -# set the I/O temp directory -#-Djava.io.tmpdir=$HOME - -# set to headless, just in case --Djava.awt.headless=true - -# ensure UTF-8 encoding by default (e.g. 
filenames) --Dfile.encoding=UTF-8 - -# use our provided JNA always versus the system one -#-Djna.nosys=true - -# Turn on JRuby invokedynamic --Djruby.compile.invokedynamic=true -# Force Compilation --Djruby.jit.threshold=0 - -## heap dumps - -# generate a heap dump when an allocation from the Java heap fails -# heap dumps are created in the working directory of the JVM --XX:+HeapDumpOnOutOfMemoryError - -# specify an alternative path for heap dumps -# ensure the directory exists and has sufficient space -#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof - -## GC logging -#-XX:+PrintGCDetails -#-XX:+PrintGCTimeStamps -#-XX:+PrintGCDateStamps -#-XX:+PrintClassHistogram -#-XX:+PrintTenuringDistribution -#-XX:+PrintGCApplicationStoppedTime - -# log GC status to a file with time stamps -# ensure the directory exists -#-Xloggc:${LS_GC_LOG_FILE} - -# Entropy source for randomness --Djava.security.egd=file:/dev/urandom diff --git a/roles/logstash/templates/logstash.conf b/roles/logstash/templates/logstash.conf deleted file mode 100644 index 343aa46..0000000 --- a/roles/logstash/templates/logstash.conf +++ /dev/null @@ -1,64 +0,0 @@ -input { - beats { - host => "{{ ansible_eth1.ipv4.address }}" - port => "5044" - } -} - -filter { - if [fields][type] == "syslog" { - dissect { - mapping => { - "message" => '<%{facility}.%{pri}> %{ts} %{+ts} %{+ts} %{host} %{prog}: %{msg}' - } - } - mutate { - remove_field => [ "beat", "fileset", "ts","message","offset","input_type","tags"] - } - } - - if [fields][type] == "nginx-log" { - dissect { - mapping => { - "message" => '%{ip} - - [%{access_date}] "%{req} %{path} %{version}" %{respcode} %{size} "%{url}" "%{client}"' - } - } - mutate { - remove_field => [ "beat", "fileset", "message","offset","input_type","tags"] - } - } - - if [fields][app] == "caliopen_lmtpd" { - dissect { - mapping => { - "msg" => 'time="%{ts}" level=%{level} msg="%{msg}"' - } - } - mutate { - remove_field => [ "ts", "level"] - } - } - - if [fields][app] == "python_worker1" { - dissect { - mapping => { - "msg" => '%{pri}:%{prog}:%{msg}' - } - } - mutate { - remove_field => [ "ts" ] - lowercase => [ "pri" ] - } - } -} - -output { - elasticsearch { - hosts => [{% for host in groups['store']%} - "{{ hostvars[host]['backend_ip']}}:9200" - {%- if not loop.last -%} - , - {% endif %} - {% endfor %}] - } -} \ No newline at end of file diff --git a/roles/logstash/templates/logstash.yml.j2 b/roles/logstash/templates/logstash.yml.j2 deleted file mode 100644 index 7784264..0000000 --- a/roles/logstash/templates/logstash.yml.j2 +++ /dev/null @@ -1,200 +0,0 @@ -# Settings file in YAML -# -# Settings can be specified either in hierarchical form, e.g.: -# -# pipeline: -# batch: -# size: 125 -# delay: 5 -# -# Or as flat keys: -# -# pipeline.batch.size: 125 -# pipeline.batch.delay: 5 -# -# ------------ Node identity ------------ -# -# Use a descriptive name for the node: -# -# node.name: test -# -# If omitted the node name will default to the machine's host name -# -# ------------ Data path ------------------ -# -# Which directory should be used by logstash and its plugins -# for any persistent needs. Defaults to LOGSTASH_HOME/data -# -# path.data: -# -# ------------ Pipeline Settings -------------- -# -# Set the number of workers that will, in parallel, execute the filters+outputs -# stage of the pipeline. -# -# This defaults to the number of the host's CPU cores. 
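One detail in the logstash role above: logstash.yml is the settings file, which the Debian package normally reads from /etc/logstash/logstash.yml, whereas /etc/logstash/conf.d/ is meant for pipeline definitions such as logstash.conf. If that is the intent, the configure task would rather look like:

    - name: configure logstash
      template:
        src: logstash.yml.j2
        dest: /etc/logstash/logstash.yml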
-# -# pipeline.workers: 2 -# -# How many workers should be used per output plugin instance -# -# pipeline.output.workers: 1 -# -# How many events to retrieve from inputs before sending to filters+workers -# -# pipeline.batch.size: 125 -# -# How long to wait before dispatching an undersized batch to filters+workers -# Value is in milliseconds. -# -# pipeline.batch.delay: 5 -# -# Force Logstash to exit during shutdown even if there are still inflight -# events in memory. By default, logstash will refuse to quit until all -# received events have been pushed to the outputs. -# -# WARNING: enabling this can lead to data loss during shutdown -# -# pipeline.unsafe_shutdown: false -# -# ------------ Pipeline Configuration Settings -------------- -# -# Where to fetch the pipeline configuration for the main pipeline -# -# path.config: -# -# Pipeline configuration string for the main pipeline -# -# config.string: -# -# At startup, test if the configuration is valid and exit (dry run) -# -# config.test_and_exit: false -# -# Periodically check if the configuration has changed and reload the pipeline -# This can also be triggered manually through the SIGHUP signal -# -# config.reload.automatic: false -# -# How often to check if the pipeline configuration has changed (in seconds) -# -# config.reload.interval: 3 -# -# Show fully compiled configuration as debug log message -# NOTE: --log.level must be 'debug' -# -# config.debug: false -# -# When enabled, process escaped characters such as \n and \" in strings in the -# pipeline configuration files. -# -# config.support_escapes: false -# -# ------------ Module Settings --------------- -# Define modules here. Modules definitions must be defined as an array. -# The simple way to see this is to prepend each `name` with a `-`, and keep -# all associated variables under the `name` they are associated with, and -# above the next, like this: -# -# modules: -# - name: MODULE_NAME -# var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE -# var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE -# var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE -# var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE -# -# Module variable names must be in the format of -# -# var.PLUGIN_TYPE.PLUGIN_NAME.KEY -# -# modules: -# -# ------------ Queuing Settings -------------- -# -# Internal queuing model, "memory" for legacy in-memory based queuing and -# "persisted" for disk-based acked queueing. Defaults is memory -# -# queue.type: memory -# -# If using queue.type: persisted, the directory path where the data files will be stored. -# Default is path.data/queue -# -# path.queue: -# -# If using queue.type: persisted, the page data files size. The queue data consists of -# append-only data files separated into pages. Default is 250mb -# -# queue.page_capacity: 250mb -# -# If using queue.type: persisted, the maximum number of unread events in the queue. -# Default is 0 (unlimited) -# -# queue.max_events: 0 -# -# If using queue.type: persisted, the total capacity of the queue in number of bytes. -# If you would like more unacked events to be buffered in Logstash, you can increase the -# capacity using this setting. Please make sure your disk drive has capacity greater than -# the size specified here. 
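As a concrete illustration of the queuing settings documented above, switching a node from the default in-memory queue to the disk-backed one only needs a few lines in logstash.yml (the values below are illustrative, not taken from this deployment):

    queue.type: persisted
    queue.max_bytes: 2gb                  # cap on disk usage for the queue
    path.queue: /var/lib/logstash/queue   # directory writable by the logstash user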
If both max_bytes and max_events are specified, Logstash will pick -# whichever criteria is reached first -# Default is 1024mb or 1gb -# -# queue.max_bytes: 1024mb -# -# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint -# Default is 1024, 0 for unlimited -# -# queue.checkpoint.acks: 1024 -# -# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint -# Default is 1024, 0 for unlimited -# -# queue.checkpoint.writes: 1024 -# -# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page -# Default is 1000, 0 for no periodic checkpoint. -# -# queue.checkpoint.interval: 1000 -# -# ------------ Dead-Letter Queue Settings -------------- -# Flag to turn on dead-letter queue. -# -# dead_letter_queue.enable: false - -# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries -# will be dropped if they would increase the size of the dead letter queue beyond this setting. -# Default is 1024mb -# dead_letter_queue.max_bytes: 1024mb - -# If using dead_letter_queue.enable: true, the directory path where the data files will be stored. -# Default is path.data/dead_letter_queue -# -# path.dead_letter_queue: -# -# ------------ Metrics Settings -------------- -# -# Bind address for the metrics REST endpoint -# -# http.host: "127.0.0.1" -# -# Bind port for the metrics REST endpoint, this option also accept a range -# (9600-9700) and logstash will pick up the first available ports. -# -# http.port: 9600-9700 -# -# ------------ Debugging Settings -------------- -# -# Options for log.level: -# * fatal -# * error -# * warn -# * info (default) -# * debug -# * trace -# -# log.level: info -# path.logs: -# -# ------------ Other Settings -------------- -# -# Where to find custom plugins -# path.plugins: [] diff --git a/roles/nats/tasks/main.yml b/roles/nats/tasks/main.yml deleted file mode 100644 index 1edcebe..0000000 --- a/roles/nats/tasks/main.yml +++ /dev/null @@ -1,17 +0,0 @@ -- name: install nats - copy: - src: "{{ dist_directory }}/ext/gnatsd-v{{ gnats_version }}-linux-amd64/gnatsd" - dest: /usr/local/sbin/gnatsd - mode: 0711 - -- name: install gnats service - template: src=gnatsd.service.j2 dest=/etc/systemd/system/gnatsd.service - -- name: start nats - service: name=gnatsd state=started enabled=yes - -- name: install prometheus-nats-exporter service - template: src=prometheus-nats-exporter.service.j2 dest=/etc/systemd/system/prometheus-nats-exporter.service - -- name: start prometheus-nats-exporter service - service: name=prometheus-nats-exporter state=started enabled=yes diff --git a/roles/nats/templates/gnatsd.service.j2 b/roles/nats/templates/gnatsd.service.j2 deleted file mode 100644 index 7c687f4..0000000 --- a/roles/nats/templates/gnatsd.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=gnats service - -[Service] -Restart=always -ExecStart=/usr/local/sbin/gnatsd -a {{ hostvars[inventory_hostname]['backend_ip'] }} -l /var/log/gnatsd.log -P /var/run/gnatsd.pid -m 8222 -ExecStop=kill /var/run/gnatsd.pid - -[Install] -WantedBy=local.target diff --git a/roles/nats/templates/prometheus-nats-exporter.service.j2 b/roles/nats/templates/prometheus-nats-exporter.service.j2 deleted file mode 100644 index d16c4ad..0000000 --- a/roles/nats/templates/prometheus-nats-exporter.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Nats prometheus exporter - -[Service] -Restart=always -ExecStart=/var/tmp/prometheus-nats-exporter -connz 
-routez -subz -a {{ facter_ipaddress_eth1 }} http://{{ facter_ipaddress_eth1}}:8222 -ExecStop=pkill prometheus-nats-exporter - -[Install] -WantedBy=local.target diff --git a/roles/prometheus/files/rule1 b/roles/prometheus/files/rule1 deleted file mode 100644 index c4ab792..0000000 --- a/roles/prometheus/files/rule1 +++ /dev/null @@ -1,19 +0,0 @@ -# Alert for any instance that is unreachable for >5 minutes. -ALERT InstanceDown - IF up == 0 - FOR 5m - LABELS { severity = "critical" } - ANNOTATIONS { - summary = "Instance {{ $labels.instance }} down", - description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.", - } - - -ALERT FilesystemFull - IF node_filesystem_free / node_filesystem_size < 0.3 - FOR 5m - LABELS { severity = "critical" } - ANNOTATIONS { - summary = "Filesystem {{ $labels.instance }} full", - description = "{{ $labels.instance }} of job {{ $labels.job }} free space less than 30%.", - } diff --git a/roles/prometheus/handlers/main.yml b/roles/prometheus/handlers/main.yml deleted file mode 100644 index d89ba7b..0000000 --- a/roles/prometheus/handlers/main.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: start prometheus - service: name=prometheus state=started enabled=yes - -- name: restart prometheus - service: name=prometheus state=restarted - -- name: start elasticsearch-exporter - service: name=elasticsearch-exporter state=started enabled=yes diff --git a/roles/prometheus/tasks/main.yml b/roles/prometheus/tasks/main.yml deleted file mode 100644 index 25b4636..0000000 --- a/roles/prometheus/tasks/main.yml +++ /dev/null @@ -1,47 +0,0 @@ -- name: download prometheus - get_url: - url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version}}/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz - dest: /var/tmp - -- name: decompress prometheus - unarchive: - src: /var/tmp/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz - dest: /usr/local - remote_src: yes - -- name: install service for prometheus - template: src=prometheus.service.j2 dest=/etc/systemd/system/prometheus.service - -- name: install elasticsearch_exporter - get_url: - url: https://github.com/justwatchcom/elasticsearch_exporter/releases/download/v1.0.1/elasticsearch_exporter-1.0.1.linux-amd64.tar.gz - dest: /var/tmp - -- name: uncompress elasticsearch_exporter - unarchive: - src: /var/tmp/elasticsearch_exporter-1.0.1.linux-amd64.tar.gz - dest: /var/tmp - remote_src: yes - -- name: ensure elasticsearch_exporter is not running - service: name=elasticsearch-exporter state=stopped - ignore_errors: yes - -- name: copy elasticsearch_exporter to correct location - shell: cp -p /var/tmp/elasticsearch_exporter-1.0.1.linux-amd64/elasticsearch_exporter /usr/local/bin - -- name: install elasticsearch_exporter service - template: src=elasticsearch-exporter.service.j2 dest=/etc/systemd/system/elasticsearch-exporter.service - notify: start elasticsearch-exporter - -- name: create configuration directory - file: - path: /etc/prometheus - state: directory - -- name: upload rule files - copy: src=rule1 dest=/etc/prometheus/rule1 - -- name: configure prometheus - template: src=prometheus.yml.j2 dest=/etc/prometheus/prometheus.yml - notify: restart prometheus diff --git a/roles/prometheus/templates/elasticsearch-exporter.service.j2 b/roles/prometheus/templates/elasticsearch-exporter.service.j2 deleted file mode 100644 index 3e44722..0000000 --- a/roles/prometheus/templates/elasticsearch-exporter.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] 
-Description=elasticsearch exporter service - -[Service] -Restart=always -ExecStart=/usr/local/bin/elasticsearch_exporter -es.uri http://{{ hostvars['store1']['backend_ip']}}:9200 -es.all -ExecStop=pkill elasticsearch_exporter - -[Install] -WantedBy=local.target diff --git a/roles/prometheus/templates/prometheus.service.j2 b/roles/prometheus/templates/prometheus.service.j2 deleted file mode 100644 index 020b520..0000000 --- a/roles/prometheus/templates/prometheus.service.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Prometheus service - -[Service] -Restart=always -ExecStart=/usr/local/prometheus-{{ prometheus_version }}.linux-amd64/prometheus -config.file=/etc/prometheus/prometheus.yml -web.listen-address {{ facter_ipaddress_eth1 }}:9090 -alertmanager.url http://{{ facter_ipaddress_eth1 }}:9091 -ExecStop=pkill prometheus - -[Install] -WantedBy=local.target diff --git a/roles/prometheus/templates/prometheus.yml.j2 b/roles/prometheus/templates/prometheus.yml.j2 deleted file mode 100644 index 031d581..0000000 --- a/roles/prometheus/templates/prometheus.yml.j2 +++ /dev/null @@ -1,47 +0,0 @@ -global: - scrape_interval: 15s # By default, scrape targets every 15 seconds. - - # Attach these labels to any time series or alerts when communicating with - # external systems (federation, remote storage, Alertmanager). - external_labels: - monitor: 'codelab-monitor' - -# A scrape configuration containing exactly one endpoint to scrape: -# Here it's Prometheus itself. -scrape_configs: - - - job_name: 'http' - scrape_interval: 5s - static_configs: - # nginx servers - - targets: [{% for host in groups['monitor'] %}'{{ host }}.local:9145',{% endfor %}{% for host in groups['api'] %}'{{ host }}.local:9145',{% endfor %}{% for host in groups['web_client'] %}'{{ host }}.local:9145',{% endfor %}] - - - job_name: 'stores' - scrape_interval: 5s - static_configs: - - # elasticsearch exporter - - targets: ['localhost:9108'] - - # cassandra cluster - - targets: [{% for host in groups['store'] %}'{{ host }}.local:7070',{% endfor %}] - - # cache - - targets: [{% for host in groups['cache'] %}'{{ host }}.local:9121',{% endfor %}] - - - job_name: 'queues' - scrape_interval: 5s - static_configs: - - # nats exporter - - targets: [{% for host in groups['mq'] %}'{{ host }}.local:7777',{% endfor %}] - - - job_name: 'hosts' - scrape_interval: 5s - static_configs: - - # node exporter - - targets: [{% for host in groups['all'] %}'{{ host }}.local:9100',{% endfor %}] - -rule_files: - - /etc/prometheus/rule* diff --git a/roles/worker/tasks/main.yml b/roles/worker/tasks/main.yml deleted file mode 100644 index d00a81b..0000000 --- a/roles/worker/tasks/main.yml +++ /dev/null @@ -1,108 +0,0 @@ -- name: Install dependencies - apt: - name: "{{ item }}" - state: latest - update_cache: true - cache_valid_time: 3600 - with_items: - - gcc - - python-dev - - libffi-dev - - libssl-dev - -- name: copy caliopen packages - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/{{ item }}" - dest: /var/tmp - with_items: - - caliopen_storage-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pi-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_main-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pgp-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_nats-{{ caliopen_version }}-py2.py3-none-any.whl - tags: - - deploy - -- name: install caliopen packages - shell: cd /var/tmp && pip install {{ item }} - with_items: - - caliopen_storage-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pi-{{ 
caliopen_version }}-py2.py3-none-any.whl - - caliopen_main-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_pgp-{{ caliopen_version }}-py2.py3-none-any.whl - - caliopen_nats-{{ caliopen_version }}-py2.py3-none-any.whl - tags: - - deploy - -- name: install some missing python dependencies - pip: - name: "{{ item }}" - with_items: - - strict-rfc3339 - - webcolors - -- name: install worker service - template: src=caliopen-worker.service.j2 dest=/etc/systemd/system/caliopen-worker.service - tags: - - deploy - -- name: start worker service - service: name=caliopen-worker state=started enabled=yes - tags: - - deploy - -- name: configure filebeat - template: - src: filebeat.yml.j2 - dest: /etc/filebeat/filebeat.yml - notify: restart filebeat - -- name: install caliopen remote identities poller and fetcher - copy: - src: "{{ dist_directory }}/{{ caliopen_version }}/{{ item }}" - dest: /usr/local/bin - with_items: - - imapworker - - twitterworker - - idpoller - tags: - - deploy - -- name: copy caliopen imap worker configuration - template: - src: imapworker.yaml.j2 - dest: /etc/caliopen/imapworker.yaml - tags: - - deploy - -- name: copy caliopen imap worker configuration - template: - src: twitterworker.yaml.j2 - dest: /etc/caliopen/twitterworker.yaml - tags: - - deploy - -- name: copy caliopen identity poller configuration - template: - src: idpoller.yaml.j2 - dest: /etc/caliopen/idpoller.yaml - tags: - - deploy - -- name: install idpoller and imapworker service - template: src={{ item }}.service.j2 dest=/etc/systemd/system/{{ item }}.service - with_items: - - imapworker - - twitterworker - - idpoller - tags: - - deploy - -- name: start imapworker and idpoller service - service: name={{ item }} state=started enabled=yes - with_items: - - imapworker - - twitterworker - - idpoller - tags: - - deploy \ No newline at end of file diff --git a/roles/worker/templates/caliopen-worker.service.j2 b/roles/worker/templates/caliopen-worker.service.j2 deleted file mode 100644 index a085f28..0000000 --- a/roles/worker/templates/caliopen-worker.service.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Caliopen worker service - -[Service] -Restart=always -ExecStart=/usr/bin/python /usr/local/lib/python2.7/dist-packages/caliopen_nats/listener.py -f /etc/caliopen/caliopen.yaml - -[Install] -WantedBy=local.target diff --git a/roles/worker/templates/filebeat.yml.j2 b/roles/worker/templates/filebeat.yml.j2 deleted file mode 100644 index 7650aa1..0000000 --- a/roles/worker/templates/filebeat.yml.j2 +++ /dev/null @@ -1,124 +0,0 @@ -###################### Filebeat Configuration Example ######################### - -# This file is an example configuration file highlighting only the most common -# options. The filebeat.full.yml file from the same directory contains all the -# supported options with more comments. You can use it as a reference. -# -# You can find the full configuration reference here: -# https://www.elastic.co/guide/en/beats/filebeat/index.html - -#========================== Modules configuration ============================ -#filebeat.modules: - -#=========================== Filebeat prospectors ============================= - -filebeat.prospectors: - -# Each - is a prospector. Most options can be set at the prospector level, so -# you can use different prospectors for various configurations. -# Below are the prospector specific configurations. - -- input_type: log - - # Paths that should be crawled and fetched. Glob based paths. 
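The worker role above copies the wheels to /var/tmp and installs them with a raw "cd && pip install" shell step. Ansible's pip module can generally install from a local wheel path passed as name, which keeps the step declarative; a sketch reusing the same file list:

    - name: install caliopen packages
      pip:
        name: "/var/tmp/{{ item }}"
      with_items:
        - caliopen_storage-{{ caliopen_version }}-py2.py3-none-any.whl
        - caliopen_pi-{{ caliopen_version }}-py2.py3-none-any.whl
        - caliopen_main-{{ caliopen_version }}-py2.py3-none-any.whl
        - caliopen_pgp-{{ caliopen_version }}-py2.py3-none-any.whl
        - caliopen_nats-{{ caliopen_version }}-py2.py3-none-any.whl
      tags:
        - deploy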
- paths: - - /var/log/daemon.log - - fields: - type: "syslog" - app: "python_worker1" - include_lines: ['.*python.*'] - exclude_lines: ['.*DEBUG.*heartbeat.*','.*DEBUG.*Received options.*'] - exclude_files: ['daemon\.log.[1-9]+'] - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - ### Multiline options - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - -#================================ General ===================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. -#fields: -# env: staging - -#================================ Outputs ===================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#-------------------------- Elasticsearch output ------------------------------ -#output.elasticsearch: - # Array of hosts to connect to. - #hosts: ["localhost:9200"] - - # Optional protocol and basic auth credentials. - #protocol: "https" - #username: "elastic" - #password: "changeme" - -#----------------------------- Logstash output -------------------------------- -output.logstash: - # The Logstash hosts - hosts: [{% for host in groups['logstash']%}"{{ hostvars[host]['backend_ip'] }}:5044",{% endfor %}] - - - # Optional SSL. By default is off. - # List of root certificates for HTTPS server verifications - #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - -#================================ Logging ===================================== - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: debug - -# At debug level, you can selectively enable logging only for some components. 
-# To enable all selectors use ["*"]. Examples of other selectors are "beat", -# "publish", "service". -#logging.selectors: ["*"] diff --git a/roles/worker/templates/idpoller.service.j2 b/roles/worker/templates/idpoller.service.j2 deleted file mode 100644 index 0b6f113..0000000 --- a/roles/worker/templates/idpoller.service.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Caliopen id poller service - -[Service] -Restart=always -ExecStart=/usr/local/bin/idpoller start --configpath /etc/caliopen -p /var/run/idpoller.pid >> /var/log/idpoller.log 2>&1 & - -[Install] -WantedBy=local.target diff --git a/roles/worker/templates/idpoller.yaml.j2 b/roles/worker/templates/idpoller.yaml.j2 deleted file mode 100644 index 2bb140d..0000000 --- a/roles/worker/templates/idpoller.yaml.j2 +++ /dev/null @@ -1,23 +0,0 @@ -#polling config -scan_interval: 180 # in minutes. How often storage is scanned to retrieve and cache remote identities data -remote_protocols: # which kind of remote identities poller must handle - - email - - imap - - twitter -#storage facility -store_name: cassandra # backend for remote identities data -store_settings: - hosts: # many allowed -{% for host in groups['store'] %} - - {{ hostvars[host]['backend_ip'] }} -{% endfor %} - keyspace: caliopen - consistency_level: 1 -#messaging system -nats_url: nats://{{ hostvars['mq1']['backend_ip'] }}:4222 -nats_queue : IDpoller -nats_topics: # NATS topics to work with - id_cache: idCache # receiving orders to update poller's cache - imap: IMAPfetcher # emitting orders for IMAP operations - twitter_worker: twitter_worker # emitting orders for workers operations - twitter_dm: twitter_dm # emitting orders for direct messages operations diff --git a/roles/worker/templates/imapworker.service.j2 b/roles/worker/templates/imapworker.service.j2 deleted file mode 100644 index 5229646..0000000 --- a/roles/worker/templates/imapworker.service.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Caliopen imap worker service - -[Service] -Restart=always -ExecStart=/usr/local/bin/imapworker start --configpath /etc/caliopen -p /var/run/imapworker.pid >> /var/log/imapworker.log 2>&1 & - -[Install] -WantedBy=local.target diff --git a/roles/worker/templates/imapworker.yaml.j2 b/roles/worker/templates/imapworker.yaml.j2 deleted file mode 100644 index a140b84..0000000 --- a/roles/worker/templates/imapworker.yaml.j2 +++ /dev/null @@ -1,54 +0,0 @@ -workers: 2 # number of concurrent workers -hostname: {{ caliopen_domain_name }} -#messaging system -nats_url: nats://{{ hostvars['mq1']['backend_ip'] }}:4222 -nats_queue: IMAPworkers # NATS group queue for workers -nats_topic_fetcher: IMAPfetcher # NATS topic to listen to actions to execute -nats_topic_sender: outboundIMAP # NATS topic to listen to actions to execute -#storage facility -store_name: cassandra # backend to store raw emails and messages (inbound & outbound) -store_settings: - hosts: # many allowed -{% for host in groups['store'] %} - - {{ hostvars[host]['backend_ip'] }} -{% endfor %} - keyspace: caliopen - consistency_level: 1 - raw_size_limit: 1048576 # max size in bytes for objects in db. Use S3 interface if larger. - object_store: s3 - object_store_settings: - endpoint: {{ hostvars['object_store1']['backend_ip'] }}:9000 - access_key: {{ object_store_access_key }} - secret_key: {{ object_store_secret_key }} - location: eu-fr-paris # S3 region. 
- buckets: - raw_messages: caliopen-raw-messages # bucket name to put raw messages to - temporary_attachments: caliopen-tmp-attachments # bucket name to store draft attachments - use_vault: true - vault_settings: - url: https://vault.{{ caliopen_domain_name }}:8200 - username: {{ vault_worker_username }} - password: {{ vault_worker_password }} -LDAConfig: - broker_type: imap # types are : smtp, imap, mailboxe, etc. - lda_workers_size: 2 # number of concurrent workers - log_received_mails: true - #index facility - index_name: elasticsearch # backend to index messages (inbound & outbound) - index_settings: - urls: # many allowed -{% for host in groups['store'] %} - - http://{{ hostvars[host]['backend_ip'] }}:9200 -{% endfor %} #messaging system - in_topic: inboundSMTP # NATS topic to listen to - out_topic: outboundSMTP - nats_queue: SMTPqueue - # notifications - NotifierConfig: - admin_username: admin # username on whose behalf notifiers will act. This admin user must have been created before by other means. - Providers: # temporary supported providers list for remote identities before moving this data into store facility - - name: gmail - protocol: email - infos: - client_id: {{ gmail_oauth_client_id }} - client_secret: {{ gmail_oauth_client_secret }} diff --git a/roles/worker/templates/twitterworker.service.j2 b/roles/worker/templates/twitterworker.service.j2 deleted file mode 100644 index b052b4a..0000000 --- a/roles/worker/templates/twitterworker.service.j2 +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=Caliopen twitter worker service - -[Service] -Restart=always -ExecStart=/usr/local/bin/twitterworker start --configpath /etc/caliopen -p /var/run/twitterworker.pid >> /var/log/twitterworker.log 2>&1 & - -[Install] -WantedBy=local.target diff --git a/roles/worker/templates/twitterworker.yaml.j2 b/roles/worker/templates/twitterworker.yaml.j2 deleted file mode 100644 index 98e538e..0000000 --- a/roles/worker/templates/twitterworker.yaml.j2 +++ /dev/null @@ -1,46 +0,0 @@ -twitter_app_key: {{ twitter_oauth_app_key }} -twitter_app_secret: {{ twitter_oauth_app_secret }} -BrokerConfig: - #messaging system - nats_url: nats://{{ hostvars['mq1']['backend_ip'] }}:4222 - nats_queue: Twitterworkers # NATS group queue for workers - nats_topic_worker: twitter_worker # NATS topic to listen to orders for handling workers (add, update, delete) - nats_topic_direct_message: twitter_dm # NATS topic to listen to orders for handling DMs (fetch, send) - #storage facility - store_name: cassandra # backend to store raw emails and messages (inbound & outbound) - store_settings: - hosts: # many allowed -{% for host in groups['store'] %} - - {{ hostvars[host]['backend_ip'] }} -{% endfor %} - keyspace: caliopen - consistency_level: 1 - raw_size_limit: 1048576 # max size in bytes for objects in db. Use S3 interface if larger. - object_store: s3 - object_store_settings: - endpoint: {{ hostvars['object_store1']['backend_ip'] }}:9000 - access_key: {{ object_store_access_key }} - secret_key: {{ object_store_secret_key }} - location: eu-fr-paris # S3 region. - buckets: - raw_messages: caliopen-raw-messages # bucket name to put raw messages to - temporary_attachments: caliopen-tmp-attachments # bucket name to store draft attachments - use_vault: true - vault_settings: - url: https://vault.{{ caliopen_domain_name }}:8200 - username: {{ vault_worker_username }} - password: {{ vault_worker_password }} - LDAConfig: - broker_type: twitter # types are : smtp, imap, mailboxe, etc. 
- #index facility - index_name: elasticsearch # backend to index messages (inbound & outbound) - index_settings: - urls: # many allowed -{% for host in groups['store'] %} - - http://{{ hostvars[host]['backend_ip'] }}:9200 -{% endfor %} - #messaging system - in_topic: inboundTwitter - # notifications - NotifierConfig: - admin_username: admin # username on whose behalf notifiers will act. This admin user must have been created before by other means. \ No newline at end of file
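A closing observation on the templates removed here: caliopen-lmtp.yaml.j2, imapworker.yaml.j2 and twitterworker.yaml.j2 repeat the same cassandra / object store / vault stanza, each with its own copy of the Jinja loops over groups['store']. Were these roles to be reintroduced, the shared block could be defined once as a variable and rendered into each template; a rough sketch with a made-up variable name:

    # group_vars/all.yml (hypothetical)
    caliopen_store_settings:
      hosts: "{{ groups['store'] | map('extract', hostvars, 'backend_ip') | list }}"
      keyspace: caliopen
      consistency_level: 1

    # in imapworker.yaml.j2 (and the other templates)
    store_name: cassandra
    store_settings:
    {{ caliopen_store_settings | to_nice_yaml(indent=2) | indent(2, true) }}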