From 44ae5a7dcc7e7257345cffe91e1d29084fe4bc32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arturo=20Filast=C3=B2?= Date: Wed, 17 Apr 2024 16:16:53 +0200 Subject: [PATCH] Remove th rotation script and systemd unit --- analysis/debian/changelog | 6 + analysis/debian/ooni-rotation.service | 11 - analysis/debian/ooni-rotation.timer | 11 - analysis/debian/rules | 1 - analysis/rotation.py | 665 -------------------------- analysis/setup.py | 3 +- analysis/tests/test_rotation.py | 159 ------ 7 files changed, 7 insertions(+), 849 deletions(-) delete mode 100644 analysis/debian/ooni-rotation.service delete mode 100644 analysis/debian/ooni-rotation.timer delete mode 100644 analysis/rotation.py delete mode 100644 analysis/tests/test_rotation.py diff --git a/analysis/debian/changelog b/analysis/debian/changelog index 7c23a151..07e03551 100644 --- a/analysis/debian/changelog +++ b/analysis/debian/changelog @@ -1,3 +1,9 @@ +analysis (1.11) unstable; urgency=medium + + * Drop th rotation script + + -- Arturo Wed, 17 Apr 2024 16:16:16 +0100 + analysis (1.10) unstable; urgency=medium * Install certs for Vecton in the TH rotation script diff --git a/analysis/debian/ooni-rotation.service b/analysis/debian/ooni-rotation.service deleted file mode 100644 index f6096232..00000000 --- a/analysis/debian/ooni-rotation.service +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Rotate test helpers -Wants=ooni-rotation.timer - -[Service] -Type=oneshot -ExecStart=/usr/bin/rotation - -[Install] -WantedBy=multi-user.target - diff --git a/analysis/debian/ooni-rotation.timer b/analysis/debian/ooni-rotation.timer deleted file mode 100644 index 4c7226ac..00000000 --- a/analysis/debian/ooni-rotation.timer +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Rotate test helpers -Requires=ooni-rotation.service - -[Timer] -Unit=ooni-rotation.service -OnCalendar=Mon *-*-* 15:00:00 -Persistent=true - -[Install] -WantedBy=timers.target diff --git a/analysis/debian/rules b/analysis/debian/rules index 
f315cb71..00b3fe2a 100755 --- a/analysis/debian/rules +++ b/analysis/debian/rules @@ -7,7 +7,6 @@ export DH_VERBOSE = 1 override_dh_installsystemd: dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-clickhouse-feeder dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-db-backup - dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-rotation dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-update-asn-metadata dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-update-citizenlab dh_installsystemd --no-start --no-restart-on-upgrade --name ooni-update-counters diff --git a/analysis/rotation.py b/analysis/rotation.py deleted file mode 100644 index 0eb1676c..00000000 --- a/analysis/rotation.py +++ /dev/null @@ -1,665 +0,0 @@ -#!/usr/bin/env python3 -""" -Spaws and rotates hosts on Digital Ocean. -- Select datacenters and spawn VMs -- Runs a setup script on the host at first boot -- Keeps a list of live and old hosts in a dedicated db table -- Create SSL certificates using the Digital Ocean API -- Performs end-to-end test on newly created VMs -- Update DNS to publish new services -- Drain and destroy old VMs - -Reads /etc/ooni/rotation.conf -Runs as a SystemD timer. 
- -Interfaces, APIs and stateful contents: - - Digital Ocean API: DNS A/AAAA records - - Digital Ocean API: Live droplets - - test_helper_instances database table - - certbot certificates stored on local host and pushed to the test hepers - -Table setup: - -CREATE TABLE test_helper_instances -( - `rdn` String, - `dns_zone` String, - `name` String, - `provider` String, - `region` String, - `ipaddr` IPv4, - `ipv6addr` IPv6, - `draining_at` Nullable(DateTime('UTC')), - `destroyed_at` Nullable(DateTime('UTC')), - `sign` Int8 -) -ENGINE = CollapsingMergeTree(sign) -ORDER BY name - - -Example for /etc/ooni/rotation.conf --- -[DEFAULT] -token = CHANGEME -active_droplets_count = 4 -size_slug = s-1vcpu-1gb -image_name = debian-10-x64 -draining_time_minutes = 240 -dns_zone = th.ooni.org --- - -Example for /etc/ooni/rotation_setup.sh --- -#!/bin/bash -# Configure test-helper droplet -# This script is run as root and with CWD=/ -set -euo pipefail -exec 1>setup.log 2>&1 -echo "deb [trusted=yes] https://ooni-internal-deb.s3.eu-central-1.amazonaws.com unstable main" > /etc/apt/sources.list.d/ooni.list -apt-key adv --verbose --keyserver hkp://keyserver.ubuntu.com --recv-keys 'B5A08F01796E7F521861B449372D1FF271F2DD50' -apt-get update -apt-get upgrade -qy -echo > /etc/motd -apt-get install -qy oohelperd -apt-get install -qy oohelperd nginx-light --- - -Example for /etc/ooni/certbot-digitalocean ---- -dns_digitalocean_token = CHANGEME ---- -""" - -from configparser import ConfigParser -from datetime import datetime -from functools import wraps -from subprocess import check_output -from pathlib import Path -from ipaddress import IPv4Address as IP4a -import logging -import random -import sys -import time - -# debdeps: python3-clickhouse-driver -from clickhouse_driver import Client as Clickhouse -import statsd # debdeps: python3-statsd -import digitalocean # debdeps: python3-digitalocean -import requests # debdeps: python3-requests - -metrics = statsd.StatsClient("127.0.0.1", 8125, 
prefix="rotation") - -log = logging.getLogger("rotation") -log.addHandler(logging.StreamHandler()) # Writes to console -log.setLevel(logging.DEBUG) - -conffile_path = "/etc/ooni/rotation.conf" -setup_script_path = "/etc/ooni/rotation_setup.sh" -nginx_conf = "/etc/ooni/rotation_nginx_conf" -certbot_creds = "/etc/ooni/certbot-digitalocean" -TAG = "roaming-th" -ssh_cmd_base = [ - "ssh", - "-o", - "StrictHostKeyChecking=no", - "-o", - "UserKnownHostsFile=/dev/null", - "-o", - "ConnectTimeout=10", - "-o", - "BatchMode=yes", - "-i", - "/etc/ooni/testhelper_ssh_key", -] - - -def retry(func): - @wraps(func) - def wrapped(*args, **kwargs): - for attempt in range(5): - try: - return func(*args, **kwargs) - except Exception as e: - log.info(f"Catched {e} - retrying") - time.sleep((attempt + 1) * 5) - return func(*args, **kwargs) - - return wrapped - - -# # Database helpers - - -def insert(click, table: str, cols: dict) -> None: - cs = ", ".join(sorted(cols.keys())) - q = f"INSERT INTO {table} ({cs}) VALUES" - log.info(q) - click.execute(q, [cols]) - - -def optimize_table(click, tblname: str) -> None: - q = f"OPTIMIZE TABLE {tblname} FINAL" - log.info(q) - click.execute(q) - - -def add_droplet_to_db_table(click, dr, rdn, dns_zone) -> None: - cols = dict( - dns_zone=dns_zone, - ipaddr=dr.ip_address, - ipv6addr=dr.ip_v6_address, - name=dr.name, - provider="Digital Ocean", - rdn=rdn, - region=dr.region["slug"], - sign=1, - ) - insert(click, "test_helper_instances", cols) - - -def drain_droplet_in_db_table(click, now, dr, rdn: str, dns_zone: str) -> None: - cols = dict( - dns_zone=dns_zone, - ipaddr=dr.ip_address, - ipv6addr=dr.ip_v6_address, - name=dr.name, - provider="Digital Ocean", - rdn=rdn, - region=dr.region["slug"], - sign=-1, - ) - insert(click, "test_helper_instances", cols.copy()) - - cols["draining_at"] = now - cols["sign"] = 1 - insert(click, "test_helper_instances", cols) - optimize_table(click, "test_helper_instances") - - -def destroy_droplet_in_db_table( - click, 
dr, rdn: str, draining_at, now, dns_zone: str -) -> None: - # NOTE: doing updates using 1/-1 signs is scalable on multiple hosts but - # error-prone - cols = dict( - dns_zone=dns_zone, - draining_at=draining_at, - ipaddr=dr.ip_address, - ipv6addr=dr.ip_v6_address, - name=dr.name, - provider="Digital Ocean", - rdn=rdn, - region=dr.region["slug"], - sign=-1, - ) - insert(click, "test_helper_instances", cols.copy()) - - cols["destroyed_at"] = now - cols["sign"] = 1 - insert(click, "test_helper_instances", cols) - optimize_table(click, "test_helper_instances") - - -# # End - - -@metrics.timer("destroy_drained_droplets") -def destroy_drained_droplets( - click, draining_time_minutes: int, live_droplets: list, dns_zone: str -) -> None: - # Pick droplets to be destroyed - q = """SELECT name, rdn, draining_at FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NOT NULL - AND destroyed_at IS NULL - AND draining_at < NOW() - interval %(mins)s minute - ORDER BY draining_at - """ - log.info(q) - rows = click.execute(q, dict(dns_zone=dns_zone, mins=draining_time_minutes)) - if not rows: - log.info("No droplet to destroy") - return - - for name, rdn, draining_at in rows: - to_delete = [d for d in live_droplets if d.name == name] - if not to_delete: - log.error(f"{name} found in database but not found on Digital Ocean") - continue - - for droplet in to_delete: - log.info(f"Destroying {droplet.name} droplet") - now = datetime.utcnow() - destroy_droplet_in_db_table(click, droplet, rdn, draining_at, now, dns_zone) - droplet.destroy() - - -def pick_regions(api, live_regions: set) -> list: - """Pick regions that are available and have no droplet yet""" - available_regions = set( - r.slug for r in api.get_all_regions() if r.available is True - ) - # Pick only regions with good, unfiltered network connectivity - acceptable_regions = set(("ams3", "fra1", "lon1", "nyc3", "sfo3", "tor1")) - ok_regions = 
acceptable_regions.intersection(available_regions) - best_regions = ok_regions - live_regions - if not len(best_regions): - log.error("No regions available!") - raise Exception("No regions available") - if len(best_regions): - return list(best_regions) - log.info(f"No 'best' region available. Falling back to {ok_regions}") - return list(ok_regions) - - -@metrics.timer("spawn_new_droplet") -def spawn_new_droplet(api, dig_oc_token: str, live_regions, conf): - regions = pick_regions(api, live_regions) - - ssh_keys = api.get_all_sshkeys() - img = conf["image_name"] - assert img - size_slug = conf["size_slug"] - assert size_slug - with open(setup_script_path) as f: - user_data = f.read() - for attempt in range(20): - name = datetime.utcnow().strftime("roaming-th-%Y%m%d%H%M%S") - region = random.choice(regions) - log.info(f"Trying to spawn {name} in {region}") - try: - droplet = digitalocean.Droplet( - backups=False, - image=img, - name=name, - region=region, - size_slug=size_slug, - ssh_keys=ssh_keys, - token=dig_oc_token, - user_data=user_data, - ipv6=True, - tags=[ - TAG, - ], - ) - droplet.create() - break - except digitalocean.baseapi.DataReadError as e: - log.info(f"Unable to spawn {e}") - time.sleep(1) - - log.info(f"Spawning {name} in {region}") - timeout = time.time() + 60 * 20 - while time.time() < timeout: - time.sleep(5) - for action in droplet.get_actions(): - action.load() - if action.status == "completed": - log.info("Spawning completed, waiting warmup") - new_droplet = api.get_droplet(droplet.id) - if new_droplet.ip_address: - ssh_wait_droplet_warmup(new_droplet.ip_address) - return new_droplet - - log.debug("Waiting for droplet to start") - - log.error("Timed out waiting for droplet start") - raise Exception - - -# sz=api.get_all_sizes() -# sorted((s.price_monthly, s.slug, s.memory) for s in sz) -# imgs = api.get_all_images() -# [(i.name, i.slug) for i in imgs if i.distribution == "Debian"] - - -def load_conf(): - cp = ConfigParser() - with 
open(conffile_path) as f: - cp.read_file(f) - return cp["DEFAULT"] - - -def list_active_droplets(click, live_droplets, dns_zone: str): - q = """SELECT name, rdn FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NULL - """ - log.info(q) - rows = click.execute(q, dict(dns_zone=dns_zone)) - log.info(rows) - active = set(row[0] for row in rows) - active_droplets = [d for d in live_droplets if d.name in active] - return active_droplets, rows - - -def drain_droplet(click, dns_zone, active_droplets: list, rows: list) -> str: - by_age = sorted(active_droplets, key=lambda d: d.created_at) - oldest = by_age[0] - rdn = [row[1] for row in rows if row[0] == oldest.name][0] - log.info(f"Draining {oldest.name} {rdn}.{dns_zone} droplet") - now = datetime.utcnow() - drain_droplet_in_db_table(click, now, oldest, rdn, dns_zone) - return rdn - - -@metrics.timer("create_le_do_ssl_cert") -def create_le_do_ssl_cert(dns_zone: str) -> None: - """Create/renew Let's Encrypt SSL Certificate through the Digital Ocean API - - Namecheap DNS entry to delegate to DO: - th.ooni.org - NS ns1.digitalocean.com - NS ns2.digitalocean.com - NS ns3.digitalocean.com - - On Digital Ocean, creates a wildcard cert for *.th.ooni.org using DNS - """ - # TODO create_le_do_ssl_cert only if needed - # debdeps: certbot python3-certbot-dns-digitalocean - cmd = [ - "certbot", - "certonly", - "--dns-digitalocean", - "--dns-digitalocean-credentials", - certbot_creds, - "-d", - f"*.{dns_zone}", - "-n", - ] - log.info(f"Creating/refreshing wildcard certificate *.{dns_zone}") - log.info(" ".join(cmd)) - check_output(cmd) - - -@metrics.timer("scp_file") -@retry -def scp_file(local_fn: str, host: str, remote_fn: str) -> None: - """SCP file""" - assert remote_fn.startswith("/") - dest = f"{host}:{remote_fn}" - cmd = [ - "scp", - "-o", - "StrictHostKeyChecking=accept-new", - "-o", - "ConnectTimeout=10", - "-i", - "/etc/ooni/testhelper_ssh_key", - "-Bpr", - 
local_fn, - dest, - ] - log.info("Copying file") - log.info(" ".join(cmd)) - check_output(cmd) - - -def ssh_restart_service(host: str, service_name: str) -> None: - cmd = ssh_cmd_base + [ - f"{host}", - "systemctl", - "restart", - service_name, - ] - log.info(f"Restarting {service_name}") - log.info(" ".join(cmd)) - check_output(cmd) - - -@metrics.timer("ssh_restart_nginx") -@retry -def ssh_restart_nginx(host: str) -> None: - ssh_restart_service(host, "nginx") - - -@metrics.timer("ssh_restart_netdata") -@retry -def ssh_restart_netdata(host: str) -> None: - ssh_restart_service(host, "netdata") - - -@metrics.timer("ssh_restart_vector") -@retry -def ssh_restart_vector(host: str) -> None: - ssh_restart_service(host, "vector.service") - - -@metrics.timer("ssh_wait_droplet_warmup") -def ssh_wait_droplet_warmup(ipaddr: str) -> None: - cmd = ssh_cmd_base + [ - f"root@{ipaddr}", - "cat", - "/var/run/rotation_setup_completed", - ] - timeout = time.time() + 60 * 15 - while time.time() < timeout: - log.info("Checking flag") - log.info(" ".join(cmd)) - try: - check_output(cmd) - log.info("Flag file found") - return - except Exception: - log.debug("Flag file not found") - time.sleep(5) - - log.error("Timed out waiting for droplet start") - raise Exception - - -def delete_dns_record(api, zone: str, name: str, ip_address, rtype, dig_oc_token=None): - assert not name.endswith(zone) - records = api.get_records() - for r in records: - if r.domain == zone and r.data == ip_address and r.type == rtype: - log.info(f"Deleting {r.type} {r.zone} {r.name} {r.data}") - r.destroy() - return - - log.error(f"{name} {ip_address} not found") - - -def update_or_create_dns_record( - api, zone: str, name: str, rtype: str, ip_address, records -) -> None: - ip_address = str(ip_address) - x = [r for r in records if r.name == name and r.type == rtype] - if x: - x = x[0] - url = f"domains/{zone}/records/{x.id}" # type: ignore - changes = dict(data=ip_address) - m = f"Updating existing DNS record {x.id} 
{rtype} {name} {zone} {ip_address}" - log.info(m) - api.get_data(url, type=digitalocean.baseapi.PUT, params=changes) - return - - log.info(f"Creating DNS record {rtype} {name} {zone} {ip_address}") - api.create_new_domain_record(type=rtype, name=name, data=ip_address, ttl=60) - - -def update_or_create_dns_records(dig_oc_token: str, zone: str, vals: list) -> None: - """Upsert A/AAAA records in Digital Ocean - vals: [(n, ip_address, ip_v6_address), ... ] - """ - api = digitalocean.Domain(name=zone, token=dig_oc_token) - records = api.get_records() - # An initial filtering for safety - records = [r for r in records if r.domain == zone and r.type in ("A", "AAAA")] - for n, ip_address, ip_v6_address in vals: - name = str(n) - # pick the existing record to be updated, if any: - update_or_create_dns_record(api, zone, name, "A", ip_address, records) - # Same for IPv6 - update_or_create_dns_record(api, zone, name, "AAAA", ip_v6_address, records) - - -@metrics.timer("update_dns_records") -def update_dns_records(click, dig_oc_token: str, dns_zone: str, droplets) -> None: - q = """SELECT rdn, ipaddr, ipv6addr FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NULL - ORDER BY name - """ - log.info(q) - rows = click.execute(q, dict(dns_zone=dns_zone)) - log.info(rows) - update_or_create_dns_records(dig_oc_token, dns_zone, rows) - - -@metrics.timer("setup_nginx") -def setup_nginx(host: str, zone: str) -> None: - """Deploy TLS certificates, configure Nginx and [re]start it. - Then restart Netdata to enable Nginx monitoring. 
- """ - cert_fname = f"/etc/letsencrypt/live/{zone}/fullchain.pem" - privkey_fname = f"/etc/letsencrypt/live/{zone}/privkey.pem" - scp_file(cert_fname, host, "/etc/ssl/private/th_fullchain.pem") - scp_file(privkey_fname, host, "/etc/ssl/private/th_privkey.pem") - scp_file(nginx_conf, host, "/etc/nginx/sites-enabled/default") - ssh_restart_nginx(host) - ssh_restart_netdata(host) - - -@metrics.timer("setup_vector") -def setup_vector(host: str) -> None: - """Deploy TLS certificates, configure Vector and [re]start it.""" - fns = [ - "/etc/vector/oonicacert.pem", - "/etc/vector/node-cert.pem", - "/etc/vector/node.key", - ] - for fn in fns: - scp_file(fn, host, fn) - ssh_restart_vector(host) - - -def assign_rdn(click, dns_zone: str, wanted_droplet_num: int) -> str: - q = """SELECT rdn FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NULL - """ - log.info(q) - rows = click.execute(q, dict(dns_zone=dns_zone)) - log.info(rows) - in_use = set(r[0] for r in rows) - log.info(f"In use RDNs: {in_use}") - for n in range(wanted_droplet_num): - rdn = str(n) - if rdn not in in_use: - log.info(f"Selected RDN {rdn}") - return rdn - - raise Exception("Unable to pick an RDN") - - -@metrics.timer("end_to_end_test") -def end_to_end_test(ipaddr: IP4a, fqdn: str) -> None: - # Test the new TH with real traffic - j = { - "http_request": "https://google.com/", - "http_request_headers": {}, - "tcp_connect": ["8.8.8.8:443"], - } - hdr = {"Host": fqdn, "Pragma": "no-cache"} - log.info(f"Testing TH on {fqdn} using http_request and tcp_connect") - r = requests.post(f"https://{ipaddr}", headers=hdr, verify=False, json=j) - keys = set(r.json().keys()) - expected = {"dns", "http_request", "tcp_connect"} - if r.ok and expected.issubset(keys): - # This is true when keys >= expected - log.info(f"End-to-end test successful") - return - - log.error(f"Failed end to end test: {r.status_code}\nHeaders: {r.headers}") - log.error(f"Body: 
{r.text}") - raise Exception("End to end test failed") - - -def list_regions_with_live_droplets(click, dns_zone: str) -> set[str]: - q = """SELECT DISTINCT(region) FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NULL - """ - log.info(q) - rows = click.execute(q, dict(dns_zone=dns_zone)) - return set(r[0] for r in rows) - - -@metrics.timer("run_time") -def main() -> None: - conf = load_conf() - - dig_oc_token = conf["token"] - assert dig_oc_token - assert len(dig_oc_token) == 64 - draining_time_minutes = int(conf["draining_time_minutes"]) - assert draining_time_minutes >= 1 - - wanted_droplet_num = int(conf["active_droplets_count"]) - assert 0 <= wanted_droplet_num < 20 - assert Path(setup_script_path).is_file() - assert Path(certbot_creds).is_file() - assert Path(nginx_conf).is_file() - dns_zone: str = conf["dns_zone"].strip(".") - assert dns_zone - - click = Clickhouse("localhost", user="rotation") - api = digitalocean.Manager(token=dig_oc_token) - # Fetch all test-helper droplets - droplets = api.get_all_droplets(tag_name=TAG) - for d in droplets: - assert TAG in d.tags - # Naming: live_droplets - all VMs running on Digital Ocean in active status - # active_droplets - live_droplets without the ones being drained - live_droplets = [d for d in droplets if d.status == "active"] - active_droplets, rows = list_active_droplets(click, live_droplets, dns_zone) - log.info(f"{len(droplets)} droplets") - log.info(f"{len(live_droplets)} live droplets") - log.info(f"{len(active_droplets)} active droplets") - - # Avoid failure modes where we destroy all VMs or create unlimited amounts - # or churn too quickly - destroy_drained_droplets(click, draining_time_minutes, live_droplets, dns_zone) - - if len(droplets) > wanted_droplet_num + 5: - log.error("Unexpected amount of running droplets") - sys.exit(1) - - if len(active_droplets) >= wanted_droplet_num: - # Drain one droplet if needed - rdn = 
drain_droplet(click, dns_zone, active_droplets, rows) - - else: - # special case: creating a new rdn not seen before - rdn = assign_rdn(click, dns_zone, wanted_droplet_num) - - # Spawn a new droplet - log.info(f"Spawning droplet be become {rdn}.{dns_zone}") - live_regions = list_regions_with_live_droplets(click, dns_zone) - new_droplet = spawn_new_droplet(api, dig_oc_token, live_regions, conf) - log.info(f"Droplet {new_droplet.name} ready at {new_droplet.ip_address}") - add_droplet_to_db_table(click, new_droplet, rdn, dns_zone) - live_droplets.append(new_droplet) - - create_le_do_ssl_cert(dns_zone) - setup_nginx(f"root@{new_droplet.ip_address}", dns_zone) - setup_vector(f"root@{new_droplet.ip_address}") - end_to_end_test(new_droplet.ip_address, f"{rdn}.{dns_zone}") - - # Update DNS A/AAAA records only when a new droplet is deployed - update_dns_records(click, dig_oc_token, dns_zone, live_droplets) - - -if __name__ == "__main__": - main() diff --git a/analysis/setup.py b/analysis/setup.py index e6953d06..8a069be7 100644 --- a/analysis/setup.py +++ b/analysis/setup.py @@ -8,13 +8,12 @@ setup( name=NAME, python_requires=">=3.7.0", - py_modules=["rotation", "ooni_db_backup"], + py_modules=["ooni_db_backup"], packages=["analysis"], entry_points={ "console_scripts": [ "analysis=analysis.analysis:main", "ooni-db-backup=ooni_db_backup:main", - "rotation=rotation:main", ] }, scripts=["analysis/clickhouse_feeder.py"], diff --git a/analysis/tests/test_rotation.py b/analysis/tests/test_rotation.py deleted file mode 100644 index 39ad4c50..00000000 --- a/analysis/tests/test_rotation.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -Test helper rotation - basic functional testing -""" - -import datetime -from unittest.mock import Mock, create_autospec, call - -import pytest # debdeps: python3-pytest - -import rotation as ro - - -@pytest.fixture(autouse=True, scope="session") -def mock_everything(): - ro.Clickhouse = create_autospec(ro.Clickhouse) - ro.digitalocean = 
create_autospec(ro.digitalocean) - ro.requests = Mock() - ro.metrics = Mock() - ro.datetime = create_autospec(ro.datetime) - ro.datetime.utcnow = Mock(return_value=datetime.datetime(2000, 1, 1)) - - -class Droplet: - region = dict(slug="slug") - ip_address = "1.2.3.4" - ip_v6_address = "::9" - name = "foo_name" - - -@pytest.fixture -def dr(): - return Droplet() - - -def test_rotation_db_list(): - click = ro.Clickhouse("localhost", user="rotation") - d = Mock() - active_droplets, rows = ro.list_active_droplets(click, [d], "foo") - click.execute.assert_called() - click.execute.assert_called_with( - """SELECT name, rdn FROM test_helper_instances - FINAL - WHERE provider = 'Digital Ocean' - AND dns_zone = %(dns_zone)s - AND draining_at IS NULL - """, - {"dns_zone": "foo"}, - ), click.execute.call_args_list - - -def test_rotation_db_add(dr): - click = ro.Clickhouse("localhost", user="rotation") - click.execute.reset_mock() - ro.add_droplet_to_db_table(click, dr, "0", "foo") - exp = [ - call( - "INSERT INTO test_helper_instances (dns_zone, ipaddr, ipv6addr, name, provider, rdn, region, sign) VALUES", - [ - { - "dns_zone": "foo", - "rdn": "0", - "ipaddr": "1.2.3.4", - "ipv6addr": "::9", - "name": "foo_name", - "provider": "Digital Ocean", - "region": "slug", - "sign": 1, - } - ], - ) - ] - assert click.execute.call_args_list == exp, click.execute.call_args_list - - -def test_rotation_db_drain(dr): - click = ro.Clickhouse("localhost", user="rotation") - click.execute.reset_mock() - now = datetime.datetime(2000, 1, 1, 0, 0) - ro.drain_droplet_in_db_table(click, now, dr, "0", "foo") - exp = [ - call( - "INSERT INTO test_helper_instances (dns_zone, ipaddr, ipv6addr, name, provider, rdn, region, sign) VALUES", - [ - { - "dns_zone": "foo", - "rdn": "0", - "ipaddr": "1.2.3.4", - "ipv6addr": "::9", - "name": "foo_name", - "provider": "Digital Ocean", - "region": "slug", - "sign": -1, - } - ], - ), - call( - "INSERT INTO test_helper_instances (dns_zone, draining_at, ipaddr, 
ipv6addr, name, provider, rdn, region, sign) VALUES", - [ - { - "dns_zone": "foo", - "rdn": "0", - "ipaddr": "1.2.3.4", - "ipv6addr": "::9", - "name": "foo_name", - "provider": "Digital Ocean", - "region": "slug", - "sign": 1, - "draining_at": datetime.datetime(2000, 1, 1, 0, 0), - } - ], - ), - call("OPTIMIZE TABLE test_helper_instances FINAL"), - ] - assert click.execute.call_args_list == exp, click.execute.call_args_list - - -def test_rotation_db_destroy(dr): - click = ro.Clickhouse("localhost", user="rotation") - click.execute.reset_mock() - drain_t = datetime.datetime(2000, 1, 1, 0, 0) - destroy_t = datetime.datetime(2000, 2, 2, 0, 0) - ro.destroy_droplet_in_db_table(click, dr, "0", drain_t, destroy_t, "foo") - exp = [ - call( - "INSERT INTO test_helper_instances (dns_zone, draining_at, ipaddr, ipv6addr, name, provider, rdn, region, sign) VALUES", - [ - { - "dns_zone": "foo", - "draining_at": drain_t, - "ipaddr": "1.2.3.4", - "ipv6addr": "::9", - "name": "foo_name", - "provider": "Digital Ocean", - "rdn": "0", - "region": "slug", - "sign": -1, - } - ], - ), - call( - "INSERT INTO test_helper_instances (destroyed_at, dns_zone, draining_at, ipaddr, ipv6addr, name, provider, rdn, region, sign) VALUES", - [ - { - "dns_zone": "foo", - "draining_at": drain_t, - "ipaddr": "1.2.3.4", - "ipv6addr": "::9", - "name": "foo_name", - "provider": "Digital Ocean", - "rdn": "0", - "region": "slug", - "sign": 1, - "destroyed_at": destroy_t, - } - ], - ), - call("OPTIMIZE TABLE test_helper_instances FINAL"), - ] - assert click.execute.call_args_list == exp, click.execute.call_args_list